Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-29 11:02:08 +00:00

Commit d2635bff64: Merge remote-tracking branch 'origin/master' into ADQM-868

diff --git a/.github/workflows/woboq.yml b/.github/workflows/woboq.yml
@@ -12,6 +12,7 @@ jobs:
   # don't use dockerhub push because this image updates so rarely
   WoboqCodebrowser:
     runs-on: [self-hosted, style-checker]
+    timeout-minutes: 420 # the task is pretty heavy, so there's an additional hour
     steps:
       - name: Set envs
         run: |
diff --git a/README.md b/README.md
@@ -22,12 +22,13 @@ curl https://clickhouse.com/ | sh
 
 ## Upcoming Events
 
-* [**v23.5 Release Webinar**](https://clickhouse.com/company/events/v23-5-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-05) - Jun 8 - 23.5 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
-* [**ClickHouse Meetup in Bangalore**](https://www.meetup.com/clickhouse-bangalore-user-group/events/293740066/) - Jun 7
-* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
+* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4
+* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
+* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
+* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
 
-Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
+Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
 
 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
@@ -2,6 +2,7 @@
 
 #include <cstdint>
 #include <string>
+#include <array>
 
 #if defined(__SSE2__)
 #include <emmintrin.h>
@@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio)
     assert(min > 0 && ratio >= 0 && ratio <= 1);
     return min * std::pow(max / min, ratio);
 }
+
+constexpr double interpolateLinear(double min, double max, double ratio)
+{
+    return std::lerp(min, max, ratio);
+}
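For context on the hunk above: `interpolateExponential` moves between `min` and `max` geometrically, while the newly added `interpolateLinear` delegates to `std::lerp` and moves arithmetically. A quick worked comparison with illustrative values:

    interpolateExponential(1, 16, 0.5) = 1 * (16 / 1)^0.5   = 4
    interpolateLinear(1, 16, 0.5)      = 1 + (16 - 1) * 0.5 = 8.5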
@@ -116,43 +116,79 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A
 # ARROW_ORC + adapters/orc/CMakefiles
 set(ORC_SRCS
         "${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.h"
-        "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc"
-        "${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc"
-        "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc"
-        "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc"
-        "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc"
-        "${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc"
-        "${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
-        "${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
-        "${ORC_SOURCE_SRC_DIR}/Reader.cc"
+        "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
+        "${ORC_SOURCE_SRC_DIR}/Adaptor.cc"
+        "${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in"
+        "${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc"
+        "${ORC_SOURCE_SRC_DIR}/BlockBuffer.hh"
+        "${ORC_SOURCE_SRC_DIR}/BloomFilter.cc"
+        "${ORC_SOURCE_SRC_DIR}/BloomFilter.hh"
+        "${ORC_SOURCE_SRC_DIR}/Bpacking.hh"
+        "${ORC_SOURCE_SRC_DIR}/BpackingDefault.cc"
+        "${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh"
         "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc"
+        "${ORC_SOURCE_SRC_DIR}/ByteRLE.hh"
+        "${ORC_SOURCE_SRC_DIR}/CMakeLists.txt"
         "${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc"
         "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc"
+        "${ORC_SOURCE_SRC_DIR}/ColumnReader.hh"
         "${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc"
+        "${ORC_SOURCE_SRC_DIR}/ColumnWriter.hh"
         "${ORC_SOURCE_SRC_DIR}/Common.cc"
         "${ORC_SOURCE_SRC_DIR}/Compression.cc"
+        "${ORC_SOURCE_SRC_DIR}/Compression.hh"
+        "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.cc"
+        "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.hh"
+        "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.cc"
+        "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.hh"
+        "${ORC_SOURCE_SRC_DIR}/Dispatch.hh"
+        "${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
         "${ORC_SOURCE_SRC_DIR}/Int128.cc"
         "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc"
+        "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.hh"
         "${ORC_SOURCE_SRC_DIR}/MemoryPool.cc"
+        "${ORC_SOURCE_SRC_DIR}/Murmur3.cc"
+        "${ORC_SOURCE_SRC_DIR}/Murmur3.hh"
+        "${ORC_SOURCE_SRC_DIR}/Options.hh"
+        "${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
         "${ORC_SOURCE_SRC_DIR}/RLE.cc"
+        "${ORC_SOURCE_SRC_DIR}/RLE.hh"
+        "${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc"
+        "${ORC_SOURCE_SRC_DIR}/RLEV2Util.hh"
         "${ORC_SOURCE_SRC_DIR}/RLEv1.cc"
+        "${ORC_SOURCE_SRC_DIR}/RLEv1.hh"
+        "${ORC_SOURCE_SRC_DIR}/RLEv2.hh"
+        "${ORC_SOURCE_SRC_DIR}/Reader.cc"
+        "${ORC_SOURCE_SRC_DIR}/Reader.hh"
         "${ORC_SOURCE_SRC_DIR}/RleDecoderV2.cc"
         "${ORC_SOURCE_SRC_DIR}/RleEncoderV2.cc"
-        "${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc"
+        "${ORC_SOURCE_SRC_DIR}/SchemaEvolution.cc"
+        "${ORC_SOURCE_SRC_DIR}/SchemaEvolution.hh"
         "${ORC_SOURCE_SRC_DIR}/Statistics.cc"
+        "${ORC_SOURCE_SRC_DIR}/Statistics.hh"
         "${ORC_SOURCE_SRC_DIR}/StripeStream.cc"
+        "${ORC_SOURCE_SRC_DIR}/StripeStream.hh"
         "${ORC_SOURCE_SRC_DIR}/Timezone.cc"
+        "${ORC_SOURCE_SRC_DIR}/Timezone.hh"
         "${ORC_SOURCE_SRC_DIR}/TypeImpl.cc"
+        "${ORC_SOURCE_SRC_DIR}/TypeImpl.hh"
+        "${ORC_SOURCE_SRC_DIR}/Utils.hh"
         "${ORC_SOURCE_SRC_DIR}/Vector.cc"
         "${ORC_SOURCE_SRC_DIR}/Writer.cc"
-        "${ORC_SOURCE_SRC_DIR}/Adaptor.cc"
-        "${ORC_SOURCE_SRC_DIR}/BloomFilter.cc"
-        "${ORC_SOURCE_SRC_DIR}/Murmur3.cc"
-        "${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc"
-        "${ORC_SOURCE_SRC_DIR}/wrap/orc-proto-wrapper.cc"
         "${ORC_SOURCE_SRC_DIR}/io/InputStream.cc"
+        "${ORC_SOURCE_SRC_DIR}/io/InputStream.hh"
         "${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc"
-        "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
+        "${ORC_SOURCE_SRC_DIR}/io/OutputStream.hh"
+        "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc"
+        "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.hh"
+        "${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc"
+        "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc"
+        "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.hh"
+        "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc"
+        "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.hh"
+        "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc"
+        "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.hh"
+        "${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc"
 )
 
 add_library(_orc ${ORC_SRCS})
@@ -4,7 +4,7 @@ if (SANITIZE OR NOT (
 ))
     if (ENABLE_JEMALLOC)
         message (${RECONFIGURE_MESSAGE_LEVEL}
-                 "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds.")
+                 "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds. Use -DENABLE_JEMALLOC=0")
     endif ()
     set (ENABLE_JEMALLOC OFF)
 else ()
diff --git a/contrib/orc b/contrib/orc
@@ -1 +1 @@
-Subproject commit c5d7755ba0b9a95631c8daea4d094101f26ec761
+Subproject commit 568d1d60c250af1890f226c182bc15bd8cc94cf1
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 esac
 
 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
 ARG PACKAGES="clickhouse-keeper"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image
@@ -16,7 +16,6 @@ def process_result(result_folder):
         "TLPGroupBy",
         "TLPHaving",
         "TLPWhere",
-        "TLPWhereGroupBy",
         "NoREC",
     ]
     failed_tests = []
@@ -33,7 +33,7 @@ cd /workspace
 
 for _ in $(seq 1 60); do if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]; then break ; else sleep 1; fi ; done
 
-cd /sqlancer/sqlancer-master
+cd /sqlancer/sqlancer-main
 
 TIMEOUT=300
 NUM_QUERIES=1000
@@ -59,6 +59,8 @@ install_packages previous_release_package_folder
 # available for dump via clickhouse-local
 configure
 
+# it contains some new settings, but we can safely remove it
+rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
 
 start
@@ -85,6 +87,8 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1
 export ZOOKEEPER_FAULT_INJECTION=0
 configure
 
+# it contains some new settings, but we can safely remove it
+rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
 
 start
@@ -115,6 +119,13 @@ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/c
 install_packages package_folder
 export ZOOKEEPER_FAULT_INJECTION=1
 configure
+
+# Just in case previous version left some garbage in zk
+sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \
+  | sed "s|>1<|>0<|g" \
+  > /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml
+
 start 500
 clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
     || (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
diff --git a/docs/changelogs/v22.8.19.10-lts.md b/docs/changelogs/v22.8.19.10-lts.md
new file
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v22.8.19.10-lts (989bc2fe8b0) FIXME as compared to v22.8.18.31-lts (4de7a95a544)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/changelogs/v23.3.4.17-lts.md b/docs/changelogs/v23.3.4.17-lts.md
new file
@@ -0,0 +1,22 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.3.4.17-lts (2c99b73ff40) FIXME as compared to v23.3.3.52-lts (cb963c474db)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
+* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/changelogs/v23.3.5.9-lts.md b/docs/changelogs/v23.3.5.9-lts.md
new file
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.3.5.9-lts (f5fbc2fd2b3) FIXME as compared to v23.3.4.17-lts (2c99b73ff40)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)).
+* Cleanup moving parts [#50489](https://github.com/ClickHouse/ClickHouse/pull/50489) ([vdimir](https://github.com/vdimir)).
+* Do not apply projection if read-in-order was enabled. [#50923](https://github.com/ClickHouse/ClickHouse/pull/50923) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Increase max array size in group bitmap [#50620](https://github.com/ClickHouse/ClickHouse/pull/50620) ([Kruglov Pavel](https://github.com/Avogar)).
+
diff --git a/docs/changelogs/v23.4.4.16-stable.md b/docs/changelogs/v23.4.4.16-stable.md
new file
@@ -0,0 +1,22 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.4.4.16-stable (747ba4fc6a0) FIXME as compared to v23.4.3.48-stable (d9199f8d3cc)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
+* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/changelogs/v23.5.3.24-stable.md b/docs/changelogs/v23.5.3.24-stable.md
new file
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.5.3.24-stable (76f54616d3b) FIXME as compared to v23.5.2.7-stable (5751aa1ab9f)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)).
+* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)).
+* Revert recent grace hash join changes [#50699](https://github.com/ClickHouse/ClickHouse/pull/50699) ([vdimir](https://github.com/vdimir)).
+* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
+* Add compat setting for non-const timezones [#50834](https://github.com/ClickHouse/ClickHouse/pull/50834) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
@@ -233,6 +233,12 @@ libhdfs3 support HDFS namenode HA.
 - `_path` — Path to the file.
 - `_file` — Name of the file.
 
+## Storage Settings {#storage-settings}
+
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 **See Also**
 
 - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
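As a quick, hedged illustration of the per-query form of these new HDFS settings (the namenode address and path below are hypothetical, not taken from the commit):

```sql
-- skip zero-byte files matched by the glob instead of failing the query
SELECT count()
FROM hdfs('hdfs://namenode:9000/logs/*.tsv', 'TSV', 'line String')
SETTINGS hdfs_skip_empty_files = 1;
```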
@@ -35,6 +35,10 @@ The table structure can differ from the original MySQL table structure:
 - Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
 - The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.
 
+:::note
+The MySQL Table Engine is currently not available on the ClickHouse builds for MacOS ([issue](https://github.com/ClickHouse/ClickHouse/issues/21191))
+:::
+
 **Engine Parameters**
 
 - `host:port` — MySQL server address.
@@ -1,5 +1,5 @@
 ---
-slug: /en/sql-reference/table-functions/redis
+slug: /en/engines/table-engines/integrations/redis
 sidebar_position: 43
 sidebar_label: Redis
 ---
@@ -127,6 +127,12 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
 ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/*', 'CSV');
 ```
 
+## Storage Settings {#storage-settings}
+
+- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 ## S3-related Settings {#settings}
 
 The following settings can be set before query execution or placed into configuration file.
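A sketch of the first of these settings in use (the bucket name is hypothetical and credentials are omitted for brevity):

```sql
-- overwrite the target object on a repeated INSERT instead of raising an error
SET s3_truncate_on_insert = 1;

INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/out/data.csv', 'CSV', 'x UInt32')
SELECT number FROM numbers(10);
```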
@@ -853,7 +853,7 @@ Tags:
 - `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the a size of a merged part estimated to be bigger than `max_data_part_size_bytes` then this part will be written to a next volume. Basically this feature allows to keep new/small parts on a hot (SSD) volume and move them to a cold (HDD) volume when they reach large size. Do not use this setting if your policy has only one volume.
 - `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move on the next volume if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts with the total size that is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved.
 - `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
-- `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default if we insert a data part that already expired by the TTL move rule it immediately goes to a volume/disk declared in move rule. This can significantly slowdown insert in case if destination volume/disk is slow (e.g. S3).
+- `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default (if enabled) if we insert a data part that already expired by the TTL move rule it immediately goes to a volume/disk declared in move rule. This can significantly slowdown insert in case if destination volume/disk is slow (e.g. S3). If disabled then already expired data part is written into a default volume and then right after moved to TTL volume.
 - `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`.
 
 Configuration examples:
@@ -92,3 +92,11 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
 `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
 
 For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+
+## Settings {#settings}
+
+- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
+- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
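A minimal sketch of the first setting, assuming a File-engine table named `file_tbl` (the table name is hypothetical):

```sql
-- return an empty result instead of an error while the backing file does not exist yet
CREATE TABLE file_tbl (x UInt32) ENGINE = File(TSV);

SELECT * FROM file_tbl
SETTINGS engine_file_empty_if_not_exists = 1;
```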
@@ -102,3 +102,7 @@ SELECT * FROM url_engine_table
 `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
 
 For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+
+## Storage Settings {#storage-settings}
+
+- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
@@ -470,6 +470,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
 - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`.
 - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
 - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
+- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.
 
 ## CSVWithNames {#csvwithnames}
 
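A hedged sketch of the new delimiter setting; pairing it with `format_csv_delimiter = ' '` is an assumption here, not something stated in the diff:

```sql
-- parse space-separated inline data as CSV
SELECT *
FROM format(CSV, '1 2\n3 4')
SETTINGS format_csv_delimiter = ' ',
         input_format_csv_allow_whitespace_or_tab_as_delimiter = 1;
```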
@@ -1297,8 +1298,8 @@ For output it uses the following correspondence between ClickHouse types and BSO
 | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x04` array |
 | [Named Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x03` document |
 | [Map](/docs/en/sql-reference/data-types/map.md) | `\x03` document |
-| [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `\x10` int32 |
-| [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `\x05` binary, `\x00` binary subtype |
+| [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `\x10` int32 |
+| [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `\x05` binary, `\x00` binary subtype |
 
 For input it uses the following correspondence between BSON types and ClickHouse types:
 
@@ -1308,7 +1309,7 @@ For input it uses the following correspondence between BSON types and ClickHouse
 | `\x02` string | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
 | `\x03` document | [Map](/docs/en/sql-reference/data-types/map.md)/[Named Tuple](/docs/en/sql-reference/data-types/tuple.md) |
 | `\x04` array | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) |
-| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) |
+| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/ipv6.md) |
 | `\x05` binary, `\x02` old binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
 | `\x05` binary, `\x03` old uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
 | `\x05` binary, `\x04` uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
@@ -1318,7 +1319,7 @@ For input it uses the following correspondence between BSON types and ClickHouse
 | `\x0A` null value | [NULL](/docs/en/sql-reference/data-types/nullable.md) |
 | `\x0D` JavaScript code | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
 | `\x0E` symbol | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
-| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) |
+| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) |
 | `\x12` int64 | [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal64](/docs/en/sql-reference/data-types/decimal.md)/[DateTime64](/docs/en/sql-reference/data-types/datetime64.md) |
 
 Other BSON types are not supported. Also, it performs conversion between different integer types (for example, you can insert BSON int32 value into ClickHouse UInt8).
@@ -1668,8 +1669,8 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `ENUM` | [Enum(8/16)](/docs/en/sql-reference/data-types/enum.md) | `ENUM` |
 | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
 | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
-| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
-| `DATA` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `DATA` |
+| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` |
+| `DATA` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `DATA` |
 | `DATA` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `DATA` |
 | `DATA` | [Decimal128/Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `DATA` |
 | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | [Map](/docs/en/sql-reference/data-types/map.md) | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` |
@@ -1871,19 +1872,19 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `long (timestamp-millis)` \** | [DateTime64(3)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \** |
 | `long (timestamp-micros)` \** | [DateTime64(6)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \** |
 | `bytes (decimal)` \** | [DateTime64(N)](/docs/en/sql-reference/data-types/datetime.md) | `bytes (decimal)` \** |
-| `int` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `int` |
-| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `fixed(16)` |
+| `int` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `int` |
+| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `fixed(16)` |
 | `bytes (decimal)` \** | [Decimal(P, S)](/docs/en/sql-reference/data-types/decimal.md) | `bytes (decimal)` \** |
 | `string (uuid)` \** | [UUID](/docs/en/sql-reference/data-types/uuid.md) | `string (uuid)` \** |
 | `fixed(16)` | [Int128/UInt128](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(16)` |
 | `fixed(32)` | [Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(32)` |
+| `record` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `record` |
 
 
 \* `bytes` is default, controlled by [output_format_avro_string_column_pattern](/docs/en/operations/settings/settings-formats.md/#output_format_avro_string_column_pattern)
 \** [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
 
-Unsupported Avro data types: `record` (non-root), `map`
-
 Unsupported Avro logical data types: `time-millis`, `time-micros`, `duration`
 
 ### Inserting Data {#inserting-data-1}
@@ -1922,7 +1923,26 @@ Output Avro file compression and sync interval can be configured with [output_fo
 
 Using the ClickHouse [DESCRIBE](/docs/en/sql-reference/statements/describe-table) function, you can quickly view the inferred format of an Avro file like the following example. This example includes the URL of a publicly accessible Avro file in the ClickHouse S3 public bucket:
 
-``` DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
+```
+DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
+```
+```
+┌─name───────────────────────┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
+│ WatchID                    │ Int64           │              │                    │         │                  │                │
+│ JavaEnable                 │ Int32           │              │                    │         │                  │                │
+│ Title                      │ String          │              │                    │         │                  │                │
+│ GoodEvent                  │ Int32           │              │                    │         │                  │                │
+│ EventTime                  │ Int32           │              │                    │         │                  │                │
+│ EventDate                  │ Date32          │              │                    │         │                  │                │
+│ CounterID                  │ Int32           │              │                    │         │                  │                │
+│ ClientIP                   │ Int32           │              │                    │         │                  │                │
+│ ClientIP6                  │ FixedString(16) │              │                    │         │                  │                │
+│ RegionID                   │ Int32           │              │                    │         │                  │                │
+...
+│ IslandID                   │ FixedString(16) │              │                    │         │                  │                │
+│ RequestNum                 │ Int32           │              │                    │         │                  │                │
+│ RequestTry                 │ Int32           │              │                    │         │                  │                │
+└────────────────────────────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```
 
 ## AvroConfluent {#data-format-avro-confluent}
@@ -2006,8 +2026,8 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
 | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
 | `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` |
-| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
-| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` |
+| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` |
+| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` |
 | `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` |
 
 Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
@@ -2209,8 +2229,8 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
 | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
 | `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` |
-| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
-| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_SIZE_BINARY` |
+| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` |
+| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_SIZE_BINARY` |
 | `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` |
 
 Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
@@ -2277,7 +2297,7 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `Struct` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `Struct` |
 | `Map` | [Map](/docs/en/sql-reference/data-types/map.md) | `Map` |
 | `Int` | [IPv4](/docs/en/sql-reference/data-types/int-uint.md) | `Int` |
-| `Binary` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `Binary` |
+| `Binary` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `Binary` |
 | `Binary` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `Binary` |
 | `Binary` | [Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `Binary` |
 
@@ -2490,7 +2510,7 @@ ClickHouse supports reading and writing [MessagePack](https://msgpack.org/) data
 | `uint 64` | [DateTime64](/docs/en/sql-reference/data-types/datetime.md) | `uint 64` |
 | `fixarray`, `array 16`, `array 32` | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) | `fixarray`, `array 16`, `array 32` |
 | `fixmap`, `map 16`, `map 32` | [Map](/docs/en/sql-reference/data-types/map.md) | `fixmap`, `map 16`, `map 32` |
-| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `uint 32` |
+| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `uint 32` |
 | `bin 8` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8` |
 | `int 8` | [Enum8](/docs/en/sql-reference/data-types/enum.md) | `int 8` |
 | `bin 8` | [(U)Int128/(U)Int256](/docs/en/sql-reference/data-types/int-uint.md) | `bin 8` |
@@ -6,32 +6,43 @@ sidebar_label: Configuration Files

 # Configuration Files

-ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. Other files must be in the `/etc/clickhouse-server/config.d` directory. Note, that any configuration file can be written either in XML or YAML, but mixing formats in one file is not supported. For example, you can have main configs as `config.xml` and `users.xml` and write additional files in `config.d` and `users.d` directories in `.yaml`.
+The ClickHouse server can be configured with configuration files in XML or YAML syntax. In most installation types, the ClickHouse server runs with `/etc/clickhouse-server/config.xml` as the default configuration file, but it is also possible to specify the location of the configuration file manually at server startup using the command line option `--config-file=` or `-C`. Additional configuration files may be placed into directory `config.d/` relative to the main configuration file, for example into directory `/etc/clickhouse-server/config.d/`. Files in this directory and the main configuration are merged in a preprocessing step before the configuration is applied in ClickHouse server. Configuration files are merged in alphabetical order. To simplify updates and improve modularization, it is best practice to keep the default `config.xml` file unmodified and place additional customization into `config.d/`.

-All XML files should have the same root element, usually `<clickhouse>`. As for YAML, `clickhouse:` should not be present, the parser will insert it automatically.
+It is possible to mix XML and YAML configuration files, for example you could have a main configuration file `config.xml` and additional configuration files `config.d/network.xml`, `config.d/timezone.yaml` and `config.d/keeper.yaml`. Mixing XML and YAML within a single configuration file is not supported. XML configuration files should use `<clickhouse>...</clickhouse>` as the top-level tag. In YAML configuration files, `clickhouse:` is optional; the parser inserts it implicitly if absent.

-## Override {#override}
+## Overriding Configuration {#override}

-Some settings specified in the main configuration file can be overridden in other configuration files:
+The merge of configuration files behaves as one intuitively expects: the contents of both files are combined recursively, and children with the same name are replaced by the element of the more specific configuration file. The merge can be customized using attributes `replace` and `remove`.
+
+- Attribute `replace` means that the element is replaced by the specified one.
+- Attribute `remove` means that the element is deleted.

-- The `replace` or `remove` attributes can be specified for the elements of these configuration files.
-- If neither is specified, it combines the contents of elements recursively, replacing values of duplicate children.
-- If `replace` is specified, it replaces the entire element with the specified one.
-- If `remove` is specified, it deletes the element.
+To specify that a value of an element should be replaced by the value of an environment variable, you can use attribute `from_env`.

-You can also declare attributes as coming from environment variables by using `from_env="VARIABLE_NAME"`:
+Example with `$MAX_QUERY_SIZE = 150000`:

 ```xml
 <clickhouse>
-    <macros>
-        <replica from_env="REPLICA" />
-        <layer from_env="LAYER" />
-        <shard from_env="SHARD" />
-    </macros>
+    <profiles>
+        <default>
+            <max_query_size from_env="MAX_QUERY_SIZE"/>
+        </default>
+    </profiles>
 </clickhouse>
 ```

-## Substitution {#substitution}
+which is equal to
+
+``` xml
+<clickhouse>
+    <profiles>
+        <default>
+            <max_query_size>150000</max_query_size>
+        </default>
+    </profiles>
+</clickhouse>
+```

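Once the server has merged the configuration, the resulting value can be read back from a client session. A minimal sketch, assuming the `MAX_QUERY_SIZE` example above:

```sql
-- check the effective value produced by the from_env override
SELECT name, value
FROM system.settings
WHERE name = 'max_query_size';
```
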
+## Substituting Configuration {#substitution}

 The config can also define “substitutions”. If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-include_from) element in the server config. The substitution values are specified in `/clickhouse/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](../operations/server-configuration-parameters/settings.md#macros)).

@@ -50,7 +50,7 @@ To manage named collections with DDL a user must have the `named_control_collect
 ```

 :::tip
-In the above example the `passowrd_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` as in the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user.
+In the above example the `password_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` because the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user.
 :::

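The hexadecimal hash itself can be produced in ClickHouse. A small sketch with a placeholder password:

```sql
-- 'my_password' is a placeholder; the result is the value for password_sha256_hex
SELECT lower(hex(SHA256('my_password'))) AS password_sha256_hex;
```
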
 ## Storing named collections in configuration files

@@ -1,16 +0,0 @@
----
-slug: /en/operations/server-configuration-parameters/
-sidebar_position: 54
-sidebar_label: Server Configuration Parameters
-pagination_next: en/operations/server-configuration-parameters/settings
----
-
-# Server Configuration Parameters
-
-This section contains descriptions of server settings that cannot be changed at the session or query level.
-
-These settings are stored in the `config.xml` file on the ClickHouse server.
-
-Other settings are described in the “[Settings](../../operations/settings/index.md#session-settings-intro)” section.
-
-Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).

@@ -7,6 +7,14 @@ description: This section contains descriptions of server settings that cannot b

 # Server Settings

+This section contains descriptions of server settings that cannot be changed at the session or query level.
+
+These settings are stored in the `config.xml` file on the ClickHouse server.
+
+Other settings are described in the “[Settings](../../operations/settings/index.md#session-settings-intro)” section.
+
+Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).
+
 ## allow_use_jemalloc_memory

 Allows to use jemalloc memory.

@@ -932,6 +932,38 @@ Result
 " string "
 ```

+### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}
+
+Allow to use whitespace or tab as field delimiter in CSV strings.
+
+Default value: `false`.
+
+**Examples**
+
+Query
+
+```bash
+echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter=' '
+```
+
+Result
+
+```text
+a b
+```
+
+Query
+
+```bash
+echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter='\t'
+```
+
+Result
+
+```text
+a b
+```

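The setting can also be exercised from SQL. A sketch, assuming the `format` table function accepts an explicit structure argument:

```sql
-- parse one space-delimited row as CSV without leaving the client
SELECT *
FROM format(CSV, 'c1 String, c2 String', 'a b')
SETTINGS input_format_csv_allow_whitespace_or_tab_as_delimiter = 1,
         format_csv_delimiter = ' ';
```
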
 ## Values format settings {#values-format-settings}

 ### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}

@@ -2941,7 +2941,7 @@ Default value: `0`.

 ## mutations_sync {#mutations_sync}

-Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
+Allows to execute `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.

 Possible values:

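A usage sketch for the synchronous mode; the table and column names are hypothetical:

```sql
SET mutations_sync = 1;  -- wait for the mutation to finish on the current replica

ALTER TABLE visits UPDATE duration = 0 WHERE duration < 0;
ALTER TABLE visits MATERIALIZE COLUMN duration_rounded;  -- assumes a DEFAULT/MATERIALIZED column
```
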
@@ -3328,7 +3328,35 @@ Possible values:

 Default value: `0`.

-## s3_truncate_on_insert
+## engine_file_allow_create_multiple_files {#engine_file_allow_create_multiple_files}
+
+Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:
+
+`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.
+
+Possible values:
+- 0 — `INSERT` query appends new data to the end of the file.
+- 1 — `INSERT` query creates a new file.
+
+Default value: `0`.
+
+## engine_file_skip_empty_files {#engine_file_skip_empty_files}
+
+Enables or disables skipping empty files in [File](../../engines/table-engines/special/file.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.

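A sketch of the multiple-files behavior with a hypothetical File engine table:

```sql
SET engine_file_allow_create_multiple_files = 1;

CREATE TABLE file_sink (id UInt64) ENGINE = File(Parquet);

INSERT INTO file_sink VALUES (1); -- writes data.Parquet in the table directory
INSERT INTO file_sink VALUES (2); -- writes data.1.Parquet instead of appending
```
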
+## storage_file_read_method {#storage_file_read_method}
+
+Method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local).
+
+Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
+
+## s3_truncate_on_insert {#s3_truncate_on_insert}

 Enables or disables truncate before inserts in s3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists.

@@ -3338,7 +3366,29 @@ Possible values:

 Default value: `0`.

-## hdfs_truncate_on_insert
+## s3_create_new_file_on_insert {#s3_create_new_file_on_insert}
+
+Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:
+
+initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
+
+Possible values:
+- 0 — `INSERT` query appends new data to the end of the file.
+- 1 — `INSERT` query creates a new file.
+
+Default value: `0`.
+
+## s3_skip_empty_files {#s3_skip_empty_files}
+
+Enables or disables skipping empty files in [S3](../../engines/table-engines/integrations/s3.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.

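A sketch with the s3 table function; the bucket URL is a placeholder and credentials are omitted:

```sql
SET s3_create_new_file_on_insert = 1;

-- the first insert writes data.Parquet, the next one data.1.Parquet, and so on
INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/data.Parquet', 'Parquet', 'id UInt64')
VALUES (1);
```
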
+## hdfs_truncate_on_insert {#hdfs_truncate_on_insert}

 Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists.

@@ -3348,31 +3398,7 @@ Possible values:

 Default value: `0`.

-## engine_file_allow_create_multiple_files
-
-Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:
-
-`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.
-
-Possible values:
-- 0 — `INSERT` query appends new data to the end of the file.
-- 1 — `INSERT` query replaces existing content of the file with the new data.
-
-Default value: `0`.
-
-## s3_create_new_file_on_insert
-
-Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:
-
-initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
-
-Possible values:
-- 0 — `INSERT` query appends new data to the end of the file.
-- 1 — `INSERT` query replaces existing content of the file with the new data.
-
-Default value: `0`.
-
-## hdfs_create_new_file_on_insert
+## hdfs_create_new_file_on_insert {#hdfs_create_new_file_on_insert}

 Enables or disables creating a new file on each insert in HDFS engine tables. If enabled, on each insert a new HDFS file will be created with the name, similar to this pattern:

@@ -3380,7 +3406,27 @@ initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.

 Possible values:
 - 0 — `INSERT` query appends new data to the end of the file.
-- 1 — `INSERT` query replaces existing content of the file with the new data.
+- 1 — `INSERT` query creates a new file.
+
+Default value: `0`.
+
+## hdfs_skip_empty_files {#hdfs_skip_empty_files}
+
+Enables or disables skipping empty files in [HDFS](../../engines/table-engines/integrations/hdfs.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.
+
+## engine_url_skip_empty_files {#engine_url_skip_empty_files}
+
+Enables or disables skipping empty files in [URL](../../engines/table-engines/special/url.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.

 Default value: `0`.

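A sketch with the url table function; the URL is a placeholder:

```sql
SET engine_url_skip_empty_files = 1;

-- an empty response is returned as an empty result instead of a format error
SELECT * FROM url('https://example.com/data.csv', 'CSV', 'id UInt64, name String');
```
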
@@ -71,11 +71,11 @@ Columns:
 - 0 — Query was initiated by another query as part of distributed query execution.
 - `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
 - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
-- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
+- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query.
 - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query.
 - `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
 - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
-- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
+- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from.
 - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
 - `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Initial query starting time (for distributed query execution).
 - `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Initial query starting time with microseconds precision (for distributed query execution).

@@ -40,11 +40,11 @@ Columns:
 - 0 — Query was initiated by another query for distributed query execution.
 - `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
 - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
-- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
+- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query.
 - `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the query.
 - `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
 - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
-- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
+- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from.
 - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the parent query.
 - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Interface that the query was initiated from. Possible values:
 - 1 — TCP.

@@ -28,7 +28,7 @@ Columns:
 - `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
 - `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
 - `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out.
-- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — The IP address that was used to log in/out.
+- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — The IP address that was used to log in/out.
 - `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to log in/out.
 - `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — The interface from which the login was initiated. Possible values:
 - `TCP`

@@ -11,7 +11,8 @@ Columns:
 - `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to.
 - `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to.
 - `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config.
-- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established
+- `connected_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the connection was established
+- `session_uptime_elapsed_seconds` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Seconds elapsed since the connection was established
 - `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired.
 - `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version.
 - `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection.

@@ -23,7 +24,7 @@ SELECT * FROM system.zookeeper_connection;
 ```

 ``` text
-┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐
-│ default_zookeeper │ 127.0.0.1    │ 2181 │     0 │ 2023-05-19 14:30:16 │          0 │                  0 │ 216349144108826660 │
-└───────────────────┴──────────────┴──────┴───────┴─────────────────────┴────────────┴────────────────────┴────────────────────┘
+┌─name────┬─host──────┬─port─┬─index─┬──────connected_time─┬─session_uptime_elapsed_seconds─┬─is_expired─┬─keeper_api_version─┬─client_id─┐
+│ default │ 127.0.0.1 │ 9181 │     0 │ 2023-06-15 14:36:01 │                           3058 │          0 │                  3 │         5 │
+└─────────┴───────────┴──────┴───────┴─────────────────────┴────────────────────────────────┴────────────┴────────────────────┴───────────┘
 ```

@@ -15,7 +15,7 @@ Columns with request parameters:
 - `Finalize` — The connection is lost, no response was received.
 - `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the event happened.
 - `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — The date and time when the event happened.
-- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address of ZooKeeper server that was used to make the request.
+- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address of ZooKeeper server that was used to make the request.
 - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The port of ZooKeeper server that was used to make the request.
 - `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — The session ID that the ZooKeeper server sets for each connection.
 - `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — The ID of the request within the session. This is usually a sequential request number. It is the same for the request row and the paired `response`/`finalize` row.

@@ -32,7 +32,7 @@ For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 wit

 Internally data is represented as normal signed integers with respective bit width. Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string.

-Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64.
+Because modern CPUs do not support 128-bit and 256-bit integers natively, operations on Decimal128 and Decimal256 are emulated. Thus, Decimal128 and Decimal256 work significantly slower than Decimal32/Decimal64.

 ## Operations and Result Type

|
|||||||
|
|
||||||
During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in integer part will lead to an exception.
|
During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in integer part will lead to an exception.
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
Overflow check is not implemented for Decimal128 and Decimal256. In case of overflow incorrect result is returned, no exception is thrown.
|
||||||
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT toDecimal32(2, 4) AS x, x / 3
|
SELECT toDecimal32(2, 4) AS x, x / 3
|
||||||
```
|
```
|
||||||
|
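A sketch of why the warning matters; the operands are chosen to exceed Decimal128 precision:

```sql
-- Decimal128 arithmetic is not overflow-checked, so a product that no longer
-- fits into 38 digits can come back wrong instead of raising an exception
SELECT toDecimal128('99999999999999999999999999999999999999', 0) * toDecimal128('10', 0);
```
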
@@ -28,6 +28,6 @@ ClickHouse data types include:
 - **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell
 - **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
 - **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column setting its default value for the data type)
-- **IP addresses**: use [`IPv4`](./domains/ipv4.md) and [`IPv6`](./domains/ipv6.md) to efficiently store IP addresses
+- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses
 - **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
 - **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)

@@ -1,12 +1,12 @@
 ---
-slug: /en/sql-reference/data-types/domains/ipv4
+slug: /en/sql-reference/data-types/ipv4
 sidebar_position: 59
 sidebar_label: IPv4
 ---

 ## IPv4

-`IPv4` is a domain based on `UInt32` type and serves as a typed replacement for storing IPv4 values. It provides compact storage with the human-friendly input-output format and column type information on inspection.
+IPv4 addresses. Stored in 4 bytes as UInt32.

 ### Basic Usage

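The section continues with a table example; a sketch consistent with the `hits` queries shown in the next hunk:

```sql
CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url;

INSERT INTO hits (url, from) VALUES ('https://clickhouse.com', '116.106.34.242');

SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
```
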
@@ -57,25 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
 └──────────────────┴───────────┘
 ```

-Domain values are not implicitly convertible to types other than `UInt32`.
-If you want to convert `IPv4` value to a string, you have to do that explicitly with `IPv4NumToString()` function:
-
-``` sql
-SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1;
-```
-
-┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐
-│ String                            │ 183.247.232.58 │
-└───────────────────────────────────┴────────────────┘
-
-Or cast to a `UInt32` value:
-
-``` sql
-SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1;
-```
-
-``` text
-┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐
-│ UInt32                           │ 3086477370 │
-└──────────────────────────────────┴────────────┘
-```
+**See Also**
+
+- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md)

@@ -1,12 +1,12 @@
 ---
-slug: /en/sql-reference/data-types/domains/ipv6
+slug: /en/sql-reference/data-types/ipv6
 sidebar_position: 60
 sidebar_label: IPv6
 ---

 ## IPv6

-`IPv6` is a domain based on `FixedString(16)` type and serves as a typed replacement for storing IPv6 values. It provides compact storage with the human-friendly input-output format and column type information on inspection.
+IPv6 addresses. Stored in 16 bytes as UInt128 big-endian.

 ### Basic Usage

@@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
 └──────────────────┴──────────────────────────────────┘
 ```

-Domain values are not implicitly convertible to types other than `FixedString(16)`.
-If you want to convert `IPv6` value to a string, you have to do that explicitly with `IPv6NumToString()` function:
-
-``` sql
-SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1;
-```
-
-``` text
-┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐
-│ String                            │ 2001:44c8:129:2632:33:0:252:2 │
-└───────────────────────────────────┴───────────────────────────────┘
-```
-
-Or cast to a `FixedString(16)` value:
-
-``` sql
-SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1;
-```
-
-``` text
-┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐
-│ FixedString(16)                           │ …       │
-└───────────────────────────────────────────┴─────────┘
-```
+**See Also**
+
+- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md)

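With the domain-specific conversion examples removed, the equivalent round trip with the `IPv6` type could be written roughly as follows (a sketch, not part of the commit):

```sql
-- explicit conversion to text now goes through toString (or CAST to String)
SELECT toTypeName(from), toString(from) AS s FROM hits LIMIT 1;
```
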
@@ -248,7 +248,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32);

 ## toIPv4(string)

-An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/domains/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`.
+An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`.

 ``` sql
 WITH

@@ -296,7 +296,7 @@ Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns null

 ## toIPv6

-Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/domains/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value.
+Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value.
 Similar to [IPv6StringToNum](#ipv6stringtonums) function, which converts IPv6 address to binary format.

 If the input string contains a valid IPv4 address, then the IPv6 equivalent of the IPv4 address is returned.

@@ -315,7 +315,7 @@ toIPv6(string)

 - IP address.

-Type: [IPv6](../../sql-reference/data-types/domains/ipv6.md).
+Type: [IPv6](../../sql-reference/data-types/ipv6.md).

 **Examples**

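For instance, a minimal sketch:

```sql
SELECT toIPv6('127.0.0.1') AS addr, toTypeName(addr);
```
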
@@ -232,6 +232,7 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

 Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
 It is used if it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly on `SELECT` execution turns out to be expensive.
+Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 Syntax:

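A hedged usage sketch; the table and column names are hypothetical:

```sql
-- backfill stored values of a DEFAULT/MATERIALIZED column as a mutation
ALTER TABLE events MATERIALIZE COLUMN event_hour;
```
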
@@ -60,7 +60,7 @@ You can specify how long (in seconds) to wait for inactive replicas to execute a
 For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
 :::

-For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
+For `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.

 ## Related content

@@ -142,19 +142,19 @@ The following operations with [projections](/docs/en/engines/table-engines/merge

 ## ADD PROJECTION

-`ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
+`ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.

 ## DROP PROJECTION

-`ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 ## MATERIALIZE PROJECTION

-`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 ## CLEAR PROJECTION

-`ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files.

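A sketch of the extended `ON CLUSTER` forms; cluster, table, and projection names are hypothetical:

```sql
ALTER TABLE visits ON CLUSTER main_cluster
    ADD PROJECTION IF NOT EXISTS user_agg (SELECT user_id, count() GROUP BY user_id);

ALTER TABLE visits ON CLUSTER main_cluster
    MATERIALIZE PROJECTION user_agg IN PARTITION '2023';
```
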
@@ -10,15 +10,25 @@ sidebar_label: INDEX

 The following operations are available:

-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] ADD INDEX name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
-- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
+## ADD INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
+
+## DROP INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+
+## MATERIALIZE INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
+
+## CLEAR INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Deletes the secondary index files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

-The first two commands are lightweight in a sense that they only change metadata or remove files.
-
-Also, they are replicated, syncing indices metadata via ZooKeeper.
+The commands `ADD`, `DROP`, and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
+
+Also, they are replicated, syncing indices metadata via ClickHouse Keeper or ZooKeeper.

 :::note
 Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants).

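A sketch of the four commands on a hypothetical table:

```sql
ALTER TABLE hits ADD INDEX IF NOT EXISTS url_idx url TYPE bloom_filter GRANULARITY 4;
ALTER TABLE hits MATERIALIZE INDEX IF EXISTS url_idx;
ALTER TABLE hits CLEAR INDEX IF EXISTS url_idx;
ALTER TABLE hits DROP INDEX IF EXISTS url_idx;
```
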
@@ -82,6 +82,35 @@ LIFETIME(MIN 0 MAX 1000)
 LAYOUT(FLAT())
 ```

+:::note
+When using the SQL console in [ClickHouse Cloud](https://clickhouse.com), you must specify a user (`default` or any other user with the role `default_role`) and password when creating a dictionary.
+:::
+
+```sql
+CREATE USER IF NOT EXISTS clickhouse_admin
+IDENTIFIED WITH sha256_password BY 'passworD43$x';
+
+GRANT default_role TO clickhouse_admin;
+
+CREATE DATABASE foo_db;
+
+CREATE TABLE foo_db.source_table (
+    id UInt64,
+    value String
+) ENGINE = MergeTree
+PRIMARY KEY id;
+
+CREATE DICTIONARY foo_db.id_value_dictionary
+(
+    id UInt64,
+    value String
+)
+PRIMARY KEY id
+SOURCE(CLICKHOUSE(TABLE 'source_table' USER 'clickhouse_admin' PASSWORD 'passworD43$x' DB 'foo_db' ))
+LAYOUT(FLAT())
+LIFETIME(MIN 0 MAX 1000);
+```

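Once created, the dictionary can be queried directly:

```sql
-- look up the 'value' attribute for key 1
SELECT dictGet('foo_db.id_value_dictionary', 'value', toUInt64(1));
```
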
 ### Create a dictionary from a table in a remote ClickHouse service

 Input table (in the remote ClickHouse service) `source_table`:

@@ -380,11 +380,15 @@ High compression levels are useful for asymmetric scenarios, like compress once,

 `DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply:

-- DEFLATE_QPL is experimental and can only be used after setting configuration parameter `allow_experimental_codecs=1`.
+- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`.
 - DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details.
 - DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details.
 - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled.

+:::note
+DEFLATE_QPL is not available in ClickHouse Cloud.
+:::

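A usage sketch under the new gating setting:

```sql
SET enable_deflate_qpl_codec = 1;

CREATE TABLE qpl_demo (s String CODEC(DEFLATE_QPL)) ENGINE = MergeTree ORDER BY tuple();
```
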
 ### Specialized Codecs

 These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themselves. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation.

@@ -10,7 +10,7 @@ sidebar_label: SET
 SET param = value
 ```

-Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/index.md) this way.
+Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/settings.md) this way.

 You can also set all the values from the specified settings profile in a single query.

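For example:

```sql
SET max_threads = 4;      -- a single setting
SET profile = 'default';  -- apply every setting from the named profile
```
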
|
@ -196,6 +196,16 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3
|
|||||||
- `_path` — Path to the file.
|
- `_path` — Path to the file.
|
||||||
- `_file` — Name of the file.
|
- `_file` — Name of the file.
|
||||||
|
|
||||||
|
## Settings
|
||||||
|
|
||||||
|
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
|
||||||
|
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
|
||||||
|
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
|
||||||
|
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
||||||
|
- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
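A sketch combining one of the settings with the query from the hunk header:

```sql
SET engine_file_skip_empty_files = 1;

SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32');
```
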
 **See Also**

 - [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)

@@ -97,6 +97,12 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin
 - `_path` — Path to the file.
 - `_file` — Name of the file.

+## Storage Settings {#storage-settings}
+
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 **See Also**

 - [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns)

@@ -202,6 +202,12 @@ FROM s3(
 LIMIT 5;
 ```

+## Storage Settings {#storage-settings}
+
+- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

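A sketch of the truncate setting; the bucket URL is a placeholder and credentials are omitted:

```sql
SET s3_truncate_on_insert = 1;

-- overwrite the existing object instead of failing because it already exists
INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/out/data.csv', 'CSV', 'id UInt64')
VALUES (1);
```
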
 **See Also**

 - [S3 engine](../../engines/table-engines/integrations/s3.md)

@@ -53,6 +53,10 @@ Character `|` inside patterns is used to specify failover addresses. They are it
 - `_path` — Path to the `URL`.
 - `_file` — Resource name of the `URL`.

+## Storage Settings {#storage-settings}
+
+- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 **See Also**

 - [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)

@@ -13,8 +13,8 @@ data_types/date.md sql-reference/data-types/date.md
 data_types/datetime.md sql-reference/data-types/datetime.md
 data_types/datetime64.md sql-reference/data-types/datetime64.md
 data_types/decimal.md sql-reference/data-types/decimal.md
-data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md
-data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md
+data_types/domains/ipv4.md sql-reference/data-types/ipv4.md
+data_types/domains/ipv6.md sql-reference/data-types/ipv6.md
 data_types/domains/overview.md sql-reference/data-types/domains/overview.md
 data_types/enum.md sql-reference/data-types/enum.md
 data_types/fixedstring.md sql-reference/data-types/fixedstring.md

@ -305,8 +305,8 @@ sql_reference/data_types/datetime.md sql-reference/data-types/datetime.md
|
|||||||
sql_reference/data_types/datetime64.md sql-reference/data-types/datetime64.md
|
sql_reference/data_types/datetime64.md sql-reference/data-types/datetime64.md
|
||||||
sql_reference/data_types/decimal.md sql-reference/data-types/decimal.md
|
sql_reference/data_types/decimal.md sql-reference/data-types/decimal.md
|
||||||
sql_reference/data_types/domains/index.md sql-reference/data-types/domains/index.md
|
sql_reference/data_types/domains/index.md sql-reference/data-types/domains/index.md
|
||||||
sql_reference/data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md
|
sql_reference/data_types/domains/ipv4.md sql-reference/data-types/ipv4.md
|
||||||
sql_reference/data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md
|
sql_reference/data_types/domains/ipv6.md sql-reference/data-types/ipv6.md
|
||||||
sql_reference/data_types/domains/overview.md sql-reference/data-types/domains/overview.md
|
sql_reference/data_types/domains/overview.md sql-reference/data-types/domains/overview.md
|
||||||
sql_reference/data_types/enum.md sql-reference/data-types/enum.md
|
sql_reference/data_types/enum.md sql-reference/data-types/enum.md
|
||||||
sql_reference/data_types/fixedstring.md sql-reference/data-types/fixedstring.md
|
sql_reference/data_types/fixedstring.md sql-reference/data-types/fixedstring.md
|
||||||
|
@ -69,11 +69,11 @@ ClickHouse не удаляет данные из таблица автомати

- 0 — the query was initiated by another query as part of distributed query execution.
- `user` ([String](../../sql-reference/data-types/string.md)) — the user who ran the current query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address the query came from.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address the query came from.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the client port the query came from.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the parent query.
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address the parent query came from.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address the parent query came from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the client port the parent query came from.
- `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — start time of query processing (for distributed query execution).
- `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — start time of query processing with microsecond precision (for distributed query execution).
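To make these columns concrete, a query one might run against the table this hunk documents (the filter is invented for the example):

``` sql
SELECT user, query_id, address, initial_address
FROM system.query_log
WHERE is_initial_query = 0
LIMIT 5;
```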
@ -39,11 +39,11 @@ ClickHouse не удаляет данные из таблицы автомати

- 0 — the query was initiated by another query as part of a distributed query.
- `user` ([String](../../sql-reference/data-types/string.md)) — the user who ran the current query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address the query came from.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address the query came from.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the port the query came from.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — the user who ran the initial query (for distributed queries).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the parent query.
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address the parent query came from.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address the parent query came from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the port the parent query came from.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the interface the query was initiated from. Possible values:
  - 1 — TCP.
@ -27,7 +27,7 @@ slug: /ru/operations/system-tables/session_log

- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — the list of profiles set for all roles and/or users.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — the list of roles the profile applies to.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — the settings that were changed when the client logged in or out.
- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address used to log in or out.
- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address used to log in or out.
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the client port used to log in or out.
- `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — the interface the login was initiated from. Possible values:
  - `TCP`
@ -15,7 +15,7 @@ slug: /ru/operations/system-tables/zookeeper_log

- `Finalize` — the connection was lost, no response was received.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — the date when the event happened.
- `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — the date and time when the event happened.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address of the ZooKeeper server the request was made to.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address of the ZooKeeper server the request was made to.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the port of the ZooKeeper server the request was made to.
- `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — the session ID that the ZooKeeper server creates for each connection.
- `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — the ID of the request within the session. It is usually a sequential request number, identical for the request row and its paired `response`/`finalize` row.
@ -31,7 +31,7 @@ sidebar_label: Decimal

## Internal representation {#vnutrennee-predstavlenie}

Internally, the data is represented as signed integers of the corresponding bit width. The actual value ranges that can be stored in memory are somewhat larger than the declared ones; the declared Decimal ranges are checked only when a value is converted from its string representation.
Because modern CPUs do not support 128-bit integers, operations on Decimal128 are emulated in software. For this reason Decimal128 works significantly slower than Decimal32/Decimal64.
Because modern CPUs do not support 128-bit and 256-bit integers, operations on Decimal128 and Decimal256 are emulated in software. For this reason these types work significantly slower than Decimal32/Decimal64.

## Operations and result types {#operatsii-i-tipy-rezultata}

@ -59,6 +59,10 @@ sidebar_label: Decimal

Integer overflow is possible in operations on the Decimal type. Excess digits in the fractional part are discarded (not rounded). Excess digits in the integer part raise an exception.

:::warning
Overflow checks are not implemented for Decimal128 and Decimal256. In case of overflow, an incorrect result is returned and no exception is thrown.
:::

``` sql
SELECT toDecimal32(2, 4) AS x, x / 3
```
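A hedged companion to the warning above, showing one way the integer-part overflow manifests (illustrative, not part of the patch):

``` sql
SELECT toDecimal32(4.2, 8) AS x, x * x;
-- expected to fail: 17.64 does not fit a Decimal32 with scale 8
```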
@ -1,12 +1,12 @@

---
slug: /ru/sql-reference/data-types/domains/ipv4
slug: /ru/sql-reference/data-types/ipv4
sidebar_position: 59
sidebar_label: IPv4
---

## IPv4 {#ipv4}

`IPv4` is a domain based on the `UInt32` data type, designed for storing IPv4 addresses. It provides compact storage with a human-friendly input/output format, and the column type remains visible in the table structure.
IPv4 addresses. Stored in 4 bytes as UInt32.

### Usage {#primenenie}
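A brief sketch of the compact storage the new one-line description refers to; table and column names mirror the surrounding examples but are illustrative:

``` sql
CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree ORDER BY url;
INSERT INTO hits (url, from) VALUES ('https://clickhouse.com', '116.106.34.242');
SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
```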
@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;

└──────────────────┴───────────┘
```

Values of the domain type are not implicitly convertible to any data type other than `UInt32`.
If you want to convert an `IPv4` value to a string, you have to do that explicitly with the `IPv4NumToString()` function:

``` sql
SELECT toTypeName(s), IPv4NumToString(from) AS s FROM hits LIMIT 1;
```

``` text
┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐
│ String                            │ 183.247.232.58 │
└───────────────────────────────────┴────────────────┘
```

Or cast it to a `UInt32` value:

``` sql
SELECT toTypeName(i), CAST(from AS UInt32) AS i FROM hits LIMIT 1;
```

``` text
┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐
│ UInt32                           │ 3086477370 │
└──────────────────────────────────┴────────────┘
```

**See Also**

- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md)
@ -1,5 +1,5 @@

---
slug: /ru/sql-reference/data-types/domains/ipv6
slug: /ru/sql-reference/data-types/ipv6
sidebar_position: 60
sidebar_label: IPv6
---
@ -265,7 +265,7 @@ SELECT

## toIPv6 {#toipv6string}

Converts a string containing an address in IPv6 format to the [IPv6](../../sql-reference/data-types/domains/ipv6.md) type. Returns an empty value if the input string is not a valid IP address.
Converts a string containing an address in IPv6 format to the [IPv6](../../sql-reference/data-types/ipv6.md) type. Returns an empty value if the input string is not a valid IP address.
Similar to the [IPv6StringToNum](#ipv6stringtonums) function, which represents an IPv6 address in binary form.

If the input string contains a valid IPv4 address, the function returns its IPv6 equivalent.

@ -284,7 +284,7 @@ toIPv6(string)

- IP address.

Type: [IPv6](../../sql-reference/data-types/domains/ipv6.md).
Type: [IPv6](../../sql-reference/data-types/ipv6.md).

**Examples**
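Assuming standard behaviour for the IPv4 fallback described above, a sketch of one such call (illustrative):

``` sql
SELECT toIPv6('127.0.0.1') AS addr, toTypeName(addr);
-- the IPv4 input is returned as its IPv6 equivalent, ::ffff:127.0.0.1
```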
@ -60,11 +60,11 @@ ClickHouse不会自动从表中删除数据。更多详情请看 [introduction](

- 0 — the query was initiated by another query as part of distributed query execution.
- `user` ([String](../../sql-reference/data-types/string.md)) — the user who initiated the query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address of the client that issued the query.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address of the client that issued the query.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the client port the query was issued from.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — the user name of the initial query (for distributed query execution).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address the parent query was run from.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address the parent query was run from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the client port the parent query was issued from.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — the interface the query was initiated from. Possible values:
  - 1 — TCP.
@ -36,11 +36,11 @@ ClickHouse不会自动从表中删除数据。 欲了解更多详情,请参照

- 0 — a distributed query initiated by another query.
- `user` ([String](../../sql-reference/data-types/string.md)) — the name of the user who initiated the query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address the query was initiated from.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address the query was initiated from.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the port the query was initiated from.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — the name of the user who first issued the query (for distributed queries).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the first query (for distributed queries).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address of the parent query that initiated this query.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address of the parent query that initiated this query.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the port of the parent query that initiated this query.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the interface the query was initiated from. Possible values:
  - 1 — TCP.
@ -15,7 +15,7 @@ slug: /zh/operations/system-tables/zookeeper_log

- `Finalize` — the connection was lost, no response was received.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — the date when the event happened.
- `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — the date and time when the event happened.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — the IP address of the ZooKeeper server used to make the request.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — the IP address of the ZooKeeper server used to make the request.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the port of the ZooKeeper server used to make the request.
- `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — the session ID that the ZooKeeper server sets for each connection.
- `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — the ID of the request within the session. It is usually a sequential request number, identical for the request row and its paired `response`/`finalize` row.
@ -1,5 +1,5 @@

---
slug: /zh/sql-reference/data-types/domains/ipv4
slug: /zh/sql-reference/data-types/ipv4
---

## IPv4 {#ipv4}

@ -1,5 +1,5 @@

---
slug: /zh/sql-reference/data-types/domains/ipv6
slug: /zh/sql-reference/data-types/ipv6
---

## IPv6 {#ipv6}
@ -2,6 +2,8 @@

#include <base/types.h>

#include <vector>

namespace DB
{
@ -224,12 +224,12 @@ struct Keeper::KeeperHTTPContext : public IHTTPContext

    uint64_t getMaxFieldNameSize() const override
    {
        return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 1048576);
        return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 128 * 1024);
    }

    uint64_t getMaxFieldValueSize() const override
    {
        return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 1048576);
        return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 128 * 1024);
    }

    uint64_t getMaxChunkSize() const override
@ -1705,7 +1705,6 @@ try

#endif

    /// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
    async_metrics.start();

    {
@ -4,6 +4,7 @@

#include <bitset>
#include <cstring>
#include <vector>
#include <unordered_map>

namespace DB
@ -146,8 +146,8 @@ public:

        for (const auto & argument : this->argument_types)
            can_be_compiled &= canBeNativeType(*argument);

        auto return_type = this->getResultType();
        const auto & result_type = this->getResultType();
        can_be_compiled &= canBeNativeType(*return_type);
        can_be_compiled &= canBeNativeType(*result_type);

        return can_be_compiled;
    }
@ -198,8 +198,8 @@ public:

        auto * denominator_ptr = b.CreateConstGEP1_32(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
        auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);

        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, b.getDoubleTy());
        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, this->getResultType());
        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, b.getDoubleTy());
        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, this->getResultType());

        return b.CreateFDiv(double_numerator, double_denominator);
    }
|
|||||||
|
|
||||||
#if USE_EMBEDDED_COMPILER
|
#if USE_EMBEDDED_COMPILER
|
||||||
|
|
||||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
|
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
|
||||||
{
|
{
|
||||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||||
|
|
||||||
@ -316,7 +316,7 @@ public:

        auto * numerator_ptr = aggregate_data_ptr;
        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
        auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
        auto * value_cast_to_numerator = nativeCast(b, arguments[0], toNativeDataType<Numerator>());
        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator);
        b.CreateStore(numerator_result_value, numerator_ptr);
@ -55,7 +55,7 @@ public:

        return can_be_compiled;
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -63,8 +63,9 @@ public:

        auto * numerator_ptr = aggregate_data_ptr;
        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);

        auto * argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
        auto * weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
        auto numerator_data_type = toNativeDataType<Numerator>();
        auto * argument = nativeCast(b, arguments[0], numerator_data_type);
        auto * weight = nativeCast(b, arguments[1], numerator_data_type);

        llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);
@ -75,7 +76,7 @@ public:

        static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
        auto * denominator_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);

        auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);
        auto * weight_cast_to_denominator = nativeCast(b, arguments[1], toNativeDataType<Denominator>());

        auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
        auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator);
@ -148,7 +148,7 @@ public:

        Data::compileCreate(builder, value_ptr);
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -157,8 +157,7 @@ public:

        auto * value_ptr = aggregate_data_ptr;
        auto * value = b.CreateLoad(return_type, value_ptr);

        const auto & argument_value = argument_values[0];
        auto * result_value = Data::compileUpdate(builder, value, argument_value);
        auto * result_value = Data::compileUpdate(builder, value, arguments[0].value);

        b.CreateStore(result_value, value_ptr);
    }
@ -165,7 +165,7 @@ public:

        b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> &) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType &) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
|||||||
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
|
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
|
||||||
}
|
}
|
||||||
|
|
||||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & values) const override
|
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
|
||||||
{
|
{
|
||||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||||
|
|
||||||
auto * return_type = toNativeType(b, this->getResultType());
|
auto * return_type = toNativeType(b, this->getResultType());
|
||||||
|
|
||||||
auto * is_null_value = b.CreateExtractValue(values[0], {1});
|
auto * is_null_value = b.CreateExtractValue(arguments[0].value, {1});
|
||||||
auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));
|
auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));
|
||||||
|
|
||||||
auto * count_value_ptr = aggregate_data_ptr;
|
auto * count_value_ptr = aggregate_data_ptr;
|
||||||
|
@ -188,18 +188,18 @@ public:

        return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        const auto & nullable_type = arguments_types[0];
        const auto & nullable_type = arguments[0].type;
        const auto & nullable_value = argument_values[0];
        const auto & nullable_value = arguments[0].value;

        auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
        auto * is_null_value = b.CreateExtractValue(nullable_value, {1});

        const auto & predicate_type = arguments_types[argument_values.size() - 1];
        const auto & predicate_type = arguments.back().type;
        auto * predicate_value = argument_values[argument_values.size() - 1];
        auto * predicate_value = arguments.back().value;
        auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);

        auto * head = b.GetInsertBlock();
@ -219,7 +219,7 @@ public:

        b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
        b.CreateBr(join_block);

        b.SetInsertPoint(join_block);
@ -370,38 +370,31 @@ public:

        return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        /// TODO: Check

        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        size_t arguments_size = arguments_types.size();
        size_t arguments_size = arguments.size();

        ValuesWithType wrapped_arguments;
        wrapped_arguments.reserve(arguments_size);

        DataTypes non_nullable_types;
        std::vector<llvm::Value * > wrapped_values;
        std::vector<llvm::Value * > is_null_values;

        non_nullable_types.resize(arguments_size);
        wrapped_values.resize(arguments_size);
        is_null_values.resize(arguments_size);

        for (size_t i = 0; i < arguments_size; ++i)
        {
            const auto & argument_value = argument_values[i];
            const auto & argument_value = arguments[i].value;
            const auto & argument_type = arguments[i].type;

            if (is_nullable[i])
            {
                auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
                is_null_values[i] = b.CreateExtractValue(argument_value, {1});
                is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
                wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
                wrapped_values[i] = wrapped_value;
                non_nullable_types[i] = removeNullable(arguments_types[i]);
            }
            else
            {
                wrapped_values[i] = argument_value;
                wrapped_arguments.emplace_back(argument_value, argument_type);
                non_nullable_types[i] = arguments_types[i];
            }
        }
@ -415,9 +408,6 @@ public:

        for (auto * is_null_value : is_null_values)
        {
            if (!is_null_value)
                continue;

            auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
            b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
        }
@ -426,8 +416,8 @@ public:

        b.SetInsertPoint(join_block_after_null_checks);

        const auto & predicate_type = arguments_types[argument_values.size() - 1];
        const auto & predicate_type = arguments.back().type;
        auto * predicate_value = argument_values[argument_values.size() - 1];
        auto * predicate_value = arguments.back().value;
        auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);

        auto * if_true = llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent());
@ -223,12 +223,12 @@ public:

        nested_func->compileCreate(builder, aggregate_data_ptr);
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        const auto & predicate_type = arguments_types[argument_values.size() - 1];
        const auto & predicate_type = arguments.back().type;
        auto * predicate_value = argument_values[argument_values.size() - 1];
        auto * predicate_value = arguments.back().value;

        auto * head = b.GetInsertBlock();
@ -242,21 +242,9 @@ public:

        b.SetInsertPoint(if_true);

        size_t arguments_size_without_predicate = arguments_types.size() - 1;

        DataTypes argument_types_without_predicate;
        std::vector<llvm::Value *> argument_values_without_predicate;

        argument_types_without_predicate.resize(arguments_size_without_predicate);
        argument_values_without_predicate.resize(arguments_size_without_predicate);

        for (size_t i = 0; i < arguments_size_without_predicate; ++i)
        {
            argument_types_without_predicate[i] = arguments_types[i];
            argument_values_without_predicate[i] = argument_values[i];
        }

        nested_func->compileAdd(builder, aggregate_data_ptr, argument_types_without_predicate, argument_values_without_predicate);

        ValuesWithType arguments_without_predicate = arguments;
        arguments_without_predicate.pop_back();
        nested_func->compileAdd(builder, aggregate_data_ptr, arguments_without_predicate);

        b.CreateBr(join_block);
@ -1459,11 +1459,11 @@ public:

        b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), llvm::assumeAligned(this->alignOfData()));
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        if constexpr (Data::is_compilable)
        {
            Data::compileChangeIfBetter(builder, aggregate_data_ptr, argument_values[0]);
            Data::compileChangeIfBetter(builder, aggregate_data_ptr, arguments[0].value);
        }
        else
        {
|
|||||||
|
|
||||||
#if USE_EMBEDDED_COMPILER
|
#if USE_EMBEDDED_COMPILER
|
||||||
|
|
||||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
|
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
|
||||||
{
|
{
|
||||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||||
|
|
||||||
const auto & nullable_type = arguments_types[0];
|
const auto & nullable_type = arguments[0].type;
|
||||||
const auto & nullable_value = argument_values[0];
|
const auto & nullable_value = arguments[0].value;
|
||||||
|
|
||||||
auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
|
auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
|
||||||
auto * is_null_value = b.CreateExtractValue(nullable_value, {1});
|
auto * is_null_value = b.CreateExtractValue(nullable_value, {1});
|
||||||
@ -405,7 +405,7 @@ public:
|
|||||||
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
|
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
|
||||||
|
|
||||||
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
|
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
|
||||||
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
|
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
|
||||||
b.CreateBr(join_block);
|
b.CreateBr(join_block);
|
||||||
|
|
||||||
b.SetInsertPoint(join_block);
|
b.SetInsertPoint(join_block);
|
||||||
@ -568,36 +568,32 @@ public:
|
|||||||
|
|
||||||
#if USE_EMBEDDED_COMPILER
|
#if USE_EMBEDDED_COMPILER
|
||||||
|
|
||||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
|
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
|
||||||
{
|
{
|
||||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||||
|
|
||||||
size_t arguments_size = arguments_types.size();
|
size_t arguments_size = arguments.size();
|
||||||
|
|
||||||
|
ValuesWithType wrapped_arguments;
|
||||||
|
wrapped_arguments.reserve(arguments_size);
|
||||||
|
|
||||||
DataTypes non_nullable_types;
|
|
||||||
std::vector<llvm::Value * > wrapped_values;
|
|
||||||
std::vector<llvm::Value *> is_null_values;
|
std::vector<llvm::Value *> is_null_values;
|
||||||
|
is_null_values.reserve(arguments_size);
|
||||||
non_nullable_types.resize(arguments_size);
|
|
||||||
wrapped_values.resize(arguments_size);
|
|
||||||
is_null_values.resize(arguments_size);
|
|
||||||
|
|
||||||
for (size_t i = 0; i < arguments_size; ++i)
|
for (size_t i = 0; i < arguments_size; ++i)
|
||||||
{
|
{
|
||||||
const auto & argument_value = argument_values[i];
|
const auto & argument_value = arguments[i].value;
|
||||||
|
const auto & argument_type = arguments[i].type;
|
||||||
|
|
||||||
if (is_nullable[i])
|
if (is_nullable[i])
|
||||||
{
|
{
|
||||||
auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
|
auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
|
||||||
is_null_values[i] = b.CreateExtractValue(argument_value, {1});
|
is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
|
||||||
|
wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
|
||||||
wrapped_values[i] = wrapped_value;
|
|
||||||
non_nullable_types[i] = removeNullable(arguments_types[i]);
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
wrapped_values[i] = argument_value;
|
wrapped_arguments.emplace_back(argument_value, argument_type);
|
||||||
non_nullable_types[i] = arguments_types[i];
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -612,9 +608,6 @@ public:
|
|||||||
|
|
||||||
for (auto * is_null_value : is_null_values)
|
for (auto * is_null_value : is_null_values)
|
||||||
{
|
{
|
||||||
if (!is_null_value)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
|
auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
|
||||||
b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
|
b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
|
||||||
}
|
}
|
||||||
@ -630,7 +623,7 @@ public:
|
|||||||
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
|
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
|
||||||
|
|
||||||
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
|
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
|
||||||
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values);
|
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
|
||||||
b.CreateBr(join_block);
|
b.CreateBr(join_block);
|
||||||
|
|
||||||
b.SetInsertPoint(join_block);
|
b.SetInsertPoint(join_block);
|
||||||
|
@ -588,7 +588,7 @@ public:

        b.CreateStore(llvm::Constant::getNullValue(return_type), aggregate_sum_ptr);
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -597,10 +597,7 @@ public:

        auto * sum_value_ptr = aggregate_data_ptr;
        auto * sum_value = b.CreateLoad(return_type, sum_value_ptr);

        const auto & argument_type = arguments_types[0];
        const auto & argument_value = argument_values[0];

        auto * value_cast_to_result = nativeCast(b, argument_type, argument_value, return_type);
        auto * value_cast_to_result = nativeCast(b, arguments[0], this->getResultType());
        auto * sum_result_value = sum_value->getType()->isIntegerTy() ? b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result);

        b.CreateStore(sum_result_value, sum_value_ptr);
@ -6,6 +6,7 @@

#include <Core/Block.h>
#include <Core/ColumnNumbers.h>
#include <Core/Field.h>
#include <Core/ValuesWithType.h>
#include <Interpreters/Context_fwd.h>
#include <base/types.h>
#include <Common/Exception.h>
@ -389,7 +390,7 @@ public:

    }

    /// compileAdd should generate code for updating aggregate function state stored in aggregate_data_ptr
    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector<llvm::Value *> & /*arguments_values*/) const
    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const ValuesWithType & /*arguments*/) const
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
    }
@ -85,6 +85,9 @@ void BackupCoordinationStageSync::setError(const String & current_host, const Ex

    writeException(exception, buf, true);
    zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());

    /// When backup/restore fails, it removes the nodes from Zookeeper.
    /// Sometimes it fails to remove all nodes. It's possible that it removes /error node, but fails to remove /stage node,
    /// so the following line tries to preserve the error status.
    auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR);
    if (code != Coordination::Error::ZOK)
        throw zkutil::KeeperException(code, zookeeper_path);
@ -152,8 +152,7 @@ namespace

    }
    catch (...)
    {
        if (coordination)
            coordination->setError(Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
        sendExceptionToCoordination(coordination, Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
    }
}
@ -165,7 +165,7 @@ void IBridge::initialize(Application & self)

    http_timeout = config().getUInt64("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
    max_server_connections = config().getUInt("max-server-connections", 1024);
    keep_alive_timeout = config().getUInt64("keep-alive-timeout", 10);
    http_max_field_value_size = config().getUInt64("http-max-field-value-size", 1048576);
    http_max_field_value_size = config().getUInt64("http-max-field-value-size", 128 * 1024);

    struct rlimit limit;
    const UInt64 gb = 1024 * 1024 * 1024;
@ -1165,11 +1165,20 @@ void ClientBase::onProfileEvents(Block & block)
|
|||||||
/// Flush all buffers.
|
/// Flush all buffers.
|
||||||
void ClientBase::resetOutput()
|
void ClientBase::resetOutput()
|
||||||
{
|
{
|
||||||
|
/// Order is important: format, compression, file
|
||||||
|
|
||||||
if (output_format)
|
if (output_format)
|
||||||
output_format->finalize();
|
output_format->finalize();
|
||||||
output_format.reset();
|
output_format.reset();
|
||||||
|
|
||||||
logs_out_stream.reset();
|
logs_out_stream.reset();
|
||||||
|
|
||||||
|
if (out_file_buf)
|
||||||
|
{
|
||||||
|
out_file_buf->finalize();
|
||||||
|
out_file_buf.reset();
|
||||||
|
}
|
||||||
|
|
||||||
if (pager_cmd)
|
if (pager_cmd)
|
||||||
{
|
{
|
||||||
pager_cmd->in.close();
|
pager_cmd->in.close();
|
@@ -1177,15 +1186,9 @@ void ClientBase::resetOutput()
     }
     pager_cmd = nullptr;

-    if (out_file_buf)
-    {
-        out_file_buf->next();
-        out_file_buf.reset();
-    }
-
     if (out_logs_buf)
     {
-        out_logs_buf->next();
+        out_logs_buf->finalize();
         out_logs_buf.reset();
     }

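Taken together, the two resetOutput hunks do two things: they move the out_file_buf teardown ahead of the pager (matching the stated order: format, compression, file) and replace next() with finalize(). The distinction matters: next() merely flushes the working buffer, while finalize() also marks the buffer finished, so its destructor has no deferred write left to perform. A minimal sketch of that contract against the generic DB::WriteBuffer interface:

```cpp
#include <memory>
#include <IO/WriteBuffer.h>

/// Sketch: close out a write buffer before releasing it.
void closeBuffer(std::unique_ptr<DB::WriteBuffer> & buf)
{
    if (!buf)
        return;

    /// finalize() flushes any remaining bytes and marks the buffer complete;
    /// next() alone would flush but leave the buffer formally unfinished.
    buf->finalize();
    buf.reset();
}
```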
@@ -588,7 +588,7 @@ void Connection::sendQuery(
         if (method == "ZSTD")
             level = settings->network_zstd_compression_level;

-        CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs);
+        CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_deflate_qpl_codec);
         compression_codec = CompressionCodecFactory::instance().get(method, level);
     }
     else
@@ -18,7 +18,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
     String client_name,
     Protocol::Compression compression,
     Protocol::Secure secure,
-    Int64 priority)
+    Priority priority)
 {
     Key key{
         max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};
@@ -74,7 +74,7 @@ size_t ConnectionPoolFactory::KeyHash::operator()(const ConnectionPoolFactory::K
     hash_combine(seed, hash_value(k.client_name));
     hash_combine(seed, hash_value(k.compression));
     hash_combine(seed, hash_value(k.secure));
-    hash_combine(seed, hash_value(k.priority));
+    hash_combine(seed, hash_value(k.priority.value));
     return seed;
 }

@@ -1,6 +1,7 @@
 #pragma once

 #include <Common/PoolBase.h>
+#include <Common/Priority.h>
 #include <Client/Connection.h>
 #include <IO/ConnectionTimeouts.h>
 #include <Core/Settings.h>
@@ -34,7 +35,7 @@ public:
         const Settings * settings = nullptr,
         bool force_connected = true) = 0;

-    virtual Int64 getPriority() const { return 1; }
+    virtual Priority getPriority() const { return Priority{1}; }
 };

 using ConnectionPoolPtr = std::shared_ptr<IConnectionPool>;
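The Int64 → Priority hunks in this group all lean on the strong typedef pulled in via #include <Common/Priority.h>. That header is not shown in this diff, so the following is only a plausible sketch of such a wrapper; it also explains why KeyHash above now hashes k.priority.value, since the wrapper is not implicitly convertible to an integer:

```cpp
/// Plausible sketch of a strong-typedef priority (the real Common/Priority.h
/// may differ). Wrapping the Int64 keeps an unrelated integer from being
/// passed where a priority is expected, at no runtime cost.
struct Priority
{
    Int64 value = 0;

    bool operator<(const Priority & other) const { return value < other.value; }
    bool operator==(const Priority & other) const { return value == other.value; }
};
```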
@@ -60,7 +61,7 @@ public:
        const String & client_name_,
        Protocol::Compression compression_,
        Protocol::Secure secure_,
-        Int64 priority_ = 1)
+        Priority priority_ = Priority{1})
        : Base(max_connections_,
            &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
        host(host_),
@@ -103,7 +104,7 @@ public:
         return host + ":" + toString(port);
     }

-    Int64 getPriority() const override
+    Priority getPriority() const override
     {
         return priority;
     }
@@ -134,7 +135,7 @@ private:
     String client_name;
     Protocol::Compression compression;  /// Whether to compress data when interacting with the server.
     Protocol::Secure secure;            /// Whether to encrypt data when interacting with the server.
-    Int64 priority;                     /// priority from <remote_servers>
+    Priority priority;                  /// priority from <remote_servers>
 };

 /**
@@ -157,7 +158,7 @@ public:
         String client_name;
         Protocol::Compression compression;
         Protocol::Secure secure;
-        Int64 priority;
+        Priority priority;
     };

     struct KeyHash
@@ -180,7 +181,7 @@ public:
        String client_name,
        Protocol::Compression compression,
        Protocol::Secure secure,
-        Int64 priority);
+        Priority priority);
 private:
     mutable std::mutex mutex;
     using ConnectionPoolWeakPtr = std::weak_ptr<IConnectionPool>;
@@ -71,7 +71,7 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
     return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority);
 }

-Int64 ConnectionPoolWithFailover::getPriority() const
+Priority ConnectionPoolWithFailover::getPriority() const
 {
     return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b)
     {
@@ -48,7 +48,7 @@ public:
        const Settings * settings,
        bool force_connected) override; /// From IConnectionPool

-    Int64 getPriority() const override; /// From IConnectionPool
+    Priority getPriority() const override; /// From IConnectionPool

    /** Allocates up to the specified number of connections to work.
      * Connections provide access to different replicas of one shard.
@@ -528,6 +528,7 @@ StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t n, Arena & are
 {
     WriteBufferFromArena out(arena, begin);
     func->serialize(data[n], out, version);
+    out.finalize();
     return out.complete();
 }

@@ -151,13 +151,13 @@ public:

     ColumnPtr compress() const override;

-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(offsets);
         callback(data);
     }

-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*offsets);
         offsets->forEachSubcolumnRecursively(callback);
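This hunk and the remaining Column* hunks apply a single interface change: forEachSubcolumn and forEachSubcolumnRecursively drop their const qualifiers, and the callbacks receive mutable pointers/references, so a caller can replace a subcolumn rather than only inspect it. A sketch of what the mutable form permits, assuming MutableColumnCallback is a std::function over IColumn::WrappedPtr &:

```cpp
/// Sketch: replace every subcolumn with its compressed representation.
/// Only the mutable callback makes this possible; the old const form
/// handed out pointers that could not be reseated.
void compressSubcolumns(DB::IColumn & column)
{
    column.forEachSubcolumn([](DB::IColumn::WrappedPtr & subcolumn)
    {
        subcolumn = subcolumn->compress();
    });
}
```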
@@ -230,12 +230,12 @@ public:
         data->getExtremes(min, max);
     }

-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(data);
     }

-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*data);
         data->forEachSubcolumnRecursively(callback);
@@ -166,7 +166,7 @@ public:
     size_t byteSizeAt(size_t n) const override { return getDictionary().byteSizeAt(getIndexes().getUInt(n)); }
     size_t allocatedBytes() const override { return idx.getPositions()->allocatedBytes() + getDictionary().allocatedBytes(); }

-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(idx.getPositionsPtr());

@@ -175,7 +175,7 @@ public:
         callback(dictionary.getColumnUniquePtr());
     }

-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*idx.getPositionsPtr());
         idx.getPositionsPtr()->forEachSubcolumnRecursively(callback);
@@ -340,7 +340,7 @@ private:
         explicit Dictionary(MutableColumnPtr && column_unique, bool is_shared);
         explicit Dictionary(ColumnPtr column_unique, bool is_shared);

-        const ColumnPtr & getColumnUniquePtr() const { return column_unique; }
+        const WrappedPtr & getColumnUniquePtr() const { return column_unique; }
         WrappedPtr & getColumnUniquePtr() { return column_unique; }

         const IColumnUnique & getColumnUnique() const { return static_cast<const IColumnUnique &>(*column_unique); }
@@ -273,12 +273,12 @@ void ColumnMap::getExtremes(Field & min, Field & max) const
     max = std::move(map_max_value);
 }

-void ColumnMap::forEachSubcolumn(ColumnCallback callback) const
+void ColumnMap::forEachSubcolumn(MutableColumnCallback callback)
 {
     callback(nested);
 }

-void ColumnMap::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
+void ColumnMap::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
 {
     callback(*nested);
     nested->forEachSubcolumnRecursively(callback);
@@ -88,8 +88,8 @@ public:
     size_t byteSizeAt(size_t n) const override;
     size_t allocatedBytes() const override;
     void protect() override;
-    void forEachSubcolumn(ColumnCallback callback) const override;
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
+    void forEachSubcolumn(MutableColumnCallback callback) override;
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
     bool structureEquals(const IColumn & rhs) const override;
     double getRatioOfDefaultRows(double sample_ratio) const override;
     UInt64 getNumberOfDefaultRows() const override;
@@ -130,13 +130,13 @@ public:

     ColumnPtr compress() const override;

-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(nested_column);
         callback(null_map);
     }

-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*nested_column);
         nested_column->forEachSubcolumnRecursively(callback);
@@ -664,18 +664,18 @@ size_t ColumnObject::allocatedBytes() const
     return res;
 }

-void ColumnObject::forEachSubcolumn(ColumnCallback callback) const
+void ColumnObject::forEachSubcolumn(MutableColumnCallback callback)
 {
-    for (const auto & entry : subcolumns)
-        for (const auto & part : entry->data.data)
+    for (auto & entry : subcolumns)
+        for (auto & part : entry->data.data)
             callback(part);
 }

-void ColumnObject::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
+void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
 {
-    for (const auto & entry : subcolumns)
+    for (auto & entry : subcolumns)
     {
-        for (const auto & part : entry->data.data)
+        for (auto & part : entry->data.data)
         {
             callback(*part);
             part->forEachSubcolumnRecursively(callback);
@@ -206,8 +206,8 @@ public:
     size_t size() const override;
     size_t byteSize() const override;
     size_t allocatedBytes() const override;
-    void forEachSubcolumn(ColumnCallback callback) const override;
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
+    void forEachSubcolumn(MutableColumnCallback callback) override;
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
     void insert(const Field & field) override;
     void insertDefault() override;
     void insertFrom(const IColumn & src, size_t n) override;
@@ -751,13 +751,13 @@ bool ColumnSparse::structureEquals(const IColumn & rhs) const
     return false;
 }

-void ColumnSparse::forEachSubcolumn(ColumnCallback callback) const
+void ColumnSparse::forEachSubcolumn(MutableColumnCallback callback)
 {
     callback(values);
     callback(offsets);
 }

-void ColumnSparse::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
+void ColumnSparse::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
 {
     callback(*values);
     values->forEachSubcolumnRecursively(callback);
Some files were not shown because too many files have changed in this diff.