Merge remote-tracking branch 'origin/master' into query-plan-update-sort-description

Commit 14498c1d2b
@@ -14,7 +14,7 @@ curl https://clickhouse.com/ | sh
 * [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
 * [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
 * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
-* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
+* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
 * [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
 * [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
 * [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
@@ -115,6 +115,13 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A

 # ARROW_ORC + adapters/orc/CMakefiles
 set(ORC_SRCS
+    "${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.h"
+    "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc"
+    "${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc"
+    "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc"
+    "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc"
+    "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc"
+    "${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc"
     "${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
     "${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
     "${ORC_SOURCE_SRC_DIR}/Reader.cc"
@@ -129,13 +136,20 @@ set(ORC_SRCS
     "${ORC_SOURCE_SRC_DIR}/MemoryPool.cc"
     "${ORC_SOURCE_SRC_DIR}/RLE.cc"
     "${ORC_SOURCE_SRC_DIR}/RLEv1.cc"
-    "${ORC_SOURCE_SRC_DIR}/RLEv2.cc"
+    "${ORC_SOURCE_SRC_DIR}/RleDecoderV2.cc"
+    "${ORC_SOURCE_SRC_DIR}/RleEncoderV2.cc"
+    "${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc"
     "${ORC_SOURCE_SRC_DIR}/Statistics.cc"
     "${ORC_SOURCE_SRC_DIR}/StripeStream.cc"
     "${ORC_SOURCE_SRC_DIR}/Timezone.cc"
     "${ORC_SOURCE_SRC_DIR}/TypeImpl.cc"
     "${ORC_SOURCE_SRC_DIR}/Vector.cc"
     "${ORC_SOURCE_SRC_DIR}/Writer.cc"
+    "${ORC_SOURCE_SRC_DIR}/Adaptor.cc"
+    "${ORC_SOURCE_SRC_DIR}/BloomFilter.cc"
+    "${ORC_SOURCE_SRC_DIR}/Murmur3.cc"
+    "${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc"
+    "${ORC_SOURCE_SRC_DIR}/wrap/orc-proto-wrapper.cc"
     "${ORC_SOURCE_SRC_DIR}/io/InputStream.cc"
     "${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc"
     "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
@@ -358,6 +372,9 @@ SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_zlib.cc" ${ARROW_SRCS})
 add_definitions(-DARROW_WITH_ZSTD)
 SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_zstd.cc" ${ARROW_SRCS})

+
+add_definitions(-DARROW_WITH_BROTLI)
+SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_brotli.cc" ${ARROW_SRCS})

 add_library(_arrow ${ARROW_SRCS})


@@ -372,6 +389,7 @@ target_link_libraries(_arrow PRIVATE
     ch_contrib::snappy
     ch_contrib::zlib
     ch_contrib::zstd
+    ch_contrib::brotli
 )
 target_link_libraries(_arrow PUBLIC _orc)

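The two added lines build Arrow's Brotli codec (`ARROW_WITH_BROTLI`, `compression_brotli.cc`) into `_arrow`, so Brotli-compressed Parquet/Arrow data should become decodable. A quick check might look like this (a sketch; the file name is hypothetical):

```bash
# Read a Brotli-compressed Parquet file with clickhouse-local; without ARROW_WITH_BROTLI
# the bundled Arrow could not decode this codec.
clickhouse-local --query "SELECT count() FROM file('data_brotli.parquet', Parquet)"
```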
contrib/orc (vendored submodule)

@@ -1 +1 @@
-Subproject commit f9a393ed2433a60034795284f82d093b348f2102
+Subproject commit c5d7755ba0b9a95631c8daea4d094101f26ec761
@@ -29,7 +29,7 @@ RUN arch=${TARGETARCH:-amd64} \
 esac

 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.2.3.17"
+ARG VERSION="23.2.4.12"
 ARG PACKAGES="clickhouse-keeper"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.2.3.17"
+ARG VERSION="23.2.4.12"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.2.3.17"
+ARG VERSION="23.2.4.12"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
@@ -60,6 +60,13 @@ install_packages previous_release_package_folder
 export USE_S3_STORAGE_FOR_MERGE_TREE=1
 # Previous version may not be ready for fault injections
 export ZOOKEEPER_FAULT_INJECTION=0
+
+# force_sync=false doesn't work correctly on some older versions
+sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
+  | sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \
+  > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+
 configure

 # But we still need default disk because some tables loaded only into it
@@ -161,7 +168,9 @@ rg -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
     -e "Authentication failed" \
     -e "Cannot flush" \
     -e "Container already exists" \
-    /var/log/clickhouse-server/clickhouse-server.upgrade.log | zgrep -Fa "<Error>" > /test_output/upgrade_error_messages.txt \
+    clickhouse-server.upgrade.log \
+    | grep -av -e "_repl_01111_.*Mapping for table with UUID" \
+    | zgrep -Fa "<Error>" > /test_output/upgrade_error_messages.txt \
     && echo -e "Error message in clickhouse-server.log (see upgrade_error_messages.txt)$FAIL$(head_escaped /test_output/upgrade_error_messages.txt)" \
     >> /test_output/test_results.tsv \
     || echo -e "No Error messages after server upgrade$OK" >> /test_output/test_results.tsv
docs/changelogs/v22.12.5.34-stable.md (new file)

@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.12.5.34-stable (b82d6401ca1) FIXME as compared to v22.12.4.76-stable (cb5772db805)

#### Improvement
* Backported in [#46983](https://github.com/ClickHouse/ClickHouse/issues/46983): Apply `ALTER TABLE table_name ON CLUSTER cluster MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'` to all replicas, because `ALTER TABLE t MOVE` is not replicated. [#46402](https://github.com/ClickHouse/ClickHouse/pull/46402) ([lizhuoyu5](https://github.com/lzydmxy)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#45729](https://github.com/ClickHouse/ClickHouse/issues/45729): Fix key description when encountering duplicate primary keys. This can happen in projections. See [#45590](https://github.com/ClickHouse/ClickHouse/issues/45590) for details. [#45686](https://github.com/ClickHouse/ClickHouse/pull/45686) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#46398](https://github.com/ClickHouse/ClickHouse/issues/46398): Fix `SYSTEM UNFREEZE` queries failing with the exception `CANNOT_PARSE_INPUT_ASSERTION_FAILED`. [#46325](https://github.com/ClickHouse/ClickHouse/pull/46325) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#46903](https://github.com/ClickHouse/ClickHouse/issues/46903): Fix incorrect alias recursion in QueryNormalizer. [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#47210](https://github.com/ClickHouse/ClickHouse/issues/47210): `INSERT` queries through native TCP protocol and HTTP protocol were not canceled correctly in some cases. It could lead to a partially applied query if a client canceled the query, or if a client died or, in rare cases, on network errors. As a result, it could lead to not working deduplication. Fixes [#27667](https://github.com/ClickHouse/ClickHouse/issues/27667) and [#45377](https://github.com/ClickHouse/ClickHouse/issues/45377). [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#47157](https://github.com/ClickHouse/ClickHouse/issues/47157): Fix arithmetic operations in aggregate optimization with `min` and `max`. [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#46881](https://github.com/ClickHouse/ClickHouse/issues/46881): Fix MSan report in the `maxIntersections` function. This closes [#43126](https://github.com/ClickHouse/ClickHouse/issues/43126). [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#47359](https://github.com/ClickHouse/ClickHouse/issues/47359): Fix possible deadlock on distributed query cancellation. [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Use /etc/default/clickhouse in systemd too [#47003](https://github.com/ClickHouse/ClickHouse/pull/47003) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Follow-up to [#46681](https://github.com/ClickHouse/ClickHouse/issues/46681) [#47284](https://github.com/ClickHouse/ClickHouse/pull/47284) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v22.8.15.23-lts.md (new file)

@@ -0,0 +1,28 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.15.23-lts (d36fa168bbf) FIXME as compared to v22.8.14.53-lts (4ea67c40077)

#### Improvement
* Backported in [#46981](https://github.com/ClickHouse/ClickHouse/issues/46981): Apply `ALTER TABLE table_name ON CLUSTER cluster MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'` to all replicas, because `ALTER TABLE t MOVE` is not replicated. [#46402](https://github.com/ClickHouse/ClickHouse/pull/46402) ([lizhuoyu5](https://github.com/lzydmxy)).

#### Bug Fix
* Backported in [#47336](https://github.com/ClickHouse/ClickHouse/issues/47336): After changing a role, the change was sometimes not reflected in the access rights of a user who uses that role. This PR fixes that. [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#46901](https://github.com/ClickHouse/ClickHouse/issues/46901): Fix incorrect alias recursion in QueryNormalizer. [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#47156](https://github.com/ClickHouse/ClickHouse/issues/47156): Fix arithmetic operations in aggregate optimization with `min` and `max`. [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#46987](https://github.com/ClickHouse/ClickHouse/issues/46987): Fix result of LIKE predicates which translate to substring searches and contain quoted non-LIKE metacharacters. [#46875](https://github.com/ClickHouse/ClickHouse/pull/46875) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#47357](https://github.com/ClickHouse/ClickHouse/issues/47357): Fix possible deadlock on distributed query cancellation. [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Reduce updates of Mergeable Check [#46781](https://github.com/ClickHouse/ClickHouse/pull/46781) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.1.5.24-stable.md (new file)

@@ -0,0 +1,28 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.1.5.24-stable (0e51b53ba99) FIXME as compared to v23.1.4.58-stable (9ed562163a5)

#### Build/Testing/Packaging Improvement
* Backported in [#47060](https://github.com/ClickHouse/ClickHouse/issues/47060): Fix an error during server startup on old distros (e.g. Amazon Linux 2) and on ARM where glibc 2.28 symbols are not found. [#47008](https://github.com/ClickHouse/ClickHouse/pull/47008) ([Robert Schulze](https://github.com/rschu1ze)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#46401](https://github.com/ClickHouse/ClickHouse/issues/46401): Fix `SYSTEM UNFREEZE` queries failing with the exception `CANNOT_PARSE_INPUT_ASSERTION_FAILED`. [#46325](https://github.com/ClickHouse/ClickHouse/pull/46325) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#46905](https://github.com/ClickHouse/ClickHouse/issues/46905): Fix incorrect alias recursion in QueryNormalizer. [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#47211](https://github.com/ClickHouse/ClickHouse/issues/47211): `INSERT` queries through native TCP protocol and HTTP protocol were not canceled correctly in some cases. It could lead to a partially applied query if a client canceled the query, or if a client died or, in rare cases, on network errors. As a result, it could lead to not working deduplication. Fixes [#27667](https://github.com/ClickHouse/ClickHouse/issues/27667) and [#45377](https://github.com/ClickHouse/ClickHouse/issues/45377). [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#47118](https://github.com/ClickHouse/ClickHouse/issues/47118): Fix arithmetic operations in aggregate optimization with `min` and `max`. [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#46883](https://github.com/ClickHouse/ClickHouse/issues/46883): Fix MSan report in the `maxIntersections` function. This closes [#43126](https://github.com/ClickHouse/ClickHouse/issues/43126). [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#47361](https://github.com/ClickHouse/ClickHouse/issues/47361): Fix possible deadlock on distributed query cancellation. [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Use /etc/default/clickhouse in systemd too [#47003](https://github.com/ClickHouse/ClickHouse/pull/47003) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Follow-up to [#46681](https://github.com/ClickHouse/ClickHouse/issues/46681) [#47284](https://github.com/ClickHouse/ClickHouse/pull/47284) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.2.4.12-stable.md (new file)

@@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.2.4.12-stable (8fe866cb035) FIXME as compared to v23.2.3.17-stable (dec18bf7281)

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#47277](https://github.com/ClickHouse/ClickHouse/issues/47277): Fix IPv4/IPv6 serialization/deserialization in binary formats that was broken in https://github.com/ClickHouse/ClickHouse/pull/43221. Closes [#46522](https://github.com/ClickHouse/ClickHouse/issues/46522). [#46616](https://github.com/ClickHouse/ClickHouse/pull/46616) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#47212](https://github.com/ClickHouse/ClickHouse/issues/47212): `INSERT` queries through native TCP protocol and HTTP protocol were not canceled correctly in some cases. It could lead to a partially applied query if a client canceled the query, or if a client died or, in rare cases, on network errors. As a result, it could lead to not working deduplication. Fixes [#27667](https://github.com/ClickHouse/ClickHouse/issues/27667) and [#45377](https://github.com/ClickHouse/ClickHouse/issues/45377). [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#47363](https://github.com/ClickHouse/ClickHouse/issues/47363): Fix possible deadlock on distributed query cancellation. [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Follow-up to [#46681](https://github.com/ClickHouse/ClickHouse/issues/46681) [#47284](https://github.com/ClickHouse/ClickHouse/pull/47284) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/en/development/build-cross-s390x.md (new file)

@@ -0,0 +1,123 @@
---
slug: /en/development/build-cross-s390x
sidebar_position: 69
title: How to Build, Run and Debug ClickHouse on Linux for s390x (zLinux)
sidebar_label: Build on Linux for s390x (zLinux)
---

As of writing (2023/3/10), building for s390x is considered experimental: not all features can be enabled, some features are broken, and the port is under active development.

## Building

As s390x does not support boringssl, it uses OpenSSL and has two related build options.
- By default, the s390x build will dynamically link to OpenSSL libraries. It will build OpenSSL shared objects, so it's not necessary to install OpenSSL beforehand. (This option is recommended in all cases.)
- Another option is to build OpenSSL in-tree. In this case two build flags need to be supplied to cmake:

```bash
-DENABLE_OPENSSL_DYNAMIC=0 -DENABLE_OPENSSL=1
```

These instructions assume that the host machine is x86_64 and has all the tooling required to build natively based on the [build instructions](../development/build.md). It also assumes that the host is Ubuntu 22.04, but the following instructions should also work on Ubuntu 20.04.

In addition to installing the tooling used to build natively, the following additional packages need to be installed:

```bash
apt-get install binutils-s390x-linux-gnu libc6-dev-s390x-cross gcc-s390x-linux-gnu binfmt-support qemu-user-static
```

If you wish to cross-compile Rust code, install the Rust cross-compilation target for s390x:

```bash
rustup target add s390x-unknown-linux-gnu
```

To build for s390x:

```bash
cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake ..
ninja
```
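For the in-tree OpenSSL variant, those flags and the toolchain file go into the same configure step; a minimal sketch, assuming the usual out-of-source build directory layout from the generic build instructions:

```bash
# Configure a cross build for s390x with OpenSSL built in-tree (flags from the section above).
cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake \
      -DENABLE_OPENSSL_DYNAMIC=0 -DENABLE_OPENSSL=1 ..
ninja
```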
## Running

Once built, the binary can be run with, e.g.:

```bash
qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse
```
## Debugging

Install LLDB:

```bash
apt-get install lldb-15
```

To debug an s390x executable, run clickhouse using QEMU in debug mode:

```bash
qemu-s390x-static -g 31338 -L /usr/s390x-linux-gnu ./clickhouse
```

In another shell run LLDB and attach; replace `<Clickhouse Parent Directory>` and `<build directory>` with the values corresponding to your environment.

```bash
lldb-15
(lldb) target create ./clickhouse
Current executable set to '/<Clickhouse Parent Directory>/ClickHouse/<build directory>/programs/clickhouse' (s390x).
(lldb) settings set target.source-map <build directory> /<Clickhouse Parent Directory>/ClickHouse
(lldb) gdb-remote 31338
Process 1 stopped
* thread #1, stop reason = signal SIGTRAP
  frame #0: 0x0000004020e74cd0
->  0x4020e74cd0: lgr    %r2, %r15
    0x4020e74cd4: aghi   %r15, -160
    0x4020e74cd8: xc     0(8,%r15), 0(%r15)
    0x4020e74cde: brasl  %r14, 275429939040
(lldb) b main
Breakpoint 1: 9 locations.
(lldb) c
Process 1 resuming
Process 1 stopped
* thread #1, stop reason = breakpoint 1.1
  frame #0: 0x0000004005cd9fc0 clickhouse`main(argc_=1, argv_=0x0000004020e594a8) at main.cpp:450:17
   447  #if !defined(FUZZING_MODE)
   448  int main(int argc_, char ** argv_)
   449  {
-> 450      inside_main = true;
   451      SCOPE_EXIT({ inside_main = false; });
   452
   453      /// PHDR cache is required for query profiler to work reliably
```
## Visual Studio Code integration

- The [CodeLLDB](https://github.com/vadimcn/vscode-lldb) extension is required for visual debugging; the [Command Variable](https://github.com/rioj7/command-variable) extension can help with dynamic launches when using [cmake variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md).
- Make sure to set the backend to your LLVM installation, e.g. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`
- Launcher:
```json
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Debug",
            "type": "lldb",
            "request": "custom",
            "targetCreateCommands": ["target create ${command:cmake.launchTargetDirectory}/clickhouse"],
            "processCreateCommands": ["settings set target.source-map ${input:targetdir} ${workspaceFolder}", "gdb-remote 31338"],
            "sourceMap": { "${input:targetdir}": "${workspaceFolder}" }
        }
    ],
    "inputs": [
        {
            "id": "targetdir",
            "type": "command",
            "command": "extension.commandvariable.transform",
            "args": {
                "text": "${command:cmake.launchTargetDirectory}",
                "find": ".*/([^/]+)/[^/]+$",
                "replace": "$1"
            }
        }
    ]
}
```
- Make sure to run the clickhouse executable in debug mode prior to launch. (It is also possible to create a `preLaunchTask` that automates this.)
@@ -1,6 +1,6 @@
 ---
 slug: /en/development/contrib
-sidebar_position: 71
+sidebar_position: 72
 sidebar_label: Third-Party Libraries
 description: A list of third-party libraries used
 ---
@@ -1,6 +1,6 @@
 ---
 slug: /en/development/style
-sidebar_position: 69
+sidebar_position: 70
 sidebar_label: C++ Guide
 description: A list of recommendations regarding coding style, naming convention, formatting and more
 ---
@@ -1,6 +1,6 @@
 ---
 slug: /en/development/tests
-sidebar_position: 70
+sidebar_position: 71
 sidebar_label: Testing
 title: ClickHouse Testing
 description: Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.
@@ -31,6 +31,9 @@ folder and run the following command:
 PATH=$PATH:<path to clickhouse-client> tests/clickhouse-test 01428_hash_set_nan_key
 ```

+Test results (`stderr` and `stdout`) are written to files `01428_hash_set_nan_key.[stderr|stdout]` which
+are located near the test file itself (so for `queries/0_stateless/foo.sql` output will be in `queries/0_stateless/foo.stdout`).
+
 For more options, see `tests/clickhouse-test --help`. You can simply run all tests or run subset of tests filtered by substring in test name: `./clickhouse-test substring`. There are also options to run tests in parallel or in randomized order.

 ### Adding a New Test
File diff suppressed because one or more lines are too long
@@ -1981,6 +1981,7 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
 - [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
 - [output_format_parquet_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_fixed_string_as_fixed_byte_array) - use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary/String for FixedString columns. Default value - `true`.
 - [output_format_parquet_version](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_version) - The version of Parquet format used in output format. Default value - `2.latest`.
+- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `snappy`.

 ## Arrow {#data-format-arrow}

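As an illustration of the newly documented setting, a Parquet export that overrides the codec might look roughly like this (a sketch; the table and file names are hypothetical, and the setting is passed as a command-line option):

```bash
# Write Parquet compressed with zstd instead of the default snappy.
clickhouse-client --output_format_parquet_compression_method='zstd' \
    --query="SELECT * FROM some_table FORMAT Parquet" > some_table.parquet
```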
@@ -2051,6 +2052,7 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam
 - [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
 - [input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Arrow format. Default value - `false`.
 - [output_format_arrow_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_fixed_string_as_fixed_byte_array) - use Arrow FIXED_SIZE_BINARY type instead of Binary/String for FixedString columns. Default value - `true`.
+- [output_format_arrow_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_compression_method) - compression method used in output Arrow format. Default value - `none`.

 ## ArrowStream {#data-format-arrow-stream}

@@ -2107,6 +2109,7 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT ORC" > {filename.
 ### Arrow format settings {#parquet-format-settings}

 - [output_format_arrow_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_string_as_string) - use Arrow String type instead of Binary for String columns. Default value - `false`.
+- [output_format_orc_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_orc_compression_method) - compression method used in output ORC format. Default value - `none`.
 - [input_format_arrow_import_nested](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_import_nested) - allow inserting array of structs into Nested table in Arrow input format. Default value - `false`.
 - [input_format_arrow_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_case_insensitive_column_matching) - ignore case when matching Arrow columns with ClickHouse columns. Default value - `false`.
 - [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
@@ -117,7 +117,7 @@ clickhouse-local --file='hobbies.jsonl' --table='hobbies' --query='SELECT * FROM
 4 47 Brayan ['movies','skydiving']
 ```

-# Using structure from insertion table {#using-structure-from-insertion-table}
+## Using structure from insertion table {#using-structure-from-insertion-table}

 When table functions `file/s3/url/hdfs` are used to insert data into a table,
 there is an option to use the structure from the insertion table instead of extracting it from the data.
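A minimal sketch of the simple case described above, reusing the `hobbies.jsonl` file and `hobbies` table from the surrounding examples (server-side paths and table setup are assumed): because `SELECT *` passes the columns through unchanged, the structure can be taken from the target table instead of being inferred from the file.

```bash
# Insert from a file through the file() table function; the column list comes from the
# insertion table, so no schema inference over hobbies.jsonl should be needed.
clickhouse-client --query="INSERT INTO hobbies SELECT * FROM file('hobbies.jsonl')"
```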
@@ -222,7 +222,7 @@ INSERT INTO hobbies4 SELECT id, empty(hobbies) ? NULL : hobbies[1] FROM file(hob

 In this case, there are some operations performed on the column `hobbies` in the `SELECT` query to insert it into the table, so ClickHouse cannot use the structure from the insertion table, and schema inference will be used.

-# Schema inference cache {#schema-inference-cache}
+## Schema inference cache {#schema-inference-cache}

 For most input formats schema inference reads some data to determine its structure and this process can take some time.
 To prevent inferring the same schema every time ClickHouse read the data from the same file, the inferred schema is cached and when accessing the same file again, ClickHouse will use the schema from the cache.
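The cache is exposed through the `system.schema_inference_cache` table; the query below, which also appears as hunk context further down, counts cached schemas for data read from S3 (wrapping it in clickhouse-client is a sketch):

```bash
# Count cached inferred schemas for files read from S3.
clickhouse-client --query="SELECT count() FROM system.schema_inference_cache WHERE storage='S3'"
```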
@@ -326,14 +326,14 @@ SELECT count() FROM system.schema_inference_cache WHERE storage='S3'
 └─────────┘
 ```

-# Text formats {#text-formats}
+## Text formats {#text-formats}

 For text formats, ClickHouse reads the data row by row, extracts column values according to the format,
 and then uses some recursive parsers and heuristics to determine the type for each value. The maximum number of rows read from the data in schema inference
 is controlled by the setting `input_format_max_rows_to_read_for_schema_inference` with default value 25000.
 By default, all inferred types are [Nullable](../sql-reference/data-types/nullable.md), but you can change this by setting `schema_inference_make_columns_nullable` (see examples in the [settings](#settings-for-text-formats) section).

-## JSON formats {#json-formats}
+### JSON formats {#json-formats}

 In JSON formats ClickHouse parses values according to the JSON specification and then tries to find the most appropriate data type for them.

@@ -464,9 +464,9 @@ most likely this column contains only Nulls or empty Arrays/Maps.
 ...
 ```

-### JSON settings {#json-settings}
+#### JSON settings {#json-settings}

-#### input_format_json_read_objects_as_strings
+##### input_format_json_read_objects_as_strings

 Enabling this setting allows reading nested JSON objects as strings.
 This setting can be used to read nested JSON objects without using JSON object type.
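A sketch of the effect (the sample row is made up): with the setting enabled, the nested object should be inferred as a string column rather than requiring the JSON object type.

```bash
# Infer the schema of one JSONEachRow row with nested objects read as strings.
clickhouse-local --input_format_json_read_objects_as_strings=1 --query \
    "DESC format(JSONEachRow, '{\"id\" : 1, \"obj\" : {\"a\" : 2, \"b\" : \"hello\"}}')"
```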
@@ -486,7 +486,7 @@ DESC format(JSONEachRow, $$
 └──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-#### input_format_json_try_infer_numbers_from_strings
+##### input_format_json_try_infer_numbers_from_strings

 Enabling this setting allows inferring numbers from string values.

@@ -507,7 +507,7 @@ DESC format(JSONEachRow, $$
 └───────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-#### input_format_json_read_numbers_as_strings
+##### input_format_json_read_numbers_as_strings

 Enabling this setting allows reading numeric values as strings.

@@ -528,7 +528,7 @@ DESC format(JSONEachRow, $$
 └───────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-#### input_format_json_read_bools_as_numbers
+##### input_format_json_read_bools_as_numbers

 Enabling this setting allows reading Bool values as numbers.

@@ -549,7 +549,7 @@ DESC format(JSONEachRow, $$
 └───────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-## CSV {#csv}
+### CSV {#csv}

 In CSV format ClickHouse extracts column values from the row according to delimiters. ClickHouse expects all types except numbers and strings to be enclosed in double quotes. If the value is in double quotes, ClickHouse tries to parse
 the data inside quotes using the recursive parser and then tries to find the most appropriate data type for it. If the value is not in double quotes, ClickHouse tries to parse it as a number,
@@ -726,7 +726,7 @@ $$)
 └──────────────┴───────────────┘
 ```

-## TSV/TSKV {#tsv-tskv}
+### TSV/TSKV {#tsv-tskv}

 In TSV/TSKV formats ClickHouse extracts column value from the row according to tabular delimiters and then parses extracted value using
 the recursive parser to determine the most appropriate type. If the type cannot be determined, ClickHouse treats this value as String.
@@ -1019,7 +1019,7 @@ DESC format(TSV, '[1,2,3] 42.42 Hello World!')
 └──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-## CustomSeparated {#custom-separated}
+### CustomSeparated {#custom-separated}

 In CustomSeparated format ClickHouse first extracts all column values from the row according to specified delimiters and then tries to infer
 the data type for each value according to escaping rule.
@@ -1080,7 +1080,7 @@ $$)
 └────────┴───────────────┴────────────┘
 ```

-## Template {#template}
+### Template {#template}

 In Template format ClickHouse first extracts all column values from the row according to the specified template and then tries to infer the
 data type for each value according to its escaping rule.
@@ -1120,7 +1120,7 @@ $$)
 └──────────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-## Regexp {#regexp}
+### Regexp {#regexp}

 Similar to Template, in Regexp format ClickHouse first extracts all column values from the row according to specified regular expression and then tries to infer
 data type for each value according to the specified escaping rule.
@@ -1142,9 +1142,9 @@ Line: value_1=2, value_2="Some string 2", value_3="[4, 5, NULL]"$$)
 └──────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-## Settings for text formats {settings-for-text-formats}
+### Settings for text formats {#settings-for-text-formats}

-### input_format_max_rows_to_read_for_schema_inference
+#### input_format_max_rows_to_read_for_schema_inference

 This setting controls the maximum number of rows to be read while schema inference.
 The more rows are read, the more time is spent on schema inference, but the greater the chance to
@@ -1152,7 +1152,7 @@ correctly determine the types (especially when the data contains a lot of nulls)

 Default value: `25000`.

-### column_names_for_schema_inference
+#### column_names_for_schema_inference

 The list of column names to use in schema inference for formats without explicit column names. Specified names will be used instead of default `c1,c2,c3,...`. The format: `column1,column2,column3,...`.

@@ -1169,7 +1169,7 @@ DESC format(TSV, 'Hello, World! 42 [1, 2, 3]') settings column_names_for_schema_
 └──────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-### schema_inference_hints
+#### schema_inference_hints

 The list of column names and types to use in schema inference instead of automatically determined types. The format: 'column_name1 column_type1, column_name2 column_type2, ...'.
 This setting can be used to specify the types of columns that could not be determined automatically or for optimizing the schema.
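A sketch of the syntax (the sample data and the hinted type are made up; the `DESC format(...) settings ...` form matches the examples used throughout this page):

```bash
# Force the type of the "age" column and let the remaining columns be inferred automatically.
clickhouse-local --query \
    "DESC format(JSONEachRow, '{\"id\" : 1, \"age\" : 25}') settings schema_inference_hints='age LowCardinality(UInt8)'"
```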
@@ -1189,7 +1189,7 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul
 └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-### schema_inference_make_columns_nullable
+#### schema_inference_make_columns_nullable

 Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
 If the setting is enabled, all inferred type will be `Nullable`, if disabled, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference.
@@ -1232,7 +1232,7 @@ DESC format(JSONEachRow, $$
 └─────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-### input_format_try_infer_integers
+#### input_format_try_infer_integers

 If enabled, ClickHouse will try to infer integers instead of floats in schema inference for text formats.
 If all numbers in the column from sample data are integers, the result type will be `Int64`, if at least one number is float, the result type will be `Float64`.
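A sketch of the integer-versus-float distinction (both rows are made up): an all-integer column should come out as `Int64`, while a column containing at least one float should come out as `Float64` (wrapped in `Nullable` by default).

```bash
# Two JSONEachRow rows: column "a" stays integral, column "b" contains a float.
clickhouse-local --input_format_try_infer_integers=1 --query \
    "DESC format(JSONEachRow, '{\"a\" : 1, \"b\" : 1}\n{\"a\" : 2, \"b\" : 2.5}')"
```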
@@ -1289,7 +1289,7 @@ DESC format(JSONEachRow, $$
 └────────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

-### input_format_try_infer_datetimes
+#### input_format_try_infer_datetimes

 If enabled, ClickHouse will try to infer type `DateTime64` from string fields in schema inference for text formats.
 If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime64(9)`,
@@ -1337,7 +1337,7 @@ DESC format(JSONEachRow, $$

 Note: Parsing datetimes during schema inference respect setting [date_time_input_format](/docs/en/operations/settings/settings-formats.md#date_time_input_format)

-### input_format_try_infer_dates
+#### input_format_try_infer_dates

 If enabled, ClickHouse will try to infer type `Date` from string fields in schema inference for text formats.
 If all fields from a column in sample data were successfully parsed as dates, the result type will be `Date`,
@ -1383,14 +1383,14 @@ DESC format(JSONEachRow, $$
|
|||||||
└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
# Self describing formats {#self-describing-formats}
|
## Self describing formats {#self-describing-formats}
|
||||||
|
|
||||||
Self-describing formats contain information about the structure of the data in the data itself,
|
Self-describing formats contain information about the structure of the data in the data itself,
|
||||||
it can be some header with a description, a binary type tree, or some kind of table.
|
it can be some header with a description, a binary type tree, or some kind of table.
|
||||||
To automatically infer a schema from files in such formats, ClickHouse reads a part of the data containing
|
To automatically infer a schema from files in such formats, ClickHouse reads a part of the data containing
|
||||||
information about the types and converts it into a schema of the ClickHouse table.
|
information about the types and converts it into a schema of the ClickHouse table.
|
||||||
|
|
||||||
## Formats with -WithNamesAndTypes suffix {#formats-with-names-and-types}
|
### Formats with -WithNamesAndTypes suffix {#formats-with-names-and-types}
|
||||||
|
|
||||||
ClickHouse supports some text formats with the suffix -WithNamesAndTypes. This suffix means that the data contains two additional rows with column names and types before the actual data.
|
ClickHouse supports some text formats with the suffix -WithNamesAndTypes. This suffix means that the data contains two additional rows with column names and types before the actual data.
|
||||||
During schema inference for such formats, ClickHouse reads the first two rows and extracts column names and types.
|
During schema inference for such formats, ClickHouse reads the first two rows and extracts column names and types.
|
||||||
@ -1412,7 +1412,7 @@ $$)
|
|||||||
└──────┴──────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
└──────┴──────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||||
```
|
```
|
||||||
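
For illustration, here is a small `CSVWithNamesAndTypes` sample (a sketch; the column names and types are arbitrary), where the first row carries names and the second row carries types:

```sql
DESC format(CSVWithNamesAndTypes,
$$"id","name"
"UInt32","String"
1,"Hello"
$$);

-- Expected result (sketch): id UInt32, name String
```
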
|
|
||||||
## JSON formats with metadata {#json-with-metadata}
|
### JSON formats with metadata {#json-with-metadata}
|
||||||
|
|
||||||
Some JSON input formats ([JSON](formats.md#json), [JSONCompact](formats.md#json-compact), [JSONColumnsWithMetadata](formats.md#jsoncolumnswithmetadata)) contain metadata with column names and types.
|
Some JSON input formats ([JSON](formats.md#json), [JSONCompact](formats.md#json-compact), [JSONColumnsWithMetadata](formats.md#jsoncolumnswithmetadata)) contain metadata with column names and types.
|
||||||
In schema inference for such formats, ClickHouse reads this metadata.
|
In schema inference for such formats, ClickHouse reads this metadata.
|
||||||
@ -1465,7 +1465,7 @@ $$)
|
|||||||
└──────┴──────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
└──────┴──────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
## Avro {#avro}
|
### Avro {#avro}
|
||||||
|
|
||||||
In Avro format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
In Avro format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
||||||
|
|
||||||
@ -1485,7 +1485,7 @@ In Avro format ClickHouse reads its schema from the data and converts it to Clic
|
|||||||
|
|
||||||
Other Avro types are not supported.
|
Other Avro types are not supported.
|
||||||
|
|
||||||
## Parquet {#parquet}
|
### Parquet {#parquet}
|
||||||
|
|
||||||
In Parquet format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
In Parquet format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
||||||
|
|
||||||
@ -1513,7 +1513,7 @@ In Parquet format ClickHouse reads its schema from the data and converts it to C
|
|||||||
|
|
||||||
Other Parquet types are not supported. By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
Other Parquet types are not supported. By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
||||||
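
For example, to inspect the schema ClickHouse would infer from a Parquet file and to turn off the default `Nullable` wrapping (a sketch, assuming a local file `data.parquet`):

```sql
-- Inferred columns are wrapped in Nullable by default
DESC file('data.parquet', 'Parquet');

-- Disable the wrapping if the file is known to contain no NULLs
SET schema_inference_make_columns_nullable = 0;
DESC file('data.parquet', 'Parquet');
```
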
|
|
||||||
## Arrow {#arrow}
|
### Arrow {#arrow}
|
||||||
|
|
||||||
In Arrow format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
In Arrow format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
||||||
|
|
||||||
@ -1541,7 +1541,7 @@ In Arrow format ClickHouse reads its schema from the data and converts it to Cli
|
|||||||
|
|
||||||
Other Arrow types are not supported. By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
Other Arrow types are not supported. By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
||||||
|
|
||||||
## ORC {#orc}
|
### ORC {#orc}
|
||||||
|
|
||||||
In ORC format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
In ORC format ClickHouse reads its schema from the data and converts it to ClickHouse schema using the following type matches:
|
||||||
|
|
||||||
@ -1564,17 +1564,17 @@ In ORC format ClickHouse reads its schema from the data and converts it to Click
|
|||||||
|
|
||||||
Other ORC types are not supported. By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
Other ORC types are not supported. By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
||||||
|
|
||||||
## Native {#native}
|
### Native {#native}
|
||||||
|
|
||||||
Native format is used inside ClickHouse and contains the schema in the data.
|
Native format is used inside ClickHouse and contains the schema in the data.
|
||||||
In schema inference, ClickHouse reads the schema from the data without any transformations.
|
In schema inference, ClickHouse reads the schema from the data without any transformations.
|
||||||
|
|
||||||
# Formats with external schema {#formats-with-external-schema}
|
## Formats with external schema {#formats-with-external-schema}
|
||||||
|
|
||||||
Such formats require a schema describing the data in a separate file in a specific schema language.
|
Such formats require a schema describing the data in a separate file in a specific schema language.
|
||||||
To automatically infer a schema from files in such formats, ClickHouse reads external schema from a separate file and transforms it to a ClickHouse table schema.
|
To automatically infer a schema from files in such formats, ClickHouse reads external schema from a separate file and transforms it to a ClickHouse table schema.
|
||||||
|
|
||||||
# Protobuf {#protobuf}
|
### Protobuf {#protobuf}
|
||||||
|
|
||||||
In schema inference for Protobuf format ClickHouse uses the following type matches:
|
In schema inference for Protobuf format ClickHouse uses the following type matches:
|
||||||
|
|
||||||
@ -1592,7 +1592,7 @@ In schema inference for Protobuf format ClickHouse uses the following type match
|
|||||||
| `repeated T` | [Array(T)](../sql-reference/data-types/array.md) |
|
| `repeated T` | [Array(T)](../sql-reference/data-types/array.md) |
|
||||||
| `message`, `group` | [Tuple](../sql-reference/data-types/tuple.md) |
|
| `message`, `group` | [Tuple](../sql-reference/data-types/tuple.md) |
|
||||||
|
|
||||||
# CapnProto {#capnproto}
|
### CapnProto {#capnproto}
|
||||||
|
|
||||||
In schema inference for CapnProto format ClickHouse uses the following type matches:
|
In schema inference for CapnProto format ClickHouse uses the following type matches:
|
||||||
|
|
||||||
@ -1615,13 +1615,13 @@ In schema inference for CapnProto format ClickHouse uses the following type matc
|
|||||||
| `struct` | [Tuple](../sql-reference/data-types/tuple.md) |
|
| `struct` | [Tuple](../sql-reference/data-types/tuple.md) |
|
||||||
| `union(T, Void)`, `union(Void, T)` | [Nullable(T)](../sql-reference/data-types/nullable.md) |
|
| `union(T, Void)`, `union(Void, T)` | [Nullable(T)](../sql-reference/data-types/nullable.md) |
|
||||||
|
|
||||||
# Strong-typed binary formats {#strong-typed-binary-formats}
|
## Strong-typed binary formats {#strong-typed-binary-formats}
|
||||||
|
|
||||||
In such formats, each serialized value contains information about its type (and possibly about its name), but there is no information about the whole table.
|
In such formats, each serialized value contains information about its type (and possibly about its name), but there is no information about the whole table.
|
||||||
In schema inference for such formats, ClickHouse reads data row by row (up to `input_format_max_rows_to_read_for_schema_inference` rows) and extracts
|
In schema inference for such formats, ClickHouse reads data row by row (up to `input_format_max_rows_to_read_for_schema_inference` rows) and extracts
|
||||||
the type (and possibly name) for each value from the data and then converts these types to ClickHouse types.
|
the type (and possibly name) for each value from the data and then converts these types to ClickHouse types.
|
||||||
|
|
||||||
## MsgPack {msgpack}
|
### MsgPack {#msgpack}
|
||||||
|
|
||||||
In MsgPack format there is no delimiter between rows. To use schema inference for this format, you should specify the number of columns in the table
|
In MsgPack format there is no delimiter between rows. To use schema inference for this format, you should specify the number of columns in the table
|
||||||
using the setting `input_format_msgpack_number_of_columns`. ClickHouse uses the following type matches:
|
using the setting `input_format_msgpack_number_of_columns`. ClickHouse uses the following type matches:
|
||||||
@ -1641,7 +1641,7 @@ using the setting `input_format_msgpack_number_of_columns`. ClickHouse uses the
|
|||||||
|
|
||||||
By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
||||||
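
For example (a sketch, assuming a local file `data.msgpack` whose rows contain two values each):

```sql
SET input_format_msgpack_number_of_columns = 2;

DESC file('data.msgpack', 'MsgPack');
```
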
|
|
||||||
## BSONEachRow {#bsoneachrow}
|
### BSONEachRow {#bsoneachrow}
|
||||||
|
|
||||||
In BSONEachRow each row of data is presented as a BSON document. In schema inference ClickHouse reads BSON documents one by one and extracts
|
In BSONEachRow each row of data is presented as a BSON document. In schema inference ClickHouse reads BSON documents one by one and extracts
|
||||||
values, names, and types from the data and then transforms these types to ClickHouse types using the following type matches:
|
values, names, and types from the data and then transforms these types to ClickHouse types using the following type matches:
|
||||||
@ -1661,11 +1661,11 @@ values, names, and types from the data and then transforms these types to ClickH
|
|||||||
|
|
||||||
By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
By default, all inferred types are inside `Nullable`, but it can be changed using the setting `schema_inference_make_columns_nullable`.
|
||||||
|
|
||||||
# Formats with constant schema {#formats-with-constant-schema}
|
## Formats with constant schema {#formats-with-constant-schema}
|
||||||
|
|
||||||
Data in such formats always have the same schema.
|
Data in such formats always have the same schema.
|
||||||
|
|
||||||
## LineAsString {#line-as-string}
|
### LineAsString {#line-as-string}
|
||||||
|
|
||||||
In this format, ClickHouse reads the whole line from the data into a single column with `String` data type. The inferred type for this format is always `String` and the column name is `line`.
|
In this format, ClickHouse reads the whole line from the data into a single column with `String` data type. The inferred type for this format is always `String` and the column name is `line`.
|
||||||
|
|
||||||
@ -1680,7 +1680,7 @@ DESC format(LineAsString, 'Hello\nworld!')
|
|||||||
└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
## JSONAsString {#json-as-string}
|
### JSONAsString {#json-as-string}
|
||||||
|
|
||||||
In this format, ClickHouse reads the whole JSON object from the data into a single column with `String` data type. The inferred type for this format is always `String` and the column name is `json`.
|
In this format, ClickHouse reads the whole JSON object from the data into a single column with `String` data type. The inferred type for this format is always `String` and the column name is `json`.
|
||||||
|
|
||||||
@ -1695,7 +1695,7 @@ DESC format(JSONAsString, '{"x" : 42, "y" : "Hello, World!"}')
|
|||||||
└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
## JSONAsObject {#json-as-object}
|
### JSONAsObject {#json-as-object}
|
||||||
|
|
||||||
In this format, ClickHouse reads the whole JSON object from the data into a single column with `Object('json')` data type. The inferred type for this format is always `Object('json')` and the column name is `json`.
|
In this format, ClickHouse reads the whole JSON object from the data into a single column with `Object('json')` data type. The inferred type for this format is always `Object('json')` and the column name is `json`.
|
||||||
|
|
||||||
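
A minimal sketch (depending on the ClickHouse version, the `Object` type may require enabling `allow_experimental_object_type`):

```sql
SET allow_experimental_object_type = 1;

DESC format(JSONAsObject, '{"x" : 42, "y" : "Hello, World!"}');

-- Expected result (sketch): a single column `json` of type Object('json')
```
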
|
@ -1318,12 +1318,12 @@ Settings:
|
|||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
<prometheus>
|
<prometheus>
|
||||||
<endpoint>/metrics</endpoint>
|
<endpoint>/metrics</endpoint>
|
||||||
<port>8001</port>
|
<port>9363</port>
|
||||||
<metrics>true</metrics>
|
<metrics>true</metrics>
|
||||||
<events>true</events>
|
<events>true</events>
|
||||||
<asynchronous_metrics>true</asynchronous_metrics>
|
<asynchronous_metrics>true</asynchronous_metrics>
|
||||||
</prometheus>
|
</prometheus>
|
||||||
```
|
```
|
||||||
|
|
||||||
## query_log {#server_configuration_parameters-query-log}
|
## query_log {#server_configuration_parameters-query-log}
|
||||||
|
@ -1014,6 +1014,12 @@ Use Arrow FIXED_SIZE_BINARY type instead of Binary/String for FixedString column
|
|||||||
|
|
||||||
Enabled by default.
|
Enabled by default.
|
||||||
|
|
||||||
|
### output_format_arrow_compression_method {#output_format_arrow_compression_method}
|
||||||
|
|
||||||
|
Compression method used in output Arrow format. Supported codecs: `lz4_frame`, `zstd`, `none` (uncompressed)
|
||||||
|
|
||||||
|
Default value: `none`.
|
||||||
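
For example (a sketch; the output file name is illustrative, and the analogous ORC and Parquet compression settings below are used the same way):

```sql
SET output_format_arrow_compression_method = 'zstd';

SELECT number, toString(number) AS s
FROM numbers(1000)
INTO OUTFILE 'data.arrow'
FORMAT Arrow;
```
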
|
|
||||||
## ORC format settings {#orc-format-settings}
|
## ORC format settings {#orc-format-settings}
|
||||||
|
|
||||||
### input_format_orc_import_nested {#input_format_orc_import_nested}
|
### input_format_orc_import_nested {#input_format_orc_import_nested}
|
||||||
@ -1057,6 +1063,12 @@ Use ORC String type instead of Binary for String columns.
|
|||||||
|
|
||||||
Disabled by default.
|
Disabled by default.
|
||||||
|
|
||||||
|
### output_format_orc_compression_method {#output_format_orc_compression_method}
|
||||||
|
|
||||||
|
Compression method used in output ORC format. Supported codecs: `lz4`, `snappy`, `zlib`, `zstd`, `none` (uncompressed)
|
||||||
|
|
||||||
|
Default value: `none`.
|
||||||
|
|
||||||
## Parquet format settings {#parquet-format-settings}
|
## Parquet format settings {#parquet-format-settings}
|
||||||
|
|
||||||
### input_format_parquet_import_nested {#input_format_parquet_import_nested}
|
### input_format_parquet_import_nested {#input_format_parquet_import_nested}
|
||||||
@ -1112,6 +1124,12 @@ The version of Parquet format used in output format. Supported versions: `1.0`,
|
|||||||
|
|
||||||
Default value: `2.latest`.
|
Default value: `2.latest`.
|
||||||
|
|
||||||
|
### output_format_parquet_compression_method {#output_format_parquet_compression_method}
|
||||||
|
|
||||||
|
Compression method used in output Parquet format. Supported codecs: `snappy`, `lz4`, `brotli`, `zstd`, `gzip`, `none` (uncompressed)
|
||||||
|
|
||||||
|
Default value: `snappy`.
|
||||||
|
|
||||||
## Hive format settings {#hive-format-settings}
|
## Hive format settings {#hive-format-settings}
|
||||||
|
|
||||||
### input_format_hive_text_fields_delimiter {#input_format_hive_text_fields_delimiter}
|
### input_format_hive_text_fields_delimiter {#input_format_hive_text_fields_delimiter}
|
||||||
@ -1474,7 +1492,7 @@ Default value: `65505`.
|
|||||||
|
|
||||||
The name of the table that will be used in the output INSERT statement.
|
The name of the table that will be used in the output INSERT statement.
|
||||||
|
|
||||||
Default value: `'table''`.
|
Default value: `table`.
|
||||||
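
For example (a sketch; `my_table` is an arbitrary name, and the exact output also depends on the other `output_format_sql_insert_*` settings):

```sql
SET output_format_sql_insert_table_name = 'my_table';

SELECT number AS x FROM numbers(2) FORMAT SQLInsert;

-- Expected output (roughly): INSERT INTO my_table (x) VALUES (0), (1);
```
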
|
|
||||||
### output_format_sql_insert_include_column_names {#output_format_sql_insert_include_column_names}
|
### output_format_sql_insert_include_column_names {#output_format_sql_insert_include_column_names}
|
||||||
|
|
||||||
@ -1514,4 +1532,12 @@ Disabled by default.
|
|||||||
|
|
||||||
The maximum allowed size for String in RowBinary format. It prevents allocating a large amount of memory in case of corrupted data. 0 means there is no limit.
|
The maximum allowed size for String in RowBinary format. It prevents allocating a large amount of memory in case of corrupted data. 0 means there is no limit.
|
||||||
|
|
||||||
Default value: `1GiB`
|
Default value: `1GiB`.
|
||||||
|
|
||||||
|
## Native format settings {#native-format-settings}
|
||||||
|
|
||||||
|
### input_format_native_allow_types_conversion {#input_format_native_allow_types_conversion}
|
||||||
|
|
||||||
|
Allow types conversion in Native input format between columns from input data and requested columns.
|
||||||
|
|
||||||
|
Enabled by default.
|
||||||
|
@ -1548,7 +1548,7 @@ Enables or disables asynchronous inserts. This makes sense only for insertion ov
|
|||||||
|
|
||||||
If enabled, the data is combined into batches before the insertion into tables, so it is possible to do small and frequent insertions into ClickHouse (up to 15000 queries per second) without buffer tables.
|
If enabled, the data is combined into batches before the insertion into tables, so it is possible to do small and frequent insertions into ClickHouse (up to 15000 queries per second) without buffer tables.
|
||||||
|
|
||||||
The data is inserted either after the [async_insert_max_data_size](#async-insert-max-data-size) is exceeded or after [async_insert_busy_timeout_ms](#async-insert-busy-timeout-ms) milliseconds since the first `INSERT` query. If the [async_insert_stale_timeout_ms](#async-insert-stale-timeout-ms) is set to a non-zero value, the data is inserted after `async_insert_stale_timeout_ms` milliseconds since the last query.
|
The data is inserted either after the [async_insert_max_data_size](#async-insert-max-data-size) is exceeded or after [async_insert_busy_timeout_ms](#async-insert-busy-timeout-ms) milliseconds since the first `INSERT` query. If the [async_insert_stale_timeout_ms](#async-insert-stale-timeout-ms) is set to a non-zero value, the data is inserted after `async_insert_stale_timeout_ms` milliseconds since the last query. Also, the buffer will be flushed to disk if at least [async_insert_max_query_number](#async-insert-max-query-number) async insert queries per block have been received. The latter setting takes effect only if [async_insert_deduplicate](#async-insert-deduplicate) is enabled.
|
||||||
|
|
||||||
If [wait_for_async_insert](#wait-for-async-insert) is enabled, every client will wait for the data to be processed and flushed to the table. Otherwise, the query would be processed almost instantly, even if the data is not inserted.
|
If [wait_for_async_insert](#wait-for-async-insert) is enabled, every client will wait for the data to be processed and flushed to the table. Otherwise, the query would be processed almost instantly, even if the data is not inserted.
|
||||||
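
A minimal sketch of using asynchronous inserts (the table is illustrative; over HTTP the same settings can be passed as URL parameters):

```sql
CREATE TABLE async_demo (id UInt32, s String) ENGINE = MergeTree ORDER BY id;

-- Buffer the data server-side and wait until it is actually flushed to the table
INSERT INTO async_demo SETTINGS async_insert = 1, wait_for_async_insert = 1 VALUES (1, 'a'), (2, 'b');
```
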
|
|
||||||
|
@ -15,6 +15,13 @@ Columns:
|
|||||||
|
|
||||||
- `operation_name` ([String](../../sql-reference/data-types/string.md)) — The name of the operation.
|
- `operation_name` ([String](../../sql-reference/data-types/string.md)) — The name of the operation.
|
||||||
|
|
||||||
|
- `kind` ([Enum8](../../sql-reference/data-types/enum.md)) — The [SpanKind](https://opentelemetry.io/docs/reference/specification/trace/api/#spankind) of the span.
|
||||||
|
- `INTERNAL` — Indicates that the span represents an internal operation within an application.
|
||||||
|
- `SERVER` — Indicates that the span covers server-side handling of a synchronous RPC or other remote request.
|
||||||
|
- `CLIENT` — Indicates that the span describes a request to some remote service.
|
||||||
|
- `PRODUCER` — Indicates that the span describes the initiators of an asynchronous request. This parent span will often end before the corresponding child CONSUMER span, possibly even before the child span starts.
|
||||||
|
- `CONSUMER` — Indicates that the span describes a child of an asynchronous PRODUCER request.
|
||||||
|
|
||||||
- `start_time_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The start time of the `trace span` (in microseconds).
|
- `start_time_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The start time of the `trace span` (in microseconds).
|
||||||
|
|
||||||
- `finish_time_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The finish time of the `trace span` (in microseconds).
|
- `finish_time_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The finish time of the `trace span` (in microseconds).
|
||||||
@ -42,6 +49,7 @@ trace_id: cdab0847-0d62-61d5-4d38-dd65b19a1914
|
|||||||
span_id: 701487461015578150
|
span_id: 701487461015578150
|
||||||
parent_span_id: 2991972114672045096
|
parent_span_id: 2991972114672045096
|
||||||
operation_name: DB::Block DB::InterpreterSelectQuery::getSampleBlockImpl()
|
operation_name: DB::Block DB::InterpreterSelectQuery::getSampleBlockImpl()
|
||||||
|
kind: INTERNAL
|
||||||
start_time_us: 1612374594529090
|
start_time_us: 1612374594529090
|
||||||
finish_time_us: 1612374594529108
|
finish_time_us: 1612374594529108
|
||||||
finish_date: 2021-02-03
|
finish_date: 2021-02-03
|
||||||
|
@ -14,10 +14,6 @@ Accepts data that represent tables and queries them using [ClickHouse SQL dialec
|
|||||||
|
|
||||||
By default `clickhouse-local` does not have access to data on the same host, but it supports loading server configuration using `--config-file` argument.
|
By default `clickhouse-local` does not have access to data on the same host, but it supports loading server configuration using `--config-file` argument.
|
||||||
|
|
||||||
:::warning
|
|
||||||
It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error.
|
|
||||||
:::
|
|
||||||
|
|
||||||
For temporary data, a unique temporary data directory is created by default.
|
For temporary data, a unique temporary data directory is created by default.
|
||||||
|
|
||||||
## Usage {#usage}
|
## Usage {#usage}
|
||||||
|
@ -11,15 +11,15 @@ sidebar_title: exponentialMovingAverage
|
|||||||
**Syntax**
|
**Syntax**
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
exponentialMovingAverage(x)(value, timestamp)
|
exponentialMovingAverage(x)(value, timeunit)
|
||||||
```
|
```
|
||||||
|
|
||||||
Each `value` corresponds to the determinate `timestamp`. The half-life `x` is the time lag at which the exponential weights decay by one-half. The function returns a weighted average: the older the time point, the less weight the corresponding value is considered to be.
|
Each `value` corresponds to a particular `timeunit`. The half-life `x` is the time lag at which the exponential weights decay by one-half. The function returns a weighted average: the older the time point, the less weight the corresponding value carries.
|
||||||
|
|
||||||
**Arguments**
|
**Arguments**
|
||||||
|
|
||||||
- `value` — Value. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
|
- `value` — Value. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
|
||||||
- `timestamp` — Timestamp. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
|
- `timeunit` — Timeunit. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). Timeunit is not a timestamp (seconds); it is an index of the time interval. It can be calculated using [intDiv](../../functions/arithmetic-functions/#intdiva-b).
|
||||||
|
|
||||||
**Parameters**
|
**Parameters**
|
||||||
|
|
||||||
@ -148,3 +148,58 @@ Result:
|
|||||||
│ 1 │ 49 │ 0.825 │ █████████████████████████████████████████▎│
|
│ 1 │ 49 │ 0.825 │ █████████████████████████████████████████▎│
|
||||||
└───────┴──────┴──────────────────────┴────────────────────────────────────────────┘
|
└───────┴──────┴──────────────────────┴────────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE data
|
||||||
|
ENGINE = Memory AS
|
||||||
|
SELECT
|
||||||
|
10 AS value,
|
||||||
|
toDateTime('2020-01-01') + (3600 * number) AS time
|
||||||
|
FROM numbers_mt(10);
|
||||||
|
|
||||||
|
|
||||||
|
-- Calculate timeunit using intDiv
|
||||||
|
SELECT
|
||||||
|
value,
|
||||||
|
time,
|
||||||
|
exponentialMovingAverage(1)(value, intDiv(toUInt32(time), 3600)) OVER (ORDER BY time ASC) AS res,
|
||||||
|
intDiv(toUInt32(time), 3600) AS timeunit
|
||||||
|
FROM data
|
||||||
|
ORDER BY time ASC;
|
||||||
|
|
||||||
|
┌─value─┬────────────────time─┬─────────res─┬─timeunit─┐
|
||||||
|
│ 10 │ 2020-01-01 00:00:00 │ 5 │ 438288 │
|
||||||
|
│ 10 │ 2020-01-01 01:00:00 │ 7.5 │ 438289 │
|
||||||
|
│ 10 │ 2020-01-01 02:00:00 │ 8.75 │ 438290 │
|
||||||
|
│ 10 │ 2020-01-01 03:00:00 │ 9.375 │ 438291 │
|
||||||
|
│ 10 │ 2020-01-01 04:00:00 │ 9.6875 │ 438292 │
|
||||||
|
│ 10 │ 2020-01-01 05:00:00 │ 9.84375 │ 438293 │
|
||||||
|
│ 10 │ 2020-01-01 06:00:00 │ 9.921875 │ 438294 │
|
||||||
|
│ 10 │ 2020-01-01 07:00:00 │ 9.9609375 │ 438295 │
|
||||||
|
│ 10 │ 2020-01-01 08:00:00 │ 9.98046875 │ 438296 │
|
||||||
|
│ 10 │ 2020-01-01 09:00:00 │ 9.990234375 │ 438297 │
|
||||||
|
└───────┴─────────────────────┴─────────────┴──────────┘
|
||||||
|
|
||||||
|
|
||||||
|
-- Calculate timeunit using toRelativeHourNum
|
||||||
|
SELECT
|
||||||
|
value,
|
||||||
|
time,
|
||||||
|
exponentialMovingAverage(1)(value, toRelativeHourNum(time)) OVER (ORDER BY time ASC) AS res,
|
||||||
|
toRelativeHourNum(time) AS timeunit
|
||||||
|
FROM data
|
||||||
|
ORDER BY time ASC;
|
||||||
|
|
||||||
|
┌─value─┬────────────────time─┬─────────res─┬─timeunit─┐
|
||||||
|
│ 10 │ 2020-01-01 00:00:00 │ 5 │ 438288 │
|
||||||
|
│ 10 │ 2020-01-01 01:00:00 │ 7.5 │ 438289 │
|
||||||
|
│ 10 │ 2020-01-01 02:00:00 │ 8.75 │ 438290 │
|
||||||
|
│ 10 │ 2020-01-01 03:00:00 │ 9.375 │ 438291 │
|
||||||
|
│ 10 │ 2020-01-01 04:00:00 │ 9.6875 │ 438292 │
|
||||||
|
│ 10 │ 2020-01-01 05:00:00 │ 9.84375 │ 438293 │
|
||||||
|
│ 10 │ 2020-01-01 06:00:00 │ 9.921875 │ 438294 │
|
||||||
|
│ 10 │ 2020-01-01 07:00:00 │ 9.9609375 │ 438295 │
|
||||||
|
│ 10 │ 2020-01-01 08:00:00 │ 9.98046875 │ 438296 │
|
||||||
|
│ 10 │ 2020-01-01 09:00:00 │ 9.990234375 │ 438297 │
|
||||||
|
└───────┴─────────────────────┴─────────────┴──────────┘
|
||||||
|
```
|
||||||
|
@ -112,23 +112,21 @@ See also [data_type_default_nullable](../../../operations/settings/settings.md#d
|
|||||||
|
|
||||||
## Default Values {#default_values}
|
## Default Values {#default_values}
|
||||||
|
|
||||||
The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`.
|
The column description can specify a default value expression in the form of `DEFAULT expr`, `MATERIALIZED expr`, or `ALIAS expr`. Example: `URLDomain String DEFAULT domain(URL)`.
|
||||||
|
|
||||||
Example: `URLDomain String DEFAULT domain(URL)`.
|
The expression `expr` is optional. If it is omitted, the column type must be specified explicitly and the default value will be `0` for numeric columns, `''` (the empty string) for string columns, `[]` (the empty array) for array columns, `1970-01-01` for date columns, or `NULL` for nullable columns.
|
||||||
|
|
||||||
If an expression for the default value is not defined, the default values will be set to zeros for numbers, empty strings for strings, empty arrays for arrays, and `1970-01-01` for dates or zero unix timestamp for DateTime, NULL for Nullable.
|
The column type of a default value column can be omitted, in which case it is inferred from `expr`'s type. For example, the type of column `EventDate DEFAULT toDate(EventTime)` will be `Date`.
|
||||||
|
|
||||||
If the default expression is defined, the column type is optional. If there isn’t an explicitly defined type, the default expression type is used. Example: `EventDate DEFAULT toDate(EventTime)` – the ‘Date’ type will be used for the ‘EventDate’ column.
|
If both a data type and a default value expression are specified, an implicit type-casting function is inserted which converts the expression to the specified type. Example: `Hits UInt32 DEFAULT 0` is internally represented as `Hits UInt32 DEFAULT toUInt32(0)`.
|
||||||
|
|
||||||
If the data type and default expression are defined explicitly, this expression will be cast to the specified type using type casting functions. Example: `Hits UInt32 DEFAULT 0` means the same thing as `Hits UInt32 DEFAULT toUInt32(0)`.
|
A default value expression `expr` may reference arbitrary table columns and constants. ClickHouse checks that changes of the table structure do not introduce loops in the expression calculation. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed.
|
||||||
|
|
||||||
Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions do not contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed.
|
|
||||||
|
|
||||||
### DEFAULT
|
### DEFAULT
|
||||||
|
|
||||||
`DEFAULT expr`
|
`DEFAULT expr`
|
||||||
|
|
||||||
Normal default value. If the INSERT query does not specify the corresponding column, it will be filled in by computing the corresponding expression.
|
Normal default value. If the value of such a column is not specified in an INSERT query, it is computed from `expr`.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
@ -154,9 +152,9 @@ SELECT * FROM test;
|
|||||||
|
|
||||||
`MATERIALIZED expr`
|
`MATERIALIZED expr`
|
||||||
|
|
||||||
Materialized expression. Such a column can’t be specified for INSERT, because it is always calculated.
|
Materialized expression. Values of such columns are always calculated; they cannot be specified in INSERT queries.
|
||||||
For an INSERT without a list of columns, these columns are not considered.
|
|
||||||
In addition, this column is not substituted when using an asterisk in a SELECT query. This is to preserve the invariant that the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
|
Also, default value columns of this type are not included in the result of `SELECT *`. This is to preserve the invariant that the result of a `SELECT *` can always be inserted back into the table using `INSERT`. This behavior can be disabled with setting `asterisk_include_materialized_columns`.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
@ -192,8 +190,9 @@ SELECT * FROM test SETTINGS asterisk_include_materialized_columns=1;
|
|||||||
|
|
||||||
`EPHEMERAL [expr]`
|
`EPHEMERAL [expr]`
|
||||||
|
|
||||||
Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement. If `expr` is omitted type for column is required.
|
Ephemeral column. Columns of this type are not stored in the table and it is not possible to SELECT from them. The only purpose of ephemeral columns is to build default value expressions of other columns from them.
|
||||||
INSERT without list of columns will skip such column, so SELECT/INSERT invariant is preserved - the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
|
|
||||||
|
An insert without explicitly specified columns will skip columns of this type. This is to preserve the invariant that the result of a `SELECT *` can always be inserted back into the table using `INSERT`.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
@ -205,7 +204,7 @@ CREATE OR REPLACE TABLE test
|
|||||||
hexed FixedString(4) DEFAULT unhex(unhexed)
|
hexed FixedString(4) DEFAULT unhex(unhexed)
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree
|
ENGINE = MergeTree
|
||||||
ORDER BY id
|
ORDER BY id;
|
||||||
|
|
||||||
INSERT INTO test (id, unhexed) Values (1, '5a90b714');
|
INSERT INTO test (id, unhexed) Values (1, '5a90b714');
|
||||||
|
|
||||||
@ -227,9 +226,9 @@ hex(hexed): 5A90B714
|
|||||||
|
|
||||||
`ALIAS expr`
|
`ALIAS expr`
|
||||||
|
|
||||||
Synonym. Such a column isn’t stored in the table at all.
|
Calculated columns (synonym). Columns of this type are not stored in the table and it is not possible to INSERT values into them.
|
||||||
Its values can’t be inserted in a table, and it is not substituted when using an asterisk in a SELECT query.
|
|
||||||
It can be used in SELECTs if the alias is expanded during query parsing.
|
When SELECT queries explicitly reference columns of this type, the value is computed at query time from `expr`. By default, `SELECT *` excludes ALIAS columns. This behavior can be disabled with the setting `asterisk_include_alias_columns`.
|
||||||
|
|
||||||
When using the ALTER query to add new columns, old data for these columns is not written. Instead, when reading old data that does not have values for the new columns, expressions are computed on the fly by default. However, if running the expressions requires different columns that are not indicated in the query, these columns will additionally be read, but only for the blocks of data that need it.
|
When using the ALTER query to add new columns, old data for these columns is not written. Instead, when reading old data that does not have values for the new columns, expressions are computed on the fly by default. However, if running the expressions requires different columns that are not indicated in the query, these columns will additionally be read, but only for the blocks of data that need it.
|
||||||
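
A minimal sketch of an ALIAS column (table and column names are illustrative):

```sql
CREATE OR REPLACE TABLE users
(
    id UInt32,
    email String,
    -- not stored; computed from `email` whenever it is referenced
    email_domain String ALIAS splitByChar('@', email)[2]
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO users (id, email) VALUES (1, 'alice@example.com');

SELECT id, email_domain FROM users;   -- 1, 'example.com'
```
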
|
|
||||||
|
@ -70,6 +70,12 @@ A materialized view is implemented as follows: when inserting data to the table
|
|||||||
Materialized views in ClickHouse use **column names** instead of column order during insertion into destination table. If some column names are not present in the `SELECT` query result, ClickHouse uses a default value, even if the column is not [Nullable](../../data-types/nullable.md). A safe practice would be to add aliases for every column when using Materialized views.
|
Materialized views in ClickHouse use **column names** instead of column order during insertion into destination table. If some column names are not present in the `SELECT` query result, ClickHouse uses a default value, even if the column is not [Nullable](../../data-types/nullable.md). A safe practice would be to add aliases for every column when using Materialized views.
|
||||||
|
|
||||||
Materialized views in ClickHouse are implemented more like insert triggers. If there’s some aggregation in the view query, it’s applied only to the batch of freshly inserted data. Any changes to existing data of source table (like update, delete, drop partition, etc.) does not change the materialized view.
|
Materialized views in ClickHouse are implemented more like insert triggers. If there’s some aggregation in the view query, it’s applied only to the batch of freshly inserted data. Any changes to existing data of source table (like update, delete, drop partition, etc.) does not change the materialized view.
|
||||||
|
|
||||||
|
Materialized views in ClickHouse do not have deterministic behaviour in case of errors. This means that blocks that have already been written will be preserved in the destination table, but all blocks after the error will not.
|
||||||
|
|
||||||
|
By default, if pushing to one of the views fails, the INSERT query will fail too, and some blocks may not be written to the destination table. This can be changed using the `materialized_views_ignore_errors` setting (set it for the `INSERT` query): if `materialized_views_ignore_errors=true`, any errors while pushing to views are ignored and all blocks are written to the destination table (see the sketch after this note).
|
||||||
|
|
||||||
|
Also note that `materialized_views_ignore_errors` is set to `true` by default for `system.*_log` tables.
|
||||||
:::
|
:::
|
||||||
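
As a sketch (assuming a table `events` with one or more materialized views attached), the setting is passed on the `INSERT` itself:

```sql
INSERT INTO events SETTINGS materialized_views_ignore_errors = 1 VALUES (1, 'click');
```
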
|
|
||||||
If you specify `POPULATE`, the existing table data is inserted into the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` . Otherwise, the query contains only the data inserted in the table after creating the view. We **do not recommend** using `POPULATE`, since data inserted in the table during the view creation will not be inserted in it.
|
If you specify `POPULATE`, the existing table data is inserted into the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` . Otherwise, the query contains only the data inserted in the table after creating the view. We **do not recommend** using `POPULATE`, since data inserted in the table during the view creation will not be inserted in it.
|
||||||
|
@ -89,7 +89,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
|||||||
└─────────────────────┴───────────┴──────────┴──────┘
|
└─────────────────────┴───────────┴──────────┴──────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
Первая строка отменяет предыдущее состояние объекта (пользователя). Она должен повторять все поля из ключа сортировки для отменённого состояния за исключением `Sign`.
|
Первая строка отменяет предыдущее состояние объекта (пользователя). Она должна повторять все поля из ключа сортировки для отменённого состояния за исключением `Sign`.
|
||||||
|
|
||||||
Вторая строка содержит текущее состояние.
|
Вторая строка содержит текущее состояние.
|
||||||
|
|
||||||
|
@ -584,7 +584,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
|
|||||||
|
|
||||||
Данные с истекшим `TTL` удаляются, когда ClickHouse мёржит куски данных.
|
Данные с истекшим `TTL` удаляются, когда ClickHouse мёржит куски данных.
|
||||||
|
|
||||||
Когда ClickHouse видит, что некоторые данные устарели, он выполняет внеплановые мёржи. Для управление частотой подобных мёржей, можно задать настройку `merge_with_ttl_timeout`. Если её значение слишком низкое, придется выполнять много внеплановых мёржей, которые могут начать потреблять значительную долю ресурсов сервера.
|
Когда ClickHouse видит, что некоторые данные устарели, он выполняет внеплановые мёржи. Для управления частотой подобных мёржей, можно задать настройку `merge_with_ttl_timeout`. Если её значение слишком низкое, придется выполнять много внеплановых мёржей, которые могут начать потреблять значительную долю ресурсов сервера.
|
||||||
|
|
||||||
Если вы выполните запрос `SELECT` между слияниями вы можете получить устаревшие данные. Чтобы избежать этого используйте запрос [OPTIMIZE](../../../engines/table-engines/mergetree-family/mergetree.md#misc_operations-optimize) перед `SELECT`.
|
Если вы выполните запрос `SELECT` между слияниями вы можете получить устаревшие данные. Чтобы избежать этого используйте запрос [OPTIMIZE](../../../engines/table-engines/mergetree-family/mergetree.md#misc_operations-optimize) перед `SELECT`.
|
||||||
|
|
||||||
@ -679,7 +679,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
|
|||||||
- `policy_name_N` — название политики. Названия политик должны быть уникальны.
|
- `policy_name_N` — название политики. Названия политик должны быть уникальны.
|
||||||
- `volume_name_N` — название тома. Названия томов должны быть уникальны.
|
- `volume_name_N` — название тома. Названия томов должны быть уникальны.
|
||||||
- `disk` — диск, находящийся внутри тома.
|
- `disk` — диск, находящийся внутри тома.
|
||||||
- `max_data_part_size_bytes` — максимальный размер куска данных, который может находится на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том.
|
- `max_data_part_size_bytes` — максимальный размер куска данных, который может находиться на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том.
|
||||||
- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`, если совокупный размер всех партов недостаточен, будут перемещены все парты.
|
- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`, если совокупный размер всех партов недостаточен, будут перемещены все парты.
|
||||||
- `prefer_not_to_merge` — Отключает слияние кусков данных, хранящихся на данном томе. Если данная настройка включена, то слияние данных, хранящихся на данном томе, не допускается. Это позволяет контролировать работу ClickHouse с медленными дисками.
|
- `prefer_not_to_merge` — Отключает слияние кусков данных, хранящихся на данном томе. Если данная настройка включена, то слияние данных, хранящихся на данном томе, не допускается. Это позволяет контролировать работу ClickHouse с медленными дисками.
|
||||||
|
|
||||||
@ -730,7 +730,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
|
|||||||
|
|
||||||
В приведенном примере политика `hdd_in_order` реализует принцип [round-robin](https://ru.wikipedia.org/wiki/Round-robin_(%D0%B0%D0%BB%D0%B3%D0%BE%D1%80%D0%B8%D1%82%D0%BC)). Так как в политике есть всего один том (`single`), то все записи производятся на его диски по круговому циклу. Такая политика может быть полезна при наличии в системе нескольких похожих дисков, но при этом не сконфигурирован RAID. Учтите, что каждый отдельный диск ненадёжен, и чтобы не потерять важные данные, это необходимо скомпенсировать за счет хранения данных в трёх копиях.
|
В приведенном примере политика `hdd_in_order` реализует принцип [round-robin](https://ru.wikipedia.org/wiki/Round-robin_(%D0%B0%D0%BB%D0%B3%D0%BE%D1%80%D0%B8%D1%82%D0%BC)). Так как в политике есть всего один том (`single`), то все записи производятся на его диски по круговому циклу. Такая политика может быть полезна при наличии в системе нескольких похожих дисков, но при этом не сконфигурирован RAID. Учтите, что каждый отдельный диск ненадёжен, и чтобы не потерять важные данные, это необходимо скомпенсировать за счет хранения данных в трёх копиях.
|
||||||
|
|
||||||
Если система содержит диски различных типов, то может пригодиться политика `moving_from_ssd_to_hdd`. В томе `hot` находится один SSD-диск (`fast_ssd`), а также задается ограничение на максимальный размер куска, который может храниться на этом томе (1GB). Все куски такой таблицы больше 1GB будут записываться сразу на том `cold`, в котором содержится один HDD-диск `disk1`. Также, при заполнении диска `fast_ssd` более чем на 80% данные будут переносится на диск `disk1` фоновым процессом.
|
Если система содержит диски различных типов, то может пригодиться политика `moving_from_ssd_to_hdd`. В томе `hot` находится один SSD-диск (`fast_ssd`), а также задается ограничение на максимальный размер куска, который может храниться на этом томе (1GB). Все куски такой таблицы больше 1GB будут записываться сразу на том `cold`, в котором содержится один HDD-диск `disk1`. Также при заполнении диска `fast_ssd` более чем на 80% данные будут переноситься на диск `disk1` фоновым процессом.
|
||||||
|
|
||||||
Порядок томов в политиках хранения важен, при достижении условий на переполнение тома данные переносятся на следующий. Порядок дисков в томах так же важен, данные пишутся по очереди на каждый из них.
|
Порядок томов в политиках хранения важен, при достижении условий на переполнение тома данные переносятся на следующий. Порядок дисков в томах так же важен, данные пишутся по очереди на каждый из них.
|
||||||
|
|
||||||
|
@ -8,6 +8,7 @@ sidebar_label: "Клиентские библиотеки от сторонни
|
|||||||
|
|
||||||
:::danger "Disclaimer"
|
:::danger "Disclaimer"
|
||||||
Яндекс не поддерживает перечисленные ниже библиотеки и не проводит тщательного тестирования для проверки их качества.
|
Яндекс не поддерживает перечисленные ниже библиотеки и не проводит тщательного тестирования для проверки их качества.
|
||||||
|
:::
|
||||||
|
|
||||||
- Python:
|
- Python:
|
||||||
- [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)
|
- [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)
|
||||||
|
docs/ru/interfaces/third-party/gui.md
@ -177,19 +177,20 @@ sidebar_label: "Визуальные интерфейсы от сторонни
|
|||||||
|
|
||||||
### Yandex DataLens {#yandex-datalens}
|
### Yandex DataLens {#yandex-datalens}
|
||||||
|
|
||||||
[Yandex DataLens](https://cloud.yandex.ru/services/datalens) — cервис визуализации и анализа данных.
|
[Yandex DataLens](https://datalens.yandex.ru) — cервис визуализации и анализа данных.
|
||||||
|
|
||||||
Основные возможности:
|
Основные возможности:
|
||||||
|
|
||||||
- Широкий выбор инструментов визуализации, от простых столбчатых диаграмм до сложных дашбордов.
|
- Широкий выбор инструментов визуализации, от простых столбчатых диаграмм до сложных дашбордов.
|
||||||
- Возможность опубликовать дашборды на широкую аудиторию.
|
- Возможность опубликовать дашборды на широкую аудиторию.
|
||||||
- Поддержка множества источников данных, включая ClickHouse.
|
- Поддержка множества источников данных, включая ClickHouse.
|
||||||
- Хранение материализованных данных в кластере ClickHouse DataLens.
|
|
||||||
|
|
||||||
Для небольших проектов DataLens [доступен бесплатно](https://cloud.yandex.ru/docs/datalens/pricing), в том числе и для коммерческого использования.
|
DataLens [доступен бесплатно](https://cloud.yandex.ru/docs/datalens/pricing), в том числе и для коммерческого использования.
|
||||||
|
|
||||||
|
- [Знакомство с DataLens](https://youtu.be/57ngi_6BINE).
|
||||||
|
- [Чат сообщества DataLens](https://t.me/YandexDataLens)
|
||||||
- [Документация DataLens](https://cloud.yandex.ru/docs/datalens/).
|
- [Документация DataLens](https://cloud.yandex.ru/docs/datalens/).
|
||||||
- [Пособие по визуализации данных из ClickHouse](https://cloud.yandex.ru/docs/solutions/datalens/data-from-ch-visualization).
|
- [Сценарий по визуализации данных из ClickHouse](https://cloud.yandex.ru/docs/solutions/datalens/data-from-ch-visualization).
|
||||||
|
|
||||||
### Holistics Software {#holistics-software}
|
### Holistics Software {#holistics-software}
|
||||||
|
|
||||||
|
@ -325,21 +325,21 @@ clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --
|
|||||||
Например, для кластера из 3 нод, алгоритм кворума продолжает работать при отказе не более чем одной ноды.
|
Например, для кластера из 3 нод, алгоритм кворума продолжает работать при отказе не более чем одной ноды.
|
||||||
|
|
||||||
Конфигурация кластера может быть изменена динамически с некоторыми ограничениями.
|
Конфигурация кластера может быть изменена динамически с некоторыми ограничениями.
|
||||||
Переконфигурация также использует Raft, поэтому для добавление новой ноды кластера или исключения старой ноды из него требуется достижения кворума в рамках текущей конфигурации кластера.
|
Переконфигурация также использует Raft, поэтому для добавления новой ноды кластера или исключения старой ноды требуется достижение кворума в рамках текущей конфигурации кластера.
|
||||||
Если в вашем кластере произошел отказ большего числа нод, чем допускает Raft для вашей текущей конфигурации и у вас нет возможности восстановить их работоспособность, Raft перестанет работать и не позволит изменить конфигурацию стандартным механизмом.
|
Если в вашем кластере произошел отказ большего числа нод, чем допускает Raft для вашей текущей конфигурации и у вас нет возможности восстановить их работоспособность, Raft перестанет работать и не позволит изменить конфигурацию стандартным механизмом.
|
||||||
|
|
||||||
Тем не менее ClickHousr Keeper имеет возможность запуститься в режиме восстановления, который позволяет переконфигурировать класте используя только одну ноду кластера.
|
Тем не менее ClickHouse Keeper имеет возможность запуститься в режиме восстановления, который позволяет переконфигурировать кластер, используя только одну ноду кластера.
|
||||||
Этот механизм может использоваться только как крайняя мера, когда вы не можете восстановить существующие ноды кластера или запустить новый сервер с тем же идентификатором.
|
Этот механизм может использоваться только как крайняя мера, когда вы не можете восстановить существующие ноды кластера или запустить новый сервер с тем же идентификатором.
|
||||||
|
|
||||||
Важно:
|
Важно:
|
||||||
- Удостоверьтесь, что отказавшие ноды не смогут в дальнейшем подключиться к кластеру в будущем.
|
- Удостоверьтесь, что отказавшие ноды не смогут в дальнейшем подключиться к кластеру в будущем.
|
||||||
- Не запускайте новые ноды, пока не завешите процедуру ниже.
|
- Не запускайте новые ноды, пока не завершите процедуру ниже.
|
||||||
|
|
||||||
После того как вы выполнили описанные выше действия, выполните следующие шаги.
|
После того как вы выполнили описанные выше действия, выполните следующие шаги.
|
||||||
1. Выберете одну ноду Keeper, которая станет новым лидером. Учтите, что данные которые с этой ноды будут испольщзованы всем кластером, поэтому рекомендуется выбрать ноду с наиболее актуальным состоянием.
|
1. Выберите одну ноду Keeper, которая станет новым лидером. Учтите, что данные с этой ноды будут использованы всем кластером, поэтому рекомендуется выбрать ноду с наиболее актуальным состоянием.
|
||||||
2. Перед дальнейшими действиям сделайте резервную копию данных из директорий `log_storage_path` и `snapshot_storage_path`.
|
2. Перед дальнейшими действиям сделайте резервную копию данных из директорий `log_storage_path` и `snapshot_storage_path`.
|
||||||
3. Измените настройки на всех нодах кластера, которые вы собираетесь использовать.
|
3. Измените настройки на всех нодах кластера, которые вы собираетесь использовать.
|
||||||
4. Отправьте команду `rcvr` на ноду, которую вы выбрали или остановите ее и запустите заново с аргументом `--force-recovery`. Это переведет ноду в режим восстановления.
|
4. Отправьте команду `rcvr` на ноду, которую вы выбрали, или остановите ее и запустите заново с аргументом `--force-recovery`. Это переведет ноду в режим восстановления.
|
||||||
5. Запускайте остальные ноды кластера по одной и проверяйте, что команда `mntr` возвращает `follower` в выводе состояния `zk_server_state` перед тем, как запустить следующую ноду.
|
5. Запускайте остальные ноды кластера по одной и проверяйте, что команда `mntr` возвращает `follower` в выводе состояния `zk_server_state` перед тем, как запустить следующую ноду.
|
||||||
6. Пока нода работает в режиме восстановления, лидер будет возвращать ошибку на запрос `mntr` пока кворум не будет достигнут с помощью новых нод. Любые запросы от клиентов и постедователей будут возвращать ошибку.
|
6. Пока нода работает в режиме восстановления, лидер будет возвращать ошибку на запрос `mntr` пока кворум не будет достигнут с помощью новых нод. Любые запросы от клиентов и последователей будут возвращать ошибку.
|
||||||
7. После достижения кворума лидер перейдет в нормальный режим работы и станет обрабатывать все запросы через Raft. Удостоверьтесь, что запрос `mntr` возвращает `leader` в выводе состояния `zk_server_state`.
|
7. После достижения кворума лидер перейдет в нормальный режим работы и станет обрабатывать все запросы через Raft. Удостоверьтесь, что запрос `mntr` возвращает `leader` в выводе состояния `zk_server_state`.
|
||||||
|
@ -10,6 +10,7 @@ ClickHouse поддерживает [OpenTelemetry](https://opentelemetry.io/)
|
|||||||
|
|
||||||
:::danger "Предупреждение"
|
:::danger "Предупреждение"
|
||||||
Поддержка стандарта экспериментальная и будет со временем меняться.
|
Поддержка стандарта экспериментальная и будет со временем меняться.
|
||||||
|
:::
|
||||||
|
|
||||||
## Обеспечение поддержки контекста трассировки в ClickHouse
|
## Обеспечение поддержки контекста трассировки в ClickHouse
|
||||||
|
|
||||||
|
@@ -26,6 +26,7 @@ ClickHouse перезагружает встроенные словари с з

 :::danger "Внимание"
 Лучше не использовать, если вы только начали работать с ClickHouse.
+:::

 Общий вид конфигурации:

@@ -1064,6 +1065,7 @@ ClickHouse использует потоки из глобального пул

 :::danger "Обратите внимание"
 Завершающий слеш обязателен.
+:::

 **Пример**

@@ -1330,6 +1332,7 @@ TCP порт для защищённого обмена данными с кли

 :::danger "Обратите внимание"
 Завершающий слеш обязателен.
+:::

 **Пример**

@@ -82,7 +82,7 @@ sidebar_label: "Хранение данных на внешних дисках"

 - `type` — `encrypted`. Иначе зашифрованный диск создан не будет.
 - `disk` — тип диска для хранения данных.
-- `key` — ключ для шифрования и расшифровки. Тип: [Uint64](../sql-reference/data-types/int-uint.md). Вы можете использовать параметр `key_hex` для шифрования в шестнадцатеричной форме.
+- `key` — ключ для шифрования и расшифровки. Тип: [UInt64](../sql-reference/data-types/int-uint.md). Вы можете использовать параметр `key_hex` для шифрования в шестнадцатеричной форме.
 Вы можете указать несколько ключей, используя атрибут `id` (смотрите пример выше).

 Необязательные параметры:

@@ -6,7 +6,7 @@ sidebar_label: AggregateFunction

 # AggregateFunction {#data-type-aggregatefunction}

-Агрегатные функции могут обладать определяемым реализацией промежуточным состоянием, которое может быть сериализовано в тип данных, соответствующий AggregateFunction(…), и быть записано в таблицу обычно посредством [материализованного представления] (../../sql-reference/statements/create/view.md). Чтобы получить промежуточное состояние, обычно используются агрегатные функции с суффиксом `-State`. Чтобы в дальнейшем получить агрегированные данные необходимо использовать те же агрегатные функции с суффиксом `-Merge`.
+Агрегатные функции могут обладать определяемым реализацией промежуточным состоянием, которое может быть сериализовано в тип данных, соответствующий AggregateFunction(…), и быть записано в таблицу обычно посредством [материализованного представления](../../sql-reference/statements/create/view.md). Чтобы получить промежуточное состояние, обычно используются агрегатные функции с суффиксом `-State`. Чтобы в дальнейшем получить агрегированные данные необходимо использовать те же агрегатные функции с суффиксом `-Merge`.

 `AggregateFunction(name, types_of_arguments…)` — параметрический тип данных.

@@ -10,6 +10,7 @@ ClickHouse поддерживает типы данных для отображ

 :::danger "Предупреждение"
 Сейчас использование типов данных для работы с географическими структурами является экспериментальной возможностью. Чтобы использовать эти типы данных, включите настройку `allow_experimental_geo_types = 1`.
+:::

 **См. также**
 - [Хранение географических структур данных](https://ru.wikipedia.org/wiki/GeoJSON).
@@ -10,6 +10,7 @@ sidebar_label: Interval

 :::danger "Внимание"
 Нельзя использовать типы данных `Interval` для хранения данных в таблице.
+:::

 Структура:

@@ -34,7 +34,7 @@ SELECT tuple(1,'a') AS x, toTypeName(x)

 ## Особенности работы с типами данных {#osobennosti-raboty-s-tipami-dannykh}

-При создании кортежа «на лету» ClickHouse автоматически определяет тип каждого аргументов как минимальный из типов, который может сохранить значение аргумента. Если аргумент — [NULL](../../sql-reference/data-types/tuple.md#null-literal), то тип элемента кортежа — [Nullable](nullable.md).
+При создании кортежа «на лету» ClickHouse автоматически определяет тип всех аргументов как минимальный из типов, который может сохранить значение аргумента. Если аргумент — [NULL](../../sql-reference/data-types/tuple.md#null-literal), то тип элемента кортежа — [Nullable](nullable.md).

 Пример автоматического определения типа данных:

@@ -61,7 +61,7 @@ LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1))
 - Мультиполигон. Представляет из себя массив полигонов. Каждый полигон задается двумерным массивом точек — первый элемент этого массива задает внешнюю границу полигона,
 последующие элементы могут задавать дырки, вырезаемые из него.

-Точки могут задаваться массивом или кортежем из своих координат. В текущей реализации поддерживается только двумерные точки.
+Точки могут задаваться массивом или кортежем из своих координат. В текущей реализации поддерживаются только двумерные точки.

 Пользователь может [загружать свои собственные данные](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) во всех поддерживаемых ClickHouse форматах.

@@ -80,7 +80,7 @@ LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1))
 - `POLYGON`. Синоним к `POLYGON_INDEX_CELL`.

 Запросы к словарю осуществляются с помощью стандартных [функций](../../../sql-reference/functions/ext-dict-functions.md) для работы со внешними словарями.
-Важным отличием является то, что здесь ключами будут являются точки, для которых хочется найти содержащий их полигон.
+Важным отличием является то, что здесь ключами являются точки, для которых хочется найти содержащий их полигон.

 **Пример**

@@ -59,6 +59,7 @@ ClickHouse поддерживает следующие виды ключей:

 :::danger "Обратите внимание"
 Ключ не надо дополнительно описывать в атрибутах.
+:::

 ### Числовой ключ {#ext_dict-numeric-key}

@@ -14,7 +14,7 @@ ClickHouse:
 - Периодически обновляет их и динамически подгружает отсутствующие значения.
 - Позволяет создавать внешние словари с помощью xml-файлов или [DDL-запросов](../../statements/create/dictionary.md#create-dictionary-query).

-Конфигурация внешних словарей может находится в одном или нескольких xml-файлах. Путь к конфигурации указывается в параметре [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config).
+Конфигурация внешних словарей может находиться в одном или нескольких xml-файлах. Путь к конфигурации указывается в параметре [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config).

 Словари могут загружаться при старте сервера или при первом использовании, в зависимости от настройки [dictionaries_lazy_load](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load).

@@ -22,7 +22,7 @@ sidebar_label: "Функции интроспекции"

 ClickHouse сохраняет отчеты профилировщика в [журнал трассировки](../../operations/system-tables/trace_log.md#system_tables-trace_log) в системной таблице. Убедитесь, что таблица и профилировщик настроены правильно.

-## addresssToLine {#addresstoline}
+## addressToLine {#addresstoline}

 Преобразует адрес виртуальной памяти внутри процесса сервера ClickHouse в имя файла и номер строки в исходном коде ClickHouse.

@@ -9,6 +9,7 @@ slug: /ru/sql-reference/operators/exists

 :::danger "Предупреждение"
 Ссылки на таблицы или столбцы основного запроса не поддерживаются в подзапросе.
+:::

 **Синтаксис**

@@ -38,9 +38,9 @@ SELECT '1' IN (SELECT 1);
 └──────────────────────┘
 ```

-Если в качестве правой части оператора указано имя таблицы (например, `UserID IN users`), то это эквивалентно подзапросу `UserID IN (SELECT * FROM users)`. Это используется при работе с внешними данными, отправляемым вместе с запросом. Например, вместе с запросом может быть отправлено множество идентификаторов посетителей, загруженное во временную таблицу users, по которому следует выполнить фильтрацию.
+Если в качестве правой части оператора указано имя таблицы (например, `UserID IN users`), то это эквивалентно подзапросу `UserID IN (SELECT * FROM users)`. Это используется при работе с внешними данными, отправляемыми вместе с запросом. Например, вместе с запросом может быть отправлено множество идентификаторов посетителей, загруженное во временную таблицу users, по которому следует выполнить фильтрацию.

-Если в качестве правой части оператора, указано имя таблицы, имеющий движок Set (подготовленное множество, постоянно находящееся в оперативке), то множество не будет создаваться заново при каждом запросе.
+Если в качестве правой части оператора, указано имя таблицы, имеющей движок Set (подготовленное множество, постоянно находящееся в оперативке), то множество не будет создаваться заново при каждом запросе.

 В подзапросе может быть указано более одного столбца для фильтрации кортежей.
 Пример:
@@ -49,9 +49,9 @@ SELECT '1' IN (SELECT 1);
 SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ...
 ```

-Типы столбцов слева и справа оператора IN, должны совпадать.
+Типы столбцов слева и справа оператора IN должны совпадать.

-Оператор IN и подзапрос могут встречаться в любой части запроса, в том числе в агрегатных и лямбда функциях.
+Оператор IN и подзапрос могут встречаться в любой части запроса, в том числе в агрегатных и лямбда-функциях.
 Пример:

 ``` sql
@@ -122,7 +122,7 @@ FROM t_null

 Существует два варианта IN-ов с подзапросами (аналогично для JOIN-ов): обычный `IN` / `JOIN` и `GLOBAL IN` / `GLOBAL JOIN`. Они отличаются способом выполнения при распределённой обработке запроса.

-:::note "Attention"
+:::note "Внимание"
 Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`.
 :::
 При использовании обычного IN-а, запрос отправляется на удалённые серверы, и на каждом из них выполняются подзапросы в секциях `IN` / `JOIN`.
@@ -228,7 +228,7 @@ SELECT CounterID, count() FROM distributed_table_1 WHERE UserID IN (SELECT UserI
 SETTINGS max_parallel_replicas=3
 ```

-преобразуются на каждом сервере в
+преобразуется на каждом сервере в

 ```sql
 SELECT CounterID, count() FROM local_table_1 WHERE UserID IN (SELECT UserID FROM local_table_2 WHERE CounterID < 100)
@@ -263,6 +263,7 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Europe/Moscow') AS time, time + 60 * 6
 │ 2014-10-26 00:00:00 │ 2014-10-26 23:00:00 │ 2014-10-27 00:00:00 │
 └─────────────────────┴─────────────────────┴─────────────────────┘
 ```
+:::

 **Смотрите также**

@@ -6,7 +6,7 @@ sidebar_label: VIEW

 # Выражение ALTER TABLE … MODIFY QUERY {#alter-modify-query}

-Вы можеие изменить запрос `SELECT`, который был задан при создании [материализованного представления](../create/view.md#materialized), с помощью запроса 'ALTER TABLE … MODIFY QUERY'. Используйте его если при создании материализованного представления не использовалась секция `TO [db.]name`. Настройка `allow_experimental_alter_materialized_view_structure` должна быть включена.
+Вы можете изменить запрос `SELECT`, который был задан при создании [материализованного представления](../create/view.md#materialized), с помощью запроса 'ALTER TABLE … MODIFY QUERY'. Используйте его если при создании материализованного представления не использовалась секция `TO [db.]name`. Настройка `allow_experimental_alter_materialized_view_structure` должна быть включена.

 Если при создании материализованного представления использовалась конструкция `TO [db.]name`, то для изменения отсоедините представление с помощью [DETACH](../detach.md), измените таблицу с помощью [ALTER TABLE](index.md), а затем снова присоедините запрос с помощью [ATTACH](../attach.md).

@@ -10,6 +10,7 @@ sidebar_label: OPTIMIZE

 :::danger "Внимание"
 `OPTIMIZE` не устраняет причину появления ошибки `Too many parts`.
+:::

 **Синтаксис**

@@ -1867,8 +1867,8 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
     String query;
     {
         WriteBufferFromOwnString wb;
-        wb << "SELECT DISTINCT " << partition_name << " AS partition FROM"
-           << " " << getQuotedTable(task_shard.table_read_shard) << " ORDER BY partition DESC";
+        wb << "SELECT " << partition_name << " AS partition FROM "
+           << getQuotedTable(task_shard.table_read_shard) << " GROUP BY partition ORDER BY partition DESC";
         query = wb.str();
     }

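For reference, the rewritten statement enumerates each distinct partition via GROUP BY instead of SELECT DISTINCT. A throwaway sketch that assembles the same text with a standard stream makes the produced SQL easy to eyeball; the table and column names below are hypothetical stand-ins, and no ClickHouse I/O classes are used.

```cpp
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    // Hypothetical stand-ins for partition_name and getQuotedTable(...).
    std::string partition_name = "toYYYYMM(event_date)";
    std::string quoted_table = "`db`.`table_shard_0`";

    std::ostringstream wb;
    wb << "SELECT " << partition_name << " AS partition FROM "
       << quoted_table << " GROUP BY partition ORDER BY partition DESC";

    // Prints:
    // SELECT toYYYYMM(event_date) AS partition FROM `db`.`table_shard_0` GROUP BY partition ORDER BY partition DESC
    std::cout << wb.str() << '\n';
    return 0;
}
```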
@@ -2,18 +2,21 @@

 #include <Common/SipHash.h>
 #include <Common/FieldVisitorToString.h>
-#include <DataTypes/IDataType.h>
-#include <Analyzer/ConstantNode.h>

 #include <IO/WriteBufferFromString.h>
 #include <IO/Operators.h>

+#include <DataTypes/IDataType.h>
+#include <DataTypes/DataTypeSet.h>
+
 #include <Parsers/ASTFunction.h>

 #include <Functions/IFunction.h>

 #include <AggregateFunctions/IAggregateFunction.h>

+#include <Analyzer/Utils.h>
+#include <Analyzer/ConstantNode.h>
 #include <Analyzer/IdentifierNode.h>

 namespace DB
@@ -44,17 +47,29 @@ const DataTypes & FunctionNode::getArgumentTypes() const
 ColumnsWithTypeAndName FunctionNode::getArgumentColumns() const
 {
     const auto & arguments = getArguments().getNodes();
+    size_t arguments_size = arguments.size();

     ColumnsWithTypeAndName argument_columns;
     argument_columns.reserve(arguments.size());

-    for (const auto & arg : arguments)
+    for (size_t i = 0; i < arguments_size; ++i)
     {
-        ColumnWithTypeAndName argument;
-        argument.type = arg->getResultType();
+        const auto & argument = arguments[i];

-        if (auto * constant = arg->as<ConstantNode>())
-            argument.column = argument.type->createColumnConst(1, constant->getValue());
+        ColumnWithTypeAndName argument_column;

-        argument_columns.push_back(std::move(argument));
+        if (isNameOfInFunction(function_name) && i == 1)
+            argument_column.type = std::make_shared<DataTypeSet>();
+        else
+            argument_column.type = argument->getResultType();
+
+        auto * constant = argument->as<ConstantNode>();
+        if (constant && !isNotCreatable(argument_column.type))
+            argument_column.column = argument_column.type->createColumnConst(1, constant->getValue());
+
+        argument_columns.push_back(std::move(argument_column));
     }

     return argument_columns;
 }

@@ -99,8 +99,9 @@ class InDepthQueryTreeVisitorWithContext
 public:
     using VisitQueryTreeNodeType = std::conditional_t<const_visitor, const QueryTreeNodePtr, QueryTreeNodePtr>;

-    explicit InDepthQueryTreeVisitorWithContext(ContextPtr context)
+    explicit InDepthQueryTreeVisitorWithContext(ContextPtr context, size_t initial_subquery_depth = 0)
         : current_context(std::move(context))
+        , subquery_depth(initial_subquery_depth)
     {}

     /// Return true if visitor should traverse tree top to bottom, false otherwise
@@ -125,11 +126,17 @@ public:
         return current_context->getSettingsRef();
     }

+    size_t getSubqueryDepth() const
+    {
+        return subquery_depth;
+    }
+
     void visit(VisitQueryTreeNodeType & query_tree_node)
     {
         auto current_scope_context_ptr = current_context;
         SCOPE_EXIT(
             current_context = std::move(current_scope_context_ptr);
+            --subquery_depth;
         );

         if (auto * query_node = query_tree_node->template as<QueryNode>())
@@ -137,6 +144,8 @@ public:
         else if (auto * union_node = query_tree_node->template as<UnionNode>())
             current_context = union_node->getContext();

+        ++subquery_depth;
+
         bool traverse_top_to_bottom = getDerived().shouldTraverseTopToBottom();
         if (!traverse_top_to_bottom)
             visitChildren(query_tree_node);
@@ -145,7 +154,12 @@ public:

         if (traverse_top_to_bottom)
             visitChildren(query_tree_node);
+
+        getDerived().leaveImpl(query_tree_node);
     }

+    void leaveImpl(VisitQueryTreeNodeType & node [[maybe_unused]])
+    {}
 private:
     Derived & getDerived()
     {
@@ -172,6 +186,7 @@ private:
     }

     ContextPtr current_context;
+    size_t subquery_depth = 0;
 };

 template <typename Derived>
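The visitor change above threads a subquery depth counter through traversal: it is incremented when a query or union node is entered and decremented by the `SCOPE_EXIT` guard when that scope unwinds. A minimal self-contained sketch of the same scope-guard pattern is below; the `Node` type, `visit` function, and `ScopeGuard` class are illustrative stand-ins, not the ClickHouse classes.

```cpp
#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

// Minimal scope guard, standing in for SCOPE_EXIT in the hunk above.
template <typename F>
class ScopeGuard
{
public:
    explicit ScopeGuard(F f) : callback(std::move(f)) {}
    ~ScopeGuard() { callback(); }
private:
    F callback;
};

struct Node
{
    bool is_subquery = false;
    std::vector<std::shared_ptr<Node>> children;
};

// Depth is bumped when we enter a subquery node and restored automatically when
// the call unwinds, mirroring the ++subquery_depth / --subquery_depth pair above.
void visit(const std::shared_ptr<Node> & node, size_t & subquery_depth)
{
    ScopeGuard guard([&] { if (node->is_subquery) --subquery_depth; });
    if (node->is_subquery)
        ++subquery_depth;

    std::printf("visiting node at subquery depth %zu\n", subquery_depth);

    for (const auto & child : node->children)
        visit(child, subquery_depth);
}

int main()
{
    auto root = std::make_shared<Node>();
    auto subquery = std::make_shared<Node>();
    subquery->is_subquery = true;
    subquery->children.push_back(std::make_shared<Node>());
    root->children.push_back(subquery);

    size_t depth = 0;
    visit(root, depth);
    return 0;
}
```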
@@ -106,6 +106,12 @@ public:
        return locality;
    }

+   /// Set join locality
+   void setLocality(JoinLocality locality_value)
+   {
+       locality = locality_value;
+   }
+
    /// Get join strictness
    JoinStrictness getStrictness() const
    {
@@ -42,7 +42,7 @@ private:
            return;

        const auto & storage = table_node ? table_node->getStorage() : table_function_node->getStorage();
-       bool is_final_supported = storage && storage->supportsFinal() && !storage->isRemote();
+       bool is_final_supported = storage && storage->supportsFinal();
        if (!is_final_supported)
            return;

@@ -7,8 +7,6 @@
 #include <Analyzer/ConstantNode.h>
 #include <Analyzer/HashUtils.h>

-#include <DataTypes/DataTypeString.h>
-
 namespace DB
 {

@@ -100,6 +98,9 @@ private:
            }
        }

+       if (and_operands.size() == function_node.getArguments().getNodes().size())
+           return;
+
        if (and_operands.size() == 1)
        {
            /// AND operator can have UInt8 or bool as its type.
@@ -207,6 +208,9 @@ private:
            or_operands.push_back(std::move(in_function));
        }

+       if (or_operands.size() == function_node.getArguments().getNodes().size())
+           return;
+
        if (or_operands.size() == 1)
        {
            /// if the result type of operand is the same as the result type of OR
@@ -69,8 +69,7 @@ private:
        for (auto it = function_arguments.rbegin(); it != function_arguments.rend(); ++it)
            candidates.push_back({ *it, is_deterministic });

-       // Using DFS we traverse function tree and try to find if it uses other keys as function arguments.
-       // TODO: Also process CONSTANT here. We can simplify GROUP BY x, x + 1 to GROUP BY x.
+       /// Using DFS we traverse function tree and try to find if it uses other keys as function arguments.
        while (!candidates.empty())
        {
            auto [candidate, parents_are_only_deterministic] = candidates.back();
@@ -108,6 +107,7 @@ private:
                return false;
            }
        }
+
        return true;
    }

@@ -193,13 +193,9 @@ namespace ErrorCodes
  * lookup should not be continued, and exception must be thrown because if lookup continues identifier can be resolved from parent scope.
  *
  * TODO: Update exception messages
- * TODO: JOIN TREE subquery constant columns
  * TODO: Table identifiers with optional UUID.
  * TODO: Lookup functions arrayReduce(sum, [1, 2, 3]);
- * TODO: SELECT (compound_expression).*, (compound_expression).COLUMNS are not supported on parser level.
- * TODO: SELECT a.b.c.*, a.b.c.COLUMNS. Qualified matcher where identifier size is greater than 2 are not supported on parser level.
  * TODO: Support function identifier resolve from parent query scope, if lambda in parent scope does not capture any columns.
- * TODO: Scalar subqueries cache.
  */

 namespace
@@ -701,7 +697,9 @@ struct IdentifierResolveScope
    }

    if (auto * union_node = scope_node->as<UnionNode>())
+   {
        context = union_node->getContext();
+   }
    else if (auto * query_node = scope_node->as<QueryNode>())
    {
        context = query_node->getContext();
@@ -1336,6 +1334,9 @@ private:
    /// Global resolve expression node to projection names map
    std::unordered_map<QueryTreeNodePtr, ProjectionNames> resolved_expressions;

+   /// Global resolve expression node to tree size
+   std::unordered_map<QueryTreeNodePtr, size_t> node_to_tree_size;
+
    /// Global scalar subquery to scalar value map
    std::unordered_map<QueryTreeNodePtrWithHash, Block> scalar_subquery_to_scalar_value;

@@ -1864,7 +1865,10 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden

    Block scalar_block;

-   QueryTreeNodePtrWithHash node_with_hash(node);
+   auto node_without_alias = node->clone();
+   node_without_alias->removeAlias();
+
+   QueryTreeNodePtrWithHash node_with_hash(node_without_alias);
    auto scalar_value_it = scalar_subquery_to_scalar_value.find(node_with_hash);

    if (scalar_value_it != scalar_subquery_to_scalar_value.end())
@@ -1954,21 +1958,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
     *
     * Example: SELECT (SELECT 2 AS x, x)
     */
-   NameSet block_column_names;
-   size_t unique_column_name_counter = 1;
-
-   for (auto & column_with_type : block)
-   {
-       if (!block_column_names.contains(column_with_type.name))
-       {
-           block_column_names.insert(column_with_type.name);
-           continue;
-       }
-
-       column_with_type.name += '_';
-       column_with_type.name += std::to_string(unique_column_name_counter);
-       ++unique_column_name_counter;
-   }
+   makeUniqueColumnNamesInBlock(block);

    scalar_block.insert({
        ColumnTuple::create(block.getColumns()),
@@ -2348,7 +2338,13 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveTableIdentifierFromDatabaseCatalog(con
    storage_id = context->resolveStorageID(storage_id);
    bool is_temporary_table = storage_id.getDatabaseName() == DatabaseCatalog::TEMPORARY_DATABASE;

-   auto storage = DatabaseCatalog::instance().tryGetTable(storage_id, context);
+   StoragePtr storage;
+
+   if (is_temporary_table)
+       storage = DatabaseCatalog::instance().getTable(storage_id, context);
+   else
+       storage = DatabaseCatalog::instance().tryGetTable(storage_id, context);
+
    if (!storage)
        return {};

@@ -2914,7 +2910,10 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromTableExpression(const Id
            break;

        IdentifierLookup column_identifier_lookup = {qualified_identifier_with_removed_part, IdentifierLookupContext::EXPRESSION};
-       if (tryBindIdentifierToAliases(column_identifier_lookup, scope) ||
+       if (tryBindIdentifierToAliases(column_identifier_lookup, scope))
+           break;
+
+       if (table_expression_data.should_qualify_columns &&
            tryBindIdentifierToTableExpressions(column_identifier_lookup, table_expression_node, scope))
            break;

@@ -3018,11 +3017,39 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromJoin(const IdentifierLoo

            resolved_identifier = std::move(result_column_node);
        }
-       else if (scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
+       else if (left_resolved_identifier->isEqual(*right_resolved_identifier, IQueryTreeNode::CompareOptions{.compare_aliases = false}))
        {
+           const auto & identifier_path_part = identifier_lookup.identifier.front();
+           auto * left_resolved_identifier_column = left_resolved_identifier->as<ColumnNode>();
+           auto * right_resolved_identifier_column = right_resolved_identifier->as<ColumnNode>();
+
+           if (left_resolved_identifier_column && right_resolved_identifier_column)
+           {
+               const auto & left_column_source_alias = left_resolved_identifier_column->getColumnSource()->getAlias();
+               const auto & right_column_source_alias = right_resolved_identifier_column->getColumnSource()->getAlias();
+
+               /** If column from right table was resolved using alias, we prefer column from right table.
+                 *
+                 * Example: SELECT dummy FROM system.one JOIN system.one AS A ON A.dummy = system.one.dummy;
+                 *
+                 * If alias is specified for left table, and alias is not specified for right table and identifier was resolved
+                 * without using left table alias, we prefer column from right table.
+                 *
+                 * Example: SELECT dummy FROM system.one AS A JOIN system.one ON A.dummy = system.one.dummy;
+                 *
+                 * Otherwise we prefer column from left table.
+                 */
+               if (identifier_path_part == right_column_source_alias)
+                   return right_resolved_identifier;
+               else if (!left_column_source_alias.empty() &&
+                   right_column_source_alias.empty() &&
+                   identifier_path_part != left_column_source_alias)
+                   return right_resolved_identifier;
+           }
+
            return left_resolved_identifier;
        }
-       else if (left_resolved_identifier->isEqual(*right_resolved_identifier, IQueryTreeNode::CompareOptions{.compare_aliases = false}))
+       else if (scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
        {
            return left_resolved_identifier;
        }
@@ -4466,6 +4493,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
    bool is_special_function_dict_get = false;
    bool is_special_function_join_get = false;
    bool is_special_function_exists = false;
+   bool is_special_function_if = false;

    if (!lambda_expression_untyped)
    {
@@ -4473,6 +4501,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
        is_special_function_dict_get = functionIsDictGet(function_name);
        is_special_function_join_get = functionIsJoinGet(function_name);
        is_special_function_exists = function_name == "exists";
+       is_special_function_if = function_name == "if";

        auto function_name_lowercase = Poco::toLower(function_name);

@@ -4571,6 +4600,60 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
        is_special_function_in = true;
    }

+   if (is_special_function_if && !function_node_ptr->getArguments().getNodes().empty())
+   {
+       /** Handle special case with constant If function, even if some of the arguments are invalid.
+         *
+         * SELECT if(hasColumnInTable('system', 'numbers', 'not_existing_column'), not_existing_column, 5) FROM system.numbers;
+         */
+       auto & if_function_arguments = function_node_ptr->getArguments().getNodes();
+       auto if_function_condition = if_function_arguments[0];
+       resolveExpressionNode(if_function_condition, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
+
+       auto constant_condition = tryExtractConstantFromConditionNode(if_function_condition);
+
+       if (constant_condition.has_value() && if_function_arguments.size() == 3)
+       {
+           QueryTreeNodePtr constant_if_result_node;
+           QueryTreeNodePtr possibly_invalid_argument_node;
+
+           if (*constant_condition)
+           {
+               possibly_invalid_argument_node = if_function_arguments[2];
+               constant_if_result_node = if_function_arguments[1];
+           }
+           else
+           {
+               possibly_invalid_argument_node = if_function_arguments[1];
+               constant_if_result_node = if_function_arguments[2];
+           }
+
+           bool apply_constant_if_optimization = false;
+
+           try
+           {
+               resolveExpressionNode(possibly_invalid_argument_node,
+                   scope,
+                   false /*allow_lambda_expression*/,
+                   false /*allow_table_expression*/);
+           }
+           catch (...)
+           {
+               apply_constant_if_optimization = true;
+           }
+
+           if (apply_constant_if_optimization)
+           {
+               auto result_projection_names = resolveExpressionNode(constant_if_result_node,
+                   scope,
+                   false /*allow_lambda_expression*/,
+                   false /*allow_table_expression*/);
+               node = std::move(constant_if_result_node);
+               return result_projection_names;
+           }
+       }
+   }
+
    /// Resolve function arguments

    bool allow_table_expressions = is_special_function_in;
@@ -5059,7 +5142,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi

    /// Do not constant fold get scalar functions
    bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
-       function_name == "shardCount";
+       function_name == "shardCount" || function_name == "hostName";

    /** If function is suitable for constant folding try to convert it to constant.
     * Example: SELECT plus(1, 1);
@@ -5085,7 +5168,8 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
    /** Do not perform constant folding if there are aggregate or arrayJoin functions inside function.
     * Example: SELECT toTypeName(sum(number)) FROM numbers(10);
     */
-   if (column && isColumnConst(*column) && (!hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin")))
+   if (column && isColumnConst(*column) && !typeid_cast<const ColumnConst *>(column.get())->getDataColumn().isDummy() &&
+       (!hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin")))
    {
        /// Replace function node with result constant node
        Field column_constant_value;
@@ -5433,9 +5517,9 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, Id
        }
    }

-   if (node
-       && scope.nullable_group_by_keys.contains(node)
-       && !scope.expressions_in_resolve_process_stack.hasAggregateFunction())
+   validateTreeSize(node, scope.context->getSettingsRef().max_expanded_ast_elements, node_to_tree_size);
+
+   if (scope.nullable_group_by_keys.contains(node) && !scope.expressions_in_resolve_process_stack.hasAggregateFunction())
    {
        node = node->clone();
        node->convertToNullable();
@@ -6592,6 +6676,17 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier

    /// Resolve query node sections.

+   NamesAndTypes projection_columns;
+
+   if (!scope.group_by_use_nulls)
+   {
+       projection_columns = resolveProjectionExpressionNodeList(query_node_typed.getProjectionNode(), scope);
+       if (query_node_typed.getProjection().getNodes().empty())
+           throw Exception(ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED,
+               "Empty list of columns in projection. In scope {}",
+               scope.scope_node->formatASTForErrorMessage());
+   }
+
    if (query_node_typed.hasWith())
        resolveExpressionNodeList(query_node_typed.getWithNode(), scope, true /*allow_lambda_expression*/, false /*allow_table_expression*/);

@@ -6686,11 +6781,14 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
        convertLimitOffsetExpression(query_node_typed.getOffset(), "OFFSET", scope);
    }

-   auto projection_columns = resolveProjectionExpressionNodeList(query_node_typed.getProjectionNode(), scope);
-   if (query_node_typed.getProjection().getNodes().empty())
-       throw Exception(ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED,
-           "Empty list of columns in projection. In scope {}",
-           scope.scope_node->formatASTForErrorMessage());
+   if (scope.group_by_use_nulls)
+   {
+       projection_columns = resolveProjectionExpressionNodeList(query_node_typed.getProjectionNode(), scope);
+       if (query_node_typed.getProjection().getNodes().empty())
+           throw Exception(ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED,
+               "Empty list of columns in projection. In scope {}",
+               scope.scope_node->formatASTForErrorMessage());
+   }

    /** Resolve nodes with duplicate aliases.
     * Table expressions cannot have duplicate aliases.
@@ -6757,6 +6855,15 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier

    validateAggregates(query_node, { .group_by_use_nulls = scope.group_by_use_nulls });

+   for (const auto & column : projection_columns)
+   {
+       if (isNotCreatable(column.type))
+           throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
+               "Invalid projection column with type {}. In scope {}",
+               column.type->getName(),
+               scope.scope_node->formatASTForErrorMessage());
+   }
+
    /** WITH section can be safely removed, because WITH section only can provide aliases to query expressions
     * and CTE for other sections to use.
     *
@@ -355,21 +355,67 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
    if (select_limit_by)
        current_query_tree->getLimitByNode() = buildExpressionList(select_limit_by, current_context);

-   /// Combine limit expression with limit setting
+   /// Combine limit expression with limit and offset settings into final limit expression
+   /// The sequence of application is the following - offset expression, limit expression, offset setting, limit setting.
+   /// Since offset setting is applied after limit expression, but we want to transfer settings into expression
+   /// we must decrease limit expression by offset setting and then add offset setting to offset expression.
+   /// select_limit - limit expression
+   /// limit - limit setting
+   /// offset - offset setting
+   ///
+   /// if select_limit
+   ///   -- if offset >= select_limit (expr 0)
+   ///   then (0) (0 rows)
+   ///   -- else if limit > 0 (expr 1)
+   ///   then min(select_limit - offset, limit) (expr 2)
+   ///   -- else
+   ///   then (select_limit - offset) (expr 3)
+   /// else if limit > 0
+   ///   then limit
+   ///
+   /// offset = offset + of_expr
    auto select_limit = select_query_typed.limitLength();
-   if (select_limit && limit)
+   if (select_limit)
    {
-       auto function_node = std::make_shared<FunctionNode>("least");
-       function_node->getArguments().getNodes().push_back(buildExpression(select_limit, current_context));
-       function_node->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(limit));
-       current_query_tree->getLimit() = std::move(function_node);
+       /// Shortcut
+       if (offset == 0 && limit == 0)
+       {
+           current_query_tree->getLimit() = buildExpression(select_limit, current_context);
+       }
+       else
+       {
+           /// expr 3
+           auto expr_3 = std::make_shared<FunctionNode>("minus");
+           expr_3->getArguments().getNodes().push_back(buildExpression(select_limit, current_context));
+           expr_3->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(offset));
+
+           /// expr 2
+           auto expr_2 = std::make_shared<FunctionNode>("least");
+           expr_2->getArguments().getNodes().push_back(expr_3->clone());
+           expr_2->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(limit));
+
+           /// expr 0
+           auto expr_0 = std::make_shared<FunctionNode>("greaterOrEquals");
+           expr_0->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(offset));
+           expr_0->getArguments().getNodes().push_back(buildExpression(select_limit, current_context));
+
+           /// expr 1
+           auto expr_1 = std::make_shared<ConstantNode>(limit > 0);
+
+           auto function_node = std::make_shared<FunctionNode>("multiIf");
+           function_node->getArguments().getNodes().push_back(expr_0);
+           function_node->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(0));
+           function_node->getArguments().getNodes().push_back(expr_1);
+           function_node->getArguments().getNodes().push_back(expr_2);
+           function_node->getArguments().getNodes().push_back(expr_3);
+
+           current_query_tree->getLimit() = std::move(function_node);
+       }
    }
-   else if (limit)
-       current_query_tree->getLimit() = std::make_shared<ConstantNode>(limit);
-   else if (select_limit)
-       current_query_tree->getLimit() = buildExpression(select_limit, current_context);
+   else if (limit > 0)
+       current_query_tree->getLimit() = std::make_shared<ConstantNode>(limit);

-   /// Combine offset expression with offset setting
+   /// Combine offset expression with offset setting into final offset expression
    auto select_offset = select_query_typed.limitOffset();
    if (select_offset && offset)
    {
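The comment block in the hunk above spells out how the query's LIMIT expression is folded together with the `limit` and `offset` settings into a single `multiIf` expression. A small self-contained sketch of that arithmetic on plain integers is below; `combinedLimit` is a hypothetical helper used only to check the cases by hand, with 0 standing in for "no limit".

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Plain-integer version of the formula described in the comment above:
//   if select_limit:
//     if offset >= select_limit   -> 0
//     else if limit > 0           -> min(select_limit - offset, limit)
//     else                        -> select_limit - offset
//   else if limit > 0             -> limit
static uint64_t combinedLimit(uint64_t select_limit, uint64_t limit, uint64_t offset)
{
    if (select_limit)
    {
        if (offset >= select_limit)
            return 0;
        if (limit > 0)
            return std::min(select_limit - offset, limit);
        return select_limit - offset;
    }
    if (limit > 0)
        return limit;
    return 0;
}

int main()
{
    assert(combinedLimit(10, 0, 0) == 10);  // only LIMIT in the query (the shortcut case)
    assert(combinedLimit(10, 3, 0) == 3);   // limit setting caps the query LIMIT
    assert(combinedLimit(10, 0, 4) == 6);   // offset setting shrinks the query LIMIT
    assert(combinedLimit(10, 3, 8) == 2);   // min(10 - 8, 3)
    assert(combinedLimit(10, 3, 12) == 0);  // offset setting past the query LIMIT
    assert(combinedLimit(0, 5, 2) == 5);    // no query LIMIT, only the limit setting
    return 0;
}
```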
@@ -61,12 +61,17 @@ bool TableNode::isEqualImpl(const IQueryTreeNode & rhs) const

 void TableNode::updateTreeHashImpl(HashState & state) const
 {
-    auto full_name = storage_id.getFullNameNotQuoted();
-    state.update(full_name.size());
-    state.update(full_name);
-
-    state.update(temporary_table_name.size());
-    state.update(temporary_table_name);
+    if (!temporary_table_name.empty())
+    {
+        state.update(temporary_table_name.size());
+        state.update(temporary_table_name);
+    }
+    else
+    {
+        auto full_name = storage_id.getFullNameNotQuoted();
+        state.update(full_name.size());
+        state.update(full_name);
+    }

     if (table_expression_modifiers)
         table_expression_modifiers->updateTreeHash(state);
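The hash update keeps the existing convention of feeding the length before the bytes, which is what keeps inputs like ("ab", "c") and ("a", "bc") from producing the same byte stream when several strings go into one state. A tiny FNV-1a sketch of that length-prefixed update is below; the `HashState` struct here is a generic stand-in, not the ClickHouse hash state.

```cpp
#include <cassert>
#include <cstdint>
#include <string>

struct HashState
{
    uint64_t value = 1469598103934665603ULL;  // FNV-1a offset basis

    void update(const void * data, size_t size)
    {
        const auto * bytes = static_cast<const unsigned char *>(data);
        for (size_t i = 0; i < size; ++i)
        {
            value ^= bytes[i];
            value *= 1099511628211ULL;  // FNV-1a prime
        }
    }

    // Length-prefixed string update, same shape as state.update(name.size()); state.update(name);
    void update(const std::string & s)
    {
        uint64_t size = s.size();
        update(&size, sizeof(size));
        update(s.data(), s.size());
    }
};

int main()
{
    HashState a;
    a.update(std::string("ab"));
    a.update(std::string("c"));

    HashState b;
    b.update(std::string("a"));
    b.update(std::string("bc"));

    // Without the length prefix these two byte streams would be identical.
    assert(a.value != b.value);
    return 0;
}
```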
@@ -8,6 +8,7 @@
 #include <DataTypes/DataTypeString.h>
 #include <DataTypes/DataTypeTuple.h>
 #include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypeLowCardinality.h>

 #include <Functions/FunctionHelpers.h>
 #include <Functions/FunctionFactory.h>
@@ -32,6 +33,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int LOGICAL_ERROR;
+    extern const int BAD_ARGUMENTS;
 }

 bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root)
@@ -79,6 +81,75 @@ bool isNameOfInFunction(const std::string & function_name)
     return is_special_function_in;
 }

+bool isNameOfLocalInFunction(const std::string & function_name)
+{
+    bool is_special_function_in = function_name == "in" ||
+        function_name == "notIn" ||
+        function_name == "nullIn" ||
+        function_name == "notNullIn" ||
+        function_name == "inIgnoreSet" ||
+        function_name == "notInIgnoreSet" ||
+        function_name == "nullInIgnoreSet" ||
+        function_name == "notNullInIgnoreSet";
+
+    return is_special_function_in;
+}
+
+bool isNameOfGlobalInFunction(const std::string & function_name)
+{
+    bool is_special_function_in = function_name == "globalIn" ||
+        function_name == "globalNotIn" ||
+        function_name == "globalNullIn" ||
+        function_name == "globalNotNullIn" ||
+        function_name == "globalInIgnoreSet" ||
+        function_name == "globalNotInIgnoreSet" ||
+        function_name == "globalNullInIgnoreSet" ||
+        function_name == "globalNotNullInIgnoreSet";
+
+    return is_special_function_in;
+}
+
+std::string getGlobalInFunctionNameForLocalInFunctionName(const std::string & function_name)
+{
+    if (function_name == "in")
+        return "globalIn";
+    else if (function_name == "notIn")
+        return "globalNotIn";
+    else if (function_name == "nullIn")
+        return "globalNullIn";
+    else if (function_name == "notNullIn")
+        return "globalNotNullIn";
+    else if (function_name == "inIgnoreSet")
+        return "globalInIgnoreSet";
+    else if (function_name == "notInIgnoreSet")
+        return "globalNotInIgnoreSet";
+    else if (function_name == "nullInIgnoreSet")
+        return "globalNullInIgnoreSet";
+    else if (function_name == "notNullInIgnoreSet")
+        return "globalNotNullInIgnoreSet";
+
+    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid local IN function name {}", function_name);
+}
+
+void makeUniqueColumnNamesInBlock(Block & block)
+{
+    NameSet block_column_names;
+    size_t unique_column_name_counter = 1;
+
+    for (auto & column_with_type : block)
+    {
+        if (!block_column_names.contains(column_with_type.name))
+        {
+            block_column_names.insert(column_with_type.name);
+            continue;
+        }
+
+        column_with_type.name += '_';
+        column_with_type.name += std::to_string(unique_column_name_counter);
+        ++unique_column_name_counter;
+    }
+}
+
 QueryTreeNodePtr buildCastFunction(const QueryTreeNodePtr & expression,
     const DataTypePtr & type,
     const ContextPtr & context,
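One of the helpers added above, the unique-name suffixing, is easy to sanity-check in isolation: it walks the columns once, keeps a set of seen names, and appends `_1`, `_2`, … (a single shared counter) to later duplicates. A standalone sketch of the same scheme over a plain vector of strings is below; `makeUniqueNames` and the toy input are assumptions for illustration, not the ClickHouse `Block` API.

```cpp
#include <cassert>
#include <string>
#include <unordered_set>
#include <vector>

// Same renaming scheme as makeUniqueColumnNamesInBlock in the hunk above,
// applied to a plain vector of names instead of a Block.
static void makeUniqueNames(std::vector<std::string> & names)
{
    std::unordered_set<std::string> seen;
    size_t counter = 1;

    for (auto & name : names)
    {
        if (seen.insert(name).second)
            continue;  // first occurrence keeps its name

        name += '_';
        name += std::to_string(counter);
        ++counter;
    }
}

int main()
{
    std::vector<std::string> names{"x", "x", "y", "x"};
    makeUniqueNames(names);
    assert((names == std::vector<std::string>{"x", "x_1", "y", "x_2"}));
    return 0;
}
```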
@@ -102,6 +173,27 @@ QueryTreeNodePtr buildCastFunction(const QueryTreeNodePtr & expression,
     return cast_function_node;
 }

+std::optional<bool> tryExtractConstantFromConditionNode(const QueryTreeNodePtr & condition_node)
+{
+    const auto * constant_node = condition_node->as<ConstantNode>();
+    if (!constant_node)
+        return {};
+
+    const auto & value = constant_node->getValue();
+    auto constant_type = constant_node->getResultType();
+    constant_type = removeNullable(removeLowCardinality(constant_type));
+
+    auto which_constant_type = WhichDataType(constant_type);
+    if (!which_constant_type.isUInt8() && !which_constant_type.isNothing())
+        return {};
+
+    if (value.isNull())
+        return false;
+
+    UInt8 predicate_value = value.safeGet<UInt8>();
+    return predicate_value > 0;
+}
+
 static ASTPtr convertIntoTableExpressionAST(const QueryTreeNodePtr & table_expression_node)
 {
     ASTPtr table_expression_node_ast;
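The helper above reduces a constant condition to `std::optional<bool>`: no value when the node is not a constant or not of UInt8/Nothing type, `false` for NULL, and the truthiness of the UInt8 value otherwise. A detached sketch of the same decision table over a small variant type is below; the `ConditionConstant` variant and `tryExtractBool` name are hypothetical stand-ins, not the ClickHouse `Field`/`ConstantNode` API.

```cpp
#include <cassert>
#include <cstdint>
#include <optional>
#include <variant>

// Stand-in for a constant condition value: absent (not a constant), NULL, or a UInt8.
struct NullValue {};
using ConditionConstant = std::variant<std::monostate, NullValue, uint8_t>;

// Mirrors the decision table of tryExtractConstantFromConditionNode:
// not a constant -> nullopt, NULL -> false, UInt8 -> value > 0.
static std::optional<bool> tryExtractBool(const ConditionConstant & value)
{
    if (std::holds_alternative<std::monostate>(value))
        return std::nullopt;

    if (std::holds_alternative<NullValue>(value))
        return false;

    return std::get<uint8_t>(value) > 0;
}

int main()
{
    assert(!tryExtractBool(ConditionConstant{}).has_value());
    assert(tryExtractBool(ConditionConstant{NullValue{}}) == false);
    assert(tryExtractBool(ConditionConstant{uint8_t{0}}) == false);
    assert(tryExtractBool(ConditionConstant{uint8_t{1}}) == true);
    return 0;
}
```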
@@ -13,6 +13,18 @@ bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root);
 /// Returns true if function name is name of IN function or its variations, false otherwise
 bool isNameOfInFunction(const std::string & function_name);

+/// Returns true if function name is name of local IN function or its variations, false otherwise
+bool isNameOfLocalInFunction(const std::string & function_name);
+
+/// Returns true if function name is name of global IN function or its variations, false otherwise
+bool isNameOfGlobalInFunction(const std::string & function_name);
+
+/// Returns global IN function name for local IN function name
+std::string getGlobalInFunctionNameForLocalInFunctionName(const std::string & function_name);
+
+/// Add unique suffix to names of duplicate columns in block
+void makeUniqueColumnNamesInBlock(Block & block);
+
 /** Build cast function that cast expression into type.
   * If resolve = true, then result cast function is resolved during build, otherwise
   * result cast function is not resolved during build.
@@ -22,6 +34,9 @@ QueryTreeNodePtr buildCastFunction(const QueryTreeNodePtr & expression,
     const ContextPtr & context,
     bool resolve = true);

+/// Try extract boolean constant from condition node
+std::optional<bool> tryExtractConstantFromConditionNode(const QueryTreeNodePtr & condition_node);
+
 /** Add table expression in tables in select query children.
   * If table expression node is not of identifier node, table node, query node, table function node, join node or array join node type throws logical error exception.
   */
@@ -16,6 +16,7 @@ namespace ErrorCodes
 {
     extern const int NOT_AN_AGGREGATE;
     extern const int NOT_IMPLEMENTED;
+    extern const int BAD_ARGUMENTS;
 }

 class ValidateGroupByColumnsVisitor : public ConstInDepthQueryTreeVisitor<ValidateGroupByColumnsVisitor>
@@ -283,4 +284,52 @@ void assertNoFunctionNodes(const QueryTreeNodePtr & node,
     visitor.visit(node);
 }

+void validateTreeSize(const QueryTreeNodePtr & node,
+    size_t max_size,
+    std::unordered_map<QueryTreeNodePtr, size_t> & node_to_tree_size)
+{
+    size_t tree_size = 0;
+    std::vector<std::pair<QueryTreeNodePtr, bool>> nodes_to_process;
+    nodes_to_process.emplace_back(node, false);
+
+    while (!nodes_to_process.empty())
+    {
+        const auto [node_to_process, processed_children] = nodes_to_process.back();
+        nodes_to_process.pop_back();
+
+        if (processed_children)
+        {
+            ++tree_size;
+            node_to_tree_size.emplace(node_to_process, tree_size);
+            continue;
+        }
+
+        auto node_to_size_it = node_to_tree_size.find(node_to_process);
+        if (node_to_size_it != node_to_tree_size.end())
+        {
+            tree_size += node_to_size_it->second;
+            continue;
+        }
+
+        nodes_to_process.emplace_back(node_to_process, true);
+
+        for (const auto & node_to_process_child : node_to_process->getChildren())
+        {
+            if (!node_to_process_child)
+                continue;
+
+            nodes_to_process.emplace_back(node_to_process_child, false);
+        }
+
+        auto * constant_node = node_to_process->as<ConstantNode>();
+        if (constant_node && constant_node->hasSourceExpression())
+            nodes_to_process.emplace_back(constant_node->getSourceExpression(), false);
+    }
+
+    if (tree_size > max_size)
+        throw Exception(ErrorCodes::BAD_ARGUMENTS,
+            "Query tree is too big. Maximum: {}",
+            max_size);
+}
+
 }
@ -7,7 +7,7 @@ namespace DB
 
 struct ValidationParams
 {
-    bool group_by_use_nulls;
+    bool group_by_use_nulls = false;
 };
 
 /** Validate aggregates in query node.
@ -31,4 +31,11 @@ void assertNoFunctionNodes(const QueryTreeNodePtr & node,
     std::string_view exception_function_name,
     std::string_view exception_place_message);
 
+/** Validate tree size. If size of tree is greater than max size throws exception.
+  * Additionally for each node in tree, update node to tree size map.
+  */
+void validateTreeSize(const QueryTreeNodePtr & node,
+    size_t max_size,
+    std::unordered_map<QueryTreeNodePtr, size_t> & node_to_tree_size);
+
 }
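A brief sketch (not from the patch) of how a caller might apply the new check during analysis; query_tree_node and max_tree_size_from_settings are hypothetical placeholders:

    /// Hypothetical call site: bound the size of a resolved query tree.
    std::unordered_map<QueryTreeNodePtr, size_t> node_to_tree_size;
    validateTreeSize(query_tree_node, max_tree_size_from_settings, node_to_tree_size);
    /// Throws BAD_ARGUMENTS ("Query tree is too big") if the node count exceeds the limit;
    /// otherwise node_to_tree_size now caches the subtree size of every visited node.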
@ -113,11 +113,17 @@ ASTPtr WindowNode::toASTImpl() const
 
     window_definition->parent_window_name = parent_window_name;
 
-    window_definition->children.push_back(getPartitionByNode()->toAST());
-    window_definition->partition_by = window_definition->children.back();
+    if (hasPartitionBy())
+    {
+        window_definition->children.push_back(getPartitionByNode()->toAST());
+        window_definition->partition_by = window_definition->children.back();
+    }
 
-    window_definition->children.push_back(getOrderByNode()->toAST());
-    window_definition->order_by = window_definition->children.back();
+    if (hasOrderBy())
+    {
+        window_definition->children.push_back(getOrderByNode()->toAST());
+        window_definition->order_by = window_definition->children.back();
+    }
 
     window_definition->frame_is_default = window_frame.is_default;
     window_definition->frame_type = window_frame.type;
@ -1834,7 +1834,7 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
     {
         /// disable logs if expects errors
         TestHint test_hint(all_queries_text);
-        if (test_hint.clientError() || test_hint.serverError())
+        if (test_hint.hasClientErrors() || test_hint.hasServerErrors())
            processTextAsSingleQuery("SET send_logs_level = 'fatal'");
     }
 
@ -1876,17 +1876,17 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
             // the query ends because we failed to parse it, so we consume
             // the entire line.
             TestHint hint(String(this_query_begin, this_query_end - this_query_begin));
-            if (hint.serverError())
+            if (hint.hasServerErrors())
             {
                 // Syntax errors are considered as client errors
-                current_exception->addMessage("\nExpected server error '{}'.", hint.serverError());
+                current_exception->addMessage("\nExpected server error: {}.", hint.serverErrors());
                 current_exception->rethrow();
             }
 
-            if (hint.clientError() != current_exception->code())
+            if (!hint.hasExpectedClientError(current_exception->code()))
             {
-                if (hint.clientError())
-                    current_exception->addMessage("\nExpected client error: " + std::to_string(hint.clientError()));
+                if (hint.hasClientErrors())
+                    current_exception->addMessage("\nExpected client error: {}.", hint.clientErrors());
 
                 current_exception->rethrow();
             }
@ -1935,37 +1935,37 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
            bool error_matches_hint = true;
            if (have_error)
            {
-               if (test_hint.serverError())
+               if (test_hint.hasServerErrors())
                {
                    if (!server_exception)
                    {
                        error_matches_hint = false;
                        fmt::print(stderr, "Expected server error code '{}' but got no server error (query: {}).\n",
-                           test_hint.serverError(), full_query);
+                           test_hint.serverErrors(), full_query);
                    }
-                   else if (server_exception->code() != test_hint.serverError())
+                   else if (!test_hint.hasExpectedServerError(server_exception->code()))
                    {
                        error_matches_hint = false;
                        fmt::print(stderr, "Expected server error code: {} but got: {} (query: {}).\n",
-                           test_hint.serverError(), server_exception->code(), full_query);
+                           test_hint.serverErrors(), server_exception->code(), full_query);
                    }
                }
-               if (test_hint.clientError())
+               if (test_hint.hasClientErrors())
                {
                    if (!client_exception)
                    {
                        error_matches_hint = false;
                        fmt::print(stderr, "Expected client error code '{}' but got no client error (query: {}).\n",
-                           test_hint.clientError(), full_query);
+                           test_hint.clientErrors(), full_query);
                    }
-                   else if (client_exception->code() != test_hint.clientError())
+                   else if (!test_hint.hasExpectedClientError(client_exception->code()))
                    {
                        error_matches_hint = false;
                        fmt::print(stderr, "Expected client error code '{}' but got '{}' (query: {}).\n",
-                           test_hint.clientError(), client_exception->code(), full_query);
+                           test_hint.clientErrors(), client_exception->code(), full_query);
                    }
                }
-               if (!test_hint.clientError() && !test_hint.serverError())
+               if (!test_hint.hasClientErrors() && !test_hint.hasServerErrors())
                {
                    // No error was expected but it still occurred. This is the
                    // default case without test hint, doesn't need additional
@ -1975,19 +1975,19 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
            }
            else
            {
-               if (test_hint.clientError())
+               if (test_hint.hasClientErrors())
                {
                    error_matches_hint = false;
                    fmt::print(stderr,
                        "The query succeeded but the client error '{}' was expected (query: {}).\n",
-                       test_hint.clientError(), full_query);
+                       test_hint.clientErrors(), full_query);
                }
-               if (test_hint.serverError())
+               if (test_hint.hasServerErrors())
                {
                    error_matches_hint = false;
                    fmt::print(stderr,
                        "The query succeeded but the server error '{}' was expected (query: {}).\n",
-                       test_hint.serverError(), full_query);
+                       test_hint.serverErrors(), full_query);
                }
            }
 
@ -506,7 +506,7 @@ void Connection::sendQuery(
     bool with_pending_data,
     std::function<void(const Progress &)>)
 {
-    OpenTelemetry::SpanHolder span("Connection::sendQuery()");
+    OpenTelemetry::SpanHolder span("Connection::sendQuery()", OpenTelemetry::CLIENT);
     span.addAttribute("clickhouse.query_id", query_id_);
     span.addAttribute("clickhouse.query", query);
     span.addAttribute("target", [this] () { return this->getHost() + ":" + std::to_string(this->getPort()); });
@ -1,32 +1,15 @@
-#include "TestHint.h"
+#include <charconv>
+#include <string_view>
+
+#include <Client/TestHint.h>
 
-#include <Common/Exception.h>
-#include <Common/ErrorCodes.h>
-#include <IO/ReadBufferFromString.h>
-#include <IO/ReadHelpers.h>
 #include <Parsers/Lexer.h>
+#include <Common/ErrorCodes.h>
+#include <Common/Exception.h>
 
-namespace
+namespace DB::ErrorCodes
 {
-
-/// Parse error as number or as a string (name of the error code const)
-int parseErrorCode(DB::ReadBufferFromString & in)
-{
-    int code = -1;
-    String code_name;
-
-    auto * pos = in.position();
-    tryReadText(code, in);
-    if (pos != in.position())
-    {
-        return code;
-    }
-
-    /// Try parse as string
-    readStringUntilWhitespace(code_name, in);
-    return DB::ErrorCodes::getErrorCodeByName(code_name);
-}
-
+    extern const int CANNOT_PARSE_TEXT;
 }
 
 namespace DB
@ -60,8 +43,8 @@ TestHint::TestHint(const String & query_)
         size_t pos_end = comment.find('}', pos_start);
         if (pos_end != String::npos)
         {
-            String hint(comment.begin() + pos_start + 1, comment.begin() + pos_end);
-            parse(hint, is_leading_hint);
+            Lexer comment_lexer(comment.c_str() + pos_start + 1, comment.c_str() + pos_end, 0);
+            parse(comment_lexer, is_leading_hint);
         }
     }
 }
@ -69,33 +52,86 @@ TestHint::TestHint(const String & query_)
     }
 }
 
-void TestHint::parse(const String & hint, bool is_leading_hint)
-{
-    ReadBufferFromString in(hint);
-    String item;
-
-    while (!in.eof())
-    {
-        readStringUntilWhitespace(item, in);
-        if (in.eof())
-            break;
-
-        skipWhitespaceIfAny(in);
-
-        if (!is_leading_hint)
-        {
-            if (item == "serverError")
-                server_error = parseErrorCode(in);
-            else if (item == "clientError")
-                client_error = parseErrorCode(in);
-        }
-
-        if (item == "echo")
-            echo.emplace(true);
-        if (item == "echoOn")
-            echo.emplace(true);
-        if (item == "echoOff")
-            echo.emplace(false);
-    }
-}
+bool TestHint::hasExpectedClientError(int error)
+{
+    return std::find(client_errors.begin(), client_errors.end(), error) != client_errors.end();
+}
+
+bool TestHint::hasExpectedServerError(int error)
+{
+    return std::find(server_errors.begin(), server_errors.end(), error) != server_errors.end();
+}
+
+void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint)
+{
+    std::unordered_set<std::string_view> commands{"echo", "echoOn", "echoOff"};
+
+    std::unordered_set<std::string_view> command_errors{
+        "serverError",
+        "clientError",
+    };
+
+    for (Token token = comment_lexer.nextToken(); !token.isEnd(); token = comment_lexer.nextToken())
+    {
+        String item = String(token.begin, token.end);
+        if (token.type == TokenType::BareWord && commands.contains(item))
+        {
+            if (item == "echo")
+                echo.emplace(true);
+            if (item == "echoOn")
+                echo.emplace(true);
+            if (item == "echoOff")
+                echo.emplace(false);
+        }
+        else if (!is_leading_hint && token.type == TokenType::BareWord && command_errors.contains(item))
+        {
+            /// Everything after this must be a list of errors separated by comma
+            ErrorVector error_codes;
+
+            while (!token.isEnd())
+            {
+                token = comment_lexer.nextToken();
+                if (token.type == TokenType::Whitespace)
+                    continue;
+
+                if (token.type == TokenType::Number)
+                {
+                    int code;
+                    auto [p, ec] = std::from_chars(token.begin, token.end, code);
+                    if (p == token.begin)
+                        throw DB::Exception(
+                            DB::ErrorCodes::CANNOT_PARSE_TEXT,
+                            "Could not parse integer number for errorcode: {}",
+                            std::string_view(token.begin, token.end));
+                    error_codes.push_back(code);
+                }
+                else if (token.type == TokenType::BareWord)
+                {
+                    int code = DB::ErrorCodes::getErrorCodeByName(std::string_view(token.begin, token.end));
+                    error_codes.push_back(code);
+                }
+                else
+                    throw DB::Exception(
+                        DB::ErrorCodes::CANNOT_PARSE_TEXT,
+                        "Could not parse error code in {}: {}",
+                        getTokenName(token.type),
+                        std::string_view(token.begin, token.end));
+
+                do
+                {
+                    token = comment_lexer.nextToken();
+                } while (!token.isEnd() && token.type == TokenType::Whitespace);
+
+                if (!token.isEnd() && token.type != TokenType::Comma)
+                    throw DB::Exception(
+                        DB::ErrorCodes::CANNOT_PARSE_TEXT,
+                        "Could not parse error code. Expected ','. Got '{}'",
+                        std::string_view(token.begin, token.end));
+            }
+
+            if (item == "serverError")
+                server_errors = error_codes;
+            else
+                client_errors = error_codes;
+
+            break;
+        }
+    }
+}
@ -1,21 +1,30 @@
 #pragma once
 
 #include <optional>
+#include <vector>
+
+#include <fmt/format.h>
 
 #include <Core/Types.h>
 
 namespace DB
 {
 
+class Lexer;
+
 /// Checks expected server and client error codes.
 ///
 /// The following comment hints are supported:
 ///
 /// - "-- { serverError 60 }" -- in case of you are expecting server error.
+/// - "-- { serverError 16, 36 }" -- in case of you are expecting one of the 2 errors.
 ///
 /// - "-- { clientError 20 }" -- in case of you are expecting client error.
+/// - "-- { clientError 20, 60, 92 }" -- It's expected that the client will return one of the 3 errors.
 ///
 /// - "-- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }" -- by error name.
+/// - "-- { serverError NO_SUCH_COLUMN_IN_TABLE, BAD_ARGUMENTS }" -- by error name.
 ///
 /// - "-- { clientError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }" -- by error name.
 ///
@ -43,29 +52,73 @@ namespace DB
 class TestHint
 {
 public:
+    using ErrorVector = std::vector<int>;
     TestHint(const String & query_);
 
-    int serverError() const { return server_error; }
-    int clientError() const { return client_error; }
+    const auto & serverErrors() const { return server_errors; }
+    const auto & clientErrors() const { return client_errors; }
     std::optional<bool> echoQueries() const { return echo; }
 
+    bool hasClientErrors() { return !client_errors.empty(); }
+    bool hasServerErrors() { return !server_errors.empty(); }
+
+    bool hasExpectedClientError(int error);
+    bool hasExpectedServerError(int error);
+
 private:
     const String & query;
-    int server_error = 0;
-    int client_error = 0;
+    ErrorVector server_errors{};
+    ErrorVector client_errors{};
     std::optional<bool> echo;
 
-    void parse(const String & hint, bool is_leading_hint);
+    void parse(Lexer & comment_lexer, bool is_leading_hint);
 
     bool allErrorsExpected(int actual_server_error, int actual_client_error) const
     {
-        return (server_error || client_error) && (server_error == actual_server_error) && (client_error == actual_client_error);
+        if (actual_server_error && std::find(server_errors.begin(), server_errors.end(), actual_server_error) == server_errors.end())
+            return false;
+        if (!actual_server_error && server_errors.size())
+            return false;
+
+        if (actual_client_error && std::find(client_errors.begin(), client_errors.end(), actual_client_error) == client_errors.end())
+            return false;
+        if (!actual_client_error && client_errors.size())
+            return false;
+
+        return true;
     }
 
     bool lostExpectedError(int actual_server_error, int actual_client_error) const
     {
-        return (server_error && !actual_server_error) || (client_error && !actual_client_error);
+        return (server_errors.size() && !actual_server_error) || (client_errors.size() && !actual_client_error);
     }
 };
 
 }
+
+template <>
+struct fmt::formatter<DB::TestHint::ErrorVector>
+{
+    static constexpr auto parse(format_parse_context & ctx)
+    {
+        const auto * it = ctx.begin();
+        const auto * end = ctx.end();
+
+        /// Only support {}.
+        if (it != end && *it != '}')
+            throw format_error("Invalid format");
+
+        return it;
+    }
+
+    template <typename FormatContext>
+    auto format(const DB::TestHint::ErrorVector & ErrorVector, FormatContext & ctx)
+    {
+        if (ErrorVector.empty())
+            return format_to(ctx.out(), "{}", 0);
+        else if (ErrorVector.size() == 1)
+            return format_to(ctx.out(), "{}", ErrorVector[0]);
+        else
+            return format_to(ctx.out(), "[{}]", fmt::join(ErrorVector, ", "));
+    }
+};
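To illustrate the new multi-error hints, a small sketch (not part of the patch) of how a test runner could consult a TestHint; actual_error_code is a placeholder variable, and the error numbers are the ones used in the doc comment above:

    /// Hypothetical usage: a trailing hint that accepts either of two server error codes.
    String query = "SELECT 1; -- { serverError 16, 36 }";
    TestHint hint(query);

    if (hint.hasServerErrors() && !hint.hasExpectedServerError(actual_error_code))
        fmt::print(stderr, "Expected server error: {}, got: {}\n", hint.serverErrors(), actual_error_code);
    /// The fmt::formatter above renders a single code as "16" and a list as "[16, 36]".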
@ -110,23 +110,4 @@ ThreadGroupStatusPtr CurrentThread::getGroup()
     return current_thread->getThreadGroup();
 }
 
-MemoryTracker * CurrentThread::getUserMemoryTracker()
-{
-    if (unlikely(!current_thread))
-        return nullptr;
-
-    auto * tracker = current_thread->memory_tracker.getParent();
-    while (tracker && tracker->level != VariableContext::User)
-        tracker = tracker->getParent();
-
-    return tracker;
-}
-
-void CurrentThread::flushUntrackedMemory()
-{
-    if (unlikely(!current_thread))
-        return;
-    current_thread->flushUntrackedMemory();
-}
-
 }
@ -40,12 +40,6 @@ public:
     /// Group to which belongs current thread
     static ThreadGroupStatusPtr getGroup();
 
-    /// MemoryTracker for user that owns current thread if any
-    static MemoryTracker * getUserMemoryTracker();
-
-    /// Adjust counters in MemoryTracker hierarchy if untracked_memory is not 0.
-    static void flushUntrackedMemory();
-
     /// A logs queue used by TCPHandler to pass logs to a client
     static void attachInternalTextLogsQueue(const std::shared_ptr<InternalTextLogsQueue> & logs_queue,
                                             LogsLevel client_logs_level);
@ -92,7 +92,7 @@ bool Span::addAttributeImpl(std::string_view name, std::string_view value) noexcept
     return true;
 }
 
-SpanHolder::SpanHolder(std::string_view _operation_name)
+SpanHolder::SpanHolder(std::string_view _operation_name, SpanKind _kind)
 {
     if (!current_thread_trace_context.isTraceEnabled())
     {
@ -106,6 +106,7 @@ SpanHolder::SpanHolder(std::string_view _operation_name)
     this->parent_span_id = current_thread_trace_context.span_id;
     this->span_id = thread_local_rng(); // create a new id for this span
     this->operation_name = _operation_name;
+    this->kind = _kind;
     this->start_time_us
         = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
 
@ -13,6 +13,29 @@ class ReadBuffer;
 namespace OpenTelemetry
 {
 
+/// See https://opentelemetry.io/docs/reference/specification/trace/api/#spankind
+enum SpanKind
+{
+    /// Default value. Indicates that the span represents an internal operation within an application,
+    /// as opposed to an operations with remote parents or children.
+    INTERNAL = 0,
+
+    /// Indicates that the span covers server-side handling of a synchronous RPC or other remote request.
+    /// This span is often the child of a remote CLIENT span that was expected to wait for a response.
+    SERVER = 1,
+
+    /// Indicates that the span describes a request to some remote service.
+    /// This span is usually the parent of a remote SERVER span and does not end until the response is received.
+    CLIENT = 2,
+
+    /// Indicates that the span describes the initiators of an asynchronous request. This parent span will often end before the corresponding child CONSUMER span, possibly even before the child span starts.
+    /// In messaging scenarios with batching, tracing individual messages requires a new PRODUCER span per message to be created.
+    PRODUCER = 3,
+
+    /// Indicates that the span describes a child of an asynchronous PRODUCER request
+    CONSUMER = 4
+};
+
 struct Span
 {
     UUID trace_id{};
@ -21,6 +44,7 @@ struct Span
     String operation_name;
     UInt64 start_time_us = 0;
     UInt64 finish_time_us = 0;
+    SpanKind kind = INTERNAL;
     Map attributes;
 
     /// Following methods are declared as noexcept to make sure they're exception safe.
@ -155,7 +179,7 @@ using TracingContextHolderPtr = std::unique_ptr<TracingContextHolder>;
 /// Once it's created or destructed, it automatically maitains the tracing context on the thread that it lives.
 struct SpanHolder : public Span
 {
-    SpanHolder(std::string_view);
+    SpanHolder(std::string_view, SpanKind _kind = INTERNAL);
     ~SpanHolder();
 
     /// Finish a span explicitly if needed.
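A minimal sketch (not part of the patch) of tagging an outgoing-request span with the new kind argument, mirroring the Connection::sendQuery() change above; query_id and query are placeholder variables:

    /// The span records a client-side remote call; it is finished automatically
    /// when the holder goes out of scope.
    OpenTelemetry::SpanHolder span("Connection::sendQuery()", OpenTelemetry::CLIENT);
    span.addAttribute("clickhouse.query_id", query_id);
    span.addAttribute("clickhouse.query", query);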
@ -211,9 +211,14 @@ public:
     void flush()
     {
         auto * file_buffer = tryGetFileBuffer();
-        /// Fsync file system if needed
-        if (file_buffer && log_file_settings.force_sync)
-            file_buffer->sync();
+        if (file_buffer)
+        {
+            /// Fsync file system if needed
+            if (log_file_settings.force_sync)
+                file_buffer->sync();
+            else
+                file_buffer->next();
+        }
     }
 
     uint64_t getStartIndex() const
@ -516,6 +516,7 @@ class IColumn;
     M(Bool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \
     M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
     M(Bool, deduplicate_blocks_in_dependent_materialized_views, false, "Should deduplicate blocks for materialized views if the block is not a duplicate for the table. Use true to always deduplicate in dependent tables.", 0) \
+    M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \
     M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \
     M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \
     M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \
@ -802,7 +803,6 @@ class IColumn;
     M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \
     M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \
     M(Bool, input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format Parquet", 0) \
-    M(UInt64, input_format_parquet_max_block_size, 8192, "Max block size for parquet reader.", 0) \
     M(Bool, input_format_protobuf_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip fields with unsupported types while schema inference for format Protobuf", 0) \
     M(Bool, input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format CapnProto", 0) \
     M(Bool, input_format_orc_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format ORC", 0) \
@ -826,6 +826,8 @@ class IColumn;
     M(UInt64, input_format_csv_skip_first_lines, 0, "Skip specified number of lines at the beginning of data in CSV format", 0) \
     M(UInt64, input_format_tsv_skip_first_lines, 0, "Skip specified number of lines at the beginning of data in TSV format", 0) \
     \
+    M(Bool, input_format_native_allow_types_conversion, true, "Allow data types conversion in Native input format", 0) \
+    \
     M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
     M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
     \
@ -864,6 +866,7 @@ class IColumn;
     M(Bool, output_format_parquet_string_as_string, false, "Use Parquet String type instead of Binary for String columns.", 0) \
     M(Bool, output_format_parquet_fixed_string_as_fixed_byte_array, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary for FixedString columns.", 0) \
     M(ParquetVersion, output_format_parquet_version, "2.latest", "Parquet format version for output format. Supported versions: 1.0, 2.4, 2.6 and 2.latest (default)", 0) \
+    M(ParquetCompression, output_format_parquet_compression_method, "lz4", "Compression method for Parquet output format. Supported codecs: snappy, lz4, brotli, zstd, gzip, none (uncompressed)", 0) \
     M(String, output_format_avro_codec, "", "Compression codec used for output. Possible values: 'null', 'deflate', 'snappy'.", 0) \
     M(UInt64, output_format_avro_sync_interval, 16 * 1024, "Sync interval in bytes.", 0) \
     M(String, output_format_avro_string_column_pattern, "", "For Avro format: regexp of String columns to select as AVRO string.", 0) \
@ -906,8 +909,10 @@ class IColumn;
     M(Bool, output_format_arrow_low_cardinality_as_dictionary, false, "Enable output LowCardinality type as Dictionary Arrow type", 0) \
     M(Bool, output_format_arrow_string_as_string, false, "Use Arrow String type instead of Binary for String columns", 0) \
     M(Bool, output_format_arrow_fixed_string_as_fixed_byte_array, true, "Use Arrow FIXED_SIZE_BINARY type instead of Binary for FixedString columns.", 0) \
+    M(ArrowCompression, output_format_arrow_compression_method, "lz4_frame", "Compression method for Arrow output format. Supported codecs: lz4_frame, zstd, none (uncompressed)", 0) \
     \
     M(Bool, output_format_orc_string_as_string, false, "Use ORC String type instead of Binary for String columns", 0) \
+    M(ORCCompression, output_format_orc_compression_method, "lz4", "Compression method for ORC output format. Supported codecs: lz4, snappy, zlib, zstd, none (uncompressed)", 0) \
     \
     M(EnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::EnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \
     \
@ -81,7 +81,11 @@ namespace SettingsChangesHistory
 static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
 {
     {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"},
-              {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}}},
+              {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"},
+              {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"},
+              {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"},
+              {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"},
+              {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}}},
     {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"},
               {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"},
               {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"},
@ -158,7 +158,7 @@ IMPLEMENT_SETTING_ENUM(EscapingRule, ErrorCodes::BAD_ARGUMENTS,
      {"XML", FormatSettings::EscapingRule::XML},
      {"Raw", FormatSettings::EscapingRule::Raw}})
 
-IMPLEMENT_SETTING_ENUM(MsgPackUUIDRepresentation , ErrorCodes::BAD_ARGUMENTS,
+IMPLEMENT_SETTING_ENUM(MsgPackUUIDRepresentation, ErrorCodes::BAD_ARGUMENTS,
     {{"bin", FormatSettings::MsgPackUUIDRepresentation::BIN},
      {"str", FormatSettings::MsgPackUUIDRepresentation::STR},
      {"ext", FormatSettings::MsgPackUUIDRepresentation::EXT}})
@ -176,11 +176,30 @@ IMPLEMENT_SETTING_ENUM(LocalFSReadMethod, ErrorCodes::BAD_ARGUMENTS,
     {"pread", LocalFSReadMethod::pread},
     {"read", LocalFSReadMethod::read}})
 
 
 IMPLEMENT_SETTING_ENUM_WITH_RENAME(ParquetVersion, ErrorCodes::BAD_ARGUMENTS,
     {{"1.0", FormatSettings::ParquetVersion::V1_0},
      {"2.4", FormatSettings::ParquetVersion::V2_4},
      {"2.6", FormatSettings::ParquetVersion::V2_6},
      {"2.latest", FormatSettings::ParquetVersion::V2_LATEST}})
 
+IMPLEMENT_SETTING_ENUM(ParquetCompression, ErrorCodes::BAD_ARGUMENTS,
+    {{"none", FormatSettings::ParquetCompression::NONE},
+     {"snappy", FormatSettings::ParquetCompression::SNAPPY},
+     {"zstd", FormatSettings::ParquetCompression::ZSTD},
+     {"gzip", FormatSettings::ParquetCompression::GZIP},
+     {"lz4", FormatSettings::ParquetCompression::LZ4},
+     {"brotli", FormatSettings::ParquetCompression::BROTLI}})
+
+IMPLEMENT_SETTING_ENUM(ArrowCompression, ErrorCodes::BAD_ARGUMENTS,
+    {{"none", FormatSettings::ArrowCompression::NONE},
+     {"lz4_frame", FormatSettings::ArrowCompression::LZ4_FRAME},
+     {"zstd", FormatSettings::ArrowCompression::ZSTD}})
+
+IMPLEMENT_SETTING_ENUM(ORCCompression, ErrorCodes::BAD_ARGUMENTS,
+    {{"none", FormatSettings::ORCCompression::NONE},
+     {"snappy", FormatSettings::ORCCompression::SNAPPY},
+     {"zstd", FormatSettings::ORCCompression::ZSTD},
+     {"zlib", FormatSettings::ORCCompression::ZLIB},
+     {"lz4", FormatSettings::ORCCompression::LZ4}})
+
 }
@ -194,6 +194,12 @@ DECLARE_SETTING_ENUM_WITH_RENAME(EscapingRule, FormatSettings::EscapingRule)
 
 DECLARE_SETTING_ENUM_WITH_RENAME(MsgPackUUIDRepresentation, FormatSettings::MsgPackUUIDRepresentation)
 
+DECLARE_SETTING_ENUM_WITH_RENAME(ParquetCompression, FormatSettings::ParquetCompression)
+
+DECLARE_SETTING_ENUM_WITH_RENAME(ArrowCompression, FormatSettings::ArrowCompression)
+
+DECLARE_SETTING_ENUM_WITH_RENAME(ORCCompression, FormatSettings::ORCCompression)
+
 enum class Dialect
 {
     clickhouse,
@ -86,6 +86,6 @@ DataTypePtr recursiveRemoveLowCardinality(const DataTypePtr & type);
 ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & column);
 
 /// Convert column of type from_type to type to_type by converting nested LowCardinality columns.
-ColumnPtr recursiveTypeConversion(const ColumnPtr & column, const DataTypePtr & from_type, const DataTypePtr & to_type);
+ColumnPtr recursiveLowCardinalityTypeConversion(const ColumnPtr & column, const DataTypePtr & from_type, const DataTypePtr & to_type);
 
 }
@ -113,7 +113,7 @@ ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & column)
     return column;
 }
 
-ColumnPtr recursiveTypeConversion(const ColumnPtr & column, const DataTypePtr & from_type, const DataTypePtr & to_type)
+ColumnPtr recursiveLowCardinalityTypeConversion(const ColumnPtr & column, const DataTypePtr & from_type, const DataTypePtr & to_type)
 {
     if (!column)
         return column;
@ -128,7 +128,7 @@ ColumnPtr recursiveLowCardinalityTypeConversion(const ColumnPtr & column, const DataTypePtr &
     if (const auto * column_const = typeid_cast<const ColumnConst *>(column.get()))
     {
         const auto & nested = column_const->getDataColumnPtr();
-        auto nested_no_lc = recursiveTypeConversion(nested, from_type, to_type);
+        auto nested_no_lc = recursiveLowCardinalityTypeConversion(nested, from_type, to_type);
         if (nested.get() == nested_no_lc.get())
             return column;
 
@ -164,7 +164,7 @@ ColumnPtr recursiveLowCardinalityTypeConversion(const ColumnPtr & column, const DataTypePtr &
             const auto & nested_to = to_array_type->getNestedType();
 
             return ColumnArray::create(
-                recursiveTypeConversion(column_array->getDataPtr(), nested_from, nested_to),
+                recursiveLowCardinalityTypeConversion(column_array->getDataPtr(), nested_from, nested_to),
                 column_array->getOffsetsPtr());
         }
     }
@ -187,7 +187,7 @@ ColumnPtr recursiveLowCardinalityTypeConversion(const ColumnPtr & column, const DataTypePtr &
             for (size_t i = 0; i < columns.size(); ++i)
             {
                 auto & element = columns[i];
-                auto element_no_lc = recursiveTypeConversion(element, from_elements.at(i), to_elements.at(i));
+                auto element_no_lc = recursiveLowCardinalityTypeConversion(element, from_elements.at(i), to_elements.at(i));
                 if (element.get() != element_no_lc.get())
                 {
                     element = element_no_lc;
@ -117,7 +117,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
     format_settings.parquet.skip_columns_with_unsupported_types_in_schema_inference = settings.input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference;
     format_settings.parquet.output_string_as_string = settings.output_format_parquet_string_as_string;
     format_settings.parquet.output_fixed_string_as_fixed_byte_array = settings.output_format_parquet_fixed_string_as_fixed_byte_array;
-    format_settings.parquet.max_block_size = settings.input_format_parquet_max_block_size;
+    format_settings.parquet.output_compression_method = settings.output_format_parquet_compression_method;
     format_settings.pretty.charset = settings.output_format_pretty_grid_charset.toString() == "ASCII" ? FormatSettings::Pretty::Charset::ASCII : FormatSettings::Pretty::Charset::UTF8;
     format_settings.pretty.color = settings.output_format_pretty_color;
     format_settings.pretty.max_column_pad_width = settings.output_format_pretty_max_column_pad_width;
@ -158,6 +158,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
     format_settings.arrow.case_insensitive_column_matching = settings.input_format_arrow_case_insensitive_column_matching;
     format_settings.arrow.output_string_as_string = settings.output_format_arrow_string_as_string;
     format_settings.arrow.output_fixed_string_as_fixed_byte_array = settings.output_format_arrow_fixed_string_as_fixed_byte_array;
+    format_settings.arrow.output_compression_method = settings.output_format_arrow_compression_method;
     format_settings.orc.import_nested = settings.input_format_orc_import_nested;
     format_settings.orc.allow_missing_columns = settings.input_format_orc_allow_missing_columns;
     format_settings.orc.row_batch_size = settings.input_format_orc_row_batch_size;
@ -168,6 +169,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
     format_settings.orc.skip_columns_with_unsupported_types_in_schema_inference = settings.input_format_orc_skip_columns_with_unsupported_types_in_schema_inference;
     format_settings.orc.case_insensitive_column_matching = settings.input_format_orc_case_insensitive_column_matching;
     format_settings.orc.output_string_as_string = settings.output_format_orc_string_as_string;
+    format_settings.orc.output_compression_method = settings.output_format_orc_compression_method;
     format_settings.defaults_for_omitted_fields = settings.input_format_defaults_for_omitted_fields;
     format_settings.capn_proto.enum_comparing_mode = settings.format_capn_proto_enum_comparising_mode;
     format_settings.capn_proto.skip_fields_with_unsupported_types_in_schema_inference = settings.input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference;
@ -191,6 +193,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
     format_settings.bson.output_string_as_string = settings.output_format_bson_string_as_string;
     format_settings.bson.skip_fields_with_unsupported_types_in_schema_inference = settings.input_format_bson_skip_fields_with_unsupported_types_in_schema_inference;
     format_settings.max_binary_string_size = settings.format_binary_max_string_size;
+    format_settings.native.allow_types_conversion = settings.input_format_native_allow_types_conversion;
     format_settings.max_parser_depth = context->getSettingsRef().max_parser_depth;
     format_settings.client_protocol_version = context->getClientProtocolVersion();
 
@ -86,6 +86,13 @@ struct FormatSettings
 
     UInt64 max_parser_depth = DBMS_DEFAULT_MAX_PARSER_DEPTH;
 
+    enum class ArrowCompression
+    {
+        NONE,
+        LZ4_FRAME,
+        ZSTD
+    };
+
     struct
     {
         UInt64 row_group_size = 1000000;
@ -96,6 +103,7 @@ struct FormatSettings
         bool case_insensitive_column_matching = false;
         bool output_string_as_string = false;
         bool output_fixed_string_as_fixed_byte_array = true;
+        ArrowCompression output_compression_method = ArrowCompression::NONE;
     } arrow;
 
     struct
@ -183,6 +191,16 @@ struct FormatSettings
         V2_LATEST,
     };
 
+    enum class ParquetCompression
+    {
+        NONE,
+        SNAPPY,
+        ZSTD,
+        LZ4,
+        GZIP,
+        BROTLI,
+    };
+
     struct
     {
         UInt64 row_group_size = 1000000;
@ -193,8 +211,8 @@ struct FormatSettings
         std::unordered_set<int> skip_row_groups = {};
         bool output_string_as_string = false;
         bool output_fixed_string_as_fixed_byte_array = true;
-        UInt64 max_block_size = 8192;
         ParquetVersion output_version;
+        ParquetCompression output_compression_method = ParquetCompression::SNAPPY;
     } parquet;
 
     struct Pretty
@ -276,6 +294,15 @@ struct FormatSettings
         bool accurate_types_of_literals = true;
     } values;
 
+    enum class ORCCompression
+    {
+        NONE,
+        LZ4,
+        SNAPPY,
+        ZSTD,
+        ZLIB,
+    };
+
     struct
     {
         bool import_nested = false;
@ -285,6 +312,7 @@ struct FormatSettings
         bool case_insensitive_column_matching = false;
         std::unordered_set<int> skip_stripes = {};
         bool output_string_as_string = false;
+        ORCCompression output_compression_method = ORCCompression::NONE;
     } orc;
 
     /// For capnProto format we should determine how to
@ -335,6 +363,11 @@ struct FormatSettings
         bool output_string_as_string;
         bool skip_fields_with_unsupported_types_in_schema_inference;
     } bson;
+
+    struct
+    {
+        bool allow_types_conversion = true;
+    } native;
 };
 
 }
@ -15,6 +15,8 @@
|
|||||||
#include <DataTypes/Serializations/SerializationInfo.h>
|
#include <DataTypes/Serializations/SerializationInfo.h>
|
||||||
#include <DataTypes/DataTypeAggregateFunction.h>
|
#include <DataTypes/DataTypeAggregateFunction.h>
|
||||||
|
|
||||||
|
#include <Interpreters/castColumn.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -39,12 +41,14 @@ NativeReader::NativeReader(
|
|||||||
UInt64 server_revision_,
|
UInt64 server_revision_,
|
||||||
bool skip_unknown_columns_,
|
bool skip_unknown_columns_,
|
||||||
bool null_as_default_,
|
bool null_as_default_,
|
||||||
|
bool allow_types_conversion_,
|
||||||
BlockMissingValues * block_missing_values_)
|
BlockMissingValues * block_missing_values_)
|
||||||
: istr(istr_)
|
: istr(istr_)
|
||||||
, header(header_)
|
, header(header_)
|
||||||
, server_revision(server_revision_)
|
, server_revision(server_revision_)
|
||||||
, skip_unknown_columns(skip_unknown_columns_)
|
, skip_unknown_columns(skip_unknown_columns_)
|
||||||
, null_as_default(null_as_default_)
|
, null_as_default(null_as_default_)
|
||||||
|
, allow_types_conversion(allow_types_conversion_)
|
||||||
, block_missing_values(block_missing_values_)
|
, block_missing_values(block_missing_values_)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
@ -204,11 +208,31 @@ Block NativeReader::read()
|
|||||||
if (null_as_default)
|
if (null_as_default)
|
||||||
insertNullAsDefaultIfNeeded(column, header_column, header.getPositionByName(column.name), block_missing_values);
|
insertNullAsDefaultIfNeeded(column, header_column, header.getPositionByName(column.name), block_missing_values);
|
||||||
|
|
||||||
/// Support insert from old clients without low cardinality type.
|
|
||||||
if (!header_column.type->equals(*column.type))
|
if (!header_column.type->equals(*column.type))
|
||||||
{
|
{
|
||||||
column.column = recursiveTypeConversion(column.column, column.type, header.safeGetByPosition(i).type);
|
if (allow_types_conversion)
|
||||||
column.type = header.safeGetByPosition(i).type;
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
column.column = castColumn(column, header_column.type);
|
||||||
|
}
|
||||||
|
catch (Exception & e)
|
||||||
|
{
|
||||||
|
e.addMessage(fmt::format(
|
||||||
|
"while converting column \"{}\" from type {} to type {}",
|
||||||
|
column.name,
|
||||||
|
column.type->getName(),
|
||||||
|
header_column.type->getName()));
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
/// Support insert from old clients without low cardinality type.
|
||||||
|
column.column = recursiveLowCardinalityTypeConversion(column.column, column.type, header_column.type);
|
||||||
|
}
|
||||||
|
|
||||||
|
column.type = header_column.type;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@@ -30,6 +30,7 @@ public:
         UInt64 server_revision_,
         bool skip_unknown_columns_ = false,
         bool null_as_default_ = false,
+        bool allow_types_conversion_ = false,
         BlockMissingValues * block_missing_values_ = nullptr);

     /// For cases when we have an index. It allows to skip columns. Only columns specified in the index will be read.
@@ -51,6 +52,7 @@ private:
     UInt64 server_revision;
     bool skip_unknown_columns = false;
     bool null_as_default = false;
+    bool allow_types_conversion = false;
     BlockMissingValues * block_missing_values = nullptr;

     bool use_index = false;
@@ -984,13 +984,16 @@ DataTypePtr tryInferNumberFromString(std::string_view field, const FormatSetting
         if (tryReadIntText(tmp_int, buf) && buf.eof())
             return std::make_shared<DataTypeInt64>();

+        /// We can safely get back to the start of buffer, because we read from a string and we didn't reach eof.
+        buf.position() = buf.buffer().begin();
+
         /// In case of Int64 overflow, try to infer UInt64
         UInt64 tmp_uint;
         if (tryReadIntText(tmp_uint, buf) && buf.eof())
             return std::make_shared<DataTypeUInt64>();
     }

-    /// We cam safely get back to the start of buffer, because we read from a string and we didn't reach eof.
+    /// We can safely get back to the start of buffer, because we read from a string and we didn't reach eof.
     buf.position() = buf.buffer().begin();

     Float64 tmp;
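For illustration only, not part of the change set above: the hunk rewinds the buffer and retries with a wider type when Int64 parsing overflows, so inference falls back from Int64 to UInt64 and finally to Float64. A self-contained sketch of the same fallback order using plain C library parsing; the function name is a hypothetical stand-in and this is not the ClickHouse implementation.

#include <cerrno>
#include <cstdio>
#include <cstdlib>

// Returns the type name a field would be inferred as, trying Int64, then UInt64, then Float64.
static const char * inferNumericType(const char * field)
{
    char * end = nullptr;

    errno = 0;
    (void)std::strtoll(field, &end, 10);
    if (errno == 0 && end != field && *end == '\0')
        return "Int64";

    errno = 0;
    (void)std::strtoull(field, &end, 10);   // retried after Int64 overflow, like the hunk above
    if (errno == 0 && end != field && *end == '\0')
        return "UInt64";

    errno = 0;
    (void)std::strtod(field, &end);
    if (errno == 0 && end != field && *end == '\0')
        return "Float64";

    return "String";
}

int main()
{
    std::printf("%s\n", inferNumericType("42"));                   // Int64
    std::printf("%s\n", inferNumericType("18446744073709551615")); // UInt64, overflows Int64
    std::printf("%s\n", inferNumericType("3.14"));                 // Float64
}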
@@ -38,6 +38,8 @@ public:
     String getName() const override { return name; }
     size_t getNumberOfArguments() const override { return 0; }
     bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
+    bool isDeterministic() const override { return false; }
+    bool isDeterministicInScopeOfQuery() const override { return false; }

     DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
     {
@@ -14,28 +14,33 @@ namespace ErrorCodes
 static size_t encodeURL(const char * __restrict src, size_t src_size, char * __restrict dst, bool space_as_plus)
 {
     char * dst_pos = dst;
-    for (size_t i = 0; i < src_size; i++)
+    for (size_t i = 0; i < src_size; ++i)
     {
         if ((src[i] >= '0' && src[i] <= '9') || (src[i] >= 'a' && src[i] <= 'z') || (src[i] >= 'A' && src[i] <= 'Z')
             || src[i] == '-' || src[i] == '_' || src[i] == '.' || src[i] == '~')
         {
-            *dst_pos++ = src[i];
+            *dst_pos = src[i];
+            ++dst_pos;
         }
         else if (src[i] == ' ' && space_as_plus)
         {
-            *dst_pos++ = '+';
+            *dst_pos = '+';
+            ++dst_pos;
         }
         else
         {
-            *dst_pos++ = '%';
-            *dst_pos++ = hexDigitUppercase(src[i] >> 4);
-            *dst_pos++ = hexDigitUppercase(src[i] & 0xf);
+            dst_pos[0] = '%';
+            ++dst_pos;
+            writeHexByteUppercase(src[i], dst_pos);
+            dst_pos += 2;
         }
     }
-    *dst_pos++ = src[src_size];
+    *dst_pos = 0;
+    ++dst_pos;
     return dst_pos - dst;
 }

+
 /// We assume that size of the dst buf isn't less than src_size.
 static size_t decodeURL(const char * __restrict src, size_t src_size, char * __restrict dst, bool plus_as_space)
 {
@@ -120,10 +125,14 @@ struct CodeURLComponentImpl
         ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets)
     {
         if (code_strategy == encode)
-            //the destination(res_data) string is at most three times the length of the source string
+        {
+            /// the destination(res_data) string is at most three times the length of the source string
             res_data.resize(data.size() * 3);
+        }
         else
+        {
             res_data.resize(data.size());
+        }

         size_t size = offsets.size();
         res_offsets.resize(size);
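For illustration only, not part of the change set above: encodeURL keeps RFC 3986 unreserved characters as-is, optionally turns a space into '+', and expands every other byte to %XX with uppercase hex digits, which is why the destination is sized at three times the source. A standalone sketch of the same rule on a std::string; the names are hypothetical and the buffer handling is simplified compared to the column-based code.

#include <cstdio>
#include <string>

static std::string percentEncode(const std::string & src, bool space_as_plus)
{
    static const char * hex = "0123456789ABCDEF";
    std::string dst;
    dst.reserve(src.size() * 3); // worst case: every byte expands to three characters

    for (unsigned char c : src)
    {
        if ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
            || c == '-' || c == '_' || c == '.' || c == '~')
            dst += static_cast<char>(c);        // unreserved characters pass through
        else if (c == ' ' && space_as_plus)
            dst += '+';
        else
        {
            dst += '%';                         // everything else becomes %XX
            dst += hex[c >> 4];
            dst += hex[c & 0xF];
        }
    }
    return dst;
}

int main()
{
    std::printf("%s\n", percentEncode("Hello World/?", true).c_str()); // Hello+World%2F%3F
}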
@@ -91,6 +91,8 @@ ColumnPtr FunctionArrayReverse::executeImpl(const ColumnsWithTypeAndName & argum
         || executeFixedString(*src_inner_col, offsets, *res_inner_col)
         || executeGeneric(*src_inner_col, offsets, *res_inner_col);

+    chassert(bool(src_nullable_col) == bool(res_nullable_col));
+
     if (src_nullable_col)
         if (!executeNumber<UInt8>(src_nullable_col->getNullMapColumn(), offsets, res_nullable_col->getNullMapColumn()))
             throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of null map of the first argument of function {}",
@@ -70,7 +70,7 @@ private:

             if (!has_prev_value)
             {
-                dst[i] = is_first_line_zero ? 0 : src[i];
+                dst[i] = is_first_line_zero ? static_cast<Dst>(0) : static_cast<Dst>(src[i]);
                 prev = src[i];
                 has_prev_value = true;
             }
@@ -102,6 +102,10 @@ private:
             f(UInt32());
         else if (which.isUInt64())
             f(UInt64());
+        else if (which.isUInt128())
+            f(UInt128());
+        else if (which.isUInt256())
+            f(UInt256());
         else if (which.isInt8())
             f(Int8());
         else if (which.isInt16())
@@ -110,6 +114,10 @@ private:
             f(Int32());
         else if (which.isInt64())
             f(Int64());
+        else if (which.isInt128())
+            f(Int128());
+        else if (which.isInt256())
+            f(Int256());
         else if (which.isFloat32())
             f(Float32());
         else if (which.isFloat64())
@@ -259,12 +259,12 @@ namespace detail
             {
                 try
                 {
-                    callWithRedirects<true>(response, Poco::Net::HTTPRequest::HTTP_HEAD);
+                    callWithRedirects<true>(response, Poco::Net::HTTPRequest::HTTP_HEAD, true);
                     break;
                 }
                 catch (const Poco::Exception & e)
                 {
-                    if (i == settings.http_max_tries - 1)
+                    if (i == settings.http_max_tries - 1 || !isRetriableError(response.getStatus()))
                         throw;

                     LOG_ERROR(log, "Failed to make HTTP_HEAD request to {}. Error: {}", uri.toString(), e.displayText());
@@ -353,11 +353,12 @@ namespace detail

         static bool isRetriableError(const Poco::Net::HTTPResponse::HTTPStatus http_status) noexcept
         {
-            constexpr std::array non_retriable_errors{
+            static constexpr std::array non_retriable_errors{
                 Poco::Net::HTTPResponse::HTTPStatus::HTTP_BAD_REQUEST,
                 Poco::Net::HTTPResponse::HTTPStatus::HTTP_UNAUTHORIZED,
                 Poco::Net::HTTPResponse::HTTPStatus::HTTP_NOT_FOUND,
                 Poco::Net::HTTPResponse::HTTPStatus::HTTP_FORBIDDEN,
+                Poco::Net::HTTPResponse::HTTPStatus::HTTP_NOT_IMPLEMENTED,
                 Poco::Net::HTTPResponse::HTTPStatus::HTTP_METHOD_NOT_ALLOWED};

             return std::all_of(
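For illustration only, not part of the change set above: the std::all_of call treats a status as retriable exactly when it is absent from the fixed list of non-retriable statuses, so adding HTTP_NOT_IMPLEMENTED stops retries for 501 responses. A minimal sketch of the same predicate with plain integers standing in for Poco::Net::HTTPResponse::HTTPStatus values.

#include <algorithm>
#include <array>
#include <cstdio>

// A status is retriable only if it does not appear in the non-retriable list.
static bool isRetriableError(int http_status) noexcept
{
    static constexpr std::array non_retriable_errors{400, 401, 404, 403, 501, 405};
    return std::all_of(non_retriable_errors.begin(), non_retriable_errors.end(),
                       [&](int status) { return http_status != status; });
}

int main()
{
    std::printf("%d %d\n", isRetriableError(503), isRetriableError(501)); // 1 0
}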
@@ -18,7 +18,6 @@
 #include <Parsers/ASTInsertQuery.h>
 #include <Parsers/queryToString.h>
 #include <Storages/IStorage.h>
-#include <Common/CurrentThread.h>
 #include <Common/SipHash.h>
 #include <Common/FieldVisitorHash.h>
 #include <Common/DateLUT.h>
@@ -104,10 +103,9 @@ bool AsynchronousInsertQueue::InsertQuery::operator==(const InsertQuery & other)
     return query_str == other.query_str && settings == other.settings;
 }

-AsynchronousInsertQueue::InsertData::Entry::Entry(String && bytes_, String && query_id_, MemoryTracker * user_memory_tracker_)
+AsynchronousInsertQueue::InsertData::Entry::Entry(String && bytes_, String && query_id_)
     : bytes(std::move(bytes_))
     , query_id(std::move(query_id_))
-    , user_memory_tracker(user_memory_tracker_)
     , create_time(std::chrono::system_clock::now())
 {
 }
@@ -236,7 +234,7 @@ AsynchronousInsertQueue::push(ASTPtr query, ContextPtr query_context)
     if (auto quota = query_context->getQuota())
         quota->used(QuotaType::WRITTEN_BYTES, bytes.size());

-    auto entry = std::make_shared<InsertData::Entry>(std::move(bytes), query_context->getCurrentQueryId(), CurrentThread::getUserMemoryTracker());
+    auto entry = std::make_shared<InsertData::Entry>(std::move(bytes), query_context->getCurrentQueryId());

     InsertQuery key{query, settings};
     InsertDataPtr data_to_process;
@@ -1,7 +1,6 @@
 #pragma once

 #include <Parsers/IAST_fwd.h>
-#include <Common/CurrentThread.h>
 #include <Common/ThreadPool.h>
 #include <Core/Settings.h>
 #include <Poco/Logger.h>
@@ -60,31 +59,6 @@ private:
         UInt128 calculateHash() const;
     };

-    struct UserMemoryTrackerSwitcher
-    {
-        explicit UserMemoryTrackerSwitcher(MemoryTracker * new_tracker)
-        {
-            auto * thread_tracker = CurrentThread::getMemoryTracker();
-            prev_untracked_memory = current_thread->untracked_memory;
-            prev_memory_tracker_parent = thread_tracker->getParent();
-
-            current_thread->untracked_memory = 0;
-            thread_tracker->setParent(new_tracker);
-        }
-
-        ~UserMemoryTrackerSwitcher()
-        {
-            CurrentThread::flushUntrackedMemory();
-            auto * thread_tracker = CurrentThread::getMemoryTracker();
-
-            current_thread->untracked_memory = prev_untracked_memory;
-            thread_tracker->setParent(prev_memory_tracker_parent);
-        }
-
-        MemoryTracker * prev_memory_tracker_parent;
-        Int64 prev_untracked_memory;
-    };
-
     struct InsertData
     {
         struct Entry
@@ -92,10 +66,9 @@ private:
         public:
             const String bytes;
             const String query_id;
-            MemoryTracker * const user_memory_tracker;
             const std::chrono::time_point<std::chrono::system_clock> create_time;

-            Entry(String && bytes_, String && query_id_, MemoryTracker * user_memory_tracker_);
+            Entry(String && bytes_, String && query_id_);

             void finish(std::exception_ptr exception_ = nullptr);
             std::future<void> getFuture() { return promise.get_future(); }
@@ -106,19 +79,6 @@ private:
             std::atomic_bool finished = false;
         };

-        ~InsertData()
-        {
-            auto it = entries.begin();
-            // Entries must be destroyed in context of user who runs async insert.
-            // Each entry in the list may correspond to a different user,
-            // so we need to switch current thread's MemoryTracker parent on each iteration.
-            while (it != entries.end())
-            {
-                UserMemoryTrackerSwitcher switcher((*it)->user_memory_tracker);
-                it = entries.erase(it);
-            }
-        }
-
         using EntryPtr = std::shared_ptr<Entry>;

         std::list<EntryPtr> entries;
@@ -34,7 +34,7 @@ IFileCachePriority::WriteIterator LRUFileCachePriority::add(const Key & key, siz
     CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
     CurrentMetrics::add(CurrentMetrics::FilesystemCacheElements);

-    LOG_TRACE(log, "Added entry into LRU queue, key: {}, offset: {}", key.toString(), offset);
+    LOG_TEST(log, "Added entry into LRU queue, key: {}, offset: {}", key.toString(), offset);

     return std::make_shared<LRUFileCacheIterator>(this, iter);
 }
@@ -54,7 +54,7 @@ void LRUFileCachePriority::removeAll(std::lock_guard<std::mutex> &)
     CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, cache_size);
     CurrentMetrics::sub(CurrentMetrics::FilesystemCacheElements, queue.size());

-    LOG_TRACE(log, "Removed all entries from LRU queue");
+    LOG_TEST(log, "Removed all entries from LRU queue");

     queue.clear();
     cache_size = 0;
@@ -88,7 +88,7 @@ void LRUFileCachePriority::LRUFileCacheIterator::removeAndGetNext(std::lock_guar
     CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, queue_iter->size);
     CurrentMetrics::sub(CurrentMetrics::FilesystemCacheElements);

-    LOG_TRACE(cache_priority->log, "Removed entry from LRU queue, key: {}, offset: {}", queue_iter->key.toString(), queue_iter->offset);
+    LOG_TEST(cache_priority->log, "Removed entry from LRU queue, key: {}, offset: {}", queue_iter->key.toString(), queue_iter->offset);

     queue_iter = cache_priority->queue.erase(queue_iter);
 }
@@ -169,11 +169,11 @@ DDLTaskPtr DDLWorker::initAndCheckTask(const String & entry_name, String & out_r
         return {};
     }

-    auto write_error_status = [&](const String & host_id, const String & error_message, const String & reason)
+    auto write_error_status = [&](const String & host_id, const ExecutionStatus & status, const String & reason)
     {
-        LOG_ERROR(log, "Cannot parse DDL task {}: {}. Will try to send error status: {}", entry_name, reason, error_message);
+        LOG_ERROR(log, "Cannot parse DDL task {}: {}. Will try to send error status: {}", entry_name, reason, status.message);
         createStatusDirs(entry_path, zookeeper);
-        zookeeper->tryCreate(fs::path(entry_path) / "finished" / host_id, error_message, zkutil::CreateMode::Persistent);
+        zookeeper->tryCreate(fs::path(entry_path) / "finished" / host_id, status.serializeText(), zkutil::CreateMode::Persistent);
     };

     try
@@ -187,7 +187,7 @@ DDLTaskPtr DDLWorker::initAndCheckTask(const String & entry_name, String & out_r
         /// We can try to create fail node using FQDN if it equal to host name in cluster config attempt will be successful.
         /// Otherwise, that node will be ignored by DDLQueryStatusSource.
         out_reason = "Incorrect task format";
-        write_error_status(host_fqdn_id, ExecutionStatus::fromCurrentException().serializeText(), out_reason);
+        write_error_status(host_fqdn_id, ExecutionStatus::fromCurrentException(), out_reason);
         return {};
     }

@@ -212,7 +212,7 @@ DDLTaskPtr DDLWorker::initAndCheckTask(const String & entry_name, String & out_r
     catch (...)
     {
         out_reason = "Cannot parse query or obtain cluster info";
-        write_error_status(task->host_id_str, ExecutionStatus::fromCurrentException().serializeText(), out_reason);
+        write_error_status(task->host_id_str, ExecutionStatus::fromCurrentException(), out_reason);
         return {};
     }

@@ -542,6 +542,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper)
     OpenTelemetry::TracingContextHolder tracing_ctx_holder(__PRETTY_FUNCTION__ ,
                                                            task.entry.tracing_context,
                                                            this->context->getOpenTelemetrySpanLog());
+    tracing_ctx_holder.root_span.kind = OpenTelemetry::CONSUMER;

     String active_node_path = task.getActiveNodePath();
     String finished_node_path = task.getFinishedNodePath();
@@ -650,7 +651,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper)
         bool status_written_by_table_or_db = task.ops.empty();
         if (status_written_by_table_or_db)
         {
-            throw Exception(ErrorCodes::UNFINISHED, "Unexpected error: {}", task.execution_status.serializeText());
+            throw Exception(ErrorCodes::UNFINISHED, "Unexpected error: {}", task.execution_status.message);
         }
         else
         {
@@ -1089,6 +1089,7 @@ static std::shared_ptr<IJoin> chooseJoinAlgorithm(

         if (MergeJoin::isSupported(analyzed_join))
             return std::make_shared<JoinSwitcher>(analyzed_join, right_sample_block);
+        return std::make_shared<HashJoin>(analyzed_join, right_sample_block);
     }

     throw Exception(ErrorCodes::NOT_IMPLEMENTED,
@@ -44,6 +44,10 @@ public:
         const auto & on_expr = table_join->getOnlyClause();
         bool support_conditions = !on_expr.on_filter_condition_left && !on_expr.on_filter_condition_right;

+        if (!on_expr.analyzer_left_filter_condition_column_name.empty() ||
+            !on_expr.analyzer_right_filter_condition_column_name.empty())
+            support_conditions = false;
+
         /// Key column can change nullability and it's not handled on type conversion stage, so algorithm should be aware of it
         bool support_using_and_nulls = !table_join->hasUsing() || !table_join->joinUseNulls();

@@ -226,6 +226,12 @@ BlockIO InterpreterSelectQueryAnalyzer::execute()
     return result;
 }

+QueryPlan & InterpreterSelectQueryAnalyzer::getQueryPlan()
+{
+    planner.buildQueryPlanIfNeeded();
+    return planner.getQueryPlan();
+}
+
 QueryPlan && InterpreterSelectQueryAnalyzer::extractQueryPlan() &&
 {
     planner.buildQueryPlanIfNeeded();
@@ -51,6 +51,8 @@ public:

     BlockIO execute() override;

+    QueryPlan & getQueryPlan();
+
     QueryPlan && extractQueryPlan() &&;

     QueryPipelineBuilder buildQueryPipeline();
@@ -8,6 +8,7 @@
 #include <DataTypes/DataTypeString.h>
 #include <DataTypes/DataTypeMap.h>
 #include <DataTypes/DataTypeUUID.h>
+#include <DataTypes/DataTypeEnum.h>
 #include <Interpreters/Context.h>

 #include <base/hex.h>
@@ -20,11 +21,23 @@ namespace DB

 NamesAndTypesList OpenTelemetrySpanLogElement::getNamesAndTypes()
 {
+    auto span_kind_type = std::make_shared<DataTypeEnum8>(
+        DataTypeEnum8::Values
+        {
+            {"INTERNAL", static_cast<Int8>(OpenTelemetry::INTERNAL)},
+            {"SERVER", static_cast<Int8>(OpenTelemetry::SERVER)},
+            {"CLIENT", static_cast<Int8>(OpenTelemetry::CLIENT)},
+            {"PRODUCER", static_cast<Int8>(OpenTelemetry::PRODUCER)},
+            {"CONSUMER", static_cast<Int8>(OpenTelemetry::CONSUMER)}
+        }
+    );
+
     return {
         {"trace_id", std::make_shared<DataTypeUUID>()},
         {"span_id", std::make_shared<DataTypeUInt64>()},
         {"parent_span_id", std::make_shared<DataTypeUInt64>()},
         {"operation_name", std::make_shared<DataTypeString>()},
+        {"kind", std::move(span_kind_type)},
         // DateTime64 is really unwieldy -- there is no "normal" way to convert
         // it to an UInt64 count of microseconds, except:
         // 1) reinterpretAsUInt64(reinterpretAsFixedString(date)), which just
@@ -59,6 +72,7 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const
     columns[i++]->insert(span_id);
     columns[i++]->insert(parent_span_id);
     columns[i++]->insert(operation_name);
+    columns[i++]->insert(kind);
     columns[i++]->insert(start_time_us);
     columns[i++]->insert(finish_time_us);
     columns[i++]->insert(DateLUT::instance().toDayNum(finish_time_us / 1000000).toUnderType());
@@ -64,6 +64,7 @@ ASTs OptimizeIfChainsVisitor::ifChain(const ASTPtr & child)
         throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST for function 'if'");

     const auto * function_args = function_node->arguments->as<ASTExpressionList>();
+    chassert(function_args);

     if (!function_args || function_args->children.size() != 3)
         throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
@@ -50,7 +50,16 @@ void ReplaceQueryParameterVisitor::visit(ASTPtr & ast)
 void ReplaceQueryParameterVisitor::visitChildren(ASTPtr & ast)
 {
     for (auto & child : ast->children)
+    {
+        void * old_ptr = child.get();
         visit(child);
+        void * new_ptr = child.get();
+
+        /// Some AST classes have naked pointers to children elements as members.
+        /// We have to replace them if the child was replaced.
+        if (new_ptr != old_ptr)
+            ast->updatePointerToChild(old_ptr, new_ptr);
+    }
 }

 const String & ReplaceQueryParameterVisitor::getParamValue(const String & name)
@@ -89,6 +98,7 @@ void ReplaceQueryParameterVisitor::visitQueryParameter(ASTPtr & ast)
         literal = value;
     else
         literal = temp_column[0];

     ast = addTypeConversionToAST(std::make_shared<ASTLiteral>(literal), type_name);
+
     /// Keep the original alias.
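For illustration only, not part of the change set above: the visitChildren change handles parents that keep a raw, non-owning pointer to one of their children; after a child is replaced, that shortcut has to be re-pointed, which is what updatePointerToChild does. A generic sketch of the pattern with a hypothetical Node type, not the ClickHouse AST classes.

#include <cassert>
#include <memory>
#include <vector>

struct Node
{
    std::vector<std::shared_ptr<Node>> children; // owning pointers
    Node * shortcut = nullptr;                   // naked pointer into `children`

    void updatePointerToChild(void * old_ptr, void * new_ptr)
    {
        if (shortcut == old_ptr)
            shortcut = static_cast<Node *>(new_ptr);
    }
};

int main()
{
    Node parent;
    parent.children.push_back(std::make_shared<Node>());
    parent.shortcut = parent.children[0].get();

    // Replace the child, then fix the naked pointer the same way the visitor does.
    void * old_ptr = parent.children[0].get();
    parent.children[0] = std::make_shared<Node>();
    void * new_ptr = parent.children[0].get();
    if (new_ptr != old_ptr)
        parent.updatePointerToChild(old_ptr, new_ptr);

    assert(parent.shortcut == parent.children[0].get());
    return 0;
}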
@@ -426,6 +426,8 @@ void SystemLog<LogElement>::flushImpl(const std::vector<LogElement> & to_flush,
         // we need query context to do inserts to target table with MV containing subqueries or joins
         auto insert_context = Context::createCopy(context);
         insert_context->makeQueryContext();
+        /// We always want to deliver the data to the original table regardless of the MVs
+        insert_context->setSetting("materialized_views_ignore_errors", true);

         InterpreterInsertQuery interpreter(query_ptr, insert_context);
         BlockIO io = interpreter.execute();
@@ -55,7 +55,7 @@ bool isSupportedAlterType(int type)

 BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context, const DDLQueryOnClusterParams & params)
 {
-    OpenTelemetry::SpanHolder span(__FUNCTION__);
+    OpenTelemetry::SpanHolder span(__FUNCTION__, OpenTelemetry::PRODUCER);

     if (context->getCurrentTransaction() && context->getSettingsRef().throw_on_unsupported_query_inside_transaction)
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ON CLUSTER queries inside transactions are not supported");
Some files were not shown because too many files have changed in this diff.