diff --git a/.github/ISSUE_TEMPLATE/backward-compatibility.md b/.github/ISSUE_TEMPLATE/backward-compatibility.md index f40a9d6a915..8f87197e73d 100644 --- a/.github/ISSUE_TEMPLATE/backward-compatibility.md +++ b/.github/ISSUE_TEMPLATE/backward-compatibility.md @@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to. * Which interface to use, if matters * Non-default settings, if any * `CREATE TABLE` statements for all tables involved -* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary * Queries to run that lead to unexpected result **Error message and/or stacktrace** diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index eb73dc3e435..1445af4b051 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to. * Which interface to use, if matters * Non-default settings, if any * `CREATE TABLE` statements for all tables involved -* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary * Queries to run that lead to unexpected result **Expected behavior** diff --git a/.github/ISSUE_TEMPLATE/performance-issue.md b/.github/ISSUE_TEMPLATE/performance-issue.md index 96c8cb77afb..d0e549039a6 100644 --- a/.github/ISSUE_TEMPLATE/performance-issue.md +++ b/.github/ISSUE_TEMPLATE/performance-issue.md @@ -17,7 +17,7 @@ What exactly works slower than expected? * Which interface to use, if matters * Non-default settings, if any * `CREATE TABLE` statements for all tables involved -* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary * Queries to run that lead to slow performance **Expected performance** diff --git a/.github/ISSUE_TEMPLATE/unexpected-behaviour.md b/.github/ISSUE_TEMPLATE/unexpected-behaviour.md index 25557693140..27ab217ca33 100644 --- a/.github/ISSUE_TEMPLATE/unexpected-behaviour.md +++ b/.github/ISSUE_TEMPLATE/unexpected-behaviour.md @@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to. 
* Which interface to use, if matters * Non-default settings, if any * `CREATE TABLE` statements for all tables involved -* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary * Queries to run that lead to unexpected result **Expected behavior** diff --git a/.github/ISSUE_TEMPLATE/usability-issue.md b/.github/ISSUE_TEMPLATE/usability-issue.md index daa83878182..6a084a72619 100644 --- a/.github/ISSUE_TEMPLATE/usability-issue.md +++ b/.github/ISSUE_TEMPLATE/usability-issue.md @@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to. * Which interface to use, if matters * Non-default settings, if any * `CREATE TABLE` statements for all tables involved -* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary * Queries to run that lead to unexpected result **Expected behavior** diff --git a/.gitignore b/.gitignore index 6aa331edc84..7acee3ad016 100644 --- a/.gitignore +++ b/.gitignore @@ -16,9 +16,9 @@ /docs/publish /docs/edit /docs/website -/docs/venv/ -/docs/tools/venv/ -/docs/tools/translate/venv/ +/docs/venv +/docs/tools/venv +/docs/tools/translate/venv /docs/tools/translate/output.md /docs/en/single.md /docs/ru/single.md @@ -73,100 +73,100 @@ contrib/libpoco/Poco/ contrib/libpoco/bin/ contrib/libpoco/cmake_uninstall.cmake contrib/libre2/re2_st/ -dbms/src/Client/clickhouse-benchmark -dbms/src/Client/clickhouse-client -dbms/src/Client/tests/test-connect -dbms/src/Common/tests/arena_with_free_lists -dbms/src/Common/tests/auto_array -dbms/src/Common/tests/compact_array -dbms/src/Common/tests/hash_table -dbms/src/Common/tests/hashes_test -dbms/src/Common/tests/int_hashes_perf -dbms/src/Common/tests/lru_cache -dbms/src/Common/tests/parallel_aggregation -dbms/src/Common/tests/parallel_aggregation2 -dbms/src/Common/tests/radix_sort -dbms/src/Common/tests/shell_command_test -dbms/src/Common/tests/simple_cache -dbms/src/Common/tests/sip_hash -dbms/src/Common/tests/sip_hash_perf -dbms/src/Common/tests/small_table -dbms/src/Core/tests/exception -dbms/src/Core/tests/field -dbms/src/Core/tests/rvo_test -dbms/src/Core/tests/string_pool -dbms/src/DataStreams/tests/aggregating_stream -dbms/src/DataStreams/tests/block_tab_separated_streams -dbms/src/DataStreams/tests/collapsing_sorted_stream -dbms/src/DataStreams/tests/expression_stream -dbms/src/DataStreams/tests/filter_stream -dbms/src/DataStreams/tests/filter_stream_hitlog -dbms/src/DataStreams/tests/fork_streams -dbms/src/DataStreams/tests/glue_streams -dbms/src/DataStreams/tests/json_streams -dbms/src/DataStreams/tests/native_streams -dbms/src/DataStreams/tests/sorting_stream -dbms/src/DataStreams/tests/tab_separated_streams -dbms/src/DataStreams/tests/union_stream -dbms/src/DataStreams/tests/union_stream2 -dbms/src/DataTypes/tests/data_type_string -dbms/src/DataTypes/tests/data_types_number_fixed -dbms/src/Functions/tests/functions_arithmetic -dbms/src/Functions/tests/logical_functions_performance 
-dbms/src/Functions/tests/number_traits -dbms/src/IO/tests/async_write -dbms/src/IO/tests/cached_compressed_read_buffer -dbms/src/IO/tests/compressed_buffer -dbms/src/IO/tests/hashing_read_buffer -dbms/src/IO/tests/hashing_write_buffer -dbms/src/IO/tests/io_and_exceptions -dbms/src/IO/tests/io_operators -dbms/src/IO/tests/mempbrk -dbms/src/IO/tests/o_direct_and_dirty_pages -dbms/src/IO/tests/parse_int_perf -dbms/src/IO/tests/parse_int_perf2 -dbms/src/IO/tests/read_buffer -dbms/src/IO/tests/read_buffer_aio -dbms/src/IO/tests/read_buffer_perf -dbms/src/IO/tests/read_escaped_string -dbms/src/IO/tests/read_float_perf -dbms/src/IO/tests/read_write_int -dbms/src/IO/tests/valid_utf8 -dbms/src/IO/tests/valid_utf8_perf -dbms/src/IO/tests/var_uint -dbms/src/IO/tests/write_buffer -dbms/src/IO/tests/write_buffer_aio -dbms/src/IO/tests/write_buffer_perf -dbms/src/Interpreters/tests/address_patterns -dbms/src/Interpreters/tests/aggregate -dbms/src/Interpreters/tests/compiler_test -dbms/src/Interpreters/tests/create_query -dbms/src/Interpreters/tests/expression -dbms/src/Interpreters/tests/expression_analyzer -dbms/src/Interpreters/tests/hash_map -dbms/src/Interpreters/tests/hash_map2 -dbms/src/Interpreters/tests/hash_map3 -dbms/src/Interpreters/tests/hash_map_string -dbms/src/Interpreters/tests/hash_map_string_2 -dbms/src/Interpreters/tests/hash_map_string_3 -dbms/src/Interpreters/tests/hash_map_string_small -dbms/src/Interpreters/tests/in_join_subqueries_preprocessor -dbms/src/Interpreters/tests/logical_expressions_optimizer -dbms/src/Interpreters/tests/select_query -dbms/src/Interpreters/tests/two_level_hash_map -dbms/src/Interpreters/tests/users -dbms/src/Parsers/tests/create_parser -dbms/src/Parsers/tests/select_parser -dbms/src/Server/clickhouse-server -dbms/src/Server/clickhouse-server.init -dbms/src/Storages/tests/hit_log -dbms/src/Storages/tests/merge_tree -dbms/src/Storages/tests/part_checker -dbms/src/Storages/tests/part_name -dbms/src/Storages/tests/pk_condition -dbms/src/Storages/tests/seek_speed_test -dbms/src/Storages/tests/storage_log -dbms/src/Storages/tests/system_numbers +src/Client/clickhouse-benchmark +src/Client/clickhouse-client +src/Client/tests/test-connect +src/Common/tests/arena_with_free_lists +src/Common/tests/auto_array +src/Common/tests/compact_array +src/Common/tests/hash_table +src/Common/tests/hashes_test +src/Common/tests/int_hashes_perf +src/Common/tests/lru_cache +src/Common/tests/parallel_aggregation +src/Common/tests/parallel_aggregation2 +src/Common/tests/radix_sort +src/Common/tests/shell_command_test +src/Common/tests/simple_cache +src/Common/tests/sip_hash +src/Common/tests/sip_hash_perf +src/Common/tests/small_table +src/Core/tests/exception +src/Core/tests/field +src/Core/tests/rvo_test +src/Core/tests/string_pool +src/DataStreams/tests/aggregating_stream +src/DataStreams/tests/block_tab_separated_streams +src/DataStreams/tests/collapsing_sorted_stream +src/DataStreams/tests/expression_stream +src/DataStreams/tests/filter_stream +src/DataStreams/tests/filter_stream_hitlog +src/DataStreams/tests/fork_streams +src/DataStreams/tests/glue_streams +src/DataStreams/tests/json_streams +src/DataStreams/tests/native_streams +src/DataStreams/tests/sorting_stream +src/DataStreams/tests/tab_separated_streams +src/DataStreams/tests/union_stream +src/DataStreams/tests/union_stream2 +src/DataTypes/tests/data_type_string +src/DataTypes/tests/data_types_number_fixed +src/Functions/tests/functions_arithmetic +src/Functions/tests/logical_functions_performance 
+src/Functions/tests/number_traits +src/IO/tests/async_write +src/IO/tests/cached_compressed_read_buffer +src/IO/tests/compressed_buffer +src/IO/tests/hashing_read_buffer +src/IO/tests/hashing_write_buffer +src/IO/tests/io_and_exceptions +src/IO/tests/io_operators +src/IO/tests/mempbrk +src/IO/tests/o_direct_and_dirty_pages +src/IO/tests/parse_int_perf +src/IO/tests/parse_int_perf2 +src/IO/tests/read_buffer +src/IO/tests/read_buffer_aio +src/IO/tests/read_buffer_perf +src/IO/tests/read_escaped_string +src/IO/tests/read_float_perf +src/IO/tests/read_write_int +src/IO/tests/valid_utf8 +src/IO/tests/valid_utf8_perf +src/IO/tests/var_uint +src/IO/tests/write_buffer +src/IO/tests/write_buffer_aio +src/IO/tests/write_buffer_perf +src/Interpreters/tests/address_patterns +src/Interpreters/tests/aggregate +src/Interpreters/tests/compiler_test +src/Interpreters/tests/create_query +src/Interpreters/tests/expression +src/Interpreters/tests/expression_analyzer +src/Interpreters/tests/hash_map +src/Interpreters/tests/hash_map2 +src/Interpreters/tests/hash_map3 +src/Interpreters/tests/hash_map_string +src/Interpreters/tests/hash_map_string_2 +src/Interpreters/tests/hash_map_string_3 +src/Interpreters/tests/hash_map_string_small +src/Interpreters/tests/in_join_subqueries_preprocessor +src/Interpreters/tests/logical_expressions_optimizer +src/Interpreters/tests/select_query +src/Interpreters/tests/two_level_hash_map +src/Interpreters/tests/users +src/Parsers/tests/create_parser +src/Parsers/tests/select_parser +src/Server/clickhouse-server +src/Server/clickhouse-server.init +src/Storages/tests/hit_log +src/Storages/tests/merge_tree +src/Storages/tests/part_checker +src/Storages/tests/part_name +src/Storages/tests/pk_condition +src/Storages/tests/seek_speed_test +src/Storages/tests/storage_log +src/Storages/tests/system_numbers libs/libcommon/src/revision.h libs/libcommon/src/tests/date_lut2 libs/libcommon/src/tests/date_lut3 @@ -184,15 +184,15 @@ libs/libzkutil/src/tests/zkutil_zookeeper_holder utils/zookeeper-create-entry-to-download-part/zookeeper-create-entry-to-download-part utils/zookeeper-dump-tree/zookeeper-dump-tree utils/zookeeper-remove-by-list/zookeeper-remove-by-list -dbms/src/Storages/tests/remove_symlink_directory +src/Storages/tests/remove_symlink_directory libs/libcommon/src/tests/json_test utils/compressor/zstd_test utils/wikistat-loader/wikistat-loader -dbms/src/Common/tests/pod_array +src/Common/tests/pod_array -dbms/src/Server/data/* -dbms/src/Server/metadata/* -dbms/src/Server/status +src/Server/data/* +src/Server/metadata/* +src/Server/status config-9001.xml *-preprocessed.xml @@ -242,7 +242,7 @@ website/package-lock.json */.DS_Store # Ignore files for locally disabled tests -/dbms/tests/queries/**/*.disabled +/src/queries/**/*.disabled # cquery cache /.cquery-cache diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d9f207a06d4..12afadc55a6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -31,11 +31,11 @@ build: - docker pull $CI_REGISTRY/yandex/clickhouse-builder - docker run --rm --volumes-from "${HOSTNAME}-build" --workdir "${CI_PROJECT_DIR}" --env CI_PROJECT_DIR=${CI_PROJECT_DIR} $CI_REGISTRY/yandex/clickhouse-builder /build_gitlab_ci.sh # You can upload your binary to nexus - - curl -v --keepalive-time 60 --keepalive --user "$NEXUS_USER:$NEXUS_PASSWORD" -XPUT "http://$NEXUS_HOST/repository/binaries/$CI_PROJECT_NAME" --upload-file ./dbms/src/Server/clickhouse + - curl -v --keepalive-time 60 --keepalive --user "$NEXUS_USER:$NEXUS_PASSWORD" -XPUT 
"http://$NEXUS_HOST/repository/binaries/$CI_PROJECT_NAME" --upload-file ./src/Server/clickhouse # Or download artifacts from gitlab artifacts: paths: - - ./dbms/src/Server/clickhouse + - ./src/Server/clickhouse expire_in: 1 day tags: - - docker \ No newline at end of file + - docker diff --git a/.gitmodules b/.gitmodules index 4a5b21ce30a..14661e8a32d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,7 +13,7 @@ url = https://github.com/edenhill/librdkafka.git [submodule "contrib/cctz"] path = contrib/cctz - url = https://github.com/google/cctz.git + url = https://github.com/ClickHouse-Extras/cctz.git [submodule "contrib/zlib-ng"] path = contrib/zlib-ng url = https://github.com/ClickHouse-Extras/zlib-ng.git @@ -151,3 +151,6 @@ [submodule "website/images/feathericons"] path = website/images/feathericons url = https://github.com/feathericons/feather +[submodule "contrib/msgpack-c"] + path = contrib/msgpack-c + url = https://github.com/msgpack/msgpack-c diff --git a/CHANGELOG.md b/CHANGELOG.md index f588adb7ef0..2ab006bcdd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ ## ClickHouse release v20.3 +### ClickHouse release v20.3.5.21, 2020-03-27 + +#### Bug Fix + +* Fix 'Different expressions with the same alias' error when query has PREWHERE and WHERE on distributed table and `SET distributed_product_mode = 'local'`. [#9871](https://github.com/ClickHouse/ClickHouse/pull/9871) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix mutations excessive memory consumption for tables with a composite primary key. This fixes [#9850](https://github.com/ClickHouse/ClickHouse/issues/9850). [#9860](https://github.com/ClickHouse/ClickHouse/pull/9860) ([alesapin](https://github.com/alesapin)). +* Fix 'COMMA to CROSS JOIN rewriter is not enabled or cannot rewrite query' error in case of subqueries with COMMA JOIN out of tables lists (i.e. in WHERE). Fixes [#9782](https://github.com/ClickHouse/ClickHouse/issues/9782). [#9830](https://github.com/ClickHouse/ClickHouse/pull/9830) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix possible exception `Got 0 in totals chunk, expected 1` on client. It happened for queries with `JOIN` in case if right joined table had zero rows. Example: `select * from system.one t1 join system.one t2 on t1.dummy = t2.dummy limit 0 FORMAT TabSeparated;`. Fixes [#9777](https://github.com/ClickHouse/ClickHouse/issues/9777). [#9823](https://github.com/ClickHouse/ClickHouse/pull/9823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix SIGSEGV with optimize_skip_unused_shards when type cannot be converted. [#9804](https://github.com/ClickHouse/ClickHouse/pull/9804) ([Azat Khuzhin](https://github.com/azat)). +* Fix broken `ALTER TABLE DELETE COLUMN` query for compact parts. [#9779](https://github.com/ClickHouse/ClickHouse/pull/9779) ([alesapin](https://github.com/alesapin)). +* Fix max_distributed_connections (w/ and w/o Processors). [#9673](https://github.com/ClickHouse/ClickHouse/pull/9673) ([Azat Khuzhin](https://github.com/azat)). +* Fixed a few cases when timezone of the function argument wasn't used properly. [#9574](https://github.com/ClickHouse/ClickHouse/pull/9574) ([Vasily Nemkov](https://github.com/Enmk)). + +#### Improvement + +* Remove order by stage from mutations because we read from a single ordered part in a single thread. Also add check that the order of rows in mutation is ordered in sorting key order and this order is not violated. [#9886](https://github.com/ClickHouse/ClickHouse/pull/9886) ([alesapin](https://github.com/alesapin)). 
+ + ### ClickHouse release v20.3.4.10, 2020-03-20 #### Bug Fix @@ -234,7 +252,7 @@ * Updated checking for hung queries in clickhouse-test script [#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) * Removed some useless files from repository. [#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Changed type of math perftests from `once` to `loop`. [#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html) * Suppress some test failures under MSan. [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) * Speedup "exception while insert" test. This test often time out in debug-with-coverage build. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Updated `libcxx` and `libcxxabi` to master. In preparation to [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -341,7 +359,7 @@ [#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov) #### New Feature -* Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. +* Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. [#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) ### ClickHouse release v20.1.2.4, 2020-01-22 @@ -641,4 +659,4 @@ #### Security Fix * Fixed the possibility of reading directories structure in tables with `File` table engine. This fixes [#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov)) -## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) +## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats_new/changelog/2019.md) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8feb5d8c129..a06ce0e0d73 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -228,7 +228,7 @@ else () set(NOT_UNBUNDLED 1) endif () -if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN) OR ARCH_32) +if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN)) # Using system libs can cause a lot of warnings in includes (on macro expansion). 
option (WERROR "Enable -Werror compiler option" OFF) else () @@ -251,6 +251,8 @@ if (OS_LINUX) include(cmake/linux/default_libs.cmake) elseif (OS_DARWIN) include(cmake/darwin/default_libs.cmake) +elseif (OS_FREEBSD) + include(cmake/freebsd/default_libs.cmake) endif () ###################################### @@ -316,7 +318,6 @@ include (cmake/find/poco.cmake) include (cmake/find/lz4.cmake) include (cmake/find/xxhash.cmake) include (cmake/find/sparsehash.cmake) -include (cmake/find/execinfo.cmake) include (cmake/find/re2.cmake) include (cmake/find/libgsasl.cmake) include (cmake/find/rdkafka.cmake) @@ -342,6 +343,7 @@ include (cmake/find/rapidjson.cmake) include (cmake/find/fastops.cmake) include (cmake/find/orc.cmake) include (cmake/find/avro.cmake) +include (cmake/find/msgpack.cmake) find_contrib_lib(cityhash) find_contrib_lib(farmhash) @@ -380,8 +382,13 @@ macro (add_executable target) endif() endmacro() +set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.") +include_directories(${ConfigIncludePath}) + add_subdirectory (base) +add_subdirectory (programs) +add_subdirectory (src) +add_subdirectory (tests) add_subdirectory (utils) -add_subdirectory (dbms) include (cmake/print_include_directories.cmake) diff --git a/README.md b/README.md index aab4cb9f63c..955f9d1a5d1 100644 --- a/README.md +++ b/README.md @@ -11,12 +11,9 @@ ClickHouse is an open-source column-oriented database management system that all * [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time. * [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announces and reports about events. * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any. -* You can also [fill this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet Yandex ClickHouse team in person. +* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person. ## Upcoming Events -* [ClickHouse Online Meetup (in Russian)](https://events.yandex.ru/events/click-house-onlajn-vs-03-04-2020) on April 3, 2020. -* [ClickHouse in Avito (online in Russian)](https://avitotech.timepad.ru/event/1290051/) on April 9, 2020. * [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date. -* [Talks on Saint HighLoad++ in St. Petersburg](https://www.highload.ru/spb/2020/abstracts/6647) on TBD date. * [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date. diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index d7ab0046992..a7ca21c984e 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -133,7 +133,10 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) } /// Fill lookup table for years and months. 
- for (size_t day = 0; day < DATE_LUT_SIZE && lut[day].year <= DATE_LUT_MAX_YEAR; ++day) + size_t year_months_lut_index = 0; + size_t first_day_of_last_month = 0; + + for (size_t day = 0; day < DATE_LUT_SIZE; ++day) { const Values & values = lut[day]; @@ -141,7 +144,16 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) { if (values.month == 1) years_lut[values.year - DATE_LUT_MIN_YEAR] = day; - years_months_lut[(values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1] = day; + + year_months_lut_index = (values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1; + years_months_lut[year_months_lut_index] = day; + first_day_of_last_month = day; } } + + /// Fill the rest of lookup table with the same last month (2106-02-01). + for (; year_months_lut_index < DATE_LUT_YEARS * 12; ++year_months_lut_index) + { + years_months_lut[year_months_lut_index] = first_day_of_last_month; + } } diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index d9d27c56ee3..ec32d62bcad 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -12,7 +12,7 @@ /// Table size is bigger than DATE_LUT_MAX_DAY_NUM to fill all indices within UInt16 range: this allows to remove extra check. #define DATE_LUT_SIZE 0x10000 #define DATE_LUT_MIN_YEAR 1970 -#define DATE_LUT_MAX_YEAR 2105 /// Last supported year +#define DATE_LUT_MAX_YEAR 2106 /// Last supported year (incomplete) #define DATE_LUT_YEARS (1 + DATE_LUT_MAX_YEAR - DATE_LUT_MIN_YEAR) /// Number of years in lookup table #if defined(__PPC__) diff --git a/base/common/time.h b/base/common/time.h index 9a52d8e40b8..1bf588b7cb3 100644 --- a/base/common/time.h +++ b/base/common/time.h @@ -4,4 +4,6 @@ #if defined (OS_DARWIN) # define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC +#elif defined (OS_FREEBSD) +# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST #endif diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index dc70d06619f..c150dc03014 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -12,7 +12,6 @@ #include #include -#include #include #include #include diff --git a/dbms/benchmark/benchmark.sh b/benchmark/benchmark.sh similarity index 100% rename from dbms/benchmark/benchmark.sh rename to benchmark/benchmark.sh diff --git a/dbms/benchmark/clickhouse/benchmark-chyt.sh b/benchmark/clickhouse/benchmark-chyt.sh similarity index 100% rename from dbms/benchmark/clickhouse/benchmark-chyt.sh rename to benchmark/clickhouse/benchmark-chyt.sh diff --git a/dbms/benchmark/clickhouse/benchmark-new.sh b/benchmark/clickhouse/benchmark-new.sh similarity index 100% rename from dbms/benchmark/clickhouse/benchmark-new.sh rename to benchmark/clickhouse/benchmark-new.sh diff --git a/dbms/benchmark/clickhouse/benchmark-yql.sh b/benchmark/clickhouse/benchmark-yql.sh similarity index 100% rename from dbms/benchmark/clickhouse/benchmark-yql.sh rename to benchmark/clickhouse/benchmark-yql.sh diff --git a/dbms/benchmark/clickhouse/queries.sql b/benchmark/clickhouse/queries.sql similarity index 100% rename from dbms/benchmark/clickhouse/queries.sql rename to benchmark/clickhouse/queries.sql diff --git a/dbms/benchmark/create_dump.sh b/benchmark/create_dump.sh similarity index 100% rename from dbms/benchmark/create_dump.sh rename to benchmark/create_dump.sh diff --git a/dbms/benchmark/greenplum/README b/benchmark/greenplum/README similarity index 100% rename from dbms/benchmark/greenplum/README rename to benchmark/greenplum/README diff --git a/dbms/benchmark/greenplum/benchmark.sh b/benchmark/greenplum/benchmark.sh 
similarity index 100% rename from dbms/benchmark/greenplum/benchmark.sh rename to benchmark/greenplum/benchmark.sh diff --git a/dbms/benchmark/greenplum/dump_dataset_from_ch.sh b/benchmark/greenplum/dump_dataset_from_ch.sh similarity index 100% rename from dbms/benchmark/greenplum/dump_dataset_from_ch.sh rename to benchmark/greenplum/dump_dataset_from_ch.sh diff --git a/dbms/benchmark/greenplum/load_data_set.sql b/benchmark/greenplum/load_data_set.sql similarity index 100% rename from dbms/benchmark/greenplum/load_data_set.sql rename to benchmark/greenplum/load_data_set.sql diff --git a/dbms/benchmark/greenplum/queries.sql b/benchmark/greenplum/queries.sql similarity index 100% rename from dbms/benchmark/greenplum/queries.sql rename to benchmark/greenplum/queries.sql diff --git a/dbms/benchmark/greenplum/result_parser.py b/benchmark/greenplum/result_parser.py similarity index 100% rename from dbms/benchmark/greenplum/result_parser.py rename to benchmark/greenplum/result_parser.py diff --git a/dbms/benchmark/greenplum/schema.sql b/benchmark/greenplum/schema.sql similarity index 100% rename from dbms/benchmark/greenplum/schema.sql rename to benchmark/greenplum/schema.sql diff --git a/dbms/benchmark/hive/conf.sh b/benchmark/hive/conf.sh similarity index 100% rename from dbms/benchmark/hive/conf.sh rename to benchmark/hive/conf.sh diff --git a/dbms/benchmark/hive/define_schema.sql b/benchmark/hive/define_schema.sql similarity index 100% rename from dbms/benchmark/hive/define_schema.sql rename to benchmark/hive/define_schema.sql diff --git a/dbms/benchmark/hive/expect.tcl b/benchmark/hive/expect.tcl similarity index 100% rename from dbms/benchmark/hive/expect.tcl rename to benchmark/hive/expect.tcl diff --git a/dbms/benchmark/hive/log/log_100m_tuned b/benchmark/hive/log/log_100m_tuned similarity index 100% rename from dbms/benchmark/hive/log/log_100m_tuned rename to benchmark/hive/log/log_100m_tuned diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_ b/benchmark/hive/log/log_10m/log_10m_ similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_ rename to benchmark/hive/log/log_10m/log_10m_ diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_1 b/benchmark/hive/log/log_10m/log_10m_1 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_1 rename to benchmark/hive/log/log_10m/log_10m_1 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_2 b/benchmark/hive/log/log_10m/log_10m_2 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_2 rename to benchmark/hive/log/log_10m/log_10m_2 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_3 b/benchmark/hive/log/log_10m/log_10m_3 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_3 rename to benchmark/hive/log/log_10m/log_10m_3 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_tuned b/benchmark/hive/log/log_10m/log_10m_tuned similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_tuned rename to benchmark/hive/log/log_10m/log_10m_tuned diff --git a/dbms/benchmark/hive/log/log_10m/log_hits_10m b/benchmark/hive/log/log_10m/log_hits_10m similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_hits_10m rename to benchmark/hive/log/log_10m/log_hits_10m diff --git a/dbms/benchmark/hive/queries.sql b/benchmark/hive/queries.sql similarity index 100% rename from dbms/benchmark/hive/queries.sql rename to benchmark/hive/queries.sql diff --git a/dbms/benchmark/hive/run_hive.sh b/benchmark/hive/run_hive.sh similarity index 100% rename from 
dbms/benchmark/hive/run_hive.sh rename to benchmark/hive/run_hive.sh diff --git a/dbms/benchmark/infinidb/conf.sh b/benchmark/infinidb/conf.sh similarity index 100% rename from dbms/benchmark/infinidb/conf.sh rename to benchmark/infinidb/conf.sh diff --git a/dbms/benchmark/infinidb/define_schema.sql b/benchmark/infinidb/define_schema.sql similarity index 100% rename from dbms/benchmark/infinidb/define_schema.sql rename to benchmark/infinidb/define_schema.sql diff --git a/dbms/benchmark/infinidb/expect.tcl b/benchmark/infinidb/expect.tcl similarity index 100% rename from dbms/benchmark/infinidb/expect.tcl rename to benchmark/infinidb/expect.tcl diff --git a/dbms/benchmark/infinidb/log/log_100m b/benchmark/infinidb/log/log_100m similarity index 100% rename from dbms/benchmark/infinidb/log/log_100m rename to benchmark/infinidb/log/log_100m diff --git a/dbms/benchmark/infinidb/log/log_100m_tuned b/benchmark/infinidb/log/log_100m_tuned similarity index 100% rename from dbms/benchmark/infinidb/log/log_100m_tuned rename to benchmark/infinidb/log/log_100m_tuned diff --git a/dbms/benchmark/infinidb/log/log_10m b/benchmark/infinidb/log/log_10m similarity index 100% rename from dbms/benchmark/infinidb/log/log_10m rename to benchmark/infinidb/log/log_10m diff --git a/dbms/benchmark/infinidb/log/log_10m_tuned b/benchmark/infinidb/log/log_10m_tuned similarity index 100% rename from dbms/benchmark/infinidb/log/log_10m_tuned rename to benchmark/infinidb/log/log_10m_tuned diff --git a/dbms/benchmark/infinidb/queries.sql b/benchmark/infinidb/queries.sql similarity index 100% rename from dbms/benchmark/infinidb/queries.sql rename to benchmark/infinidb/queries.sql diff --git a/dbms/benchmark/infobright/conf.sh b/benchmark/infobright/conf.sh similarity index 100% rename from dbms/benchmark/infobright/conf.sh rename to benchmark/infobright/conf.sh diff --git a/dbms/benchmark/infobright/define_schema.sql b/benchmark/infobright/define_schema.sql similarity index 100% rename from dbms/benchmark/infobright/define_schema.sql rename to benchmark/infobright/define_schema.sql diff --git a/dbms/benchmark/infobright/expect.tcl b/benchmark/infobright/expect.tcl similarity index 100% rename from dbms/benchmark/infobright/expect.tcl rename to benchmark/infobright/expect.tcl diff --git a/dbms/benchmark/infobright/log-community/log_10m b/benchmark/infobright/log-community/log_10m similarity index 100% rename from dbms/benchmark/infobright/log-community/log_10m rename to benchmark/infobright/log-community/log_10m diff --git a/dbms/benchmark/infobright/queries.sql b/benchmark/infobright/queries.sql similarity index 100% rename from dbms/benchmark/infobright/queries.sql rename to benchmark/infobright/queries.sql diff --git a/dbms/benchmark/memsql/benchmark.sh b/benchmark/memsql/benchmark.sh similarity index 100% rename from dbms/benchmark/memsql/benchmark.sh rename to benchmark/memsql/benchmark.sh diff --git a/dbms/benchmark/memsql/instructions.txt b/benchmark/memsql/instructions.txt similarity index 100% rename from dbms/benchmark/memsql/instructions.txt rename to benchmark/memsql/instructions.txt diff --git a/dbms/benchmark/memsql/queries.sql b/benchmark/memsql/queries.sql similarity index 100% rename from dbms/benchmark/memsql/queries.sql rename to benchmark/memsql/queries.sql diff --git a/dbms/benchmark/monetdb/conf.sh b/benchmark/monetdb/conf.sh similarity index 100% rename from dbms/benchmark/monetdb/conf.sh rename to benchmark/monetdb/conf.sh diff --git a/dbms/benchmark/monetdb/define_schema.sql 
b/benchmark/monetdb/define_schema.sql similarity index 100% rename from dbms/benchmark/monetdb/define_schema.sql rename to benchmark/monetdb/define_schema.sql diff --git a/dbms/benchmark/monetdb/expect.tcl b/benchmark/monetdb/expect.tcl similarity index 100% rename from dbms/benchmark/monetdb/expect.tcl rename to benchmark/monetdb/expect.tcl diff --git a/dbms/benchmark/monetdb/log/log_100m b/benchmark/monetdb/log/log_100m similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m rename to benchmark/monetdb/log/log_100m diff --git a/dbms/benchmark/monetdb/log/log_100m_1 b/benchmark/monetdb/log/log_100m_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_1 rename to benchmark/monetdb/log/log_100m_1 diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected b/benchmark/monetdb/log/log_100m_corrected similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected rename to benchmark/monetdb/log/log_100m_corrected diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected_1 b/benchmark/monetdb/log/log_100m_corrected_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected_1 rename to benchmark/monetdb/log/log_100m_corrected_1 diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected_2 b/benchmark/monetdb/log/log_100m_corrected_2 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected_2 rename to benchmark/monetdb/log/log_100m_corrected_2 diff --git a/dbms/benchmark/monetdb/log/log_10m b/benchmark/monetdb/log/log_10m similarity index 100% rename from dbms/benchmark/monetdb/log/log_10m rename to benchmark/monetdb/log/log_10m diff --git a/dbms/benchmark/monetdb/log/log_10m_corrected b/benchmark/monetdb/log/log_10m_corrected similarity index 100% rename from dbms/benchmark/monetdb/log/log_10m_corrected rename to benchmark/monetdb/log/log_10m_corrected diff --git a/dbms/benchmark/monetdb/log/log_10m_corrected_1 b/benchmark/monetdb/log/log_10m_corrected_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_10m_corrected_1 rename to benchmark/monetdb/log/log_10m_corrected_1 diff --git a/dbms/benchmark/monetdb/log/log_upload_100m b/benchmark/monetdb/log/log_upload_100m similarity index 100% rename from dbms/benchmark/monetdb/log/log_upload_100m rename to benchmark/monetdb/log/log_upload_100m diff --git a/dbms/benchmark/monetdb/log/log_upload_1b b/benchmark/monetdb/log/log_upload_1b similarity index 100% rename from dbms/benchmark/monetdb/log/log_upload_1b rename to benchmark/monetdb/log/log_upload_1b diff --git a/dbms/benchmark/monetdb/queries.sql b/benchmark/monetdb/queries.sql similarity index 100% rename from dbms/benchmark/monetdb/queries.sql rename to benchmark/monetdb/queries.sql diff --git a/dbms/benchmark/vertica/README b/benchmark/vertica/README similarity index 100% rename from dbms/benchmark/vertica/README rename to benchmark/vertica/README diff --git a/dbms/benchmark/vertica/benchmark.sh b/benchmark/vertica/benchmark.sh similarity index 100% rename from dbms/benchmark/vertica/benchmark.sh rename to benchmark/vertica/benchmark.sh diff --git a/dbms/benchmark/vertica/hits_define_schema.sql b/benchmark/vertica/hits_define_schema.sql similarity index 100% rename from dbms/benchmark/vertica/hits_define_schema.sql rename to benchmark/vertica/hits_define_schema.sql diff --git a/dbms/benchmark/vertica/queries.sql b/benchmark/vertica/queries.sql similarity index 100% rename from dbms/benchmark/vertica/queries.sql rename to benchmark/vertica/queries.sql diff --git a/cmake/analysis.cmake 
b/cmake/analysis.cmake index a6a93774817..287c36a8de7 100644 --- a/cmake/analysis.cmake +++ b/cmake/analysis.cmake @@ -10,7 +10,7 @@ if (ENABLE_CLANG_TIDY) if (CLANG_TIDY_PATH) message(STATUS "Using clang-tidy: ${CLANG_TIDY_PATH}. The checks will be run during build process. See the .clang-tidy file at the root directory to configure the checks.") set (USE_CLANG_TIDY 1) - # The variable CMAKE_CXX_CLANG_TIDY will be set inside dbms and base directories with non third-party code. + # The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code. # set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") else () message(STATUS "clang-tidy is not found. This is normal - the tool is used only for static code analysis and not essential for build.") diff --git a/cmake/arch.cmake b/cmake/arch.cmake index ec644b6fe77..57ed42295bb 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -11,7 +11,6 @@ if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "i386") set (ARCH_I386 1) endif () if ((ARCH_ARM AND NOT ARCH_AARCH64) OR ARCH_I386) - set (ARCH_32 1) message (FATAL_ERROR "32bit platforms are not supported") endif () diff --git a/cmake/find/execinfo.cmake b/cmake/find/execinfo.cmake deleted file mode 100644 index 85cc5cf951a..00000000000 --- a/cmake/find/execinfo.cmake +++ /dev/null @@ -1,8 +0,0 @@ -if (OS_FREEBSD) - find_library (EXECINFO_LIBRARY execinfo) - find_library (ELF_LIBRARY elf) - set (EXECINFO_LIBRARIES ${EXECINFO_LIBRARY} ${ELF_LIBRARY}) - message (STATUS "Using execinfo: ${EXECINFO_LIBRARIES}") -else () - set (EXECINFO_LIBRARIES "") -endif () diff --git a/cmake/find/libgsasl.cmake b/cmake/find/libgsasl.cmake index 589e965e19b..801b63899da 100644 --- a/cmake/find/libgsasl.cmake +++ b/cmake/find/libgsasl.cmake @@ -1,6 +1,4 @@ -if (NOT ARCH_32) - option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED}) -endif () +option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED}) if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h") if (USE_INTERNAL_LIBGSASL_LIBRARY) @@ -16,7 +14,7 @@ if (NOT USE_INTERNAL_LIBGSASL_LIBRARY) endif () if (LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR) -elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY AND NOT ARCH_32) +elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY) set (LIBGSASL_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/linux_x86_64/include) set (USE_INTERNAL_LIBGSASL_LIBRARY 1) set (LIBGSASL_LIBRARY libgsasl) diff --git a/cmake/find/msgpack.cmake b/cmake/find/msgpack.cmake new file mode 100644 index 00000000000..46344fc162f --- /dev/null +++ b/cmake/find/msgpack.cmake @@ -0,0 +1,17 @@ +option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED}) + +if (USE_INTERNAL_MSGPACK_LIBRARY) + if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include/msgpack.hpp") + message(WARNING "Submodule contrib/msgpack-c is missing. 
To fix try run: \n git submodule update --init --recursive") + set(USE_INTERNAL_MSGPACK_LIBRARY 0) + set(MISSING_INTERNAL_MSGPACK_LIBRARY 1) + endif() +endif() + +if (USE_INTERNAL_MSGPACK_LIBRARY) + set(MSGPACK_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include) +else() + find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS}) +endif() + +message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}") diff --git a/cmake/find/rdkafka.cmake b/cmake/find/rdkafka.cmake index dfab142a3cd..f18674dd440 100644 --- a/cmake/find/rdkafka.cmake +++ b/cmake/find/rdkafka.cmake @@ -1,5 +1,5 @@ # Freebsd: contrib/cppkafka/include/cppkafka/detail/endianness.h:53:23: error: 'betoh16' was not declared in this scope -if (NOT ARCH_ARM AND NOT ARCH_32 AND NOT OS_FREEBSD AND OPENSSL_FOUND) +if (NOT ARCH_ARM AND NOT OS_FREEBSD AND OPENSSL_FOUND) option (ENABLE_RDKAFKA "Enable kafka" ${ENABLE_LIBRARIES}) endif () diff --git a/cmake/find/ssl.cmake b/cmake/find/ssl.cmake index 36f9d1e67ec..efc9127309c 100644 --- a/cmake/find/ssl.cmake +++ b/cmake/find/ssl.cmake @@ -2,9 +2,7 @@ option(ENABLE_SSL "Enable ssl" ${ENABLE_LIBRARIES}) if(ENABLE_SSL) -if(NOT ARCH_32) - option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED}) -endif() +option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED}) if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openssl/README") if(USE_INTERNAL_SSL_LIBRARY) diff --git a/cmake/find/unwind.cmake b/cmake/find/unwind.cmake index d3653973082..c9f5f30a5d6 100644 --- a/cmake/find/unwind.cmake +++ b/cmake/find/unwind.cmake @@ -1,14 +1,5 @@ option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES}) -if (NOT CMAKE_SYSTEM MATCHES "Linux" OR ARCH_ARM OR ARCH_32) - set (USE_UNWIND OFF) -endif () - -if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libunwind/CMakeLists.txt") - message(WARNING "submodule contrib/libunwind is missing. 
to fix try run: \n git submodule update --init --recursive") - set (USE_UNWIND OFF) -endif () - if (USE_UNWIND) add_subdirectory(contrib/libunwind-cmake) set (UNWIND_LIBRARIES unwind) diff --git a/cmake/find/zlib.cmake b/cmake/find/zlib.cmake index fb91622e298..f65d379f577 100644 --- a/cmake/find/zlib.cmake +++ b/cmake/find/zlib.cmake @@ -1,6 +1,4 @@ -if (NOT OS_FREEBSD AND NOT ARCH_32) - option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) -endif () +option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) if (NOT MSVC) set (INTERNAL_ZLIB_NAME "zlib-ng" CACHE INTERNAL "") diff --git a/cmake/freebsd/default_libs.cmake b/cmake/freebsd/default_libs.cmake new file mode 100644 index 00000000000..2bb76c6a761 --- /dev/null +++ b/cmake/freebsd/default_libs.cmake @@ -0,0 +1,40 @@ +set (DEFAULT_LIBS "-nodefaultlibs") + +if (NOT COMPILER_CLANG) + message (FATAL_ERROR "FreeBSD build is supported only for Clang") +endif () + +execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) + +set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread") + +message(STATUS "Default libraries: ${DEFAULT_LIBS}") + +set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS}) +set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS}) + +# Global libraries + +add_library(global-libs INTERFACE) + +# Unfortunately '-pthread' doesn't work with '-nodefaultlibs'. +# Just make sure we have pthreads at all. +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +include (cmake/find/unwind.cmake) +include (cmake/find/cxx.cmake) + +add_library(global-group INTERFACE) + +target_link_libraries(global-group INTERFACE + $ +) + +link_libraries(global-group) + +# FIXME: remove when all contribs will get custom cmake lists +install( + TARGETS global-group global-libs + EXPORT global +) diff --git a/cmake/freebsd/toolchain-x86_64.cmake b/cmake/freebsd/toolchain-x86_64.cmake new file mode 100644 index 00000000000..30468731b69 --- /dev/null +++ b/cmake/freebsd/toolchain-x86_64.cmake @@ -0,0 +1,19 @@ +set (CMAKE_SYSTEM_NAME "FreeBSD") +set (CMAKE_SYSTEM_PROCESSOR "x86_64") +set (CMAKE_C_COMPILER_TARGET "x86_64-pc-freebsd12.1") +set (CMAKE_CXX_COMPILER_TARGET "x86_64-pc-freebsd12.1") +set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd12.1") +set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/freebsd-x86_64") + +set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake + +set (LINKER_NAME "lld" CACHE STRING "" FORCE) + +set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") +set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") + +set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) + +set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/lib_name.cmake b/cmake/lib_name.cmake index 51a424cb4e2..8f5bebf4abe 100644 --- a/cmake/lib_name.cmake +++ b/cmake/lib_name.cmake @@ -1,5 +1,5 @@ set(DIVIDE_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libdivide) -set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/dbms/src ${ClickHouse_BINARY_DIR}/dbms/src) +set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/src 
${ClickHouse_BINARY_DIR}/src) set(DOUBLE_CONVERSION_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion) set(METROHASH_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libmetrohash/src) set(PCG_RANDOM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpcg-random/include) diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index 3d192f1fe76..7d906de7602 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -23,7 +23,7 @@ if (SANITIZE) # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to # keep the binary size down. # TODO: try compiling with -Og and with ld.gold. - set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/dbms/tests/msan_suppressions.txt") + set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") diff --git a/cmake/target.cmake b/cmake/target.cmake index 1f40e28e76b..03d470b0aea 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -24,6 +24,9 @@ if (CMAKE_CROSSCOMPILING) set (ENABLE_PARQUET OFF CACHE INTERNAL "") set (ENABLE_MYSQL OFF CACHE INTERNAL "") endif () + elseif (OS_FREEBSD) + # FIXME: broken dependencies + set (ENABLE_PROTOBUF OFF CACHE INTERNAL "") else () message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!") endif () diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 8eccbfd9f83..5eeec3b2f0a 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -6,18 +6,18 @@ endif () if (COMPILER_GCC) # Require minimum version of gcc - set (GCC_MINIMUM_VERSION 8) + set (GCC_MINIMUM_VERSION 9) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION} AND NOT CMAKE_VERSION VERSION_LESS 2.8.9) message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.") endif () elseif (COMPILER_CLANG) # Require minimum version of clang - set (CLANG_MINIMUM_VERSION 7) + set (CLANG_MINIMUM_VERSION 8) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION}) message (FATAL_ERROR "Clang version must be at least ${CLANG_MINIMUM_VERSION}.") endif () else () - message (WARNING "You are using an unsupported compiler. Compilation has only been tested with Clang 6+ and GCC 7+.") + message (WARNING "You are using an unsupported compiler. 
Compilation has only been tested with Clang and GCC.") endif () STRING(REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION}) diff --git a/dbms/cmake/version.cmake b/cmake/version.cmake similarity index 100% rename from dbms/cmake/version.cmake rename to cmake/version.cmake diff --git a/contrib/cctz b/contrib/cctz index 4f9776a310f..5a3f785329c 160000 --- a/contrib/cctz +++ b/contrib/cctz @@ -1 +1 @@ -Subproject commit 4f9776a310f4952454636363def82c2bf6641d5f +Subproject commit 5a3f785329cecdd2b68cd950e0647e9246774ef2 diff --git a/contrib/libc-headers b/contrib/libc-headers index 9676d2645a7..92c74f938cf 160000 --- a/contrib/libc-headers +++ b/contrib/libc-headers @@ -1 +1 @@ -Subproject commit 9676d2645a713e679dc981ffd84dee99fcd68b8e +Subproject commit 92c74f938cf2c4dd529cae4f3d2923d153b029a7 diff --git a/contrib/libcpuid/include/libcpuid/cpuid_main.c b/contrib/libcpuid/include/libcpuid/cpuid_main.c index 02a7cb7ad50..34457e297ca 100644 --- a/contrib/libcpuid/include/libcpuid/cpuid_main.c +++ b/contrib/libcpuid/include/libcpuid/cpuid_main.c @@ -38,7 +38,7 @@ /* Implementation: */ -static int _libcpiud_errno = ERR_OK; +_Thread_local int _libcpiud_errno = ERR_OK; int set_error(cpu_error_t err) { diff --git a/contrib/libdivide/libdivide.h b/contrib/libdivide/libdivide.h index eaeaec7db6b..a153e7f9c5e 100644 --- a/contrib/libdivide/libdivide.h +++ b/contrib/libdivide/libdivide.h @@ -1,117 +1,106 @@ -/* libdivide.h - Copyright 2010 ridiculous_fish -*/ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" +// libdivide.h - Optimized integer division +// https://libdivide.com +// +// Copyright (C) 2010 - 2019 ridiculous_fish, +// Copyright (C) 2016 - 2019 Kim Walisch, +// +// libdivide is dual-licensed under the Boost or zlib licenses. +// You may use libdivide under the terms of either of these. +// See LICENSE.txt for more details. -#if defined(_WIN32) || defined(WIN32) -#define LIBDIVIDE_WINDOWS 1 -#endif +#ifndef LIBDIVIDE_H +#define LIBDIVIDE_H -#if defined(_MSC_VER) -#define LIBDIVIDE_VC 1 -#endif +#define LIBDIVIDE_VERSION "3.0" +#define LIBDIVIDE_VERSION_MAJOR 3 +#define LIBDIVIDE_VERSION_MINOR 0 -#ifdef __cplusplus -#include -#include -#include -#else -#include -#include -#include -#endif - -#if ! LIBDIVIDE_HAS_STDINT_TYPES && (! LIBDIVIDE_VC || _MSC_VER >= 1600) -/* Only Visual C++ 2010 and later include stdint.h */ #include -#define LIBDIVIDE_HAS_STDINT_TYPES 1 + +#if defined(__cplusplus) + #include + #include + #include +#else + #include + #include #endif -#if ! LIBDIVIDE_HAS_STDINT_TYPES -typedef __int32 int32_t; -typedef unsigned __int32 uint32_t; -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -typedef __int8 int8_t; -typedef unsigned __int8 uint8_t; -#endif - -#if LIBDIVIDE_USE_SSE2 +#if defined(LIBDIVIDE_AVX512) + #include +#elif defined(LIBDIVIDE_AVX2) + #include +#elif defined(LIBDIVIDE_SSE2) #include #endif -#if LIBDIVIDE_VC +#if defined(_MSC_VER) #include + // disable warning C4146: unary minus operator applied + // to unsigned type, result still unsigned + #pragma warning(disable: 4146) + #define LIBDIVIDE_VC #endif -#ifndef __has_builtin -#define __has_builtin(x) 0 // Compatibility with non-clang compilers. 
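A minimal standalone sketch (my own illustration, not part of libdivide) of how the __has_builtin fallback in this hunk is meant to be used: with the zero-expanding stub in place, feature tests compile on any compiler, and the builtin is only taken where it actually exists.

#include <cstdint>

#ifndef __has_builtin
#define __has_builtin(x) 0   // fallback: report every builtin as absent
#endif

static inline int leading_zeros32(uint32_t x)
{
#if __has_builtin(__builtin_clz)
    return x ? __builtin_clz(x) : 32;     // hardware path where available
#else
    int n = 0;                            // portable fallback path
    for (; n < 32 && !(x & 0x80000000u); ++n)
        x <<= 1;
    return n;
#endif
}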
+#if !defined(__has_builtin) + #define __has_builtin(x) 0 #endif -#ifdef __ICC -#define HAS_INT128_T 0 -#else -#define HAS_INT128_T __LP64__ +#if defined(__SIZEOF_INT128__) + #define HAS_INT128_T + // clang-cl on Windows does not yet support 128-bit division + #if !(defined(__clang__) && defined(LIBDIVIDE_VC)) + #define HAS_INT128_DIV + #endif #endif -#if defined(__x86_64__) || defined(_WIN64) || defined(_M_64) -#define LIBDIVIDE_IS_X86_64 1 +#if defined(__x86_64__) || defined(_M_X64) + #define LIBDIVIDE_X86_64 #endif #if defined(__i386__) -#define LIBDIVIDE_IS_i386 1 + #define LIBDIVIDE_i386 #endif -#if __GNUC__ || __clang__ -#define LIBDIVIDE_GCC_STYLE_ASM 1 +#if defined(__GNUC__) || defined(__clang__) + #define LIBDIVIDE_GCC_STYLE_ASM #endif +#if defined(__cplusplus) || defined(LIBDIVIDE_VC) + #define LIBDIVIDE_FUNCTION __FUNCTION__ +#else + #define LIBDIVIDE_FUNCTION __func__ +#endif -/* libdivide may use the pmuldq (vector signed 32x32->64 mult instruction) which is in SSE 4.1. However, signed multiplication can be emulated efficiently with unsigned multiplication, and SSE 4.1 is currently rare, so it is OK to not turn this on */ -#ifdef LIBDIVIDE_USE_SSE4_1 -#include +#define LIBDIVIDE_ERROR(msg) \ + do { \ + fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, msg); \ + exit(-1); \ + } while (0) + +#if defined(LIBDIVIDE_ASSERTIONS_ON) + #define LIBDIVIDE_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, #x); \ + exit(-1); \ + } \ + } while (0) +#else + #define LIBDIVIDE_ASSERT(x) #endif #ifdef __cplusplus -/* We place libdivide within the libdivide namespace, and that goes in an anonymous namespace so that the functions are only visible to files that #include this header and don't get external linkage. At least that's the theory. */ -namespace { namespace libdivide { #endif -/* Explanation of "more" field: bit 6 is whether to use shift path. If we are using the shift path, bit 7 is whether the divisor is negative in the signed case; in the unsigned case it is 0. Bits 0-4 is shift value (for shift path or mult path). In 32 bit case, bit 5 is always 0. We use bit 7 as the "negative divisor indicator" so that we can use sign extension to efficiently go to a full-width -1. - - -u32: [0-4] shift value - [5] ignored - [6] add indicator - [7] shift path - -s32: [0-4] shift value - [5] shift path - [6] add indicator - [7] indicates negative divisor - -u64: [0-5] shift value - [6] add indicator - [7] shift path - -s64: [0-5] shift value - [6] add indicator - [7] indicates negative divisor - magic number of 0 indicates shift path (we ran out of bits!) -*/ - -enum { - LIBDIVIDE_32_SHIFT_MASK = 0x1F, - LIBDIVIDE_64_SHIFT_MASK = 0x3F, - LIBDIVIDE_ADD_MARKER = 0x40, - LIBDIVIDE_U32_SHIFT_PATH = 0x80, - LIBDIVIDE_U64_SHIFT_PATH = 0x80, - LIBDIVIDE_S32_SHIFT_PATH = 0x20, - LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 -}; - +// pack divider structs to prevent compilers from padding. +// This reduces memory usage by up to 43% when using a large +// array of libdivide dividers and improves performance +// by up to 10% because of reduced memory bandwidth. 
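The new comment above about #pragma pack is easy to verify with a standalone sketch (mine, not the library's): a struct holding a uint64_t and a uint8_t is padded to 16 bytes under the usual x86-64 ABI but occupies exactly 9 bytes when packed, which appears to be where the "up to 43%" figure comes from (1 - 9/16 is roughly 44%).

#include <cstdint>
#include <cstdio>

struct padded_u64  { uint64_t magic; uint8_t more; };   // usually 16 bytes

#pragma pack(push, 1)
struct packed_u64  { uint64_t magic; uint8_t more; };   // exactly 9 bytes
#pragma pack(pop)

int main()
{
    // Typical x86-64 output: "padded: 16 bytes, packed: 9 bytes"
    printf("padded: %zu bytes, packed: %zu bytes\n",
           sizeof(padded_u64), sizeof(packed_u64));
    return 0;
}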
+#pragma pack(push, 1) struct libdivide_u32_t { uint32_t magic; @@ -133,497 +122,446 @@ struct libdivide_s64_t { uint8_t more; }; +struct libdivide_u32_branchfree_t { + uint32_t magic; + uint8_t more; +}; +struct libdivide_s32_branchfree_t { + int32_t magic; + uint8_t more; +}; -#ifndef LIBDIVIDE_API - #ifdef __cplusplus - /* In C++, we don't want our public functions to be static, because they are arguments to templates and static functions can't do that. They get internal linkage through virtue of the anonymous namespace. In C, they should be static. */ - #define LIBDIVIDE_API - #else - #define LIBDIVIDE_API static - #endif -#endif +struct libdivide_u64_branchfree_t { + uint64_t magic; + uint8_t more; +}; -#ifdef __APPLE__ -typedef signed long Int64; -typedef unsigned long UInt64; -#endif +struct libdivide_s64_branchfree_t { + int64_t magic; + uint8_t more; +}; -LIBDIVIDE_API struct libdivide_s32_t libdivide_s32_gen(int32_t y); -LIBDIVIDE_API struct libdivide_u32_t libdivide_u32_gen(uint32_t y); -LIBDIVIDE_API struct libdivide_s64_t libdivide_s64_gen(int64_t y); -LIBDIVIDE_API struct libdivide_u64_t libdivide_u64_gen(uint64_t y); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API struct libdivide_s64_t libdivide_s64_gen(Int64 y) { return libdivide_s64_gen(int64_t(y)); }; -LIBDIVIDE_API struct libdivide_u64_t libdivide_u64_gen(UInt64 y) { return libdivide_u64_gen(uint64_t(y)); }; -#pragma GCC diagnostic pop -#endif +#pragma pack(pop) -LIBDIVIDE_API int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do(uint64_t y, const struct libdivide_u64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API Int64 libdivide_s64_do(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do(int64_t(numer), denom)); }; -LIBDIVIDE_API UInt64 libdivide_u64_do(UInt64 y, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do(uint64_t(y), denom)); }; -#pragma GCC diagnostic pop -#endif +// Explanation of the "more" field: +// +// * Bits 0-5 is the shift value (for shift path or mult path). +// * Bit 6 is the add indicator for mult path. +// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative +// divisor indicator so that we can efficiently use sign extension to +// create a bitmask with all bits set to 1 (if the divisor is negative) +// or 0 (if the divisor is positive). +// +// u32: [0-4] shift value +// [5] ignored +// [6] add indicator +// magic number of 0 indicates shift path +// +// s32: [0-4] shift value +// [5] ignored +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// u64: [0-5] shift value +// [6] add indicator +// magic number of 0 indicates shift path +// +// s64: [0-5] shift value +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// In s32 and s64 branchfree modes, the magic number is negated according to +// whether the divisor is negated. In branchfree strategy, it is not negated. 
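To make the "more" field layout documented above concrete, here is a small decoding sketch (an illustration, not library code) that mirrors the stated bit assignments for the u64 case; the constants repeat the values of the LIBDIVIDE_* enum declared in this header.

#include <cstdint>
#include <cstdio>

// Same values as the LIBDIVIDE_* enum in the header above.
enum
{
    SHIFT_MASK_64    = 0x3F,  // bits 0-5: shift amount
    ADD_MARKER       = 0x40,  // bit 6: add indicator for the mult path
    NEGATIVE_DIVISOR = 0x80   // bit 7: divisor sign (signed types only)
};

static void describe_u64_divider(uint64_t magic, uint8_t more)
{
    if (magic == 0)
        // magic == 0 means the pure shift path: n / d == n >> shift
        printf("shift path: n >> %d\n", more & SHIFT_MASK_64);
    else
        printf("mult path: shift = %d, add indicator = %d\n",
               more & SHIFT_MASK_64, (more & ADD_MARKER) != 0);
}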
-LIBDIVIDE_API int libdivide_u32_get_algorithm(const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg0(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg1(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg2(uint32_t numer, const struct libdivide_u32_t *denom); +enum { + LIBDIVIDE_32_SHIFT_MASK = 0x1F, + LIBDIVIDE_64_SHIFT_MASK = 0x3F, + LIBDIVIDE_ADD_MARKER = 0x40, + LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 +}; -LIBDIVIDE_API int libdivide_u64_get_algorithm(const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg0(uint64_t numer, const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg1(uint64_t numer, const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg2(uint64_t numer, const struct libdivide_u64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API UInt64 libdivide_u64_do_alg0(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg0(uint64_t(numer), denom)); } -LIBDIVIDE_API UInt64 libdivide_u64_do_alg1(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg1(uint64_t(numer), denom)); } -LIBDIVIDE_API UInt64 libdivide_u64_do_alg2(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg2(uint64_t(numer), denom)); } -#pragma GCC diagnostic pop -#endif +static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d); +static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d); +static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d); +static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d); -LIBDIVIDE_API int libdivide_s32_get_algorithm(const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg0(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg1(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg2(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg3(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg4(int32_t numer, const struct libdivide_s32_t *denom); +static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d); +static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d); +static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d); +static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d); -LIBDIVIDE_API int libdivide_s64_get_algorithm(const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg0(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg1(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg2(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg3(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg4(int64_t numer, const struct libdivide_s64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API Int64 libdivide_s64_do_alg0(Int64 numer, const struct 
libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg0(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg1(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg1(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg2(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg2(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg3(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg3(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg4(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg4(int64_t(numer), denom)); } -#pragma GCC diagnostic pop -#endif +static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom); +static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom); -#if LIBDIVIDE_USE_SSE2 -LIBDIVIDE_API __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t * denom); - -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg0(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg1(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg2(__m128i numers, const struct libdivide_u32_t * denom); - -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg0(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg1(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg2(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg3(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg4(__m128i numers, const struct libdivide_s32_t * denom); - -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg0(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg1(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg2(__m128i numers, const struct libdivide_u64_t * denom); - -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg0(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg1(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg2(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i 
libdivide_s64_do_vector_alg3(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_t * denom); -#endif - +static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); //////// Internal Utility Functions -static inline uint32_t libdivide__mullhi_u32(uint32_t x, uint32_t y) { +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { uint64_t xl = x, yl = y; uint64_t rl = xl * yl; return (uint32_t)(rl >> 32); } -static uint64_t libdivide__mullhi_u64(uint64_t x, uint64_t y) { -#if HAS_INT128_T +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) __uint128_t xl = x, yl = y; __uint128_t rl = xl * yl; return (uint64_t)(rl >> 64); #else - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const uint32_t mask = 0xFFFFFFFF; - const uint32_t x0 = (uint32_t)(x & mask), x1 = (uint32_t)(x >> 32); - const uint32_t y0 = (uint32_t)(y & mask), y1 = (uint32_t)(y >> 32); - const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0); - const uint64_t x0y1 = x0 * (uint64_t)y1; - const uint64_t x1y0 = x1 * (uint64_t)y0; - const uint64_t x1y1 = x1 * (uint64_t)y1; - + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; uint64_t temp = x1y0 + x0y0_hi; - uint64_t temp_lo = temp & mask, temp_hi = temp >> 32; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); #endif } -static inline int64_t libdivide__mullhi_s64(int64_t x, int64_t y) { -#if HAS_INT128_T +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) __int128_t xl = x, yl = y; __int128_t rl = xl * yl; return (int64_t)(rl >> 64); #else - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const uint32_t mask = 0xFFFFFFFF; - const uint32_t x0 = (uint32_t)(x & mask), y0 = (uint32_t)(y & mask); - const int32_t x1 = (int32_t)(x >> 32), y1 = (int32_t)(y >> 32); - const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0); - const int64_t t = x1*(int64_t)y0 + 
x0y0_hi; - const int64_t w1 = x0*(int64_t)y1 + (t & mask); - return x1*(int64_t)y1 + (t >> 32) + (w1 >> 32); + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); #endif } -#if LIBDIVIDE_USE_SSE2 - -static inline __m128i libdivide__u64_to_m128(uint64_t x) { -#if LIBDIVIDE_VC && ! _WIN64 - //64 bit windows doesn't seem to have an implementation of any of these load intrinsics, and 32 bit Visual C++ crashes - _declspec(align(16)) uint64_t temp[2] = {x, x}; - return _mm_load_si128((const __m128i*)temp); -#elif defined(__ICC) - uint64_t __attribute__((aligned(16))) temp[2] = {x,x}; - return _mm_load_si128((const __m128i*)temp); -#elif __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wc++11-narrowing" // narrowing from uint64_t (aka 'unsigned long') to 'long long' - // clang does not provide this intrinsic either - return (__m128i){x, x}; -#pragma clang diagnostic pop -#else - // everyone else gets it right - return _mm_set1_epi64x(x); -#endif -} - -static inline __m128i libdivide_get_FFFFFFFF00000000(void) { - //returns the same as _mm_set1_epi64(0xFFFFFFFF00000000ULL) without touching memory - __m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X - return _mm_slli_epi64(result, 32); -} - -static inline __m128i libdivide_get_00000000FFFFFFFF(void) { - //returns the same as _mm_set1_epi64(0x00000000FFFFFFFFULL) without touching memory - __m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X - result = _mm_srli_epi64(result, 32); - return result; -} - -#if __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif -static inline __m128i libdivide_get_0000FFFF(void) { - //returns the same as _mm_set1_epi32(0x0000FFFFULL) without touching memory - __m128i result; //we don't care what its contents are - result = _mm_cmpeq_epi8(result, result); //all 1s - result = _mm_srli_epi32(result, 16); - return result; -} -#if __clang__ -#pragma clang diagnostic pop -#endif - -/// This is a bug in gcc-8, _MM_SHUFFLE was forgotten, though in trunk it is ok https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/xmmintrin.h#L61 -#if defined(__PPC__) -#ifndef _MM_SHUFFLE -#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z)) -#endif -#endif - -static inline __m128i libdivide_s64_signbits(__m128i v) { - //we want to compute v >> 63, that is, _mm_srai_epi64(v, 63). But there is no 64 bit shift right arithmetic instruction in SSE2. So we have to fake it by first duplicating the high 32 bit values, and then using a 32 bit shift. Another option would be to use _mm_srli_epi64(v, 63) and then subtract that from 0, but that approach appears to be substantially slower for unknown reasons - __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); - __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); - return signBits; -} - -/* Returns an __m128i whose low 32 bits are equal to amt and has zero elsewhere. 
*/ -static inline __m128i libdivide_u32_to_m128i(uint32_t amt) { - return _mm_set_epi32(0, 0, 0, amt); -} - -static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { - //implementation of _mm_sra_epi64. Here we have two 64 bit values which are shifted right to logically become (64 - amt) values, and are then sign extended from a (64 - amt) bit number. - const int b = 64 - amt; - __m128i m = libdivide__u64_to_m128(1ULL << (b - 1)); - __m128i x = _mm_srl_epi64(v, libdivide_u32_to_m128i(amt)); - __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); //result = x^m - m - return result; -} - -/* Here, b is assumed to contain one 32 bit value repeated four times. If it did not, the function would not work. */ -static inline __m128i libdivide__mullhi_u32_flat_vector(__m128i a, __m128i b) { - __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); - __m128i a1X3X = _mm_srli_epi64(a, 32); - __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), libdivide_get_FFFFFFFF00000000()); - return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); // = hi_product_0123 -} - - -/* Here, y is assumed to contain one 64 bit value repeated twice. */ -static inline __m128i libdivide_mullhi_u64_flat_vector(__m128i x, __m128i y) { - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const __m128i mask = libdivide_get_00000000FFFFFFFF(); - const __m128i x0 = _mm_and_si128(x, mask), x1 = _mm_srli_epi64(x, 32); //x0 is low half of 2 64 bit values, x1 is high half in low slots - const __m128i y0 = _mm_and_si128(y, mask), y1 = _mm_srli_epi64(y, 32); - const __m128i x0y0_hi = _mm_srli_epi64(_mm_mul_epu32(x0, y0), 32); //x0 happens to have the low half of the two 64 bit values in 32 bit slots 0 and 2, so _mm_mul_epu32 computes their full product, and then we shift right by 32 to get just the high values - const __m128i x0y1 = _mm_mul_epu32(x0, y1); - const __m128i x1y0 = _mm_mul_epu32(x1, y0); - const __m128i x1y1 = _mm_mul_epu32(x1, y1); - - const __m128i temp = _mm_add_epi64(x1y0, x0y0_hi); - __m128i temp_lo = _mm_and_si128(temp, mask), temp_hi = _mm_srli_epi64(temp, 32); - temp_lo = _mm_srli_epi64(_mm_add_epi64(temp_lo, x0y1), 32); - temp_hi = _mm_add_epi64(x1y1, temp_hi); - - return _mm_add_epi64(temp_lo, temp_hi); -} - -/* y is one 64 bit value repeated twice */ -static inline __m128i libdivide_mullhi_s64_flat_vector(__m128i x, __m128i y) { - __m128i p = libdivide_mullhi_u64_flat_vector(x, y); - __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); - p = _mm_sub_epi64(p, t1); - __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); - p = _mm_sub_epi64(p, t2); - return p; -} - -#ifdef LIBDIVIDE_USE_SSE4_1 - -/* b is one 32 bit value repeated four times. */ -static inline __m128i libdivide_mullhi_s32_flat_vector(__m128i a, __m128i b) { - __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epi32(a, b), 32); - __m128i a1X3X = _mm_srli_epi64(a, 32); - __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epi32(a1X3X, b), libdivide_get_FFFFFFFF00000000()); - return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); // = hi_product_0123 -} - -#else - -/* SSE2 does not have a signed multiplication instruction, but we can convert unsigned to signed pretty efficiently. Again, b is just a 32 bit value repeated four times. 
*/ -static inline __m128i libdivide_mullhi_s32_flat_vector(__m128i a, __m128i b) { - __m128i p = libdivide__mullhi_u32_flat_vector(a, b); - __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); //t1 = (a >> 31) & y, arithmetic shift - __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); - p = _mm_sub_epi32(p, t1); - p = _mm_sub_epi32(p, t2); - return p; -} -#endif -#endif - -static inline int32_t libdivide__count_trailing_zeros32(uint32_t val) { -#if __GNUC__ || __has_builtin(__builtin_ctz) - /* Fast way to count trailing zeros */ - return __builtin_ctz(val); -#elif LIBDIVIDE_VC - unsigned long result; - if (_BitScanForward(&result, val)) { - return result; - } - return 0; -#else - /* Dorky way to count trailing zeros. Note that this hangs for val = 0! */ - int32_t result = 0; - val = (val ^ (val - 1)) >> 1; // Set v's trailing 0s to 1s and zero rest - while (val) { - val >>= 1; - result++; - } - return result; -#endif -} - -static inline int32_t libdivide__count_trailing_zeros64(uint64_t val) { -#if __LP64__ && (__GNUC__ || __has_builtin(__builtin_ctzll)) - /* Fast way to count trailing zeros. Note that we disable this in 32 bit because gcc does something horrible - it calls through to a dynamically bound function. */ - return __builtin_ctzll(val); -#elif LIBDIVIDE_VC && _WIN64 - unsigned long result; - if (_BitScanForward64(&result, val)) { - return result; - } - return 0; -#else - /* Pretty good way to count trailing zeros. Note that this hangs for val = 0! */ - uint32_t lo = val & 0xFFFFFFFF; - if (lo != 0) return libdivide__count_trailing_zeros32(lo); - return 32 + libdivide__count_trailing_zeros32(val >> 32); -#endif -} - -static inline int32_t libdivide__count_leading_zeros32(uint32_t val) { -#if __GNUC__ || __has_builtin(__builtin_clzll) - /* Fast way to count leading zeros */ +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros return __builtin_clz(val); -#elif LIBDIVIDE_VC +#elif defined(LIBDIVIDE_VC) unsigned long result; if (_BitScanReverse(&result, val)) { return 31 - result; } return 0; #else - /* Dorky way to count leading zeros. Note that this hangs for val = 0! */ int32_t result = 0; - while (! (val & (1U << 31))) { - val <<= 1; + uint32_t hi = 1U << 31; + for (; ~val & hi; hi >>= 1) { result++; } return result; #endif } -static inline int32_t libdivide__count_leading_zeros64(uint64_t val) { -#if __GNUC__ || __has_builtin(__builtin_clzll) - /* Fast way to count leading zeros */ +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros return __builtin_clzll(val); -#elif LIBDIVIDE_VC && _WIN64 +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) unsigned long result; if (_BitScanReverse64(&result, val)) { return 63 - result; } return 0; #else - /* Dorky way to count leading zeros. Note that this hangs for val = 0! */ - int32_t result = 0; - while (! (val & (1ULL << 63))) { - val <<= 1; - result++; - } - return result; + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + return 32 + libdivide_count_leading_zeros32(lo); #endif } -//libdivide_64_div_32_to_32: divides a 64 bit uint {u1, u0} by a 32 bit uint {v}. The result must fit in 32 bits. 
Returns the quotient directly and the remainder in *r -#if (LIBDIVIDE_IS_i386 || LIBDIVIDE_IS_X86_64) && LIBDIVIDE_GCC_STYLE_ASM -static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) uint32_t result; __asm__("divl %[v]" : "=a"(result), "=d"(*r) : [v] "r"(v), "a"(u0), "d"(u1) ); return result; -} #else -static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { - uint64_t n = (((uint64_t)u1) << 32) | u0; + uint64_t n = ((uint64_t)u1 << 32) | u0; uint32_t result = (uint32_t)(n / v); *r = (uint32_t)(n - result * (uint64_t)v); return result; -} #endif +} -#if LIBDIVIDE_IS_X86_64 && LIBDIVIDE_GCC_STYLE_ASM +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { - //u0 -> rax - //u1 -> rdx - //divq +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) uint64_t result; __asm__("divq %[v]" : "=a"(result), "=d"(*r) : [v] "r"(v), "a"(u0), "d"(u1) ); return result; - -} +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; #else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm -/* Code taken from Hacker's Delight, http://www.hackersdelight.org/HDcode/divlu.c . License permits inclusion here per http://www.hackersdelight.org/permissions.htm - */ -static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { - const uint64_t b = (1ULL << 32); // Number base (16 bits). - uint64_t un1, un0, // Norm. dividend LSD's. - vn1, vn0, // Norm. divisor digits. - q1, q0, // Quotient digits. - un64, un21, un10,// Dividend digit pairs. - rhat; // A remainder. - int s; // Shift amount for norm. + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm - if (u1 >= v) { // If overflow, set rem. - if (r != NULL) // to an impossible value, - *r = (uint64_t)(-1); // and return the largest - return (uint64_t)(-1);} // possible quotient. + // If overflow, set rem. to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } - /* count leading zeros */ - s = libdivide__count_leading_zeros64(v); // 0 <= s <= 63. + // count leading zeros + s = libdivide_count_leading_zeros64(v); if (s > 0) { - v = v << s; // Normalize divisor. - un64 = (u1 << s) | ((u0 >> (64 - s)) & (-s >> 31)); - un10 = u0 << s; // Shift dividend left. 
+ // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left } else { - // Avoid undefined behavior. - un64 = u1 | u0; + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. + un64 = u1; un10 = u0; } - vn1 = v >> 32; // Break divisor up into - vn0 = v & 0xFFFFFFFF; // two 32-bit digits. + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; - un1 = un10 >> 32; // Break right half of - un0 = un10 & 0xFFFFFFFF; // dividend into two digits. + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; - q1 = un64/vn1; // Compute the first - rhat = un64 - q1*vn1; // quotient digit, q1. -again1: - if (q1 >= b || q1*vn0 > b*rhat + un1) { + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { q1 = q1 - 1; rhat = rhat + vn1; - if (rhat < b) goto again1;} + if (rhat >= b) + break; + } - un21 = un64*b + un1 - q1*v; // Multiply and subtract. + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; - q0 = un21/vn1; // Compute the second - rhat = un21 - q0*vn1; // quotient digit, q0. -again2: - if (q0 >= b || q0*vn0 > b*rhat + un0) { + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { q0 = q0 - 1; rhat = rhat + vn1; - if (rhat < b) goto again2;} + if (rhat >= b) + break; + } - if (r != NULL) // If remainder is wanted, - *r = (un21*b + un0 - q0*v) >> s; // return it. - return q1*b + q0; + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif } -#endif -#if LIBDIVIDE_ASSERTIONS_ON -#define LIBDIVIDE_ASSERT(x) do { if (! (x)) { fprintf(stderr, "Assertion failure on line %ld: %s\n", (long)__LINE__, #x); exit(-1); } } while (0) +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. 
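// Call-shape sketch (grounded in the only callers in this header, the u64
// recover routines further below, where the divisor is the 65-bit value
// (1 << 64) | magic, so v_hi is always 1):
//
//     uint64_t r_hi, r_lo;
//     uint64_t q = libdivide_128_div_128_to_64(
//         1ULL << shift, 0,   // u = 2^(64 + shift)
//         1, denom->magic,    // v = 2^64 + magic
//         &r_hi, &r_lo);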
+static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; #else -#define LIBDIVIDE_ASSERT(x) -#endif + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; -#ifndef LIBDIVIDE_HEADER_ONLY + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) 
are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} ////////// UINT32 -struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { - struct libdivide_u32_t result; - if ((d & (d - 1)) == 0) { - result.magic = 0; - result.more = libdivide__count_trailing_zeros32(d) | LIBDIVIDE_U32_SHIFT_PATH; +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); } - else { - const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(d); + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { uint8_t more; uint32_t rem, proposed_m; proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); @@ -631,570 +569,1358 @@ struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { LIBDIVIDE_ASSERT(rem > 0 && rem < d); const uint32_t e = d - rem; - /* This power works if e < 2**floor_log_2_d. */ - if (e < (1U << floor_log_2_d)) { - /* This power works */ + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works more = floor_log_2_d; - } - else { - /* We have to use the general 33-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */ - proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; const uint32_t twice_rem = rem + rem; if (twice_rem >= d || twice_rem < rem) proposed_m += 1; more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } result.magic = 1 + proposed_m; result.more = more; - //result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases. - + // result.more's shift should in general be ceil_log_2_d. 
But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. } return result; } +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) { - return numer >> (more & LIBDIVIDE_32_SHIFT_MASK); + if (!denom->magic) { + return numer >> more; } else { - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { uint32_t t = ((numer - q) >> 1) + q; return t >> (more & LIBDIVIDE_32_SHIFT_MASK); } else { - return q >> more; //all upper bits are 0 - don't need to mask them off + // All upper bits are 0, + // don't need to mask them off. + return q >> more; } } } - -int libdivide_u32_get_algorithm(const struct libdivide_u32_t *denom) { - uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) return 0; - else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1; - else return 2; -} - -uint32_t libdivide_u32_do_alg0(uint32_t numer, const struct libdivide_u32_t *denom) { - return numer >> (denom->more & LIBDIVIDE_32_SHIFT_MASK); -} - -uint32_t libdivide_u32_do_alg1(uint32_t numer, const struct libdivide_u32_t *denom) { - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); - return q >> denom->more; -} - -uint32_t libdivide_u32_do_alg2(uint32_t numer, const struct libdivide_u32_t *denom) { - // denom->add != 0 - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); +uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); uint32_t t = ((numer - q) >> 1) + q; - return t >> (denom->more & LIBDIVIDE_32_SHIFT_MASK); + return t >> denom->more; } - - - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { +uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) { - return _mm_srl_epi32(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); - } - else { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - //uint32_t t = ((numer - q) >> 1) + q; - //return t >> denom->shift; - __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); - return _mm_srl_epi32(t, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; - } - else { - //q >> denom->shift - return _mm_srl_epi32(q, libdivide_u32_to_m128i(more)); - } + if (!denom->magic) { + return 1U << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(32 + shift) + // Therefore we have d = 2^(32 + shift) / m + // We need to ceil it. 
+ // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint32_t hi_dividend = 1U << shift; + uint32_t rem_ignored; + return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling the + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } } -__m128i libdivide_u32_do_vector_alg0(__m128i numers, const struct libdivide_u32_t *denom) { - return _mm_srl_epi32(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); -} +uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; -__m128i libdivide_u32_do_vector_alg1(__m128i numers, const struct libdivide_u32_t *denom) { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - return _mm_srl_epi32(q, libdivide_u32_to_m128i(denom->more)); -} + if (!denom->magic) { + return 1U << (shift + 1); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling the + // remainder would increase the quotient.
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); -__m128i libdivide_u32_do_vector_alg2(__m128i numers, const struct libdivide_u32_t *denom) { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); - return _mm_srl_epi32(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); + // We rounded down in gen (hence +1) + return full_q + 1; + } } -#endif - /////////// UINT64 -struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { - struct libdivide_u64_t result; - if ((d & (d - 1)) == 0) { - result.more = libdivide__count_trailing_zeros64(d) | LIBDIVIDE_U64_SHIFT_PATH; - result.magic = 0; +static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); } - else { - const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(d); + struct libdivide_u64_t result; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { uint64_t proposed_m, rem; uint8_t more; - proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); //== (1 << (64 + floor_log_2_d)) / d + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); LIBDIVIDE_ASSERT(rem > 0 && rem < d); const uint64_t e = d - rem; - /* This power works if e < 2**floor_log_2_d. */ - if (e < (1ULL << floor_log_2_d)) { - /* This power works */ + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works more = floor_log_2_d; - } - else { - /* We have to use the general 65-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */ - proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; const uint64_t twice_rem = rem + rem; if (twice_rem >= d || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } result.magic = 1 + proposed_m; result.more = more; - //result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases, which is why we do it outside of the if statement. + // result.more's shift should in general be ceil_log_2_d. 
But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. } return result; } +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) { - return numer >> (more & LIBDIVIDE_64_SHIFT_MASK); + if (!denom->magic) { + return numer >> more; } else { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { uint64_t t = ((numer - q) >> 1) + q; return t >> (more & LIBDIVIDE_64_SHIFT_MASK); } else { - return q >> more; //all upper bits are 0 - don't need to mask them off + // All upper bits are 0, + // don't need to mask them off. + return q >> more; } } } - -int libdivide_u64_get_algorithm(const struct libdivide_u64_t *denom) { - uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) return 0; - else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1; - else return 2; -} - -uint64_t libdivide_u64_do_alg0(uint64_t numer, const struct libdivide_u64_t *denom) { - return numer >> (denom->more & LIBDIVIDE_64_SHIFT_MASK); -} - -uint64_t libdivide_u64_do_alg1(uint64_t numer, const struct libdivide_u64_t *denom) { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); - return q >> denom->more; -} - -uint64_t libdivide_u64_do_alg2(uint64_t numer, const struct libdivide_u64_t *denom) { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); uint64_t t = ((numer - q) >> 1) + q; - return t >> (denom->more & LIBDIVIDE_64_SHIFT_MASK); + return t >> denom->more; } -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t * denom) { +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) { - return _mm_srl_epi64(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK)); - } - else { - __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - //uint32_t t = ((numer - q) >> 1) + q; - //return t >> denom->shift; - __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); - return _mm_srl_epi64(t, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK)); - } - else { - //q >> denom->shift - return _mm_srl_epi64(q, libdivide_u32_to_m128i(more)); - } + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. 
+ // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; } } -__m128i libdivide_u64_do_vector_alg0(__m128i numers, const struct libdivide_u64_t *denom) { - return _mm_srl_epi64(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK)); +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 
1 : 0); + return full_q + 1; + } } -__m128i libdivide_u64_do_vector_alg1(__m128i numers, const struct libdivide_u64_t *denom) { - __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - return _mm_srl_epi64(q, libdivide_u32_to_m128i(denom->more)); -} - -__m128i libdivide_u64_do_vector_alg2(__m128i numers, const struct libdivide_u64_t *denom) { - __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); - return _mm_srl_epi64(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK)); -} - - -#endif - /////////// SINT32 +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } -static inline int32_t libdivide__mullhi_s32(int32_t x, int32_t y) { - int64_t xl = x, yl = y; - int64_t rl = xl * yl; - return (int32_t)(rl >> 32); //needs to be arithmetic shift -} - -struct libdivide_s32_t libdivide_s32_gen(int32_t d) { struct libdivide_s32_t result; - /* If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse, it suffices to check whether its absolute value has exactly one bit set. This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */ - uint32_t absD = (uint32_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick - if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same result.magic = 0; - result.more = libdivide__count_trailing_zeros32(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0) | LIBDIVIDE_S32_SHIFT_PATH; - } - else { - const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(absD); + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { LIBDIVIDE_ASSERT(floor_log_2_d >= 1); uint8_t more; - //the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word is 0 and the high word is floor_log_2_d - 1 + // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word + // is 0 and the high word is floor_log_2_d - 1 uint32_t rem, proposed_m; proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem); const uint32_t e = absD - rem; - /* We are going to start with a power of floor_log_2_d - 1. This works if works if e < 2**floor_log_2_d. */ - if (e < (1U << floor_log_2_d)) { - /* This power works */ + // We are going to start with a power of floor_log_2_d - 1. + // This works if e < 2**floor_log_2_d.
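// Worked example (editorial, for d = 5): floor_log_2_d == 2, so the trial
// dividend is 2**33 and proposed_m == 2**33 / 5 == 1717986918 with rem == 2.
// Then e == 5 - 2 == 3 < 2**2, the smaller power works, and the stored
// magic becomes proposed_m + 1 == 1717986919 (0x66666667) with shift == 1.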
+ if (!branchfree && e < (1U << floor_log_2_d)) { + // This power works more = floor_log_2_d - 1; - } - else { - /* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int32_t. */ + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. proposed_m += proposed_m; const uint32_t twice_rem = rem + rem; if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); //use the general algorithm + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } - proposed_m += 1; - result.magic = (d < 0 ? -(int32_t)proposed_m : (int32_t)proposed_m); - result.more = more; + proposed_m += 1; + int32_t magic = (int32_t)proposed_m; + + // Mark if we are negative. Note we only negate the magic number in the + // branchfull case. + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; } return result; } +struct libdivide_s32_t libdivide_s32_gen(int32_t d) { + return libdivide_internal_s32_gen(d, 0); +} + +struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) { + struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1); + struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more}; + return result; +} + int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_S32_SHIFT_PATH) { - uint8_t shifter = more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - q = q >> shifter; - int32_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend - q = (q ^ shiftMask) - shiftMask; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + uint32_t sign = (int8_t)more >> 7; + uint32_t mask = (1U << shift) - 1; + uint32_t uq = numer + ((numer >> 31) & mask); + int32_t q = (int32_t)uq; + q >>= shift; + q = (q ^ sign) - sign; return q; - } - else { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); + } else { + uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { - int32_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend - q += ((numer ^ sign) - sign); + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; } - q >>= more & LIBDIVIDE_32_SHIFT_MASK; + int32_t q = (int32_t)uq; + q >>= shift; q += (q < 0); return q; } } -int libdivide_s32_get_algorithm(const struct libdivide_s32_t *denom) { +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { uint8_t more = denom->more; - int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR); - if (more & LIBDIVIDE_S32_SHIFT_PATH) return (positiveDivisor ? 0 : 1); - else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 
2 : 3); - else return 4; -} - -int32_t libdivide_s32_do_alg0(int32_t numer, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - return q >> shifter; -} - -int32_t libdivide_s32_do_alg1(int32_t numer, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - return - (q >> shifter); -} - -int32_t libdivide_s32_do_alg2(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); q += numer; - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + return q; } -int32_t libdivide_s32_do_alg3(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); - q -= numer; - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); - return q; -} - -int32_t libdivide_s32_do_alg4(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); - return q; -} - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t * denom) { +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_S32_SHIFT_PATH) { - uint32_t shifter = more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); //could use _mm_srli_epi32 with an all -1 register - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); //q = numer + ((numer >> 31) & roundToZeroTweak); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)); // q = q >> shifter - __m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //set all bits of shift mask = to the sign bit of more - q = _mm_sub_epi32(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask; - return q; - } - else { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - __m128i sign = _mm_set1_epi32((int32_t)(int8_t)more >> 7); //must be arithmetic shift - q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; } - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); //q >>= shift - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) - return q; + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. 
However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; } } -__m128i libdivide_s32_do_vector_alg0(__m128i numers, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); - return _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)); +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); } -__m128i libdivide_s32_do_vector_alg1(__m128i numers, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); - return _mm_sub_epi32(_mm_setzero_si128(), _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter))); -} - -__m128i libdivide_s32_do_vector_alg2(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_add_epi32(q, numers); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); - return q; -} - -__m128i libdivide_s32_do_vector_alg3(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_sub_epi32(q, numers); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); - return q; -} - -__m128i libdivide_s32_do_vector_alg4(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more)); //q >>= shift - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) - return q; -} -#endif - ///////////// SINT64 +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } -struct libdivide_s64_t libdivide_s64_gen(int64_t d) { struct libdivide_s64_t result; - /* If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse, it suffices to check whether its absolute value has exactly one bit set. 
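The new recover() functions above invert gen(): in the multiply path they divide 2**(32 + shift) (2**(64 + shift) for the 64-bit case, via libdivide_128_div_64_to_64) by the positive magic number, add one, and reapply the sign. A minimal round-trip check, not part of this patch; it assumes the updated header is reachable as "libdivide.h" and uses the libdivide namespace the header opens when compiled as C++:

```cpp
#include <cassert>
#include <cstdint>
#include "libdivide.h"

using namespace libdivide;

int main() {
    for (int32_t d = -1000; d <= 1000; ++d) {
        if (d == 0) continue; // gen() rejects 0: "divider must be != 0"
        struct libdivide_s32_t den = libdivide_s32_gen(d);
        assert(libdivide_s32_recover(&den) == d);             // gen -> recover round trip
        assert(libdivide_s32_do(100000, &den) == 100000 / d); // quotient matches '/'
    }
    return 0;
}
```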
This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */ - const uint64_t absD = (uint64_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick - if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero - result.more = libdivide__count_trailing_zeros64(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + // If d is a power of 2, or the negative of a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same result.magic = 0; - } - else { - const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(absD); - - //the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word is 0 and the high word is floor_log_2_d - 1 + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word + // is 0 and the high word is 2**(floor_log_2_d - 1) uint8_t more; uint64_t rem, proposed_m; proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem); const uint64_t e = absD - rem; - /* We are going to start with a power of floor_log_2_d - 1. This works if works if e < 2**floor_log_2_d. */ - if (e < (1ULL << floor_log_2_d)) { - /* This power works */ + // We are going to start with a power of floor_log_2_d - 1. + // This works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works more = floor_log_2_d - 1; - } - else { - /* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int32_t. */ + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int64_t. proposed_m += proposed_m; const uint64_t twice_rem = rem + rem; if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we + // also set ADD_MARKER. This is an annoying optimization that + // enables algorithm #4 to avoid the mask. However we always set it + // in the branchfree case + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } proposed_m += 1; + int64_t magic = (int64_t)proposed_m; + + // Mark if we are negative + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + result.more = more; - result.magic = (d < 0 ? 
-(int64_t)proposed_m : (int64_t)proposed_m); + result.magic = magic; } return result; } +struct libdivide_s64_t libdivide_s64_gen(int64_t d) { + return libdivide_internal_s64_gen(d, 0); +} + +struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) { + struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1); + struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more}; + return ret; +} + int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) { uint8_t more = denom->more; - int64_t magic = denom->magic; - if (magic == 0) { //shift path - uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - q = q >> shifter; - int64_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend - q = (q ^ shiftMask) - shiftMask; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { // shift path + uint64_t mask = (1ULL << shift) - 1; + uint64_t uq = numer + ((numer >> 63) & mask); + int64_t q = (int64_t)uq; + q >>= shift; + // must be arithmetic shift and then sign-extend + int64_t sign = (int8_t)more >> 7; + q = (q ^ sign) - sign; return q; - } - else { - int64_t q = libdivide__mullhi_s64(magic, numer); + } else { + uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { - int64_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend - q += ((numer ^ sign) - sign); + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint64_t)numer ^ sign) - sign; } - q >>= more & LIBDIVIDE_64_SHIFT_MASK; + int64_t q = (int64_t)uq; + q >>= shift; q += (q < 0); return q; } } - -int libdivide_s64_get_algorithm(const struct libdivide_s64_t *denom) { - uint8_t more = denom->more; - int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR); - if (denom->magic == 0) return (positiveDivisor ? 0 : 1); //shift path - else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 
2 : 3); - else return 4; -} - -int64_t libdivide_s64_do_alg0(int64_t numer, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - return q >> shifter; -} - -int64_t libdivide_s64_do_alg1(int64_t numer, const struct libdivide_s64_t *denom) { - //denom->shifter != -1 && demo->shiftMask != 0 - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - return - (q >> shifter); -} - -int64_t libdivide_s64_do_alg2(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q += numer; - q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK; - q += (q < 0); - return q; -} - -int64_t libdivide_s64_do_alg3(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q -= numer; - q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK; - q += (q < 0); - return q; -} - -int64_t libdivide_s64_do_alg4(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q >>= denom->more; - q += (q < 0); - return q; -} - - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t * denom) { +int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) { uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; int64_t magic = denom->magic; - if (magic == 0) { //shift path - uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); //q = numer + ((numer >> 63) & roundToZeroTweak); - q = libdivide_s64_shift_right_vector(q, shifter); // q = q >> shifter - __m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); - q = _mm_sub_epi64(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask; + int64_t q = libdivide_mullhi_s64(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2. + uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? 
-denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) {; + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. +// https://stackoverflow.com/a/28827013 +static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) { + __m512i lomask = _mm512_set1_epi64(0xffffffff); + __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1); + __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1); + __m512i w0 = _mm512_mul_epu32(x, y); + __m512i w1 = _mm512_mul_epu32(x, yh); + __m512i w2 = _mm512_mul_epu32(xh, y); + __m512i w3 = _mm512_mul_epu32(xh, yh); + __m512i w0h = _mm512_srli_epi64(w0, 32); + __m512i s1 = _mm512_add_epi64(w1, w0h); + __m512i s1l = _mm512_and_si512(s1, lomask); + __m512i s1h = _mm512_srli_epi64(s1, 32); + __m512i s2 = _mm512_add_epi64(w2, s1l); + __m512i s2h = _mm512_srli_epi64(s2, 32); + __m512i hi = _mm512_add_epi64(w3, s1h); + hi = _mm512_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. 
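The vector ISA only provides a 32x32->64 unsigned multiply (_mm512_mul_epu32), so libdivide_mullhi_u64_vector assembles the high 64 bits of a 64x64 product from four partial products. The same w0..w3/s1/s2 dataflow on scalars, as a sketch (not from the patch; the cross-check relies on GCC/Clang's unsigned __int128):

```cpp
#include <cassert>
#include <cstdint>

// High 64 bits of x*y from four 32x32->64 partial products,
// mirroring the vector code's w0..w3, s1, s2 carry handling.
static uint64_t mullhi_u64_scalar(uint64_t x, uint64_t y) {
    uint64_t xl = x & 0xffffffff, xh = x >> 32;
    uint64_t yl = y & 0xffffffff, yh = y >> 32;
    uint64_t w0 = xl * yl; // low  x low
    uint64_t w1 = xl * yh; // low  x high
    uint64_t w2 = xh * yl; // high x low
    uint64_t w3 = xh * yh; // high x high
    uint64_t s1 = w1 + (w0 >> 32);        // cannot overflow
    uint64_t s2 = w2 + (s1 & 0xffffffff); // cannot overflow either
    return w3 + (s1 >> 32) + (s2 >> 32);
}

int main() {
    uint64_t x = 0xdeadbeefcafebabeULL, y = 0x123456789abcdef0ULL;
    assert(mullhi_u64_scalar(x, y) ==
           (uint64_t)(((unsigned __int128)x * y) >> 64));
    return 0;
}
```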
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); return q; } else { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(magic)); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); if (more & LIBDIVIDE_ADD_MARKER) { - __m128i sign = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //must be arithmetic shift - q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign); + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); } - q 
= libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); //q >>= denom->mult_path.shift + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
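These signed branchfree kernels all rely on the fixup the comment above describes: an arithmetic shift rounds toward negative infinity, so when the quotient is negative, (2**shift)-1 is added first so the power-of-2 path truncates toward zero the way C division does (the non-power-of-2 multiply path gets the full 2**shift instead). A scalar sketch of the power-of-2 identity, not from the patch; like the code above it assumes '>>' on a negative value is an arithmetic shift:

```cpp
#include <cassert>
#include <cstdint>

// n / 2^shift with C's round-toward-zero semantics, using only
// an add, a mask, and an arithmetic shift (the branchfree shift path).
static int32_t div_pow2_toward_zero(int32_t n, uint32_t shift) {
    int32_t q = n + ((n >> 31) & ((1 << shift) - 1)); // mask added only when n < 0
    return q >> shift;
}

int main() {
    for (int32_t n = -1000; n <= 1000; ++n)
        for (uint32_t s = 1; s <= 8; ++s)
            assert(div_pow2_toward_zero(n, s) == n / (1 << s));
    return 0;
}
```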
+ uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
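libdivide_s64_shift_right_vector above exists because AVX2 has no 64-bit arithmetic shift (_mm256_srai_epi64 is AVX512-only); it emulates one with a logical shift plus the xor/subtract sign-extension trick. The trick on scalars, as a sketch (not from the patch; the cross-check assumes '>>' on int64_t is arithmetic):

```cpp
#include <cassert>
#include <cstdint>

// Arithmetic shift right built from a logical one: after the logical
// v >> amt, the sign bit sits at position 63-amt; with m = 1 << (63-amt),
// (x ^ m) - m sign-extends that bit through the cleared top bits.
static int64_t sra64_emulated(int64_t v, int amt) {
    uint64_t m = 1ULL << (63 - amt);
    uint64_t x = (uint64_t)v >> amt;
    return (int64_t)((x ^ m) - m);
}

int main() {
    const int64_t vals[] = {0, 1, -1, 1234567890123LL, -987654321987LL, INT64_MIN};
    for (int64_t v : vals)
        for (int amt = 1; amt < 64; ++amt)
            assert(sra64_emulated(v, amt) == v >> amt);
    return 0;
}
```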
+// https://stackoverflow.com/a/28827013 +static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) { + __m256i lomask = _mm256_set1_epi64x(0xffffffff); + __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) { + __m256i p = libdivide_mullhi_u64_vector(x, y); + __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y); + __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x); + p = _mm256_sub_epi64(p, t1); + p = _mm256_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi32(numers, more); + } + else { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, shift); + } + else { + return _mm256_srli_epi32(q, more); + } + } +} + +__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi64(numers, more); + } + else { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, shift); + } + else { + return _mm256_srli_epi64(q, more); + } + } +} + +__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi32(mask); + // q = numer + ((numer >> 
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. 
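The comment above is the crux of libdivide_mullhi_s32_vector: SSE2 multiplies only unsigned values, but reading a signed input as unsigned adds 2**32 to each negative operand, inflating the unsigned high half by exactly (a<0 ? b : 0) + (b<0 ? a : 0) modulo 2**32; subtracting the two masked terms recovers the signed high half. The same correction on scalars, as a sketch (not from the patch; assumes arithmetic '>>'):

```cpp
#include <cassert>
#include <cstdint>

// Signed high-half multiply recovered from the unsigned one,
// mirroring the vector code's two t1/t2 correction terms.
static int32_t mullhi_s32_from_u32(int32_t a, int32_t b) {
    uint32_t p  = (uint32_t)(((uint64_t)(uint32_t)a * (uint32_t)b) >> 32);
    uint32_t t1 = (uint32_t)((a >> 31) & b); // == b when a < 0, else 0
    uint32_t t2 = (uint32_t)((b >> 31) & a); // == a when b < 0, else 0
    return (int32_t)(p - t1 - t2);
}

int main() {
    const int32_t vals[] = {0, 1, -1, 7, -7, 2147483647, (int32_t)0x80000000};
    for (int32_t a : vals)
        for (int32_t b : vals)
            assert(mullhi_s32_from_u32(a, b) ==
                   (int32_t)(((int64_t)a * b) >> 32));
    return 0;
}
```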
+// https://stackoverflow.com/a/28827013 +static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) { + __m128i lomask = _mm_set1_epi64x(0xffffffff); + __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) { + __m128i p = libdivide_mullhi_u64_vector(x, y); + __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); + __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); + p = _mm_sub_epi64(p, t1); + p = _mm_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi32(numers, more); + } + else { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, shift); + } + else { + return _mm_srli_epi32(q, more); + } + } +} + +__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi64(numers, more); + } + else { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, shift); + } + else { + return _mm_srli_epi64(q, more); + } + } +} + +__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); + q = 
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) return q; } } -__m128i libdivide_s64_do_vector_alg0(__m128i numers, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); - q = libdivide_s64_shift_right_vector(q, shifter); - return q; -} +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); -__m128i 
libdivide_s64_do_vector_alg1(__m128i numers, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); - q = libdivide_s64_shift_right_vector(q, shifter); - return _mm_sub_epi64(_mm_setzero_si128(), q); -} + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers -__m128i libdivide_s64_do_vector_alg2(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = _mm_add_epi64(q, numers); - q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) - return q; -} - -__m128i libdivide_s64_do_vector_alg3(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = _mm_sub_epi64(q, numers); - q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) - return q; -} - -__m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = libdivide_s64_shift_right_vector(q, denom->more); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign return q; } @@ -1204,228 +1930,143 @@ __m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_ #ifdef __cplusplus -/* The C++ template design here is a total mess. This needs to be fixed by someone better at templates than I. The current design is: - -- The base is a template divider_base that takes the integer type, the libdivide struct, a generating function, a get algorithm function, a do function, and either a do vector function or a dummy int. -- The base has storage for the libdivide struct. This is the only storage (so the C++ class should be no larger than the libdivide struct). - -- Above that, there's divider_mid. This is an empty struct by default, but it is specialized against our four int types. divider_mid contains a template struct algo, that contains a typedef for a specialization of divider_base. struct algo is specialized to take an "algorithm number," where -1 means to use the general algorithm. - -- Publicly we have class divider, which inherits from divider_mid::algo. This also take an algorithm number, which defaults to -1 (the general algorithm). -- divider has a operator / which allows you to use a divider as the divisor in a quotient expression. 
- -*/ - -namespace libdivide_internal { - -#if LIBDIVIDE_USE_SSE2 -#define MAYBE_VECTOR(x) x -#define MAYBE_VECTOR_PARAM __m128i vector_func(__m128i, const DenomType *) -#else -#define MAYBE_VECTOR(x) 0 -#define MAYBE_VECTOR_PARAM int vector_func -#endif - - /* Some bogus unswitch functions for unsigned types so the same (presumably templated) code can work for both signed and unsigned. */ - uint32_t crash_u32(uint32_t, const libdivide_u32_t *) { abort(); } - uint64_t crash_u64(uint64_t, const libdivide_u64_t *) { abort(); } -#ifdef __APPLE__ - UInt64 crash_u64(UInt64, const libdivide_u64_t *) { abort(); } -#endif -#if LIBDIVIDE_USE_SSE2 - __m128i crash_u32_vector(__m128i, const libdivide_u32_t *) { abort(); } - __m128i crash_u64_vector(__m128i, const libdivide_u64_t *) { abort(); } -#endif - - template - class divider_base { - public: - DenomType denom; - divider_base(IntType d) : denom(gen_func(d)) { } - divider_base(const DenomType & d) : denom(d) { } - - IntType perform_divide(IntType val) const { return do_func(val, &denom); } -#if LIBDIVIDE_USE_SSE2 - __m128i perform_divide_vector(__m128i val) const { return vector_func(val, &denom); } -#endif - - int get_algorithm() const { return get_algo(&denom); } - }; - - - template struct divider_mid { }; - - template<> struct divider_mid { - typedef uint32_t IntType; - typedef struct libdivide_u32_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - }; - - template<> struct divider_mid { - typedef int32_t IntType; - typedef struct libdivide_s32_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - }; - -#ifdef __APPLE__ - template<> struct divider_mid { - typedef Int64 IntType; - typedef struct libdivide_s64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - }; - - template<> struct divider_mid { - typedef UInt64 IntType; - typedef struct libdivide_u64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - 
template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - - }; -#endif - - template<> struct divider_mid { - typedef uint64_t IntType; - typedef struct libdivide_u64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - - }; - - template<> struct divider_mid { - typedef int64_t IntType; - typedef struct libdivide_s64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - }; - -} - -template -class divider -{ - private: - typename libdivide_internal::divider_mid::template algo::divider sub; - template friend divider unswitch(const divider & d); - divider(const typename libdivide_internal::divider_mid::DenomType & denom) : sub(denom) { } - - public: - - /* Ordinary constructor, that takes the divisor as a parameter. */ - divider(T n) : sub(n) { } - - /* Default constructor, that divides by 1 */ - divider() : sub(1) { } - - /* Divides the parameter by the divisor, returning the quotient */ - T perform_divide(T val) const { return sub.perform_divide(val); } - -#if LIBDIVIDE_USE_SSE2 - /* Treats the vector as either two or four packed values (depending on the size), and divides each of them by the divisor, returning the packed quotients. */ - __m128i perform_divide_vector(__m128i val) const { return sub.perform_divide_vector(val); } -#endif - - /* Returns the index of algorithm, for use in the unswitch function */ - int get_algorithm() const { return sub.get_algorithm(); } // returns the algorithm for unswitching - - /* operator== */ - bool operator==(const divider & him) const { return sub.denom.magic == him.sub.denom.magic && sub.denom.more == him.sub.denom.more; } - - bool operator!=(const divider & him) const { return ! (*this == him); } +// The C++ divider class is templated on both an integer type +// (like uint64_t) and an algorithm type. +// * BRANCHFULL is the default algorithm type. +// * BRANCHFREE is the branchfree algorithm type. +enum { + BRANCHFULL, + BRANCHFREE }; -/* Returns a divider specialized for the given algorithm. */ -template -divider unswitch(const divider & d) { return divider(d.sub.denom); } - -/* Overload of the / operator for scalar division. 
*/ -template -int_type operator/(int_type numer, const divider & denom) { - return denom.perform_divide(numer); -} - -#if LIBDIVIDE_USE_SSE2 -/* Overload of the / operator for vector division. */ -template -__m128i operator/(__m128i numer, const divider & denom) { - return denom.perform_divide_vector(numer); -} +#if defined(LIBDIVIDE_AVX512) + #define LIBDIVIDE_VECTOR_TYPE __m512i +#elif defined(LIBDIVIDE_AVX2) + #define LIBDIVIDE_VECTOR_TYPE __m256i +#elif defined(LIBDIVIDE_SSE2) + #define LIBDIVIDE_VECTOR_TYPE __m128i #endif - -#endif //__cplusplus - -#endif //LIBDIVIDE_HEADER_ONLY -#ifdef __cplusplus -} //close namespace libdivide -} //close anonymous namespace +#if !defined(LIBDIVIDE_VECTOR_TYPE) + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) +#else + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \ + return libdivide_##ALGO##_do_vector(n, &denom); \ + } #endif -#pragma GCC diagnostic pop +// The DISPATCHER_GEN() macro generates C++ methods (for the given integer +// and algorithm types) that redirect to libdivide's C API. +#define DISPATCHER_GEN(T, ALGO) \ + libdivide_##ALGO##_t denom; \ + dispatcher() { } \ + dispatcher(T d) \ + : denom(libdivide_##ALGO##_gen(d)) \ + { } \ + T divide(T n) const { \ + return libdivide_##ALGO##_do(n, &denom); \ + } \ + LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + T recover() const { \ + return libdivide_##ALGO##_recover(&denom); \ + } + +// The dispatcher selects a specific division algorithm for a given +// type and ALGO using partial template specialization. +template struct dispatcher { }; + +template<> struct dispatcher { DISPATCHER_GEN(int32_t, s32) }; +template<> struct dispatcher { DISPATCHER_GEN(int32_t, s32_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(uint32_t, u32) }; +template<> struct dispatcher { DISPATCHER_GEN(uint32_t, u32_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(int64_t, s64) }; +template<> struct dispatcher { DISPATCHER_GEN(int64_t, s64_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(uint64_t, u64) }; +template<> struct dispatcher { DISPATCHER_GEN(uint64_t, u64_branchfree) }; + +// This is the main divider class for use by the user (C++ API). +// The actual division algorithm is selected using the dispatcher struct +// based on the integer and algorithm template parameters. +template +class divider { +public: + // We leave the default constructor empty so that creating + // an array of dividers and then initializing them + // later doesn't slow us down. + divider() { } + + // Constructor that takes the divisor as a parameter + divider(T d) : div(d) { } + + // Divides n by the divisor + T divide(T n) const { + return div.divide(n); + } + + // Recovers the divisor, returns the value that was + // used to initialize this divider object. + T recover() const { + return div.recover(); + } + + bool operator==(const divider& other) const { + return div.denom.magic == other.denom.magic && + div.denom.more == other.denom.more; + } + + bool operator!=(const divider& other) const { + return !(*this == other); + } + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Treats the vector as packed integer values with the same type as + // the divider (e.g. s32, u32, s64, u64) and divides each of + // them by the divider, returning the packed quotients. 
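For callers, the dispatcher machinery above reduces to a small API: construct a divider once (paying the magic-number generation), then use operator/ or operator/= in the hot loop; branchfree_divider selects the branch-free kernels. A usage sketch, not from the patch; it assumes the header is reachable as "libdivide.h":

```cpp
#include <cassert>
#include <cstdint>
#include "libdivide.h"

int main() {
    libdivide::divider<uint64_t> d(7);             // one-time magic generation
    libdivide::branchfree_divider<uint64_t> bf(7); // branch-free variant
    for (uint64_t n = 0; n < 1000; ++n) {
        assert(n / d == n / 7);  // scalar operator/ overload
        assert(n / bf == n / 7);
    }
    assert(d.recover() == 7);    // reads the divisor back out
    return 0;
}
```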
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { + return div.divide(n); + } +#endif + +private: + // Storage for the actual divisor + dispatcher::value, + std::is_signed::value, sizeof(T), ALGO> div; +}; + +// Overload of operator / for scalar division +template +T operator/(T n, const divider& div) { + return div.divide(n); +} + +// Overload of operator /= for scalar division +template +T& operator/=(T& n, const divider& div) { + n = div.divide(n); + return n; +} + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Overload of operator / for vector division + template + LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider& div) { + return div.divide(n); + } + // Overload of operator /= for vector division + template + LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider& div) { + n = div.divide(n); + return n; + } +#endif + +// libdivdie::branchfree_divider +template +using branchfree_divider = divider; + +} // namespace libdivide + +#endif // __cplusplus + +#endif // LIBDIVIDE_H diff --git a/contrib/msgpack-c b/contrib/msgpack-c new file mode 160000 index 00000000000..46684265d50 --- /dev/null +++ b/contrib/msgpack-c @@ -0,0 +1 @@ +Subproject commit 46684265d50b5d1b062d4c5c428ba08462844b1d diff --git a/contrib/pdqsort/pdqsort.h b/contrib/pdqsort/pdqsort.h index 31eb06fece4..01e82b710ee 100644 --- a/contrib/pdqsort/pdqsort.h +++ b/contrib/pdqsort/pdqsort.h @@ -124,11 +124,9 @@ namespace pdqsort_detail { inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; if (begin == end) return true; - - int limit = 0; - for (Iter cur = begin + 1; cur != end; ++cur) { - if (limit > partial_insertion_sort_limit) return false; + std::size_t limit = 0; + for (Iter cur = begin + 1; cur != end; ++cur) { Iter sift = cur; Iter sift_1 = cur - 1; @@ -142,6 +140,8 @@ namespace pdqsort_detail { *sift = PDQSORT_PREFER_MOVE(tmp); limit += cur - sift; } + + if (limit > partial_insertion_sort_limit) return false; } return true; @@ -232,7 +232,7 @@ namespace pdqsort_detail { unsigned char* offsets_r = align_cacheline(offsets_r_storage); int num_l, num_r, start_l, start_r; num_l = num_r = start_l = start_r = 0; - + while (last - first > 2 * block_size) { // Fill up offset blocks with elements that are on the wrong side. if (num_l == 0) { @@ -275,7 +275,7 @@ namespace pdqsort_detail { } int l_size = 0, r_size = 0; - int unknown_left = (last - first) - ((num_r || num_l) ? block_size : 0); + int unknown_left = (int)(last - first) - ((num_r || num_l) ? block_size : 0); if (num_r) { // Handle leftover block by assigning the unknown elements to the other block. l_size = unknown_left; @@ -311,7 +311,7 @@ namespace pdqsort_detail { start_l += num; start_r += num; if (num_l == 0) first += l_size; if (num_r == 0) last -= r_size; - + // We have now fully identified [first, last)'s proper position. Swap the last elements. if (num_l) { offsets_l += start_l; @@ -340,7 +340,7 @@ namespace pdqsort_detail { template inline std::pair partition_right(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; - + // Move pivot into local for speed. T pivot(PDQSORT_PREFER_MOVE(*begin)); @@ -359,7 +359,7 @@ namespace pdqsort_detail { // If the first pair of elements that should be swapped to partition are the same element, // the passed in sequence already was correctly partitioned. 
diff --git a/contrib/pdqsort/pdqsort.h b/contrib/pdqsort/pdqsort.h
index 31eb06fece4..01e82b710ee 100644
--- a/contrib/pdqsort/pdqsort.h
+++ b/contrib/pdqsort/pdqsort.h
@@ -124,11 +124,9 @@ namespace pdqsort_detail {
     inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) {
         typedef typename std::iterator_traits<Iter>::value_type T;
         if (begin == end) return true;
-
-        int limit = 0;
-        for (Iter cur = begin + 1; cur != end; ++cur) {
-            if (limit > partial_insertion_sort_limit) return false;
+        std::size_t limit = 0;
+        for (Iter cur = begin + 1; cur != end; ++cur) {
             Iter sift = cur;
             Iter sift_1 = cur - 1;
@@ -142,6 +140,8 @@
                 *sift = PDQSORT_PREFER_MOVE(tmp);
                 limit += cur - sift;
             }
+
+            if (limit > partial_insertion_sort_limit) return false;
         }
 
         return true;
@@ -232,7 +232,7 @@
         unsigned char* offsets_r = align_cacheline(offsets_r_storage);
         int num_l, num_r, start_l, start_r;
         num_l = num_r = start_l = start_r = 0;
-        
+
         while (last - first > 2 * block_size) {
             // Fill up offset blocks with elements that are on the wrong side.
             if (num_l == 0) {
@@ -275,7 +275,7 @@
         }
 
         int l_size = 0, r_size = 0;
-        int unknown_left = (last - first) - ((num_r || num_l) ? block_size : 0);
+        int unknown_left = (int)(last - first) - ((num_r || num_l) ? block_size : 0);
         if (num_r) {
             // Handle leftover block by assigning the unknown elements to the other block.
             l_size = unknown_left;
@@ -311,7 +311,7 @@
             start_l += num; start_r += num;
             if (num_l == 0) first += l_size;
             if (num_r == 0) last -= r_size;
-        
+
         // We have now fully identified [first, last)'s proper position. Swap the last elements.
         if (num_l) {
             offsets_l += start_l;
@@ -340,7 +340,7 @@
     template<class Iter, class Compare>
     inline std::pair<Iter, bool> partition_right(Iter begin, Iter end, Compare comp) {
         typedef typename std::iterator_traits<Iter>::value_type T;
-        
+
         // Move pivot into local for speed.
         T pivot(PDQSORT_PREFER_MOVE(*begin));
@@ -359,7 +359,7 @@
         // If the first pair of elements that should be swapped to partition are the same element,
         // the passed in sequence already was correctly partitioned.
         bool already_partitioned = first >= last;
-        
+
         // Keep swapping pairs of elements that are on the wrong side of the pivot. Previously
         // swapped pairs guard the searches, which is why the first iteration is special-cased
         // above.
@@ -388,7 +388,7 @@
         T pivot(PDQSORT_PREFER_MOVE(*begin));
         Iter first = begin;
         Iter last = end;
-        
+
         while (comp(pivot, *--last));
 
         if (last + 1 == end) while (first < last && !comp(pivot, *++first));
@@ -475,11 +475,11 @@
                     std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2));
                 }
             }
-            
+
             if (r_size >= insertion_sort_threshold) {
                 std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4));
                 std::iter_swap(end - 1, end - r_size / 4);
-                
+
                 if (r_size > ninther_threshold) {
                     std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4));
                     std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4));
@@ -493,7 +493,7 @@
             if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp)
                                     && partial_insertion_sort(pivot_pos + 1, end, comp)) return;
         }
-        
+
         // Sort the left partition first using recursion and do tail recursion elimination for
         // the right-hand partition.
         pdqsort_loop<Iter, Compare, Branchless>(begin, pivot_pos, comp, bad_allowed, leftmost);
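The partial_insertion_sort hunk above does two things: it widens the move counter `limit` to `std::size_t`, and it moves the bail-out test from the top of the loop to the bottom, so the element examined in the current iteration is fully sifted into place before the budget is checked. A self-contained sketch of the idea (names mirror pdqsort's; `limit_max` is my stand-in for pdqsort's `partial_insertion_sort_limit` constant, and this is an illustration rather than the vendored code):

```cpp
#include <cstddef>
#include <iterator>
#include <utility>

// Bounded insertion sort: sorts [begin, end) unless more than limit_max
// element moves are required, in which case it aborts and returns false.
template <class Iter, class Compare>
bool bounded_insertion_sort(Iter begin, Iter end, Compare comp) {
    typedef typename std::iterator_traits<Iter>::value_type T;
    if (begin == end) return true;

    const std::size_t limit_max = 8;  // pdqsort uses partial_insertion_sort_limit = 8
    std::size_t limit = 0;
    for (Iter cur = begin + 1; cur != end; ++cur) {
        Iter sift = cur;
        Iter sift_1 = cur - 1;

        // Sift *cur down to its sorted position, counting how far it moved.
        if (comp(*sift, *sift_1)) {
            T tmp = std::move(*sift);
            do { *sift-- = std::move(*sift_1); } while (sift != begin && comp(tmp, *--sift_1));
            *sift = std::move(tmp);
            limit += static_cast<std::size_t>(cur - sift);
        }

        // Checking after the sift (as the patch does) guarantees no element is
        // left half-placed when we give up.
        if (limit > limit_max) return false;
    }
    return true;
}
```

pdqsort calls this opportunistically on partitions that look already sorted; a `false` return is not an error, it simply makes the caller fall back to regular quicksort recursion.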
diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt
deleted file mode 100644
index 81cb5afbc43..00000000000
--- a/dbms/CMakeLists.txt
+++ /dev/null
@@ -1,590 +0,0 @@
-set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.")
-include_directories(${ConfigIncludePath})
-
-if (USE_INCLUDE_WHAT_YOU_USE)
-    set (CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH})
-endif ()
-
-if (USE_CLANG_TIDY)
-    set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
-endif ()
-
-if(COMPILER_PIPE)
-    set(MAX_COMPILER_MEMORY 2500)
-else()
-    set(MAX_COMPILER_MEMORY 1500)
-endif()
-if(MAKE_STATIC_LIBRARIES)
-    set(MAX_LINKER_MEMORY 3500)
-else()
-    set(MAX_LINKER_MEMORY 2500)
-endif()
-include(../cmake/limit_jobs.cmake)
-
-set (CONFIG_VERSION ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config_version.h)
-set (CONFIG_COMMON ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config.h)
-
-include (cmake/version.cmake)
-message (STATUS "Will build ${VERSION_FULL} revision ${VERSION_REVISION} ${VERSION_OFFICIAL}")
-configure_file (src/Common/config.h.in ${CONFIG_COMMON})
-configure_file (src/Common/config_version.h.in ${CONFIG_VERSION})
-configure_file (src/Core/config_core.h.in ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include/config_core.h)
-
-if (NOT MSVC)
-    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
-endif ()
-
-if (USE_DEBUG_HELPERS)
-    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/src/Core/iostream_debug_helpers.h")
-    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
-endif ()
-
-# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
-
-option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." ON)
-
-if (COMPILER_CLANG)
-    add_warning(pedantic)
-    no_warning(gnu-anonymous-struct)
-    no_warning(nested-anon-types)
-    no_warning(vla-extension)
-    no_warning(zero-length-array)
-
-    add_warning(comma)
-    add_warning(conditional-uninitialized)
-    add_warning(covered-switch-default)
-    add_warning(deprecated)
-    add_warning(embedded-directive)
-    add_warning(empty-init-stmt) # linux-only
-    add_warning(extra-semi-stmt) # linux-only
-    add_warning(extra-semi)
-    add_warning(gnu-case-range)
-    add_warning(inconsistent-missing-destructor-override)
-    add_warning(newline-eof)
-    add_warning(old-style-cast)
-    add_warning(range-loop-analysis)
-    add_warning(redundant-parens)
-    add_warning(reserved-id-macro)
-    add_warning(shadow-field) # clang 8+
-    add_warning(shadow-uncaptured-local)
-    add_warning(shadow)
-    add_warning(string-plus-int) # clang 8+
-    add_warning(undef)
-    add_warning(unreachable-code-return)
-    add_warning(unreachable-code)
-    add_warning(unused-exception-parameter)
-    add_warning(unused-macros)
-    add_warning(unused-member-function)
-    add_warning(zero-as-null-pointer-constant)
-
-    if (WEVERYTHING)
-        add_warning(everything)
-        no_warning(c++98-compat-pedantic)
-        no_warning(c++98-compat)
-        no_warning(c99-extensions)
-        no_warning(conversion)
-        no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
-        no_warning(deprecated-dynamic-exception-spec)
-        no_warning(disabled-macro-expansion)
-        no_warning(documentation-unknown-command)
-        no_warning(double-promotion)
-        no_warning(exit-time-destructors)
-        no_warning(float-equal)
-        no_warning(global-constructors)
-        no_warning(gnu-anonymous-struct)
-        no_warning(missing-prototypes)
-        no_warning(missing-variable-declarations)
-        no_warning(nested-anon-types)
-        no_warning(packed)
-        no_warning(padded)
-        no_warning(return-std-move-in-c++11) # clang 7+
-        no_warning(shift-sign-overflow)
-        no_warning(sign-conversion)
-        no_warning(switch-enum)
-        no_warning(undefined-func-template)
-        no_warning(unused-template)
-        no_warning(vla-extension)
-        no_warning(vla)
-        no_warning(weak-template-vtables)
-        no_warning(weak-vtables)
-        no_warning(zero-length-array)
-
-        # TODO Enable conversion, sign-conversion, double-promotion warnings.
-    endif ()
-elseif (COMPILER_GCC)
-    # Add compiler options only to c++ compiler
-    function(add_cxx_compile_options option)
-        add_compile_options("$<$<STREQUAL:$<TARGET_PROPERTY:LINKER_LANGUAGE>,CXX>:${option}>")
-    endfunction()
-    # Warn about boolean expression compared with an integer value different from true/false
-    add_cxx_compile_options(-Wbool-compare)
-    # Warn whenever a pointer is cast such that the required alignment of the target is increased.
-    add_cxx_compile_options(-Wcast-align)
-    # Warn whenever a pointer is cast so as to remove a type qualifier from the target type.
-    add_cxx_compile_options(-Wcast-qual)
-    # Warn when deleting a pointer to incomplete type, which may cause undefined behavior at runtime
-    add_cxx_compile_options(-Wdelete-incomplete)
-    # Warn if a requested optimization pass is disabled.
Code is too big or too complex - add_cxx_compile_options(-Wdisabled-optimization) - # Warn about duplicated conditions in an if-else-if chain - add_cxx_compile_options(-Wduplicated-cond) - # Warn about a comparison between values of different enumerated types - add_cxx_compile_options(-Wenum-compare) - # Warn about uninitialized variables that are initialized with themselves - add_cxx_compile_options(-Winit-self) - # Warn about logical not used on the left hand side operand of a comparison - add_cxx_compile_options(-Wlogical-not-parentheses) - # Warn about suspicious uses of logical operators in expressions - add_cxx_compile_options(-Wlogical-op) - # Warn if there exists a path from the function entry to a use of the variable that is uninitialized. - add_cxx_compile_options(-Wmaybe-uninitialized) - # Warn when the indentation of the code does not reflect the block structure - add_cxx_compile_options(-Wmisleading-indentation) - # Warn if a global function is defined without a previous declaration - disabled because of build times - # add_cxx_compile_options(-Wmissing-declarations) - # Warn if a user-supplied include directory does not exist - add_cxx_compile_options(-Wmissing-include-dirs) - # Obvious - add_cxx_compile_options(-Wnon-virtual-dtor) - # Obvious - add_cxx_compile_options(-Wno-return-local-addr) - # This warning is disabled due to false positives if compiled with libc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90037 - #add_cxx_compile_options(-Wnull-dereference) - # Obvious - add_cxx_compile_options(-Wodr) - # Obvious - add_cxx_compile_options(-Wold-style-cast) - # Warn when a function declaration hides virtual functions from a base class - # add_cxx_compile_options(-Woverloaded-virtual) - # Warn about placement new expressions with undefined behavior - add_cxx_compile_options(-Wplacement-new=2) - # Warn about anything that depends on the “size of” a function type or of void - add_cxx_compile_options(-Wpointer-arith) - # Warn if anything is declared more than once in the same scope - add_cxx_compile_options(-Wredundant-decls) - # Member initialization reordering - add_cxx_compile_options(-Wreorder) - # Obvious - add_cxx_compile_options(-Wshadow) - # Warn if left shifting a negative value - add_cxx_compile_options(-Wshift-negative-value) - # Warn about a definition of an unsized deallocation function - add_cxx_compile_options(-Wsized-deallocation) - # Warn when the sizeof operator is applied to a parameter that is declared as an array in a function definition - add_cxx_compile_options(-Wsizeof-array-argument) - # Warn for suspicious length parameters to certain string and memory built-in functions if the argument uses sizeof - add_cxx_compile_options(-Wsizeof-pointer-memaccess) - - if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9) - # Warn about overriding virtual functions that are not marked with the override keyword - add_cxx_compile_options(-Wsuggest-override) - endif () - - # Warn whenever a switch statement has an index of boolean type and the case values are outside the range of a boolean type - add_cxx_compile_options(-Wswitch-bool) - # Warn if a self-comparison always evaluates to true or false - add_cxx_compile_options(-Wtautological-compare) - # Warn about trampolines generated for pointers to nested functions - add_cxx_compile_options(-Wtrampolines) - # Obvious - add_cxx_compile_options(-Wunused) - # Warn if vector operation is not implemented via SIMD capabilities of the architecture - add_cxx_compile_options(-Wvector-operation-performance) -endif () - -if 
(COMPILER_GCC)
-    # If we leave this optimization enabled, gcc-7 replaces a pair of SSE intrinsics (16 byte load, store) with a call to memcpy.
-    # It leads to slow code. This is a compiler bug. It looks like this:
-    #
-    # (gdb) bt
-    #0  memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11
-    #1  0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=<optimized out>) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720
-    #2  memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../dbms/src/Common/memcpySmall.h:37
-
-    add_definitions ("-fno-tree-loop-distribute-patterns")
-endif ()
-
-add_subdirectory (src)
-
-set(dbms_headers)
-set(dbms_sources)
-
-add_headers_and_sources(clickhouse_common_io src/Common)
-add_headers_and_sources(clickhouse_common_io src/Common/HashTable)
-add_headers_and_sources(clickhouse_common_io src/IO)
-list (REMOVE_ITEM clickhouse_common_io_sources src/Common/malloc.cpp src/Common/new_delete.cpp)
-
-if(USE_RDKAFKA)
-    add_headers_and_sources(dbms src/Storages/Kafka)
-endif()
-
-
-list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD})
-list (APPEND clickhouse_common_io_headers ${CONFIG_VERSION} ${CONFIG_COMMON})
-
-list (APPEND dbms_sources src/Functions/IFunction.cpp src/Functions/FunctionFactory.cpp src/Functions/FunctionHelpers.cpp src/Functions/extractTimeZoneFromFunctionArguments.cpp)
-list (APPEND dbms_headers src/Functions/IFunctionImpl.h src/Functions/FunctionFactory.h src/Functions/FunctionHelpers.h src/Functions/extractTimeZoneFromFunctionArguments.h)
-
-list (APPEND dbms_sources
-    src/AggregateFunctions/AggregateFunctionFactory.cpp
-    src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp
-    src/AggregateFunctions/AggregateFunctionState.cpp
-    src/AggregateFunctions/parseAggregateFunctionParameters.cpp)
-
-list (APPEND dbms_headers
-    src/AggregateFunctions/IAggregateFunction.h
-    src/AggregateFunctions/IAggregateFunctionCombinator.h
-    src/AggregateFunctions/AggregateFunctionFactory.h
-    src/AggregateFunctions/AggregateFunctionCombinatorFactory.h
-    src/AggregateFunctions/AggregateFunctionState.h
-    src/AggregateFunctions/FactoryHelpers.h
-    src/AggregateFunctions/parseAggregateFunctionParameters.h)
-
-list (APPEND dbms_sources src/TableFunctions/ITableFunction.cpp src/TableFunctions/TableFunctionFactory.cpp)
-list (APPEND dbms_headers src/TableFunctions/ITableFunction.h src/TableFunctions/TableFunctionFactory.h)
-list (APPEND dbms_sources src/Dictionaries/DictionaryFactory.cpp src/Dictionaries/DictionarySourceFactory.cpp src/Dictionaries/DictionaryStructure.cpp src/Dictionaries/getDictionaryConfigurationFromAST.cpp)
-list (APPEND dbms_headers src/Dictionaries/DictionaryFactory.h src/Dictionaries/DictionarySourceFactory.h src/Dictionaries/DictionaryStructure.h src/Dictionaries/getDictionaryConfigurationFromAST.h)
-
-if (NOT ENABLE_SSL)
-    list (REMOVE_ITEM clickhouse_common_io_sources src/Common/OpenSSLHelpers.cpp)
-    list (REMOVE_ITEM clickhouse_common_io_headers src/Common/OpenSSLHelpers.h)
-endif ()
-
-add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources})
-
-add_library (clickhouse_malloc OBJECT src/Common/malloc.cpp)
-set_source_files_properties(src/Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin")
-
-add_library (clickhouse_new_delete STATIC src/Common/new_delete.cpp)
-target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io jemalloc)
-
-if (OS_FREEBSD)
-    target_compile_definitions (clickhouse_common_io PUBLIC
CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST) -endif () - -add_subdirectory(src/Common/ZooKeeper) -add_subdirectory(src/Common/Config) - -set (all_modules) -macro(add_object_library name common_path) - if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) - add_headers_and_sources(dbms ${common_path}) - else () - list (APPEND all_modules ${name}) - add_headers_and_sources(${name} ${common_path}) - add_library(${name} SHARED ${${name}_sources} ${${name}_headers}) - target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all) - endif () -endmacro() - -add_object_library(clickhouse_access src/Access) -add_object_library(clickhouse_core src/Core) -add_object_library(clickhouse_compression src/Compression) -add_object_library(clickhouse_datastreams src/DataStreams) -add_object_library(clickhouse_datatypes src/DataTypes) -add_object_library(clickhouse_databases src/Databases) -add_object_library(clickhouse_disks src/Disks) -add_object_library(clickhouse_interpreters src/Interpreters) -add_object_library(clickhouse_interpreters_clusterproxy src/Interpreters/ClusterProxy) -add_object_library(clickhouse_columns src/Columns) -add_object_library(clickhouse_storages src/Storages) -add_object_library(clickhouse_storages_distributed src/Storages/Distributed) -add_object_library(clickhouse_storages_mergetree src/Storages/MergeTree) -add_object_library(clickhouse_storages_liveview src/Storages/LiveView) -add_object_library(clickhouse_client src/Client) -add_object_library(clickhouse_formats src/Formats) -add_object_library(clickhouse_processors src/Processors) -add_object_library(clickhouse_processors_executors src/Processors/Executors) -add_object_library(clickhouse_processors_formats src/Processors/Formats) -add_object_library(clickhouse_processors_formats_impl src/Processors/Formats/Impl) -add_object_library(clickhouse_processors_transforms src/Processors/Transforms) -add_object_library(clickhouse_processors_sources src/Processors/Sources) - - -if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) - add_library (dbms STATIC ${dbms_headers} ${dbms_sources}) - target_link_libraries (dbms PRIVATE jemalloc) - set (all_modules dbms) -else() - add_library (dbms SHARED ${dbms_headers} ${dbms_sources}) - target_link_libraries (dbms PUBLIC ${all_modules}) - target_link_libraries (clickhouse_interpreters PRIVATE jemalloc) - list (APPEND all_modules dbms) - # force all split libs to be linked - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed") -endif () - -macro (dbms_target_include_directories) - foreach (module ${all_modules}) - target_include_directories (${module} ${ARGN}) - endforeach () -endmacro () - -macro (dbms_target_link_libraries) - foreach (module ${all_modules}) - target_link_libraries (${module} ${ARGN}) - endforeach () -endmacro () - -if (USE_EMBEDDED_COMPILER) - dbms_target_link_libraries (PRIVATE ${REQUIRED_LLVM_LIBRARIES}) - dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS}) -endif () - -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL") - # Won't generate debug info for files with heavy template instantiation to achieve faster linking and lower size. 
- set_source_files_properties( - src/Dictionaries/FlatDictionary.cpp - src/Dictionaries/HashedDictionary.cpp - src/Dictionaries/CacheDictionary.cpp - src/Dictionaries/TrieDictionary.cpp - src/Dictionaries/RangeHashedDictionary.cpp - src/Dictionaries/ComplexKeyHashedDictionary.cpp - src/Dictionaries/ComplexKeyCacheDictionary.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp - src/Dictionaries/ODBCBlockInputStream.cpp - src/Dictionaries/HTTPDictionarySource.cpp - src/Dictionaries/LibraryDictionarySource.cpp - src/Dictionaries/ExecutableDictionarySource.cpp - src/Dictionaries/ClickHouseDictionarySource.cpp - PROPERTIES COMPILE_FLAGS -g0) -endif () - -# Otherwise it will slow down stack traces printing too much. -set_source_files_properties( - src/Common/Elf.cpp - src/Common/Dwarf.cpp - src/Common/SymbolIndex.cpp - PROPERTIES COMPILE_FLAGS "-O3 ${WITHOUT_COVERAGE}") - -target_link_libraries (clickhouse_common_io - PUBLIC - common - PRIVATE - string_utils - widechar_width - ${LINK_LIBRARIES_ONLY_ON_X86_64} - PUBLIC - ${DOUBLE_CONVERSION_LIBRARIES} - ryu - PUBLIC - ${Poco_Net_LIBRARY} - ${Poco_Util_LIBRARY} - ${Poco_Foundation_LIBRARY} - ${Poco_XML_LIBRARY} -) - -if(RE2_LIBRARY) - target_link_libraries(clickhouse_common_io PUBLIC ${RE2_LIBRARY}) -endif() -if(RE2_ST_LIBRARY) - target_link_libraries(clickhouse_common_io PUBLIC ${RE2_ST_LIBRARY}) -endif() - -target_link_libraries(clickhouse_common_io - PUBLIC - ${CITYHASH_LIBRARIES} - PRIVATE - ${Poco_XML_LIBRARY} - ${ZLIB_LIBRARIES} - ${EXECINFO_LIBRARIES} - PUBLIC - ${Boost_SYSTEM_LIBRARY} - ${Boost_PROGRAM_OPTIONS_LIBRARY} - PUBLIC - roaring -) - -if (USE_RDKAFKA) - dbms_target_link_libraries(PRIVATE ${CPPKAFKA_LIBRARY} ${RDKAFKA_LIBRARY}) - if(NOT USE_INTERNAL_RDKAFKA_LIBRARY) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR}) - endif() -endif() - - -if(RE2_INCLUDE_DIR) - target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${RE2_INCLUDE_DIR}) -endif() - -if(CPUID_LIBRARY) - target_link_libraries(clickhouse_common_io PRIVATE ${CPUID_LIBRARY}) -endif() - -if(CPUINFO_LIBRARY) - target_link_libraries(clickhouse_common_io PRIVATE ${CPUINFO_LIBRARY}) -endif() - -dbms_target_link_libraries ( - PRIVATE - clickhouse_parsers - clickhouse_common_config - clickhouse_common_zookeeper - string_utils # FIXME: not sure if it's private - PUBLIC - clickhouse_common_io - PRIVATE - clickhouse_dictionaries_embedded - ${LZ4_LIBRARY} - PUBLIC - ${MYSQLXX_LIBRARY} - PRIVATE - ${BTRIE_LIBRARIES} - ${Boost_PROGRAM_OPTIONS_LIBRARY} - ${Boost_FILESYSTEM_LIBRARY} - PUBLIC - ${Boost_SYSTEM_LIBRARY} -) - -target_include_directories(clickhouse_common_io PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) # uses some includes from core -dbms_target_include_directories(PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) - -target_include_directories(clickhouse_common_io SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) -dbms_target_include_directories(SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) - -dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR}) - -if (NOT USE_INTERNAL_LZ4_LIBRARY AND LZ4_INCLUDE_DIR) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR}) -endif () - -if (ZSTD_LIBRARY) - dbms_target_link_libraries(PRIVATE ${ZSTD_LIBRARY}) - if (NOT USE_INTERNAL_ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE 
${ZSTD_INCLUDE_DIR}) - endif () -endif() - -if (NOT USE_INTERNAL_BOOST_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) -endif () - -if (Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) -endif() - -if (USE_POCO_SQLODBC) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_SQL_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY}) - if (NOT USE_INTERNAL_POCO_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQL_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR} SYSTEM PUBLIC ${Poco_SQL_INCLUDE_DIR}) - endif() -endif() - -if (Poco_Data_FOUND) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) -endif() - -if (USE_POCO_DATAODBC) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_Data_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_DataODBC_LIBRARY}) - if (NOT USE_INTERNAL_POCO_LIBRARY) - dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR}) - endif() -endif() - -if (USE_POCO_MONGODB) - dbms_target_link_libraries (PRIVATE ${Poco_MongoDB_LIBRARY}) -endif() - -if (USE_POCO_REDIS) - dbms_target_link_libraries (PRIVATE ${Poco_Redis_LIBRARY}) -endif() - -if (USE_POCO_NETSSL) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) -endif() - -if (USE_POCO_JSON) - dbms_target_link_libraries (PRIVATE ${Poco_JSON_LIBRARY}) -endif() - -dbms_target_link_libraries (PRIVATE ${Poco_Foundation_LIBRARY}) - -if (USE_ICU) - dbms_target_link_libraries (PRIVATE ${ICU_LIBRARIES}) - dbms_target_include_directories (SYSTEM PRIVATE ${ICU_INCLUDE_DIRS}) -endif () - -if (USE_CAPNP) - dbms_target_link_libraries (PRIVATE ${CAPNP_LIBRARIES}) -endif () - -if (USE_PARQUET) - dbms_target_link_libraries(PRIVATE ${PARQUET_LIBRARY}) - if (NOT USE_INTERNAL_PARQUET_LIBRARY OR USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${PARQUET_INCLUDE_DIR} ${ARROW_INCLUDE_DIR}) - endif () -endif () - -if (USE_AVRO) - dbms_target_link_libraries(PRIVATE ${AVROCPP_LIBRARY}) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${AVROCPP_INCLUDE_DIR}) -endif () - -if (OPENSSL_CRYPTO_LIBRARY) - dbms_target_link_libraries (PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) - target_link_libraries (clickhouse_common_io PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) -endif () - -dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR}) -dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) - -if (USE_PROTOBUF) - dbms_target_link_libraries (PRIVATE ${Protobuf_LIBRARY}) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR}) -endif () - -if (USE_HDFS) - target_link_libraries (clickhouse_common_io PUBLIC ${HDFS3_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR}) -endif() - -if (USE_AWS_S3) - target_link_libraries (clickhouse_common_io PUBLIC ${AWS_S3_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM 
BEFORE PUBLIC ${AWS_S3_CORE_INCLUDE_DIR})
-    target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_INCLUDE_DIR})
-endif()
-
-if (USE_BROTLI)
-    target_link_libraries (clickhouse_common_io PRIVATE ${BROTLI_LIBRARY})
-    target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR})
-endif()
-
-dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR})
-target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR})
-
-target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR})
-
-add_subdirectory (programs)
-add_subdirectory (tests)
-
-if (ENABLE_TESTS AND USE_GTEST)
-    macro (grep_gtest_sources BASE_DIR DST_VAR)
-        # Could match files that are not in tests/ directories
-        file(GLOB_RECURSE "${DST_VAR}" RELATIVE "${BASE_DIR}" "gtest*.cpp")
-    endmacro()
-
-    # attach all dbms gtest sources
-    grep_gtest_sources(${ClickHouse_SOURCE_DIR}/dbms dbms_gtest_sources)
-    add_executable(unit_tests_dbms ${dbms_gtest_sources})
-
-    # gtest framework has substandard code
-    target_compile_options(unit_tests_dbms PRIVATE
-        -Wno-zero-as-null-pointer-constant
-        -Wno-undef
-        -Wno-sign-compare
-        -Wno-used-but-marked-unused
-        -Wno-missing-noreturn
-        -Wno-gnu-zero-variadic-macro-arguments
-    )
-
-    target_link_libraries(unit_tests_dbms PRIVATE ${GTEST_BOTH_LIBRARIES} clickhouse_functions clickhouse_parsers dbms clickhouse_common_zookeeper string_utils)
-    add_check(unit_tests_dbms)
-endif ()
diff --git a/dbms/programs/client/readpassphrase/readpassphrase.c b/dbms/programs/client/readpassphrase/readpassphrase.c
deleted file mode 100644
index 8c56877196c..00000000000
--- a/dbms/programs/client/readpassphrase/readpassphrase.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/* $OpenBSD: readpassphrase.c,v 1.26 2016/10/18 12:47:18 millert Exp $ */
-
-/*
- * Copyright (c) 2000-2002, 2007, 2010
- * Todd C. Miller <Todd.Miller@courtesan.com>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F39502-99-1-0512.
- */
-
-/* OPENBSD ORIGINAL: lib/libc/gen/readpassphrase.c */
-
-#include "includes.h"
-
-#ifndef HAVE_READPASSPHRASE
-
-#include <termios.h>
-#include <signal.h>
-#include <ctype.h>
-#include <fcntl.h>
-#include <readpassphrase.h>
-#include <errno.h>
-#include <string.h>
-#include <unistd.h>
-
-#ifndef TCSASOFT
-/* If we don't have TCSASOFT define it so that ORing it in below is a no-op.
*/ -# define TCSASOFT 0 -#endif - -/* SunOS 4.x which lacks _POSIX_VDISABLE, but has VDISABLE */ -#if !defined(_POSIX_VDISABLE) && defined(VDISABLE) -# define _POSIX_VDISABLE VDISABLE -#endif - -static volatile sig_atomic_t signo[NSIG]; - -static void handler(int); - -char * -readpassphrase(const char *prompt, char *buf, size_t bufsiz, int flags) -{ - ssize_t nr; - int input, output, save_errno, i, need_restart; - char ch, *p, *end; - struct termios term, oterm; - struct sigaction sa, savealrm, saveint, savehup, savequit, saveterm; - struct sigaction savetstp, savettin, savettou, savepipe; - - /* I suppose we could alloc on demand in this case (XXX). */ - if (bufsiz == 0) { - errno = EINVAL; - return(NULL); - } - -restart: - for (i = 0; i < NSIG; i++) - signo[i] = 0; - nr = -1; - save_errno = 0; - need_restart = 0; - /* - * Read and write to /dev/tty if available. If not, read from - * stdin and write to stderr unless a tty is required. - */ - if ((flags & RPP_STDIN) || - (input = output = open(_PATH_TTY, O_RDWR)) == -1) { - if (flags & RPP_REQUIRE_TTY) { - errno = ENOTTY; - return(NULL); - } - input = STDIN_FILENO; - output = STDERR_FILENO; - } - - /* - * Turn off echo if possible. - * If we are using a tty but are not the foreground pgrp this will - * generate SIGTTOU, so do it *before* installing the signal handlers. - */ - if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) { - memcpy(&term, &oterm, sizeof(term)); - if (!(flags & RPP_ECHO_ON)) - term.c_lflag &= ~(ECHO | ECHONL); -#ifdef VSTATUS - if (term.c_cc[VSTATUS] != _POSIX_VDISABLE) - term.c_cc[VSTATUS] = _POSIX_VDISABLE; -#endif - (void)tcsetattr(input, TCSAFLUSH|TCSASOFT, &term); - } else { - memset(&term, 0, sizeof(term)); - term.c_lflag |= ECHO; - memset(&oterm, 0, sizeof(oterm)); - oterm.c_lflag |= ECHO; - } - - /* - * Catch signals that would otherwise cause the user to end - * up with echo turned off in the shell. Don't worry about - * things like SIGXCPU and SIGVTALRM for now. - */ - sigemptyset(&sa.sa_mask); - sa.sa_flags = 0; /* don't restart system calls */ - sa.sa_handler = handler; - (void)sigaction(SIGALRM, &sa, &savealrm); - (void)sigaction(SIGHUP, &sa, &savehup); - (void)sigaction(SIGINT, &sa, &saveint); - (void)sigaction(SIGPIPE, &sa, &savepipe); - (void)sigaction(SIGQUIT, &sa, &savequit); - (void)sigaction(SIGTERM, &sa, &saveterm); - (void)sigaction(SIGTSTP, &sa, &savetstp); - (void)sigaction(SIGTTIN, &sa, &savettin); - (void)sigaction(SIGTTOU, &sa, &savettou); - - if (!(flags & RPP_STDIN)) - (void)write(output, prompt, strlen(prompt)); - end = buf + bufsiz - 1; - p = buf; - while ((nr = read(input, &ch, 1)) == 1 && ch != '\n' && ch != '\r') { - if (p < end) { - if ((flags & RPP_SEVENBIT)) - ch &= 0x7f; - if (isalpha((unsigned char)ch)) { - if ((flags & RPP_FORCELOWER)) - ch = (char)tolower((unsigned char)ch); - if ((flags & RPP_FORCEUPPER)) - ch = (char)toupper((unsigned char)ch); - } - *p++ = ch; - } - } - *p = '\0'; - save_errno = errno; - if (!(term.c_lflag & ECHO)) - (void)write(output, "\n", 1); - - /* Restore old terminal settings and signals. */ - if (memcmp(&term, &oterm, sizeof(term)) != 0) { - const int sigttou = signo[SIGTTOU]; - - /* Ignore SIGTTOU generated when we are not the fg pgrp. 
*/ - while (tcsetattr(input, TCSAFLUSH|TCSASOFT, &oterm) == -1 && - errno == EINTR && !signo[SIGTTOU]) - continue; - signo[SIGTTOU] = sigttou; - } - (void)sigaction(SIGALRM, &savealrm, NULL); - (void)sigaction(SIGHUP, &savehup, NULL); - (void)sigaction(SIGINT, &saveint, NULL); - (void)sigaction(SIGQUIT, &savequit, NULL); - (void)sigaction(SIGPIPE, &savepipe, NULL); - (void)sigaction(SIGTERM, &saveterm, NULL); - (void)sigaction(SIGTSTP, &savetstp, NULL); - (void)sigaction(SIGTTIN, &savettin, NULL); - (void)sigaction(SIGTTOU, &savettou, NULL); - if (input != STDIN_FILENO) - (void)close(input); - - /* - * If we were interrupted by a signal, resend it to ourselves - * now that we have restored the signal handlers. - */ - for (i = 0; i < NSIG; i++) { - if (signo[i]) { - kill(getpid(), i); - switch (i) { - case SIGTSTP: - case SIGTTIN: - case SIGTTOU: - need_restart = 1; - } - } - } - if (need_restart) - goto restart; - - if (save_errno) - errno = save_errno; - return(nr == -1 ? NULL : buf); -} -//DEF_WEAK(readpassphrase); - -#if 0 -char * -getpass(const char *prompt) -{ - static char buf[_PASSWORD_LEN + 1]; - - return(readpassphrase(prompt, buf, sizeof(buf), RPP_ECHO_OFF)); -} -#endif - -static void handler(int s) -{ - - signo[s] = 1; -} -#endif /* HAVE_READPASSPHRASE */ diff --git a/dbms/programs/server/TCPHandlerFactory.h b/dbms/programs/server/TCPHandlerFactory.h deleted file mode 100644 index 0eb8be13a2d..00000000000 --- a/dbms/programs/server/TCPHandlerFactory.h +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once - -#include -#include -#include "IServer.h" -#include "TCPHandler.h" - -namespace Poco { class Logger; } - -namespace DB -{ - -class TCPHandlerFactory : public Poco::Net::TCPServerConnectionFactory -{ -private: - IServer & server; - Poco::Logger * log; - -public: - explicit TCPHandlerFactory(IServer & server_, bool secure_ = false) - : server(server_) - , log(&Logger::get(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory")) - { - } - - Poco::Net::TCPServerConnection * createConnection(const Poco::Net::StreamSocket & socket) override - { - LOG_TRACE(log, - "TCP Request. " - << "Address: " - << socket.peerAddress().toString()); - - return new TCPHandler(server, socket); - } -}; - -} diff --git a/dbms/programs/server/config.xml b/dbms/programs/server/config.xml deleted file mode 100644 index e0d527f9538..00000000000 --- a/dbms/programs/server/config.xml +++ /dev/null @@ -1,528 +0,0 @@ - - - - - - trace - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 10 - - - - 8123 - 9000 - 9004 - - - - - - - - /etc/clickhouse-server/server.crt - /etc/clickhouse-server/server.key - - /etc/clickhouse-server/dhparam.pem - none - true - true - sslv2,sslv3 - true - - - - true - true - sslv2,sslv3 - true - - - - RejectCertificateHandler - - - - - - - - - 9009 - - - - - - - - - - - - - - - - - - - - 4096 - 3 - - - 100 - - - - - - 8589934592 - - - 5368709120 - - - - /var/lib/clickhouse/ - - - /var/lib/clickhouse/tmp/ - - - - - - /var/lib/clickhouse/user_files/ - - - users.xml - - - default - - - - - - default - - - - - - - - - false - - - - - - - - localhost - 9000 - - - - - - - localhost - 9000 - - - - - localhost - 9000 - - - - - - - 127.0.0.1 - 9000 - - - - - 127.0.0.2 - 9000 - - - - - - - localhost - 9440 - 1 - - - - - - - localhost - 9000 - - - - - localhost - 1 - - - - - - - - - - - - - - - - - - - - - - - - 3600 - - - - 3600 - - - 60 - - - - - - - - - - - - - system - query_log
- - toYYYYMM(event_date) - - - - - 7500 -
- - - - system - trace_log
- - toYYYYMM(event_date) - 7500 -
- - - - system - query_thread_log
- toYYYYMM(event_date) - 7500 -
- - - - - - - - system - metric_log
- 7500 - 1000 -
- - - - - - - - - - - - *_dictionary.xml - - - - - - - - - - /clickhouse/task_queue/ddl - - - - - - - - - - - - - - - - click_cost - any - - 0 - 3600 - - - 86400 - 60 - - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - - - - /var/lib/clickhouse/format_schemas/ - - - - - - -
diff --git a/dbms/programs/server/users.xml b/dbms/programs/server/users.xml deleted file mode 100644 index d631fbb0f8a..00000000000 --- a/dbms/programs/server/users.xml +++ /dev/null @@ -1,107 +0,0 @@ - - - - - - - - 10000000000 - - - 0 - - - random - - - - - 1 - - - - - - - - - - - - - ::/0 - - - - default - - - default - - - - - - - - - - - 3600 - - - 0 - 0 - 0 - 0 - 0 - - - - diff --git a/dbms/src/Access/AccessFlags.h b/dbms/src/Access/AccessFlags.h deleted file mode 100644 index f15e7d1e274..00000000000 --- a/dbms/src/Access/AccessFlags.h +++ /dev/null @@ -1,526 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -/// Represents a combination of access types which can be granted globally, on databases, tables, columns, etc. -/// For example "SELECT, CREATE USER" is an access type. -class AccessFlags -{ -public: - AccessFlags(AccessType type); - - /// The same as AccessFlags(AccessType::NONE). - AccessFlags() = default; - - /// Constructs from a string like "SELECT". - AccessFlags(const std::string_view & keyword); - - /// Constructs from a list of strings like "SELECT, UPDATE, INSERT". - AccessFlags(const std::vector & keywords); - AccessFlags(const Strings & keywords); - - AccessFlags(const AccessFlags & src) = default; - AccessFlags(AccessFlags && src) = default; - AccessFlags & operator =(const AccessFlags & src) = default; - AccessFlags & operator =(AccessFlags && src) = default; - - /// Returns the access type which contains two specified access types. - AccessFlags & operator |=(const AccessFlags & other) { flags |= other.flags; return *this; } - friend AccessFlags operator |(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) |= right; } - - /// Returns the access type which contains the common part of two access types. - AccessFlags & operator &=(const AccessFlags & other) { flags &= other.flags; return *this; } - friend AccessFlags operator &(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) &= right; } - - /// Returns the access type which contains only the part of the first access type which is not the part of the second access type. - /// (lhs - rhs) is the same as (lhs & ~rhs). - AccessFlags & operator -=(const AccessFlags & other) { flags &= ~other.flags; return *this; } - friend AccessFlags operator -(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) -= right; } - - AccessFlags operator ~() const { AccessFlags res; res.flags = ~flags; return res; } - - bool isEmpty() const { return flags.none(); } - explicit operator bool() const { return !isEmpty(); } - bool contains(const AccessFlags & other) const { return (flags & other.flags) == other.flags; } - - friend bool operator ==(const AccessFlags & left, const AccessFlags & right) { return left.flags == right.flags; } - friend bool operator !=(const AccessFlags & left, const AccessFlags & right) { return !(left == right); } - - void clear() { flags.reset(); } - - /// Returns a comma-separated list of keywords, like "SELECT, CREATE USER, UPDATE". - String toString() const; - - /// Returns a list of keywords. - std::vector toKeywords() const; - - /// Returns all the flags. - /// These are the same as (allGlobalFlags() | allDatabaseFlags() | allTableFlags() | allColumnsFlags() | allDictionaryFlags()). - static AccessFlags allFlags(); - - /// Returns all the global flags. - static AccessFlags allGlobalFlags(); - - /// Returns all the flags related to a database. 
- static AccessFlags allDatabaseFlags(); - - /// Returns all the flags related to a table. - static AccessFlags allTableFlags(); - - /// Returns all the flags related to a column. - static AccessFlags allColumnFlags(); - - /// Returns all the flags related to a dictionary. - static AccessFlags allDictionaryFlags(); - -private: - static constexpr size_t NUM_FLAGS = 128; - using Flags = std::bitset; - Flags flags; - - AccessFlags(const Flags & flags_) : flags(flags_) {} - - template - class Impl; -}; - - -namespace ErrorCodes -{ - extern const int UNKNOWN_ACCESS_TYPE; -} - -template -class AccessFlags::Impl -{ -public: - static const Impl & instance() - { - static const Impl res; - return res; - } - - Flags accessTypeToFlags(AccessType type) const - { - return access_type_to_flags_mapping[static_cast(type)]; - } - - Flags keywordToFlags(const std::string_view & keyword) const - { - auto it = keyword_to_flags_map.find(keyword); - if (it == keyword_to_flags_map.end()) - { - String uppercased_keyword{keyword}; - boost::to_upper(uppercased_keyword); - it = keyword_to_flags_map.find(uppercased_keyword); - if (it == keyword_to_flags_map.end()) - throw Exception("Unknown access type: " + String(keyword), ErrorCodes::UNKNOWN_ACCESS_TYPE); - } - return it->second; - } - - Flags keywordsToFlags(const std::vector & keywords) const - { - Flags res; - for (const auto & keyword : keywords) - res |= keywordToFlags(keyword); - return res; - } - - Flags keywordsToFlags(const Strings & keywords) const - { - Flags res; - for (const auto & keyword : keywords) - res |= keywordToFlags(keyword); - return res; - } - - std::vector flagsToKeywords(const Flags & flags_) const - { - std::vector keywords; - flagsToKeywordsRec(flags_, keywords, *flags_to_keyword_tree); - - if (keywords.empty()) - keywords.push_back("USAGE"); - - return keywords; - } - - String flagsToString(const Flags & flags_) const - { - String str; - for (const auto & keyword : flagsToKeywords(flags_)) - { - if (!str.empty()) - str += ", "; - str += keyword; - } - return str; - } - - const Flags & getAllFlags() const { return all_flags; } - const Flags & getGlobalFlags() const { return all_flags_for_target[GLOBAL]; } - const Flags & getDatabaseFlags() const { return all_flags_for_target[DATABASE]; } - const Flags & getTableFlags() const { return all_flags_for_target[TABLE]; } - const Flags & getColumnFlags() const { return all_flags_for_target[COLUMN]; } - const Flags & getDictionaryFlags() const { return all_flags_for_target[DICTIONARY]; } - -private: - enum Target - { - UNKNOWN_TARGET, - GLOBAL, - DATABASE, - TABLE, - VIEW = TABLE, - COLUMN, - DICTIONARY, - }; - - static constexpr size_t NUM_TARGETS = static_cast(DICTIONARY) + 1; - - struct Node; - using NodePtr = std::unique_ptr; - using Nodes = std::vector; - - template - static Nodes nodes(Args&& ... args) - { - Nodes res; - ext::push_back(res, std::move(args)...); - return res; - } - - struct Node - { - std::string_view keyword; - std::vector aliases; - Flags flags; - Target target = UNKNOWN_TARGET; - Nodes children; - - Node(std::string_view keyword_, size_t flag_, Target target_) - : keyword(keyword_), target(target_) - { - flags.set(flag_); - } - - Node(std::string_view keyword_, Nodes children_) - : keyword(keyword_), children(std::move(children_)) - { - for (const auto & child : children) - flags |= child->flags; - } - - template - Node(std::string_view keyword_, NodePtr first_child, Args &&... 
other_children) - : Node(keyword_, nodes(std::move(first_child), std::move(other_children)...)) {} - }; - - static void flagsToKeywordsRec(const Flags & flags_, std::vector & keywords, const Node & start_node) - { - Flags matching_flags = (flags_ & start_node.flags); - if (matching_flags.any()) - { - if (matching_flags == start_node.flags) - { - keywords.push_back(start_node.keyword); - } - else - { - for (const auto & child : start_node.children) - flagsToKeywordsRec(flags_, keywords, *child); - } - } - } - - static NodePtr makeFlagsToKeywordTree() - { - size_t next_flag = 0; - Nodes all; - - auto show_databases = std::make_unique("SHOW DATABASES", next_flag++, DATABASE); - auto show_tables = std::make_unique("SHOW TABLES", next_flag++, TABLE); - auto show_columns = std::make_unique("SHOW COLUMNS", next_flag++, COLUMN); - auto show_dictionaries = std::make_unique("SHOW DICTIONARIES", next_flag++, DICTIONARY); - auto show = std::make_unique("SHOW", std::move(show_databases), std::move(show_tables), std::move(show_columns), std::move(show_dictionaries)); - ext::push_back(all, std::move(show)); - - auto select = std::make_unique("SELECT", next_flag++, COLUMN); - auto insert = std::make_unique("INSERT", next_flag++, COLUMN); - ext::push_back(all, std::move(select), std::move(insert)); - - auto update = std::make_unique("UPDATE", next_flag++, COLUMN); - ext::push_back(update->aliases, "ALTER UPDATE"); - auto delet = std::make_unique("DELETE", next_flag++, TABLE); - ext::push_back(delet->aliases, "ALTER DELETE"); - - auto add_column = std::make_unique("ADD COLUMN", next_flag++, COLUMN); - add_column->aliases.push_back("ALTER ADD COLUMN"); - auto modify_column = std::make_unique("MODIFY COLUMN", next_flag++, COLUMN); - modify_column->aliases.push_back("ALTER MODIFY COLUMN"); - auto drop_column = std::make_unique("DROP COLUMN", next_flag++, COLUMN); - drop_column->aliases.push_back("ALTER DROP COLUMN"); - auto comment_column = std::make_unique("COMMENT COLUMN", next_flag++, COLUMN); - comment_column->aliases.push_back("ALTER COMMENT COLUMN"); - auto clear_column = std::make_unique("CLEAR COLUMN", next_flag++, COLUMN); - clear_column->aliases.push_back("ALTER CLEAR COLUMN"); - auto alter_column = std::make_unique("ALTER COLUMN", std::move(add_column), std::move(modify_column), std::move(drop_column), std::move(comment_column), std::move(clear_column)); - - auto alter_order_by = std::make_unique("ALTER ORDER BY", next_flag++, TABLE); - alter_order_by->aliases.push_back("MODIFY ORDER BY"); - alter_order_by->aliases.push_back("ALTER MODIFY ORDER BY"); - auto add_index = std::make_unique("ADD INDEX", next_flag++, TABLE); - add_index->aliases.push_back("ALTER ADD INDEX"); - auto drop_index = std::make_unique("DROP INDEX", next_flag++, TABLE); - drop_index->aliases.push_back("ALTER DROP INDEX"); - auto materialize_index = std::make_unique("MATERIALIZE INDEX", next_flag++, TABLE); - materialize_index->aliases.push_back("ALTER MATERIALIZE INDEX"); - auto clear_index = std::make_unique("CLEAR INDEX", next_flag++, TABLE); - clear_index->aliases.push_back("ALTER CLEAR INDEX"); - auto index = std::make_unique("INDEX", std::move(alter_order_by), std::move(add_index), std::move(drop_index), std::move(materialize_index), std::move(clear_index)); - index->aliases.push_back("ALTER INDEX"); - - auto add_constraint = std::make_unique("ADD CONSTRAINT", next_flag++, TABLE); - add_constraint->aliases.push_back("ALTER ADD CONSTRAINT"); - auto drop_constraint = std::make_unique("DROP CONSTRAINT", next_flag++, TABLE); - 
drop_constraint->aliases.push_back("ALTER DROP CONSTRAINT"); - auto alter_constraint = std::make_unique("CONSTRAINT", std::move(add_constraint), std::move(drop_constraint)); - alter_constraint->aliases.push_back("ALTER CONSTRAINT"); - - auto modify_ttl = std::make_unique("MODIFY TTL", next_flag++, TABLE); - modify_ttl->aliases.push_back("ALTER MODIFY TTL"); - auto materialize_ttl = std::make_unique("MATERIALIZE TTL", next_flag++, TABLE); - materialize_ttl->aliases.push_back("ALTER MATERIALIZE TTL"); - - auto modify_setting = std::make_unique("MODIFY SETTING", next_flag++, TABLE); - modify_setting->aliases.push_back("ALTER MODIFY SETTING"); - - auto move_partition = std::make_unique("MOVE PARTITION", next_flag++, TABLE); - ext::push_back(move_partition->aliases, "ALTER MOVE PARTITION", "MOVE PART", "ALTER MOVE PART"); - auto fetch_partition = std::make_unique("FETCH PARTITION", next_flag++, TABLE); - ext::push_back(fetch_partition->aliases, "ALTER FETCH PARTITION"); - auto freeze_partition = std::make_unique("FREEZE PARTITION", next_flag++, TABLE); - ext::push_back(freeze_partition->aliases, "ALTER FREEZE PARTITION"); - - auto alter_table = std::make_unique("ALTER TABLE", std::move(update), std::move(delet), std::move(alter_column), std::move(index), std::move(alter_constraint), std::move(modify_ttl), std::move(materialize_ttl), std::move(modify_setting), std::move(move_partition), std::move(fetch_partition), std::move(freeze_partition)); - - auto refresh_view = std::make_unique("REFRESH VIEW", next_flag++, VIEW); - ext::push_back(refresh_view->aliases, "ALTER LIVE VIEW REFRESH"); - auto modify_view_query = std::make_unique("MODIFY VIEW QUERY", next_flag++, VIEW); - auto alter_view = std::make_unique("ALTER VIEW", std::move(refresh_view), std::move(modify_view_query)); - - auto alter = std::make_unique("ALTER", std::move(alter_table), std::move(alter_view)); - ext::push_back(all, std::move(alter)); - - auto create_database = std::make_unique("CREATE DATABASE", next_flag++, DATABASE); - auto create_table = std::make_unique("CREATE TABLE", next_flag++, TABLE); - auto create_view = std::make_unique("CREATE VIEW", next_flag++, VIEW); - auto create_dictionary = std::make_unique("CREATE DICTIONARY", next_flag++, DICTIONARY); - auto create = std::make_unique("CREATE", std::move(create_database), std::move(create_table), std::move(create_view), std::move(create_dictionary)); - ext::push_back(all, std::move(create)); - - auto create_temporary_table = std::make_unique("CREATE TEMPORARY TABLE", next_flag++, GLOBAL); - ext::push_back(all, std::move(create_temporary_table)); - - auto drop_database = std::make_unique("DROP DATABASE", next_flag++, DATABASE); - auto drop_table = std::make_unique("DROP TABLE", next_flag++, TABLE); - auto drop_view = std::make_unique("DROP VIEW", next_flag++, VIEW); - auto drop_dictionary = std::make_unique("DROP DICTIONARY", next_flag++, DICTIONARY); - auto drop = std::make_unique("DROP", std::move(drop_database), std::move(drop_table), std::move(drop_view), std::move(drop_dictionary)); - ext::push_back(all, std::move(drop)); - - auto truncate_table = std::make_unique("TRUNCATE TABLE", next_flag++, TABLE); - auto truncate_view = std::make_unique("TRUNCATE VIEW", next_flag++, VIEW); - auto truncate = std::make_unique("TRUNCATE", std::move(truncate_table), std::move(truncate_view)); - ext::push_back(all, std::move(truncate)); - - auto optimize = std::make_unique("OPTIMIZE", next_flag++, TABLE); - optimize->aliases.push_back("OPTIMIZE TABLE"); - ext::push_back(all, 
std::move(optimize)); - - auto kill_query = std::make_unique("KILL QUERY", next_flag++, GLOBAL); - ext::push_back(all, std::move(kill_query)); - - auto create_user = std::make_unique("CREATE USER", next_flag++, GLOBAL); - auto alter_user = std::make_unique("ALTER USER", next_flag++, GLOBAL); - auto drop_user = std::make_unique("DROP USER", next_flag++, GLOBAL); - auto create_role = std::make_unique("CREATE ROLE", next_flag++, GLOBAL); - auto alter_role = std::make_unique("ALTER ROLE", next_flag++, GLOBAL); - auto drop_role = std::make_unique("DROP ROLE", next_flag++, GLOBAL); - auto create_policy = std::make_unique("CREATE POLICY", next_flag++, GLOBAL); - auto alter_policy = std::make_unique("ALTER POLICY", next_flag++, GLOBAL); - auto drop_policy = std::make_unique("DROP POLICY", next_flag++, GLOBAL); - auto create_quota = std::make_unique("CREATE QUOTA", next_flag++, GLOBAL); - auto alter_quota = std::make_unique("ALTER QUOTA", next_flag++, GLOBAL); - auto drop_quota = std::make_unique("DROP QUOTA", next_flag++, GLOBAL); - auto create_profile = std::make_unique("CREATE SETTINGS PROFILE", next_flag++, GLOBAL); - ext::push_back(create_profile->aliases, "CREATE PROFILE"); - auto alter_profile = std::make_unique("ALTER SETTINGS PROFILE", next_flag++, GLOBAL); - ext::push_back(alter_profile->aliases, "ALTER PROFILE"); - auto drop_profile = std::make_unique("DROP SETTINGS PROFILE", next_flag++, GLOBAL); - ext::push_back(drop_profile->aliases, "DROP PROFILE"); - auto role_admin = std::make_unique("ROLE ADMIN", next_flag++, GLOBAL); - ext::push_back(all, std::move(create_user), std::move(alter_user), std::move(drop_user), std::move(create_role), std::move(alter_role), std::move(drop_role), std::move(create_policy), std::move(alter_policy), std::move(drop_policy), std::move(create_quota), std::move(alter_quota), std::move(drop_quota), std::move(create_profile), std::move(alter_profile), std::move(drop_profile), std::move(role_admin)); - - auto shutdown = std::make_unique("SHUTDOWN", next_flag++, GLOBAL); - ext::push_back(shutdown->aliases, "SYSTEM SHUTDOWN", "SYSTEM KILL"); - auto drop_cache = std::make_unique("DROP CACHE", next_flag++, GLOBAL); - ext::push_back(drop_cache->aliases, "SYSTEM DROP CACHE", "DROP DNS CACHE", "SYSTEM DROP DNS CACHE", "DROP MARK CACHE", "SYSTEM DROP MARK CACHE", "DROP UNCOMPRESSED CACHE", "SYSTEM DROP UNCOMPRESSED CACHE", "DROP COMPILED EXPRESSION CACHE", "SYSTEM DROP COMPILED EXPRESSION CACHE"); - auto reload_config = std::make_unique("RELOAD CONFIG", next_flag++, GLOBAL); - ext::push_back(reload_config->aliases, "SYSTEM RELOAD CONFIG"); - auto reload_dictionary = std::make_unique("RELOAD DICTIONARY", next_flag++, GLOBAL); - ext::push_back(reload_dictionary->aliases, "SYSTEM RELOAD DICTIONARY", "RELOAD DICTIONARIES", "SYSTEM RELOAD DICTIONARIES", "RELOAD EMBEDDED DICTIONARIES", "SYSTEM RELOAD EMBEDDED DICTIONARIES"); - auto stop_merges = std::make_unique("STOP MERGES", next_flag++, TABLE); - ext::push_back(stop_merges->aliases, "SYSTEM STOP MERGES", "START MERGES", "SYSTEM START MERGES"); - auto stop_ttl_merges = std::make_unique("STOP TTL MERGES", next_flag++, TABLE); - ext::push_back(stop_ttl_merges->aliases, "SYSTEM STOP TTL MERGES", "START TTL MERGES", "SYSTEM START TTL MERGES"); - auto stop_fetches = std::make_unique("STOP FETCHES", next_flag++, TABLE); - ext::push_back(stop_fetches->aliases, "SYSTEM STOP FETCHES", "START FETCHES", "SYSTEM START FETCHES"); - auto stop_moves = std::make_unique("STOP MOVES", next_flag++, TABLE); - 
ext::push_back(stop_moves->aliases, "SYSTEM STOP MOVES", "START MOVES", "SYSTEM START MOVES"); - auto stop_distributed_sends = std::make_unique("STOP DISTRIBUTED SENDS", next_flag++, TABLE); - ext::push_back(stop_distributed_sends->aliases, "SYSTEM STOP DISTRIBUTED SENDS", "START DISTRIBUTED SENDS", "SYSTEM START DISTRIBUTED SENDS"); - auto stop_replicated_sends = std::make_unique("STOP REPLICATED SENDS", next_flag++, TABLE); - ext::push_back(stop_replicated_sends->aliases, "SYSTEM STOP REPLICATED SENDS", "START REPLICATED SENDS", "SYSTEM START REPLICATED SENDS"); - auto stop_replication_queues = std::make_unique("STOP REPLICATION QUEUES", next_flag++, TABLE); - ext::push_back(stop_replication_queues->aliases, "SYSTEM STOP REPLICATION QUEUES", "START REPLICATION QUEUES", "SYSTEM START REPLICATION QUEUES"); - auto sync_replica = std::make_unique("SYNC REPLICA", next_flag++, TABLE); - ext::push_back(sync_replica->aliases, "SYSTEM SYNC REPLICA"); - auto restart_replica = std::make_unique("RESTART REPLICA", next_flag++, TABLE); - ext::push_back(restart_replica->aliases, "SYSTEM RESTART REPLICA"); - auto flush_distributed = std::make_unique("FLUSH DISTRIBUTED", next_flag++, TABLE); - ext::push_back(flush_distributed->aliases, "SYSTEM FLUSH DISTRIBUTED"); - auto flush_logs = std::make_unique("FLUSH LOGS", next_flag++, GLOBAL); - ext::push_back(flush_logs->aliases, "SYSTEM FLUSH LOGS"); - auto system = std::make_unique("SYSTEM", std::move(shutdown), std::move(drop_cache), std::move(reload_config), std::move(reload_dictionary), std::move(stop_merges), std::move(stop_ttl_merges), std::move(stop_fetches), std::move(stop_moves), std::move(stop_distributed_sends), std::move(stop_replicated_sends), std::move(stop_replication_queues), std::move(sync_replica), std::move(restart_replica), std::move(flush_distributed), std::move(flush_logs)); - ext::push_back(all, std::move(system)); - - auto dict_get = std::make_unique("dictGet()", next_flag++, DICTIONARY); - dict_get->aliases.push_back("dictHas()"); - dict_get->aliases.push_back("dictGetHierarchy()"); - dict_get->aliases.push_back("dictIsIn()"); - ext::push_back(all, std::move(dict_get)); - - auto address_to_line = std::make_unique("addressToLine()", next_flag++, GLOBAL); - auto address_to_symbol = std::make_unique("addressToSymbol()", next_flag++, GLOBAL); - auto demangle = std::make_unique("demangle()", next_flag++, GLOBAL); - auto introspection = std::make_unique("INTROSPECTION", std::move(address_to_line), std::move(address_to_symbol), std::move(demangle)); - ext::push_back(introspection->aliases, "INTROSPECTION FUNCTIONS"); - ext::push_back(all, std::move(introspection)); - - auto file = std::make_unique("file()", next_flag++, GLOBAL); - auto url = std::make_unique("url()", next_flag++, GLOBAL); - auto input = std::make_unique("input()", next_flag++, GLOBAL); - auto values = std::make_unique("values()", next_flag++, GLOBAL); - auto numbers = std::make_unique("numbers()", next_flag++, GLOBAL); - auto zeros = std::make_unique("zeros()", next_flag++, GLOBAL); - auto merge = std::make_unique("merge()", next_flag++, DATABASE); - auto remote = std::make_unique("remote()", next_flag++, GLOBAL); - ext::push_back(remote->aliases, "remoteSecure()", "cluster()"); - auto mysql = std::make_unique("mysql()", next_flag++, GLOBAL); - auto odbc = std::make_unique("odbc()", next_flag++, GLOBAL); - auto jdbc = std::make_unique("jdbc()", next_flag++, GLOBAL); - auto hdfs = std::make_unique("hdfs()", next_flag++, GLOBAL); - auto s3 = std::make_unique("s3()", 
next_flag++, GLOBAL); - auto table_functions = std::make_unique("TABLE FUNCTIONS", std::move(file), std::move(url), std::move(input), std::move(values), std::move(numbers), std::move(zeros), std::move(merge), std::move(remote), std::move(mysql), std::move(odbc), std::move(jdbc), std::move(hdfs), std::move(s3)); - ext::push_back(all, std::move(table_functions)); - - auto node_all = std::make_unique("ALL", std::move(all)); - node_all->aliases.push_back("ALL PRIVILEGES"); - return node_all; - } - - void makeKeywordToFlagsMap(Node * start_node = nullptr) - { - if (!start_node) - { - start_node = flags_to_keyword_tree.get(); - keyword_to_flags_map["USAGE"] = {}; - keyword_to_flags_map["NONE"] = {}; - keyword_to_flags_map["NO PRIVILEGES"] = {}; - } - start_node->aliases.emplace_back(start_node->keyword); - for (auto & alias : start_node->aliases) - { - boost::to_upper(alias); - keyword_to_flags_map[alias] = start_node->flags; - } - for (auto & child : start_node->children) - makeKeywordToFlagsMap(child.get()); - } - - void makeAccessTypeToFlagsMapping() - { - access_type_to_flags_mapping.resize(MAX_ACCESS_TYPE); - for (auto access_type : ext::range_with_static_cast(0, MAX_ACCESS_TYPE)) - { - auto str = toKeyword(access_type); - auto it = keyword_to_flags_map.find(str); - if (it == keyword_to_flags_map.end()) - { - String uppercased{str}; - boost::to_upper(uppercased); - it = keyword_to_flags_map.find(uppercased); - } - access_type_to_flags_mapping[static_cast(access_type)] = it->second; - } - } - - void collectAllFlags(const Node * start_node = nullptr) - { - if (!start_node) - { - start_node = flags_to_keyword_tree.get(); - all_flags = start_node->flags; - } - if (start_node->target != UNKNOWN_TARGET) - all_flags_for_target[start_node->target] |= start_node->flags; - for (const auto & child : start_node->children) - collectAllFlags(child.get()); - } - - Impl() - { - flags_to_keyword_tree = makeFlagsToKeywordTree(); - makeKeywordToFlagsMap(); - makeAccessTypeToFlagsMapping(); - collectAllFlags(); - } - - std::unique_ptr flags_to_keyword_tree; - std::unordered_map keyword_to_flags_map; - std::vector access_type_to_flags_mapping; - Flags all_flags; - Flags all_flags_for_target[NUM_TARGETS]; -}; - - -inline AccessFlags::AccessFlags(AccessType type) : flags(Impl<>::instance().accessTypeToFlags(type)) {} -inline AccessFlags::AccessFlags(const std::string_view & keyword) : flags(Impl<>::instance().keywordToFlags(keyword)) {} -inline AccessFlags::AccessFlags(const std::vector & keywords) : flags(Impl<>::instance().keywordsToFlags(keywords)) {} -inline AccessFlags::AccessFlags(const Strings & keywords) : flags(Impl<>::instance().keywordsToFlags(keywords)) {} -inline String AccessFlags::toString() const { return Impl<>::instance().flagsToString(flags); } -inline std::vector AccessFlags::toKeywords() const { return Impl<>::instance().flagsToKeywords(flags); } -inline AccessFlags AccessFlags::allFlags() { return Impl<>::instance().getAllFlags(); } -inline AccessFlags AccessFlags::allGlobalFlags() { return Impl<>::instance().getGlobalFlags(); } -inline AccessFlags AccessFlags::allDatabaseFlags() { return Impl<>::instance().getDatabaseFlags(); } -inline AccessFlags AccessFlags::allTableFlags() { return Impl<>::instance().getTableFlags(); } -inline AccessFlags AccessFlags::allColumnFlags() { return Impl<>::instance().getColumnFlags(); } -inline AccessFlags AccessFlags::allDictionaryFlags() { return Impl<>::instance().getDictionaryFlags(); } - -inline AccessFlags operator |(AccessType left, AccessType right) { 
return AccessFlags(left) | right; } -inline AccessFlags operator &(AccessType left, AccessType right) { return AccessFlags(left) & right; } -inline AccessFlags operator -(AccessType left, AccessType right) { return AccessFlags(left) - right; } -inline AccessFlags operator ~(AccessType x) { return ~AccessFlags(x); } - -} diff --git a/dbms/src/Access/AccessType.h b/dbms/src/Access/AccessType.h deleted file mode 100644 index 27892076d59..00000000000 --- a/dbms/src/Access/AccessType.h +++ /dev/null @@ -1,318 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ -/// Represents an access type which can be granted on databases, tables, columns, etc. -enum class AccessType -{ - NONE, /// no access - ALL, /// full access - - SHOW_DATABASES, /// allows to execute SHOW DATABASES, SHOW CREATE DATABASE, USE - SHOW_TABLES, /// allows to execute SHOW TABLES, EXISTS , CHECK
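
The inline operators deleted just above let a plain `AccessType` combine directly into an `AccessFlags` bitset, so call sites can write `AccessType::SELECT | AccessType::INSERT`. A minimal sketch of that idiom, using a hypothetical, much-reduced enum (the real list is the ~100-entry `AccessType` being deleted here):

```cpp
#include <bitset>
#include <cstddef>

// Illustrative only: a tiny stand-in for the real AccessType list.
enum class AccessType { NONE, SELECT, INSERT, ALTER, MAX };

class AccessFlags
{
public:
    AccessFlags() = default;
    AccessFlags(AccessType type) { bits.set(static_cast<size_t>(type)); }  // one bit per access type

    friend AccessFlags operator|(AccessFlags a, AccessFlags b) { a.bits |= b.bits; return a; }
    friend AccessFlags operator&(AccessFlags a, AccessFlags b) { a.bits &= b.bits; return a; }
    friend AccessFlags operator-(AccessFlags a, AccessFlags b) { a.bits &= ~b.bits; return a; }  // set difference
    friend AccessFlags operator~(AccessFlags a) { a.bits.flip(); return a; }

    bool contains(AccessFlags other) const { return (bits & other.bits) == other.bits; }

private:
    std::bitset<static_cast<size_t>(AccessType::MAX)> bits;
};

/// Mirrors the deleted glue: combining two enum values yields a flag set.
inline AccessFlags operator|(AccessType left, AccessType right) { return AccessFlags(left) | AccessFlags(right); }

int main()
{
    AccessFlags granted = AccessType::SELECT | AccessType::INSERT;
    return granted.contains(AccessFlags(AccessType::SELECT)) ? 0 : 1;  // granted covers SELECT
}
```
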
- SHOW_COLUMNS, /// allows to execute SHOW CREATE TABLE, DESCRIBE - SHOW_DICTIONARIES, /// allows to execute SHOW DICTIONARIES, SHOW CREATE DICTIONARY, EXISTS - SHOW, /// allows to execute SHOW, USE, EXISTS, CHECK, DESCRIBE - - SELECT, - INSERT, - UPDATE, /// allows to execute ALTER UPDATE - DELETE, /// allows to execute ALTER DELETE - - ADD_COLUMN, - DROP_COLUMN, - MODIFY_COLUMN, - COMMENT_COLUMN, - CLEAR_COLUMN, - ALTER_COLUMN, /// allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN - - ALTER_ORDER_BY, - ADD_INDEX, - DROP_INDEX, - MATERIALIZE_INDEX, - CLEAR_INDEX, - INDEX, /// allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX - - ADD_CONSTRAINT, - DROP_CONSTRAINT, - ALTER_CONSTRAINT, /// allows to execute ALTER {ADD|DROP} CONSTRAINT - - MODIFY_TTL, /// allows to execute ALTER MODIFY TTL - MATERIALIZE_TTL, /// allows to execute ALTER MATERIALIZE TTL - MODIFY_SETTING, /// allows to execute ALTER MODIFY SETTING - - MOVE_PARTITION, - FETCH_PARTITION, - FREEZE_PARTITION, - - ALTER_TABLE, /// allows to execute ALTER TABLE ... - - REFRESH_VIEW, /// allows to execute ALTER LIVE VIEW REFRESH - MODIFY_VIEW_QUERY, /// allows to execute ALTER TABLE MODIFY QUERY - ALTER_VIEW, /// allows to execute ALTER LIVE VIEW REFRESH, ALTER TABLE MODIFY QUERY - - ALTER, /// allows to execute ALTER {TABLE|LIVE VIEW} ... - - CREATE_DATABASE, /// allows to execute {CREATE|ATTACH} DATABASE - CREATE_TABLE, /// allows to execute {CREATE|ATTACH} TABLE - CREATE_VIEW, /// allows to execute {CREATE|ATTACH} VIEW - CREATE_DICTIONARY, /// allows to execute {CREATE|ATTACH} DICTIONARY - CREATE_TEMPORARY_TABLE, /// allows to create and manipulate temporary tables and views. - CREATE, /// allows to execute {CREATE|ATTACH} [TEMPORARY] {DATABASE|TABLE|VIEW|DICTIONARY} - - DROP_DATABASE, - DROP_TABLE, - DROP_VIEW, - DROP_DICTIONARY, - DROP, /// allows to execute DROP {DATABASE|TABLE|VIEW|DICTIONARY} - - TRUNCATE_TABLE, - TRUNCATE_VIEW, - TRUNCATE, /// allows to execute TRUNCATE {TABLE|VIEW} - - OPTIMIZE, /// allows to execute OPTIMIZE TABLE - - KILL_QUERY, /// allows to kill a query started by another user (anyone can kill his own queries) - - CREATE_USER, - ALTER_USER, - DROP_USER, - CREATE_ROLE, - ALTER_ROLE, - DROP_ROLE, - CREATE_POLICY, - ALTER_POLICY, - DROP_POLICY, - CREATE_QUOTA, - ALTER_QUOTA, - DROP_QUOTA, - CREATE_SETTINGS_PROFILE, - ALTER_SETTINGS_PROFILE, - DROP_SETTINGS_PROFILE, - - ROLE_ADMIN, /// allows to grant and revoke any roles. 
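
Note how the enum groups fine-grained entries under umbrella entries: `ALTER_COLUMN` covers `ADD_COLUMN`, `DROP_COLUMN` and friends, `ALTER` covers everything ALTER-related, `ALL` covers everything. The deleted `Impl` realizes this with a keyword tree whose per-node flags are the union of the whole subtree, so "is this privilege granted" reduces to a bitwise containment test. A reduced sketch of that tree walk, with illustrative names:

```cpp
#include <bitset>
#include <cstddef>
#include <memory>
#include <vector>

// Illustrative only: a privilege tree where granting a node grants its whole
// subtree, mirroring how ALTER covers ALTER_TABLE, which covers ADD_COLUMN etc.
using Flags = std::bitset<64>;

struct Node
{
    size_t flag_index;                            // the leaf bit owned by this keyword
    std::vector<std::unique_ptr<Node>> children;
    Flags flags;                                  // this bit plus all descendant bits
};

Flags collectFlags(Node & node)
{
    node.flags.set(node.flag_index);
    for (auto & child : node.children)
        node.flags |= collectFlags(*child);       // a parent keyword implies its children
    return node.flags;
}
```
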
- - SHUTDOWN, - DROP_CACHE, - RELOAD_CONFIG, - RELOAD_DICTIONARY, - STOP_MERGES, - STOP_TTL_MERGES, - STOP_FETCHES, - STOP_MOVES, - STOP_DISTRIBUTED_SENDS, - STOP_REPLICATED_SENDS, - STOP_REPLICATION_QUEUES, - SYNC_REPLICA, - RESTART_REPLICA, - FLUSH_DISTRIBUTED, - FLUSH_LOGS, - SYSTEM, /// allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} - - dictGet, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictHas, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictGetHierarchy, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictIsIn, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - - addressToLine, /// allows to execute function addressToLine - addressToSymbol, /// allows to execute function addressToSymbol - demangle, /// allows to execute function demangle - INTROSPECTION, /// allows to execute functions addressToLine, addressToSymbol, demangle - - file, - url, - input, - values, - numbers, - zeros, - merge, - remote, - mysql, - odbc, - jdbc, - hdfs, - s3, - TABLE_FUNCTIONS, /// allows to execute any table function -}; - -constexpr size_t MAX_ACCESS_TYPE = static_cast(AccessType::TABLE_FUNCTIONS) + 1; - -std::string_view toString(AccessType type); - - -namespace impl -{ - template - class AccessTypeToKeywordConverter - { - public: - static const AccessTypeToKeywordConverter & instance() - { - static const AccessTypeToKeywordConverter res; - return res; - } - - std::string_view convert(AccessType type) const - { - return access_type_to_keyword_mapping[static_cast(type)]; - } - - private: - void addToMapping(AccessType type, const std::string_view & str) - { - String str2{str}; - boost::replace_all(str2, "_", " "); - if (islower(str2[0])) - str2 += "()"; - access_type_to_keyword_mapping[static_cast(type)] = str2; - } - - AccessTypeToKeywordConverter() - { -#define ACCESS_TYPE_TO_KEYWORD_CASE(type) \ - addToMapping(AccessType::type, #type) - - ACCESS_TYPE_TO_KEYWORD_CASE(NONE); - ACCESS_TYPE_TO_KEYWORD_CASE(ALL); - - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_DATABASES); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_TABLES); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_COLUMNS); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_DICTIONARIES); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW); - - ACCESS_TYPE_TO_KEYWORD_CASE(SELECT); - ACCESS_TYPE_TO_KEYWORD_CASE(INSERT); - ACCESS_TYPE_TO_KEYWORD_CASE(UPDATE); - ACCESS_TYPE_TO_KEYWORD_CASE(DELETE); - - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(COMMENT_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_COLUMN); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_ORDER_BY); - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(MATERIALIZE_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(INDEX); - - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_CONSTRAINT); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_CONSTRAINT); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_CONSTRAINT); - - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_TTL); - ACCESS_TYPE_TO_KEYWORD_CASE(MATERIALIZE_TTL); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_SETTING); - - ACCESS_TYPE_TO_KEYWORD_CASE(MOVE_PARTITION); - ACCESS_TYPE_TO_KEYWORD_CASE(FETCH_PARTITION); - ACCESS_TYPE_TO_KEYWORD_CASE(FREEZE_PARTITION); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_TABLE); - - ACCESS_TYPE_TO_KEYWORD_CASE(REFRESH_VIEW); - 
ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_VIEW_QUERY); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_VIEW); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER); - - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_DATABASE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_TEMPORARY_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE); - - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_DATABASE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP); - - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE); - - ACCESS_TYPE_TO_KEYWORD_CASE(OPTIMIZE); - - ACCESS_TYPE_TO_KEYWORD_CASE(KILL_QUERY); - - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_SETTINGS_PROFILE); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_SETTINGS_PROFILE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_SETTINGS_PROFILE); - ACCESS_TYPE_TO_KEYWORD_CASE(ROLE_ADMIN); - - ACCESS_TYPE_TO_KEYWORD_CASE(SHUTDOWN); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_CACHE); - ACCESS_TYPE_TO_KEYWORD_CASE(RELOAD_CONFIG); - ACCESS_TYPE_TO_KEYWORD_CASE(RELOAD_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_MERGES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_TTL_MERGES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_FETCHES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_MOVES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_DISTRIBUTED_SENDS); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_REPLICATED_SENDS); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_REPLICATION_QUEUES); - ACCESS_TYPE_TO_KEYWORD_CASE(SYNC_REPLICA); - ACCESS_TYPE_TO_KEYWORD_CASE(RESTART_REPLICA); - ACCESS_TYPE_TO_KEYWORD_CASE(FLUSH_DISTRIBUTED); - ACCESS_TYPE_TO_KEYWORD_CASE(FLUSH_LOGS); - ACCESS_TYPE_TO_KEYWORD_CASE(SYSTEM); - - ACCESS_TYPE_TO_KEYWORD_CASE(dictGet); - ACCESS_TYPE_TO_KEYWORD_CASE(dictHas); - ACCESS_TYPE_TO_KEYWORD_CASE(dictGetHierarchy); - ACCESS_TYPE_TO_KEYWORD_CASE(dictIsIn); - - ACCESS_TYPE_TO_KEYWORD_CASE(addressToLine); - ACCESS_TYPE_TO_KEYWORD_CASE(addressToSymbol); - ACCESS_TYPE_TO_KEYWORD_CASE(demangle); - ACCESS_TYPE_TO_KEYWORD_CASE(INTROSPECTION); - - ACCESS_TYPE_TO_KEYWORD_CASE(file); - ACCESS_TYPE_TO_KEYWORD_CASE(url); - ACCESS_TYPE_TO_KEYWORD_CASE(input); - ACCESS_TYPE_TO_KEYWORD_CASE(values); - ACCESS_TYPE_TO_KEYWORD_CASE(numbers); - ACCESS_TYPE_TO_KEYWORD_CASE(zeros); - ACCESS_TYPE_TO_KEYWORD_CASE(merge); - ACCESS_TYPE_TO_KEYWORD_CASE(remote); - ACCESS_TYPE_TO_KEYWORD_CASE(mysql); - ACCESS_TYPE_TO_KEYWORD_CASE(odbc); - ACCESS_TYPE_TO_KEYWORD_CASE(jdbc); - ACCESS_TYPE_TO_KEYWORD_CASE(hdfs); - ACCESS_TYPE_TO_KEYWORD_CASE(s3); - ACCESS_TYPE_TO_KEYWORD_CASE(TABLE_FUNCTIONS); - -#undef ACCESS_TYPE_TO_KEYWORD_CASE - } - - std::array access_type_to_keyword_mapping; - }; -} - -inline std::string_view toKeyword(AccessType type) { return impl::AccessTypeToKeywordConverter<>::instance().convert(type); } - -} diff --git a/dbms/src/Access/EnabledRowPolicies.cpp 
b/dbms/src/Access/EnabledRowPolicies.cpp deleted file mode 100644 index a525fb65606..00000000000 --- a/dbms/src/Access/EnabledRowPolicies.cpp +++ /dev/null @@ -1,75 +0,0 @@ -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -size_t EnabledRowPolicies::Hash::operator()(const DatabaseAndTableNameRef & database_and_table_name) const -{ - return std::hash{}(database_and_table_name.first) - std::hash{}(database_and_table_name.second); -} - - -EnabledRowPolicies::EnabledRowPolicies(const Params & params_) - : params(params_) -{ -} - -EnabledRowPolicies::~EnabledRowPolicies() = default; - - -ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType type) const -{ - /// We don't lock `mutex` here. - auto loaded = map_of_mixed_conditions.load(); - auto it = loaded->find({database, table_name}); - if (it == loaded->end()) - return {}; - return it->second.mixed_conditions[type]; -} - - -ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType type, const ASTPtr & extra_condition) const -{ - ASTPtr main_condition = getCondition(database, table_name, type); - if (!main_condition) - return extra_condition; - if (!extra_condition) - return main_condition; - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "and"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children.push_back(main_condition); - exp_list->children.push_back(extra_condition); - return function; -} - - -std::vector EnabledRowPolicies::getCurrentPolicyIDs() const -{ - /// We don't lock `mutex` here. - auto loaded = map_of_mixed_conditions.load(); - std::vector policy_ids; - for (const auto & mixed_conditions : *loaded | boost::adaptors::map_values) - boost::range::copy(mixed_conditions.policy_ids, std::back_inserter(policy_ids)); - return policy_ids; -} - - -std::vector EnabledRowPolicies::getCurrentPolicyIDs(const String & database, const String & table_name) const -{ - /// We don't lock `mutex` here. - auto loaded = map_of_mixed_conditions.load(); - auto it = loaded->find({database, table_name}); - if (it == loaded->end()) - return {}; - return it->second.policy_ids; -} - -} diff --git a/dbms/src/Access/RowPolicyCache.cpp b/dbms/src/Access/RowPolicyCache.cpp deleted file mode 100644 index 9509923adbf..00000000000 --- a/dbms/src/Access/RowPolicyCache.cpp +++ /dev/null @@ -1,311 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace -{ - bool tryGetLiteralBool(const IAST & ast, bool & value) - { - try - { - if (const ASTLiteral * literal = ast.as()) - { - value = !literal->value.isNull() && applyVisitor(FieldVisitorConvertToNumber(), literal->value); - return true; - } - return false; - } - catch (...) 
- { - return false; - } - } - - ASTPtr applyFunctionAND(ASTs arguments) - { - bool const_arguments = true; - boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool - { - bool b; - if (!tryGetLiteralBool(*argument, b)) - return false; - const_arguments &= b; - return true; - }); - - if (!const_arguments) - return std::make_shared(Field{UInt8(0)}); - if (arguments.empty()) - return std::make_shared(Field{UInt8(1)}); - if (arguments.size() == 1) - return arguments[0]; - - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "and"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children = std::move(arguments); - return function; - } - - - ASTPtr applyFunctionOR(ASTs arguments) - { - bool const_arguments = false; - boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool - { - bool b; - if (!tryGetLiteralBool(*argument, b)) - return false; - const_arguments |= b; - return true; - }); - - if (const_arguments) - return std::make_shared(Field{UInt8(1)}); - if (arguments.empty()) - return std::make_shared(Field{UInt8(0)}); - if (arguments.size() == 1) - return arguments[0]; - - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "or"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children = std::move(arguments); - return function; - } - - - using ConditionType = RowPolicy::ConditionType; - constexpr size_t MAX_CONDITION_TYPE = RowPolicy::MAX_CONDITION_TYPE; - - - /// Accumulates conditions from multiple row policies and joins them using the AND logical operation. - class ConditionsMixer - { - public: - void add(const ASTPtr & condition, bool is_restrictive) - { - if (is_restrictive) - restrictions.push_back(condition); - else - permissions.push_back(condition); - } - - ASTPtr getResult() && - { - /// Process permissive conditions. - restrictions.push_back(applyFunctionOR(std::move(permissions))); - - /// Process restrictive conditions. - return applyFunctionAND(std::move(restrictions)); - } - - private: - ASTs permissions; - ASTs restrictions; - }; -} - - -void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_) -{ - policy = policy_; - roles = &policy->to_roles; - - for (auto type : ext::range_with_static_cast(0, MAX_CONDITION_TYPE)) - { - parsed_conditions[type] = nullptr; - const String & condition = policy->conditions[type]; - if (condition.empty()) - continue; - - auto previous_range = std::pair(std::begin(policy->conditions), std::begin(policy->conditions) + type); - auto previous_it = std::find(previous_range.first, previous_range.second, condition); - if (previous_it != previous_range.second) - { - /// The condition is already parsed before. - parsed_conditions[type] = parsed_conditions[previous_it - previous_range.first]; - continue; - } - - /// Try to parse the condition. - try - { - ParserExpression parser; - parsed_conditions[type] = parseQuery(parser, condition, 0); - } - catch (...) 
- { - tryLogCurrentException( - &Poco::Logger::get("RowPolicy"), - String("Could not parse the condition ") + RowPolicy::conditionTypeToString(type) + " of row policy " - + backQuote(policy->getFullName())); - } - } -} - - -RowPolicyCache::RowPolicyCache(const AccessControlManager & access_control_manager_) - : access_control_manager(access_control_manager_) -{ -} - -RowPolicyCache::~RowPolicyCache() = default; - - -std::shared_ptr RowPolicyCache::getEnabledRowPolicies(const UUID & user_id, const std::vector & enabled_roles) -{ - std::lock_guard lock{mutex}; - ensureAllRowPoliciesRead(); - - EnabledRowPolicies::Params params; - params.user_id = user_id; - params.enabled_roles = enabled_roles; - auto it = enabled_row_policies.find(params); - if (it != enabled_row_policies.end()) - { - auto from_cache = it->second.lock(); - if (from_cache) - return from_cache; - enabled_row_policies.erase(it); - } - - auto res = std::shared_ptr(new EnabledRowPolicies(params)); - enabled_row_policies.emplace(std::move(params), res); - mixConditionsFor(*res); - return res; -} - - -void RowPolicyCache::ensureAllRowPoliciesRead() -{ - /// `mutex` is already locked. - if (all_policies_read) - return; - all_policies_read = true; - - subscription = access_control_manager.subscribeForChanges( - [&](const UUID & id, const AccessEntityPtr & entity) - { - if (entity) - rowPolicyAddedOrChanged(id, typeid_cast(entity)); - else - rowPolicyRemoved(id); - }); - - for (const UUID & id : access_control_manager.findAll()) - { - auto quota = access_control_manager.tryRead(id); - if (quota) - all_policies.emplace(id, PolicyInfo(quota)); - } -} - - -void RowPolicyCache::rowPolicyAddedOrChanged(const UUID & policy_id, const RowPolicyPtr & new_policy) -{ - std::lock_guard lock{mutex}; - auto it = all_policies.find(policy_id); - if (it == all_policies.end()) - { - it = all_policies.emplace(policy_id, PolicyInfo(new_policy)).first; - } - else - { - if (it->second.policy == new_policy) - return; - } - - auto & info = it->second; - info.setPolicy(new_policy); - mixConditions(); -} - - -void RowPolicyCache::rowPolicyRemoved(const UUID & policy_id) -{ - std::lock_guard lock{mutex}; - all_policies.erase(policy_id); - mixConditions(); -} - - -void RowPolicyCache::mixConditions() -{ - /// `mutex` is already locked. - std::erase_if( - enabled_row_policies, - [&](const std::pair> & pr) - { - auto elem = pr.second.lock(); - if (!elem) - return true; // remove from the `enabled_row_policies` map. - mixConditionsFor(*elem); - return false; // keep in the `enabled_row_policies` map. - }); -} - - -void RowPolicyCache::mixConditionsFor(EnabledRowPolicies & enabled) -{ - /// `mutex` is already locked. 
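
The mixing rule implemented by `ConditionsMixer` above: permissive policy conditions are OR-ed together, and that disjunction is then AND-ed with every restrictive condition, so a row must pass at least one permissive filter and all restrictive ones. A string-based sketch of the same rule (the real code builds `ASTFunction` nodes and constant-folds literal booleans via `applyFunctionAND`/`applyFunctionOR`):

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Same rule as ConditionsMixer::getResult(), but over SQL fragments instead of
// AST nodes: OR the permissive conditions, then AND in each restrictive one.
// With no permissive policies at all the disjunction collapses to 0, i.e. the
// restrictive policies have nothing to narrow and no rows are visible.
std::string mixConditions(const std::vector<std::string> & permissive,
                          const std::vector<std::string> & restrictive)
{
    std::string result = "0";
    for (size_t i = 0; i < permissive.size(); ++i)
        result = (i == 0) ? permissive[i] : "(" + result + " OR " + permissive[i] + ")";
    for (const auto & condition : restrictive)
        result = "(" + result + " AND " + condition + ")";
    return result;
}
```
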
- struct Mixers - { - ConditionsMixer mixers[MAX_CONDITION_TYPE]; - std::vector policy_ids; - }; - using MapOfMixedConditions = EnabledRowPolicies::MapOfMixedConditions; - using DatabaseAndTableName = EnabledRowPolicies::DatabaseAndTableName; - using DatabaseAndTableNameRef = EnabledRowPolicies::DatabaseAndTableNameRef; - using Hash = EnabledRowPolicies::Hash; - - std::unordered_map map_of_mixers; - - for (const auto & [policy_id, info] : all_policies) - { - const auto & policy = *info.policy; - auto & mixers = map_of_mixers[std::pair{policy.getDatabase(), policy.getTableName()}]; - if (info.roles->match(enabled.params.user_id, enabled.params.enabled_roles)) - { - mixers.policy_ids.push_back(policy_id); - for (auto type : ext::range(0, MAX_CONDITION_TYPE)) - if (info.parsed_conditions[type]) - mixers.mixers[type].add(info.parsed_conditions[type], policy.isRestrictive()); - } - } - - auto map_of_mixed_conditions = boost::make_shared(); - for (auto & [database_and_table_name, mixers] : map_of_mixers) - { - auto database_and_table_name_keeper = std::make_unique(); - database_and_table_name_keeper->first = database_and_table_name.first; - database_and_table_name_keeper->second = database_and_table_name.second; - auto & mixed_conditions = (*map_of_mixed_conditions)[DatabaseAndTableNameRef{database_and_table_name_keeper->first, - database_and_table_name_keeper->second}]; - mixed_conditions.database_and_table_name_keeper = std::move(database_and_table_name_keeper); - mixed_conditions.policy_ids = std::move(mixers.policy_ids); - for (auto type : ext::range(0, MAX_CONDITION_TYPE)) - mixed_conditions.mixed_conditions[type] = std::move(mixers.mixers[type]).getResult(); - } - - enabled.map_of_mixed_conditions.store(map_of_mixed_conditions); -} - -} diff --git a/dbms/src/CMakeLists.txt b/dbms/src/CMakeLists.txt deleted file mode 100644 index b54266f4693..00000000000 --- a/dbms/src/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -add_subdirectory (Access) -add_subdirectory (Columns) -add_subdirectory (Common) -add_subdirectory (Core) -add_subdirectory (DataStreams) -add_subdirectory (DataTypes) -add_subdirectory (Dictionaries) -add_subdirectory (Disks) -add_subdirectory (Storages) -add_subdirectory (Parsers) -add_subdirectory (IO) -add_subdirectory (Functions) -add_subdirectory (Interpreters) -add_subdirectory (AggregateFunctions) -add_subdirectory (Client) -add_subdirectory (TableFunctions) -add_subdirectory (Processors) -add_subdirectory (Formats) -add_subdirectory (Compression) diff --git a/dbms/src/Client/CMakeLists.txt b/dbms/src/Client/CMakeLists.txt deleted file mode 100644 index 9b9ec442a3c..00000000000 --- a/dbms/src/Client/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -# TODO: make separate lib datastream, block, ... -#include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) -#add_headers_and_sources(clickhouse_client .) 
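
One detail worth noting in the `mixConditionsFor` body above: the result map is keyed by a pair of `string_view`s (`DatabaseAndTableNameRef`) while the owning strings are moved into a "keeper" object stored inside the mapped value, so the key stays valid without copying the names on every lookup. A sketch of that idiom, with hypothetical names:

```cpp
#include <memory>
#include <string>
#include <string_view>
#include <unordered_map>
#include <utility>

// The map key is a cheap string_view pair; the characters it points at are
// owned by a "keeper" living inside the mapped value, so the views remain
// valid exactly as long as the entry exists. Illustrative only.
struct Hash
{
    size_t operator()(const std::pair<std::string_view, std::string_view> & key) const
    {
        return std::hash<std::string_view>{}(key.first) ^ (std::hash<std::string_view>{}(key.second) << 1);
    }
};

struct Entry
{
    std::unique_ptr<std::pair<std::string, std::string>> keeper;  // owns the key's characters
    int payload = 0;
};

using Map = std::unordered_map<std::pair<std::string_view, std::string_view>, Entry, Hash>;

void insert(Map & map, std::string database, std::string table)
{
    auto keeper = std::make_unique<std::pair<std::string, std::string>>(std::move(database), std::move(table));
    std::pair<std::string_view, std::string_view> key{keeper->first, keeper->second};
    map[key].keeper = std::move(keeper);  // the entry now owns what its own key points at
}
```
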
-#add_library(clickhouse_client ${clickhouse_client_headers} ${clickhouse_client_sources}) -#target_link_libraries (clickhouse_client clickhouse_common_io ${Poco_Net_LIBRARY}) -#target_include_directories (clickhouse_client PRIVATE ${DBMS_INCLUDE_DIR}) - -add_subdirectory(tests) diff --git a/dbms/src/Common/RWLock.cpp b/dbms/src/Common/RWLock.cpp deleted file mode 100644 index 5dfc1b55c63..00000000000 --- a/dbms/src/Common/RWLock.cpp +++ /dev/null @@ -1,299 +0,0 @@ -#include "RWLock.h" -#include -#include -#include -#include - - -namespace ProfileEvents -{ - extern const Event RWLockAcquiredReadLocks; - extern const Event RWLockAcquiredWriteLocks; - extern const Event RWLockReadersWaitMilliseconds; - extern const Event RWLockWritersWaitMilliseconds; -} - - -namespace CurrentMetrics -{ - extern const Metric RWLockWaitingReaders; - extern const Metric RWLockWaitingWriters; - extern const Metric RWLockActiveReaders; - extern const Metric RWLockActiveWriters; -} - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; - extern const int DEADLOCK_AVOIDED; -} - - -/** A single-use object that represents lock's ownership - * For the purpose of exception safety guarantees LockHolder is to be used in two steps: - * 1. Create an instance (allocating all the memory needed) - * 2. Associate the instance with the lock (attach to the lock and locking request group) - */ -class RWLockImpl::LockHolderImpl -{ - bool bound{false}; - Type lock_type; - String query_id; - CurrentMetrics::Increment active_client_increment; - RWLock parent; - GroupsContainer::iterator it_group; - -public: - LockHolderImpl(const LockHolderImpl & other) = delete; - LockHolderImpl& operator=(const LockHolderImpl & other) = delete; - - /// Implicit memory allocation for query_id is done here - LockHolderImpl(const String & query_id_, Type type) - : lock_type{type}, query_id{query_id_}, - active_client_increment{ - type == Type::Read ? CurrentMetrics::RWLockActiveReaders : CurrentMetrics::RWLockActiveWriters} - { - } - - ~LockHolderImpl(); - -private: - /// A separate method which binds the lock holder to the owned lock - /// N.B. It is very important that this method produces no allocations - bool bindWith(RWLock && parent_, GroupsContainer::iterator it_group_) noexcept - { - if (bound) - return false; - it_group = it_group_; - parent = std::move(parent_); - ++it_group->refererrs; - bound = true; - return true; - } - - friend class RWLockImpl; -}; - - -namespace -{ - /// Global information about all read locks that query has. It is needed to avoid some type of deadlocks. - - class QueryLockInfo - { - private: - mutable std::mutex mutex; - std::map queries; - - public: - void add(const String & query_id) - { - std::lock_guard lock(mutex); - - const auto res = queries.emplace(query_id, 1); // may throw - if (!res.second) - ++res.first->second; - } - - void remove(const String & query_id) noexcept - { - std::lock_guard lock(mutex); - - const auto query_it = queries.find(query_id); - if (query_it != queries.cend() && --query_it->second == 0) - queries.erase(query_it); - } - - void check(const String & query_id) const - { - std::lock_guard lock(mutex); - - if (queries.find(query_id) != queries.cend()) - throw Exception("Possible deadlock avoided. Client should retry.", ErrorCodes::DEADLOCK_AVOIDED); - } - }; - - QueryLockInfo all_read_locks; -} - - -/** To guarantee that we do not get any piece of our data corrupted: - * 1. Perform all actions that include allocations before changing lock's internal state - * 2. 
Roll back any changes that make the state inconsistent - * - * Note: "SM" in the commentaries below stands for STATE MODIFICATION - */ -RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id) -{ - const bool request_has_query_id = query_id != NO_QUERY; - - Stopwatch watch(CLOCK_MONOTONIC_COARSE); - CurrentMetrics::Increment waiting_client_increment((type == Read) ? CurrentMetrics::RWLockWaitingReaders - : CurrentMetrics::RWLockWaitingWriters); - auto finalize_metrics = [type, &watch] () - { - ProfileEvents::increment((type == Read) ? ProfileEvents::RWLockAcquiredReadLocks - : ProfileEvents::RWLockAcquiredWriteLocks); - ProfileEvents::increment((type == Read) ? ProfileEvents::RWLockReadersWaitMilliseconds - : ProfileEvents::RWLockWritersWaitMilliseconds, watch.elapsedMilliseconds()); - }; - - /// This object is placed above unique_lock, because it may lock in destructor. - auto lock_holder = std::make_shared(query_id, type); - - std::unique_lock lock(mutex); - - /// The FastPath: - /// Check if the same query_id already holds the required lock in which case we can proceed without waiting - if (request_has_query_id) - { - const auto it_query = owner_queries.find(query_id); - if (it_query != owner_queries.end()) - { - const auto current_owner_group = queue.begin(); - - /// XXX: it means we can't upgrade lock from read to write! - if (type == Write) - throw Exception( - "RWLockImpl::getLock(): Cannot acquire exclusive lock while RWLock is already locked", - ErrorCodes::LOGICAL_ERROR); - - if (current_owner_group->type == Write) - throw Exception( - "RWLockImpl::getLock(): RWLock is already locked in exclusive mode", - ErrorCodes::LOGICAL_ERROR); - - /// N.B. Type is Read here, query_id is not empty and it_query is a valid iterator - all_read_locks.add(query_id); /// SM1: may throw on insertion (nothing to roll back) - ++it_query->second; /// SM2: nothrow - lock_holder->bindWith(shared_from_this(), current_owner_group); /// SM3: nothrow - - finalize_metrics(); - return lock_holder; - } - } - - /** If the query already has any active read lock and tries to acquire another read lock - * but it is not in front of the queue and has to wait, deadlock is possible: - * - * Example (four queries, two RWLocks - 'a' and 'b'): - * - * --> time --> - * - * q1: ra rb - * q2: wa - * q3: rb ra - * q4: wb - * - * We will throw an exception instead. - */ - - if (type == Type::Write || queue.empty() || queue.back().type == Type::Write) - { - if (type == Type::Read && request_has_query_id && !queue.empty()) - all_read_locks.check(query_id); - - /// Create a new group of locking requests - queue.emplace_back(type); /// SM1: may throw (nothing to roll back) - } - else if (request_has_query_id && queue.size() > 1) - all_read_locks.check(query_id); - - GroupsContainer::iterator it_group = std::prev(queue.end()); - - /// We need to reference the associated group before waiting to guarantee - /// that this group does not get deleted prematurely - ++it_group->refererrs; - - /// Wait a notification until we will be the only in the group. 
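
The scheme deleted here is a FIFO read-write lock: requests queue up in groups (consecutive readers coalesce into one group, each writer gets a group of its own), and every request waits on its group's condition variable until the group reaches the head of the queue. A greatly reduced sketch of that fairness mechanism, without the query-id bookkeeping, deadlock avoidance, and exception-safety rollback of the real class:

```cpp
#include <condition_variable>
#include <cstddef>
#include <iterator>
#include <list>
#include <mutex>

// Illustration only: consecutive readers share the tail group, writers start a
// new one, and a group is granted once it reaches the head of the queue.
class FifoRWLock
{
public:
    enum Type { Read, Write };

    void lock(Type type)
    {
        std::unique_lock<std::mutex> lk(mutex);
        if (type == Write || queue.empty() || queue.back().type == Write)
            queue.emplace_back(type);              // start a new group
        auto it = std::prev(queue.end());
        ++it->members;
        it->cv.wait(lk, [&] { return it == queue.begin(); });
    }

    void unlock()
    {
        std::lock_guard<std::mutex> lk(mutex);
        if (--queue.front().members == 0)
        {
            queue.pop_front();
            if (!queue.empty())
                queue.front().cv.notify_all();     // wake the whole next group
        }
    }

private:
    struct Group
    {
        explicit Group(Type type_) : type(type_) {}
        Type type;
        size_t members = 0;
        std::condition_variable cv;
    };

    std::list<Group> queue;
    std::mutex mutex;
};
```
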
- it_group->cv.wait(lock, [&] () { return it_group == queue.begin(); }); - - --it_group->refererrs; - - if (request_has_query_id) - { - try - { - if (type == Type::Read) - all_read_locks.add(query_id); /// SM2: may throw on insertion - /// and is safe to roll back unconditionally - const auto emplace_res = - owner_queries.emplace(query_id, 1); /// SM3: may throw on insertion - if (!emplace_res.second) - ++emplace_res.first->second; /// SM4: nothrow - } - catch (...) - { - /// Methods std::list<>::emplace_back() and std::unordered_map<>::emplace() provide strong exception safety - /// We only need to roll back the changes to these objects: all_read_locks and the locking queue - if (type == Type::Read) - all_read_locks.remove(query_id); /// Rollback(SM2): nothrow - - if (it_group->refererrs == 0) - { - const auto next = queue.erase(it_group); /// Rollback(SM1): nothrow - if (next != queue.end()) - next->cv.notify_all(); - } - - throw; - } - } - - lock_holder->bindWith(shared_from_this(), it_group); /// SM: nothrow - - finalize_metrics(); - return lock_holder; -} - - -/** The sequence points of acquiring lock's ownership by an instance of LockHolderImpl: - * 1. all_read_locks is updated - * 2. owner_queries is updated - * 3. request group is updated by LockHolderImpl which in turn becomes "bound" - * - * If by the time when destructor of LockHolderImpl is called the instance has been "bound", - * it is guaranteed that all three steps have been executed successfully and the resulting state is consistent. - * With the mutex locked the order of steps to restore the lock's state can be arbitrary - * - * We do not employ try-catch: if something bad happens, there is nothing we can do =( - */ -RWLockImpl::LockHolderImpl::~LockHolderImpl() -{ - if (!bound || parent == nullptr) - return; - - std::lock_guard lock(parent->mutex); - - /// The associated group must exist (and be the beginning of the queue?) - if (parent->queue.empty() || it_group != parent->queue.begin()) - return; - - /// If query_id is not empty it must be listed in parent->owner_queries - if (query_id != RWLockImpl::NO_QUERY) - { - const auto owner_it = parent->owner_queries.find(query_id); - if (owner_it != parent->owner_queries.end()) - { - if (--owner_it->second == 0) /// SM: nothrow - parent->owner_queries.erase(owner_it); /// SM: nothrow - - if (lock_type == RWLockImpl::Read) - all_read_locks.remove(query_id); /// SM: nothrow - } - } - - /// If we are the last remaining referrer, remove the group and notify the next group - if (--it_group->refererrs == 0) /// SM: nothrow - { - const auto next = parent->queue.erase(it_group); /// SM: nothrow - if (next != parent->queue.end()) - next->cv.notify_all(); - } -} - -} diff --git a/dbms/src/Common/RWLock.h b/dbms/src/Common/RWLock.h deleted file mode 100644 index a7084720d6c..00000000000 --- a/dbms/src/Common/RWLock.h +++ /dev/null @@ -1,78 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -class RWLockImpl; -using RWLock = std::shared_ptr; - - -/// Implements shared lock with FIFO service -/// Can be acquired recursively (several calls for the same query) in Read mode -/// -/// NOTE: it is important to allow acquiring the same lock in Read mode without waiting if it is already -/// acquired by another thread of the same query. 
Otherwise the following deadlock is possible: -/// - SELECT thread 1 locks in the Read mode -/// - ALTER tries to lock in the Write mode (waits for SELECT thread 1) -/// - SELECT thread 2 tries to lock in the Read mode (waits for ALTER) -class RWLockImpl : public std::enable_shared_from_this -{ -public: - enum Type - { - Read, - Write, - }; - - static RWLock create() { return RWLock(new RWLockImpl); } - - /// Just use LockHolder::reset() to release the lock - class LockHolderImpl; - friend class LockHolderImpl; - using LockHolder = std::shared_ptr; - - /// Waits in the queue and returns appropriate lock - /// Empty query_id means the lock is acquired out of the query context (e.g. in a background thread). - LockHolder getLock(Type type, const String & query_id); - - /// Use as query_id to acquire a lock outside the query context. - inline static const String NO_QUERY = String(); - -private: - RWLockImpl() = default; - - struct Group; - using GroupsContainer = std::list; - using OwnerQueryIds = std::unordered_map; - - /// Group of locking requests that should be granted concurrently - /// i.e. a group can contain several readers, but only one writer - struct Group - { - const Type type; - size_t refererrs; - - std::condition_variable cv; /// all locking requests of the group wait on this condvar - - explicit Group(Type type_) : type{type_}, refererrs{0} {} - }; - - GroupsContainer queue; - OwnerQueryIds owner_queries; - - mutable std::mutex mutex; -}; - - -} diff --git a/dbms/src/Common/ThreadFuzzer.cpp b/dbms/src/Common/ThreadFuzzer.cpp deleted file mode 100644 index 2c1bb3c2744..00000000000 --- a/dbms/src/Common/ThreadFuzzer.cpp +++ /dev/null @@ -1,258 +0,0 @@ -#include -#include -#include -#if defined(OS_LINUX) - #include -#endif -#include - -#include - -#include -#include - -#include - -#include -#include - -#include - -/// We will also wrap some thread synchronization functions to inject sleep/migration before or after. -#if defined(OS_LINUX) -#define FOR_EACH_WRAPPED_FUNCTION(M) \ - M(int, pthread_mutex_lock, pthread_mutex_t * arg) \ - M(int, pthread_mutex_unlock, pthread_mutex_t * arg) -#endif - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int CANNOT_MANIPULATE_SIGSET; - extern const int CANNOT_SET_SIGNAL_HANDLER; - extern const int CANNOT_CREATE_TIMER; -} - - -ThreadFuzzer::ThreadFuzzer() -{ - initConfiguration(); - if (!isEffective()) - return; - setup(); -} - - -template -static void initFromEnv(T & what, const char * name) -{ - const char * env = getenv(name); - if (!env) - return; - what = parse(env); -} - -template -static void initFromEnv(std::atomic & what, const char * name) -{ - const char * env = getenv(name); - if (!env) - return; - what.store(parse(env), std::memory_order_relaxed); -} - - -static std::atomic num_cpus = 0; - -#if defined(OS_LINUX) -#define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) 
\ - static std::atomic NAME ## _before_yield_probability = 0; \ - static std::atomic NAME ## _before_migrate_probability = 0; \ - static std::atomic NAME ## _before_sleep_probability = 0; \ - static std::atomic NAME ## _before_sleep_time_us = 0; \ - \ - static std::atomic NAME ## _after_yield_probability = 0; \ - static std::atomic NAME ## _after_migrate_probability = 0; \ - static std::atomic NAME ## _after_sleep_probability = 0; \ - static std::atomic NAME ## _after_sleep_time_us = 0; \ - -FOR_EACH_WRAPPED_FUNCTION(DEFINE_WRAPPER_PARAMS) - -#undef DEFINE_WRAPPER_PARAMS -#endif - -void ThreadFuzzer::initConfiguration() -{ -#if defined(OS_LINUX) - num_cpus.store(get_nprocs(), std::memory_order_relaxed); -#else - (void)num_cpus; -#endif - - initFromEnv(cpu_time_period_us, "THREAD_FUZZER_CPU_TIME_PERIOD_US"); - initFromEnv(yield_probability, "THREAD_FUZZER_YIELD_PROBABILITY"); - initFromEnv(migrate_probability, "THREAD_FUZZER_MIGRATE_PROBABILITY"); - initFromEnv(sleep_probability, "THREAD_FUZZER_SLEEP_PROBABILITY"); - initFromEnv(sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US"); - -#if defined(OS_LINUX) -#define INIT_WRAPPER_PARAMS(RET, NAME, ...) \ - initFromEnv(NAME ## _before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \ - initFromEnv(NAME ## _before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \ - initFromEnv(NAME ## _before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \ - initFromEnv(NAME ## _before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \ - \ - initFromEnv(NAME ## _after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \ - initFromEnv(NAME ## _after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \ - initFromEnv(NAME ## _after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \ - initFromEnv(NAME ## _after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US"); \ - - FOR_EACH_WRAPPED_FUNCTION(INIT_WRAPPER_PARAMS) - -#undef INIT_WRAPPER_PARAMS -#endif -} - - -bool ThreadFuzzer::isEffective() const -{ -#if defined(OS_LINUX) -#define CHECK_WRAPPER_PARAMS(RET, NAME, ...) 
\ - if (NAME ## _before_yield_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _before_migrate_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _before_sleep_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)) return true; \ - \ - if (NAME ## _after_yield_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _after_migrate_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _after_sleep_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)) return true; \ - - FOR_EACH_WRAPPED_FUNCTION(CHECK_WRAPPER_PARAMS) - -#undef INIT_WRAPPER_PARAMS -#endif - - return cpu_time_period_us != 0 - && (yield_probability > 0 - || migrate_probability > 0 - || (sleep_probability > 0 && sleep_time_us > 0)); -} - - -static void injection( - double yield_probability, - double migrate_probability, - double sleep_probability, - double sleep_time_us [[maybe_unused]]) -{ - if (yield_probability > 0 - && std::bernoulli_distribution(yield_probability)(thread_local_rng)) - { - sched_yield(); - } - -#if defined(OS_LINUX) - int num_cpus_loaded = num_cpus.load(std::memory_order_relaxed); - if (num_cpus_loaded > 0 - && migrate_probability > 0 - && std::bernoulli_distribution(migrate_probability)(thread_local_rng)) - { - int migrate_to = std::uniform_int_distribution<>(0, num_cpus_loaded - 1)(thread_local_rng); - - cpu_set_t set{}; - CPU_ZERO(&set); - CPU_SET(migrate_to, &set); - - (void)sched_setaffinity(0, sizeof(set), &set); - } -#else - UNUSED(migrate_probability); -#endif - - if (sleep_probability > 0 - && sleep_time_us > 0 - && std::bernoulli_distribution(sleep_probability)(thread_local_rng)) - { - sleepForNanoseconds(sleep_time_us * 1000); - } -} - - -void ThreadFuzzer::signalHandler(int) -{ - auto saved_errno = errno; - - auto & fuzzer = ThreadFuzzer::instance(); - injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.sleep_probability, fuzzer.sleep_time_us); - - errno = saved_errno; -} - -void ThreadFuzzer::setup() -{ - struct sigaction sa{}; - sa.sa_handler = signalHandler; - sa.sa_flags = SA_RESTART; - -#if defined(OS_LINUX) - if (sigemptyset(&sa.sa_mask)) - throwFromErrno("Failed to clean signal mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET); - - if (sigaddset(&sa.sa_mask, SIGPROF)) - throwFromErrno("Failed to add signal to mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET); -#else - // the two following functions always return 0 under mac - sigemptyset(&sa.sa_mask); - sigaddset(&sa.sa_mask, SIGPROF); -#endif - - if (sigaction(SIGPROF, &sa, nullptr)) - throwFromErrno("Failed to setup signal handler for thread fuzzer", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); - - static constexpr UInt32 timer_precision = 1000000; - - struct timeval interval; - interval.tv_sec = cpu_time_period_us / timer_precision; - interval.tv_usec = cpu_time_period_us % timer_precision; - - struct itimerval timer = {.it_interval = interval, .it_value = interval}; - - if (0 != setitimer(ITIMER_PROF, &timer, nullptr)) - throwFromErrno("Failed to create profiling timer", ErrorCodes::CANNOT_CREATE_TIMER); -} - - -/// We expect that for every function like pthread_mutex_lock there is the same function with two underscores prefix. -/// NOTE We cannot use dlsym(... RTLD_NEXT), because it will call pthread_mutex_lock and it will lead to infinite recursion. 
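
Instead, the `MAKE_WRAPPER` macro below defines the public symbol itself and forwards to glibc's double-underscore alias, running the injection before and after the real call. A minimal sketch of that interposition trick, with `sched_yield` standing in for the probabilistic yield/migrate/sleep injection that the real code configures from environment variables (glibc/Linux only):

```cpp
#if defined(__linux__)
#include <pthread.h>
#include <sched.h>

// glibc exports an internal alias with a "__" prefix; calling it directly avoids
// the infinite recursion that dlsym(RTLD_NEXT, ...) would cause, since dlsym
// itself takes locks.
extern "C" int __pthread_mutex_lock(pthread_mutex_t * mutex);

extern "C" int pthread_mutex_lock(pthread_mutex_t * mutex)  // NOLINT: we shadow the libc symbol on purpose
{
    sched_yield();                            // "before" injection point
    int ret = __pthread_mutex_lock(mutex);    // the real lock
    sched_yield();                            // "after" injection point
    return ret;
}
#endif
```
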
- -#if defined(OS_LINUX) -#define MAKE_WRAPPER(RET, NAME, ...) \ - extern "C" RET __ ## NAME(__VA_ARGS__); /* NOLINT */ \ - extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \ - { \ - injection( \ - NAME ## _before_yield_probability.load(std::memory_order_relaxed), \ - NAME ## _before_migrate_probability.load(std::memory_order_relaxed), \ - NAME ## _before_sleep_probability.load(std::memory_order_relaxed), \ - NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)); \ - \ - auto && ret{__ ## NAME(arg)}; \ - \ - injection( \ - NAME ## _after_yield_probability.load(std::memory_order_relaxed), \ - NAME ## _after_migrate_probability.load(std::memory_order_relaxed), \ - NAME ## _after_sleep_probability.load(std::memory_order_relaxed), \ - NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)); \ - \ - return ret; \ - } \ - - FOR_EACH_WRAPPED_FUNCTION(MAKE_WRAPPER) - -#undef MAKE_WRAPPER -#endif - -} diff --git a/dbms/src/Common/ZooKeeper/CMakeLists.txt b/dbms/src/Common/ZooKeeper/CMakeLists.txt deleted file mode 100644 index aa6efcd3ca1..00000000000 --- a/dbms/src/Common/ZooKeeper/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) - -add_headers_and_sources(clickhouse_common_zookeeper .) - -add_library(clickhouse_common_zookeeper ${clickhouse_common_zookeeper_headers} ${clickhouse_common_zookeeper_sources}) - -target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY}) -target_include_directories(clickhouse_common_zookeeper PUBLIC ${DBMS_INCLUDE_DIR}) - -if (ENABLE_TESTS) - add_subdirectory (tests) -endif () diff --git a/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt b/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt deleted file mode 100644 index 06716e49918..00000000000 --- a/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt +++ /dev/null @@ -1,23 +0,0 @@ -add_executable(zkutil_test_commands zkutil_test_commands.cpp) -target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper) - -add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp) -target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper) - -add_executable(zkutil_test_lock zkutil_test_lock.cpp) -target_link_libraries(zkutil_test_lock PRIVATE clickhouse_common_zookeeper) - -add_executable(zkutil_expiration_test zkutil_expiration_test.cpp) -target_link_libraries(zkutil_expiration_test PRIVATE clickhouse_common_zookeeper) - -add_executable(zkutil_test_async zkutil_test_async.cpp) -target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper) - -add_executable(zkutil_zookeeper_holder zkutil_zookeeper_holder.cpp) -target_link_libraries(zkutil_zookeeper_holder PRIVATE clickhouse_common_zookeeper) - -add_executable (zk_many_watches_reconnect zk_many_watches_reconnect.cpp) -target_link_libraries (zk_many_watches_reconnect PRIVATE clickhouse_common_zookeeper clickhouse_common_config) - -add_executable (zookeeper_impl zookeeper_impl.cpp) -target_link_libraries (zookeeper_impl PRIVATE clickhouse_common_zookeeper) diff --git a/dbms/src/Functions/FunctionJoinGet.cpp b/dbms/src/Functions/FunctionJoinGet.cpp deleted file mode 100644 index 6a6c0c4a97e..00000000000 --- a/dbms/src/Functions/FunctionJoinGet.cpp +++ /dev/null @@ -1,108 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int 
NUMBER_OF_ARGUMENTS_DOESNT_MATCH; -} - -static auto getJoin(const ColumnsWithTypeAndName & arguments, const Context & context) -{ - if (arguments.size() != 3) - throw Exception{"Function joinGet takes 3 arguments", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; - - String join_name; - if (auto name_col = checkAndGetColumnConst(arguments[0].column.get())) - { - join_name = name_col->getValue(); - } - else - throw Exception{"Illegal type " + arguments[0].type->getName() + " of first argument of function joinGet, expected a const string.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; - - size_t dot = join_name.find('.'); - String database_name; - if (dot == String::npos) - { - database_name = context.getCurrentDatabase(); - dot = 0; - } - else - { - database_name = join_name.substr(0, dot); - ++dot; - } - String table_name = join_name.substr(dot); - auto table = DatabaseCatalog::instance().getTable({database_name, table_name}); - auto storage_join = std::dynamic_pointer_cast(table); - if (!storage_join) - throw Exception{"Table " + join_name + " should have engine StorageJoin", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; - - String attr_name; - if (auto name_col = checkAndGetColumnConst(arguments[1].column.get())) - { - attr_name = name_col->getValue(); - } - else - throw Exception{"Illegal type " + arguments[1].type->getName() - + " of second argument of function joinGet, expected a const string.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; - return std::make_pair(storage_join, attr_name); -} - -FunctionBaseImplPtr JoinGetOverloadResolver::build(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const -{ - auto [storage_join, attr_name] = getJoin(arguments, context); - auto join = storage_join->getJoin(); - DataTypes data_types(arguments.size()); - - auto table_lock = storage_join->lockStructureForShare(context.getInitialQueryId()); - for (size_t i = 0; i < arguments.size(); ++i) - data_types[i] = arguments[i].type; - - auto return_type = join->joinGetReturnType(attr_name); - return std::make_unique(table_lock, storage_join, join, attr_name, data_types, return_type); -} - -DataTypePtr JoinGetOverloadResolver::getReturnType(const ColumnsWithTypeAndName & arguments) const -{ - auto [storage_join, attr_name] = getJoin(arguments, context); - auto join = storage_join->getJoin(); - return join->joinGetReturnType(attr_name); -} - - -void ExecutableFunctionJoinGet::execute(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) -{ - auto ctn = block.getByPosition(arguments[2]); - if (isColumnConst(*ctn.column)) - ctn.column = ctn.column->cloneResized(1); - ctn.name = ""; // make sure the key name never collide with the join columns - Block key_block = {ctn}; - join->joinGet(key_block, attr_name); - auto & result_ctn = key_block.getByPosition(1); - if (isColumnConst(*ctn.column)) - result_ctn.column = ColumnConst::create(result_ctn.column, input_rows_count); - block.getByPosition(result) = result_ctn; -} - -ExecutableFunctionImplPtr FunctionJoinGet::prepare(const Block &, const ColumnNumbers &, size_t) const -{ - return std::make_unique(join, attr_name); -} - -void registerFunctionJoinGet(FunctionFactory & factory) -{ - factory.registerFunction(); -} - -} diff --git a/dbms/src/Functions/URL/path.cpp b/dbms/src/Functions/URL/path.cpp deleted file mode 100644 index 43e21ece00f..00000000000 --- a/dbms/src/Functions/URL/path.cpp +++ /dev/null @@ -1,39 +0,0 @@ -#include -#include -#include "FunctionsURL.h" -#include - -namespace DB -{ - -struct ExtractPath -{ - static 
size_t getReserveLengthForElement() { return 25; } - - static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size) - { - res_data = data; - res_size = 0; - - Pos pos = data; - Pos end = pos + size; - - if (end != (pos = find_first_symbols<'/'>(pos, end)) && pos[1] == '/' && end != (pos = find_first_symbols<'/'>(pos + 2, end))) - { - Pos query_string_or_fragment = find_first_symbols<'?', '#'>(pos, end); - - res_data = pos; - res_size = query_string_or_fragment - res_data; - } - } -}; - -struct NamePath { static constexpr auto name = "path"; }; -using FunctionPath = FunctionStringToString, NamePath>; - -void registerFunctionPath(FunctionFactory & factory) -{ - factory.registerFunction(); -} - -} diff --git a/dbms/src/Functions/URL/pathFull.cpp b/dbms/src/Functions/URL/pathFull.cpp deleted file mode 100644 index da31737c0f9..00000000000 --- a/dbms/src/Functions/URL/pathFull.cpp +++ /dev/null @@ -1,37 +0,0 @@ -#include -#include -#include "FunctionsURL.h" -#include - -namespace DB -{ - -struct ExtractPathFull -{ - static size_t getReserveLengthForElement() { return 30; } - - static void execute(const Pos data, const size_t size, Pos & res_data, size_t & res_size) - { - res_data = data; - res_size = 0; - - Pos pos = data; - Pos end = pos + size; - - if (end != (pos = find_first_symbols<'/'>(pos, end)) && pos[1] == '/' && end != (pos = find_first_symbols<'/'>(pos + 2, end))) - { - res_data = pos; - res_size = end - res_data; - } - } -}; - -struct NamePathFull { static constexpr auto name = "pathFull"; }; -using FunctionPathFull = FunctionStringToString, NamePathFull>; - -void registerFunctionPathFull(FunctionFactory & factory) -{ - factory.registerFunction(); -} - -} diff --git a/dbms/src/Functions/array/arrayPushBack.cpp b/dbms/src/Functions/array/arrayPushBack.cpp deleted file mode 100644 index 74d9596dcd2..00000000000 --- a/dbms/src/Functions/array/arrayPushBack.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include "arrayPush.h" -#include - - -namespace DB -{ - -class FunctionArrayPushBack : public FunctionArrayPush -{ -public: - static constexpr auto name = "arrayPushBack"; - static FunctionPtr create(const Context & context) { return std::make_shared(context); } - explicit FunctionArrayPushBack(const Context & context_) : FunctionArrayPush(context_, false, name) {} -}; - -void registerFunctionArrayPushBack(FunctionFactory & factory) -{ - factory.registerFunction(); -} - -} diff --git a/dbms/src/Functions/array/arrayPushFront.cpp b/dbms/src/Functions/array/arrayPushFront.cpp deleted file mode 100644 index ab8535b6672..00000000000 --- a/dbms/src/Functions/array/arrayPushFront.cpp +++ /dev/null @@ -1,23 +0,0 @@ -#include "arrayPush.h" -#include - - -namespace DB -{ - - -class FunctionArrayPushFront : public FunctionArrayPush -{ -public: - static constexpr auto name = "arrayPushFront"; - static FunctionPtr create(const Context & context) { return std::make_shared(context); } - explicit FunctionArrayPushFront(const Context & context_) : FunctionArrayPush(context_, true, name) {} -}; - - -void registerFunctionArrayPushFront(FunctionFactory & factory) -{ - factory.registerFunction(); -} - -} diff --git a/dbms/src/Functions/array/hasAll.cpp b/dbms/src/Functions/array/hasAll.cpp deleted file mode 100644 index 8d833adb5f5..00000000000 --- a/dbms/src/Functions/array/hasAll.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include "hasAllAny.h" -#include - - -namespace DB -{ - -class FunctionArrayHasAll : public FunctionArrayHasAllAny -{ -public: - static constexpr auto name = "hasAll"; - static 
FunctionPtr create(const Context & context) { return std::make_shared(context); } - explicit FunctionArrayHasAll(const Context & context_) : FunctionArrayHasAllAny(context_, true, name) {} -}; - -void registerFunctionHasAll(FunctionFactory & factory) -{ - factory.registerFunction(); -} - -} diff --git a/dbms/src/Functions/array/hasAny.cpp b/dbms/src/Functions/array/hasAny.cpp deleted file mode 100644 index 84a3a736364..00000000000 --- a/dbms/src/Functions/array/hasAny.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include "hasAllAny.h" -#include - - -namespace DB -{ - -class FunctionArrayHasAny : public FunctionArrayHasAllAny -{ -public: - static constexpr auto name = "hasAny"; - static FunctionPtr create(const Context & context) { return std::make_shared(context); } - explicit FunctionArrayHasAny(const Context & context_) : FunctionArrayHasAllAny(context_, false, name) {} -}; - -void registerFunctionHasAny(FunctionFactory & factory) -{ - factory.registerFunction(); -} - -} diff --git a/dbms/src/Functions/in.cpp b/dbms/src/Functions/in.cpp deleted file mode 100644 index 0b25ca201bb..00000000000 --- a/dbms/src/Functions/in.cpp +++ /dev/null @@ -1,132 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int ILLEGAL_COLUMN; -} - -/** in(x, set) - function for evaluating the IN - * notIn(x, set) - and NOT IN. - */ - -template -struct FunctionInName; - -template <> -struct FunctionInName -{ - static constexpr auto name = "in"; -}; - -template <> -struct FunctionInName -{ - static constexpr auto name = "globalIn"; -}; - -template <> -struct FunctionInName -{ - static constexpr auto name = "notIn"; -}; - -template <> -struct FunctionInName -{ - static constexpr auto name = "globalNotIn"; -}; - -template -class FunctionIn : public IFunction -{ -public: - static constexpr auto name = FunctionInName::name; - static FunctionPtr create(const Context &) - { - return std::make_shared(); - } - - String getName() const override - { - return name; - } - - size_t getNumberOfArguments() const override - { - return 2; - } - - DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override - { - return std::make_shared(); - } - - bool useDefaultImplementationForConstants() const override { return true; } - - void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t /*input_rows_count*/) override - { - /// NOTE: after updating this code, check that FunctionIgnoreExceptNull returns the same type of column. - - /// Second argument must be ColumnSet. - ColumnPtr column_set_ptr = block.getByPosition(arguments[1]).column; - const ColumnSet * column_set = checkAndGetColumnConstData(column_set_ptr.get()); - if (!column_set) - column_set = checkAndGetColumn(column_set_ptr.get()); - if (!column_set) - throw Exception("Second argument for function '" + getName() + "' must be Set; found " + column_set_ptr->getName(), - ErrorCodes::ILLEGAL_COLUMN); - - Block block_of_key_columns; - - /// First argument may be a tuple or a single column. 
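
The four IN variants deleted here share one implementation; only two boolean template parameters and a traits struct carrying the registered name differ per instantiation. The pattern in isolation (behavior stubbed out, names as in the source):

```cpp
#include <string_view>

template <bool negative, bool global>
struct FunctionInName;

template <> struct FunctionInName<false, false> { static constexpr auto name = "in"; };
template <> struct FunctionInName<false, true>  { static constexpr auto name = "globalIn"; };
template <> struct FunctionInName<true, false>  { static constexpr auto name = "notIn"; };
template <> struct FunctionInName<true, true>   { static constexpr auto name = "globalNotIn"; };

template <bool negative, bool global>
struct FunctionIn
{
    static constexpr auto name = FunctionInName<negative, global>::name;
    // The real class evaluates set membership and flips the result when `negative`.
};

static_assert(std::string_view(FunctionIn<true, true>::name) == "globalNotIn");
```
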
- const ColumnWithTypeAndName & left_arg = block.getByPosition(arguments[0]); - const ColumnTuple * tuple = typeid_cast(left_arg.column.get()); - const ColumnConst * const_tuple = checkAndGetColumnConst(left_arg.column.get()); - const DataTypeTuple * type_tuple = typeid_cast(left_arg.type.get()); - - ColumnPtr materialized_tuple; - if (const_tuple) - { - materialized_tuple = const_tuple->convertToFullColumn(); - tuple = typeid_cast(materialized_tuple.get()); - } - - auto set = column_set->getData(); - auto set_types = set->getDataTypes(); - if (tuple && (set_types.size() != 1 || !set_types[0]->equals(*type_tuple))) - { - const auto & tuple_columns = tuple->getColumns(); - const DataTypes & tuple_types = type_tuple->getElements(); - size_t tuple_size = tuple_columns.size(); - for (size_t i = 0; i < tuple_size; ++i) - block_of_key_columns.insert({ tuple_columns[i], tuple_types[i], "" }); - } - else - block_of_key_columns.insert(left_arg); - - block.getByPosition(result).column = set->execute(block_of_key_columns, negative); - } -}; - - -void registerFunctionsIn(FunctionFactory & factory) -{ - factory.registerFunction>(); - factory.registerFunction>(); - factory.registerFunction>(); - factory.registerFunction>(); -} - -} diff --git a/dbms/src/IO/AIO.h b/dbms/src/IO/AIO.h deleted file mode 100644 index 7a2b85dc42e..00000000000 --- a/dbms/src/IO/AIO.h +++ /dev/null @@ -1,76 +0,0 @@ -#pragma once - -#include - -#if defined(__linux__) - -/// https://stackoverflow.com/questions/20759750/resolving-redefinition-of-timespec-in-time-h -#define timespec linux_timespec -#define timeval linux_timeval -#define itimerspec linux_itimerspec -#define sigset_t linux_sigset_t - -#include - -#undef timespec -#undef timeval -#undef itimerspec -#undef sigset_t - - -/** Small wrappers for asynchronous I/O. 
- */ - -int io_setup(unsigned nr, aio_context_t * ctxp); - -int io_destroy(aio_context_t ctx); - -/// last argument is an array of pointers technically speaking -int io_submit(aio_context_t ctx, long nr, struct iocb * iocbpp[]); - -int io_getevents(aio_context_t ctx, long min_nr, long max_nr, io_event * events, struct timespec * timeout); - - -struct AIOContext : private boost::noncopyable -{ - aio_context_t ctx; - - AIOContext(unsigned int nr_events = 128); - ~AIOContext(); -}; - -#elif defined(__FreeBSD__) - -#include -#include -#include -#include - -typedef struct kevent io_event; -typedef int aio_context_t; - -struct iocb -{ - struct aiocb aio; - long aio_data; -}; - -int io_setup(void); - -int io_destroy(void); - -/// last argument is an array of pointers technically speaking -int io_submit(int ctx, long nr, struct iocb * iocbpp[]); - -int io_getevents(int ctx, long min_nr, long max_nr, struct kevent * events, struct timespec * timeout); - - -struct AIOContext : private boost::noncopyable -{ - int ctx; - - AIOContext(unsigned int nr_events = 128); - ~AIOContext(); -}; - -#endif diff --git a/dbms/src/IO/S3Common.cpp b/dbms/src/IO/S3Common.cpp deleted file mode 100644 index 137fe22c872..00000000000 --- a/dbms/src/IO/S3Common.cpp +++ /dev/null @@ -1,154 +0,0 @@ -#include - -#if USE_AWS_S3 - -#include -#include - -#include -#include -#include -#include -#include -#include - - -namespace -{ -const std::pair & convertLogLevel(Aws::Utils::Logging::LogLevel log_level) -{ - static const std::unordered_map> mapping = { - {Aws::Utils::Logging::LogLevel::Off, {LogsLevel::none, Message::PRIO_FATAL}}, - {Aws::Utils::Logging::LogLevel::Fatal, {LogsLevel::error, Message::PRIO_FATAL}}, - {Aws::Utils::Logging::LogLevel::Error, {LogsLevel::error, Message::PRIO_ERROR}}, - {Aws::Utils::Logging::LogLevel::Warn, {LogsLevel::warning, Message::PRIO_WARNING}}, - {Aws::Utils::Logging::LogLevel::Info, {LogsLevel::information, Message::PRIO_INFORMATION}}, - {Aws::Utils::Logging::LogLevel::Debug, {LogsLevel::debug, Message::PRIO_DEBUG}}, - {Aws::Utils::Logging::LogLevel::Trace, {LogsLevel::trace, Message::PRIO_TRACE}}, - }; - return mapping.at(log_level); -} - -class AWSLogger final : public Aws::Utils::Logging::LogSystemInterface -{ -public: - ~AWSLogger() final = default; - - Aws::Utils::Logging::LogLevel GetLogLevel() const final { return Aws::Utils::Logging::LogLevel::Trace; } - - void Log(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * format_str, ...) 
final // NOLINT - { - auto & [level, prio] = convertLogLevel(log_level); - LOG_SIMPLE(log, std::string(tag) + ": " + format_str, level, prio); - } - - void LogStream(Aws::Utils::Logging::LogLevel log_level, const char * tag, const Aws::OStringStream & message_stream) final - { - auto & [level, prio] = convertLogLevel(log_level); - LOG_SIMPLE(log, std::string(tag) + ": " + message_stream.str(), level, prio); - } - - void Flush() final {} - -private: - Poco::Logger * log = &Poco::Logger::get("AWSClient"); -}; -} - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -namespace S3 -{ - ClientFactory::ClientFactory() - { - aws_options = Aws::SDKOptions {}; - Aws::InitAPI(aws_options); - Aws::Utils::Logging::InitializeAWSLogging(std::make_shared()); - } - - ClientFactory::~ClientFactory() - { - Aws::Utils::Logging::ShutdownAWSLogging(); - Aws::ShutdownAPI(aws_options); - } - - ClientFactory & ClientFactory::instance() - { - static ClientFactory ret; - return ret; - } - - std::shared_ptr ClientFactory::create( // NOLINT - const String & endpoint, - const String & access_key_id, - const String & secret_access_key) - { - Aws::Client::ClientConfiguration cfg; - if (!endpoint.empty()) - cfg.endpointOverride = endpoint; - - Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key); - - return std::make_shared( - credentials, // Aws credentials. - std::move(cfg), // Client configuration. - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, // Sign policy. - endpoint.empty() // Use virtual addressing only if endpoint is not specified. - ); - } - - - URI::URI(const Poco::URI & uri_) - { - static const std::regex bucket_key_pattern("([^/]+)/(.*)"); /// TODO std::regex is discouraged - - uri = uri_; - - // s3://* - if (uri.getScheme() == "s3" || uri.getScheme() == "S3") - { - bucket = uri.getAuthority(); - if (bucket.empty()) - throw Exception ("Invalid S3 URI: no bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - - const auto & path = uri.getPath(); - // s3://bucket or s3://bucket/ - if (path.length() <= 1) - throw Exception ("Invalid S3 URI: no key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - - key = path.substr(1); - return; - } - - if (uri.getHost().empty()) - throw Exception("Invalid S3 URI: no host: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - - endpoint = uri.getScheme() + "://" + uri.getAuthority(); - - // Parse bucket and key from path. 
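
For reference, the bucket/key split performed a few lines below can be reproduced in isolation. This is a minimal sketch, assuming a plain std::string input instead of Poco::URI; S3UriParts and parseS3Path are illustrative names, not from this codebase:

#include <regex>
#include <stdexcept>
#include <string>

struct S3UriParts
{
    std::string bucket;
    std::string key;
};

/// Splits a path such as "bucket/some/key" with the same pattern
/// "([^/]+)/(.*)" declared above in the deleted S3Common.cpp.
inline S3UriParts parseS3Path(const std::string & path)
{
    static const std::regex bucket_key_pattern("([^/]+)/(.*)");
    std::smatch match;
    if (!std::regex_search(path, match, bucket_key_pattern) || match.str(1).empty() || match.str(2).empty())
        throw std::invalid_argument("Invalid S3 path, expected bucket/key: " + path);
    return {match.str(1), match.str(2)};
}

For example, parseS3Path("mybucket/dir/file.csv") yields {"mybucket", "dir/file.csv"}.
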
- std::smatch match; - std::regex_search(uri.getPath(), match, bucket_key_pattern); - if (!match.empty()) - { - bucket = match.str(1); - if (bucket.empty()) - throw Exception ("Invalid S3 URI: no bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - - key = match.str(2); - if (key.empty()) - throw Exception ("Invalid S3 URI: no key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - } - else - throw Exception("Invalid S3 URI: no bucket or key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - } -} - -} - -#endif diff --git a/dbms/src/Interpreters/AnalyzedJoin.cpp b/dbms/src/Interpreters/AnalyzedJoin.cpp deleted file mode 100644 index f3ab350c373..00000000000 --- a/dbms/src/Interpreters/AnalyzedJoin.cpp +++ /dev/null @@ -1,241 +0,0 @@ -#include - -#include - -#include -#include - -#include - - -namespace DB -{ - -namespace ErrorCodes -{ -} - -AnalyzedJoin::AnalyzedJoin(const Settings & settings, VolumePtr tmp_volume_) - : size_limits(SizeLimits{settings.max_rows_in_join, settings.max_bytes_in_join, settings.join_overflow_mode}) - , default_max_bytes(settings.default_max_bytes_in_join) - , join_use_nulls(settings.join_use_nulls) - , max_joined_block_rows(settings.max_joined_block_size_rows) - , join_algorithm(settings.join_algorithm) - , partial_merge_join_optimizations(settings.partial_merge_join_optimizations) - , partial_merge_join_rows_in_right_blocks(settings.partial_merge_join_rows_in_right_blocks) - , tmp_volume(tmp_volume_) -{ - if (settings.partial_merge_join) - join_algorithm = JoinAlgorithm::PREFER_PARTIAL_MERGE; -} - -void AnalyzedJoin::addUsingKey(const ASTPtr & ast) -{ - key_names_left.push_back(ast->getColumnName()); - key_names_right.push_back(ast->getAliasOrColumnName()); - - key_asts_left.push_back(ast); - key_asts_right.push_back(ast); - - auto & right_key = key_names_right.back(); - if (renames.count(right_key)) - right_key = renames[right_key]; -} - -void AnalyzedJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast) -{ - key_names_left.push_back(left_table_ast->getColumnName()); - key_names_right.push_back(right_table_ast->getAliasOrColumnName()); - - key_asts_left.push_back(left_table_ast); - key_asts_right.push_back(right_table_ast); -} - -/// @return how many times right key appears in ON section. 
-size_t AnalyzedJoin::rightKeyInclusion(const String & name) const -{ - if (hasUsing()) - return 0; - - size_t count = 0; - for (const auto & key_name : key_names_right) - if (name == key_name) - ++count; - return count; -} - -void AnalyzedJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix) -{ - NameSet joined_columns; - NamesAndTypesList dedup_columns; - - for (auto & column : columns_from_joined_table) - { - if (joined_columns.count(column.name)) - continue; - - joined_columns.insert(column.name); - - dedup_columns.push_back(column); - auto & inserted = dedup_columns.back(); - - if (left_table_columns.count(column.name)) - inserted.name = right_table_prefix + column.name; - - original_names[inserted.name] = column.name; - if (inserted.name != column.name) - renames[column.name] = inserted.name; - } - - columns_from_joined_table.swap(dedup_columns); -} - -NameSet AnalyzedJoin::getQualifiedColumnsSet() const -{ - NameSet out; - for (const auto & names : original_names) - out.insert(names.first); - return out; -} - -NamesWithAliases AnalyzedJoin::getNamesWithAliases(const NameSet & required_columns) const -{ - NamesWithAliases out; - for (const auto & column : required_columns) - { - auto it = original_names.find(column); - if (it != original_names.end()) - out.emplace_back(it->second, it->first); /// {original_name, name} - } - return out; -} - -ASTPtr AnalyzedJoin::leftKeysList() const -{ - ASTPtr keys_list = std::make_shared(); - keys_list->children = key_asts_left; - return keys_list; -} - -ASTPtr AnalyzedJoin::rightKeysList() const -{ - ASTPtr keys_list = std::make_shared(); - if (hasOn()) - keys_list->children = key_asts_right; - return keys_list; -} - -Names AnalyzedJoin::requiredJoinedNames() const -{ - NameSet required_columns_set(key_names_right.begin(), key_names_right.end()); - for (const auto & joined_column : columns_added_by_join) - required_columns_set.insert(joined_column.name); - - return Names(required_columns_set.begin(), required_columns_set.end()); -} - -NameSet AnalyzedJoin::requiredRightKeys() const -{ - NameSet required; - for (const auto & name : key_names_right) - for (const auto & column : columns_added_by_join) - if (name == column.name) - required.insert(name); - return required; -} - -NamesWithAliases AnalyzedJoin::getRequiredColumns(const Block & sample, const Names & action_required_columns) const -{ - NameSet required_columns(action_required_columns.begin(), action_required_columns.end()); - - for (auto & column : requiredJoinedNames()) - if (!sample.has(column)) - required_columns.insert(column); - - return getNamesWithAliases(required_columns); -} - -void AnalyzedJoin::addJoinedColumn(const NameAndTypePair & joined_column) -{ - if (join_use_nulls && isLeftOrFull(table_join.kind)) - { - auto type = joined_column.type->canBeInsideNullable() ? makeNullable(joined_column.type) : joined_column.type; - columns_added_by_join.emplace_back(NameAndTypePair(joined_column.name, std::move(type))); - } - else - columns_added_by_join.push_back(joined_column); -} - -void AnalyzedJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) const -{ - bool right_or_full_join = isRightOrFull(table_join.kind); - bool left_or_full_join = isLeftOrFull(table_join.kind); - - for (auto & col : sample_block) - { - /// Materialize column. - /// Column is not empty if it is constant, but after Join all constants will be materialized. - /// So, we need to remove constants from the header.
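
The nullability rule applied by AnalyzedJoin::addJoinedColumn() above and by the addJoinedColumnsAndCorrectNullability() body that follows can be stated compactly. A minimal sketch, assuming a simple Kind enum in place of ASTTableJoin::Kind (isLeftOrFull/isRightOrFull mirror the helpers used in the source):

enum class Kind { Left, Right, Inner, Full };

constexpr bool isLeftOrFull(Kind k)  { return k == Kind::Left  || k == Kind::Full; }
constexpr bool isRightOrFull(Kind k) { return k == Kind::Right || k == Kind::Full; }

/// With join_use_nulls = 1, columns coming from the right table become
/// Nullable for LEFT/FULL joins, and left-table columns become Nullable for
/// RIGHT/FULL joins, because non-matched rows are filled with NULLs.
constexpr bool rightSideBecomesNullable(bool join_use_nulls, Kind kind) { return join_use_nulls && isLeftOrFull(kind); }
constexpr bool leftSideBecomesNullable(bool join_use_nulls, Kind kind) { return join_use_nulls && isRightOrFull(kind); }

static_assert(rightSideBecomesNullable(true, Kind::Left));
static_assert(!leftSideBecomesNullable(true, Kind::Left));
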
- if (col.column) - col.column = nullptr; - - bool make_nullable = join_use_nulls && right_or_full_join; - - if (make_nullable && col.type->canBeInsideNullable()) - col.type = makeNullable(col.type); - } - - for (const auto & col : columns_added_by_join) - { - auto res_type = col.type; - - bool make_nullable = join_use_nulls && left_or_full_join; - - if (make_nullable && res_type->canBeInsideNullable()) - res_type = makeNullable(res_type); - - sample_block.insert(ColumnWithTypeAndName(nullptr, res_type, col.name)); - } -} - -bool AnalyzedJoin::sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y) -{ - if (!x && !y) - return true; - if (!x || !y) - return false; - - return x->table_join.kind == y->table_join.kind - && x->table_join.strictness == y->table_join.strictness - && x->key_names_left == y->key_names_left - && x->key_names_right == y->key_names_right - && x->columns_added_by_join == y->columns_added_by_join; -} - -bool AnalyzedJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, ASTTableJoin::Kind kind_) const -{ - if (strictness_ == strictness() && kind_ == kind()) - return true; - - /// Compatibility: old ANY INNER == new SEMI LEFT - if (strictness_ == ASTTableJoin::Strictness::Semi && isLeft(kind_) && - strictness() == ASTTableJoin::Strictness::RightAny && isInner(kind())) - return true; - if (strictness() == ASTTableJoin::Strictness::Semi && isLeft(kind()) && - strictness_ == ASTTableJoin::Strictness::RightAny && isInner(kind_)) - return true; - - return false; -} - -bool AnalyzedJoin::allowMergeJoin() const -{ - bool is_any = (strictness() == ASTTableJoin::Strictness::Any); - bool is_all = (strictness() == ASTTableJoin::Strictness::All); - bool is_semi = (strictness() == ASTTableJoin::Strictness::Semi); - - bool allow_merge_join = (isLeft(kind()) && (is_any || is_all || is_semi)) || (isInner(kind()) && is_all); - return allow_merge_join; -} - -} diff --git a/dbms/src/Interpreters/AnalyzedJoin.h b/dbms/src/Interpreters/AnalyzedJoin.h deleted file mode 100644 index f1341a16a4c..00000000000 --- a/dbms/src/Interpreters/AnalyzedJoin.h +++ /dev/null @@ -1,139 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -namespace DB -{ - -class Context; -class ASTSelectQuery; -struct DatabaseAndTableWithAlias; -class Block; - -struct Settings; - -class Volume; -using VolumePtr = std::shared_ptr; - -class AnalyzedJoin -{ - /** Query of the form `SELECT expr(x) AS k FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k` - * The join is made by column k. - * During the JOIN, - * - in the "right" table, it will be available by alias `k`, since `Project` action for the subquery was executed. - * - in the "left" table, it will be accessible by the name `expr(x)`, since `Project` action has not been executed yet. - * You must remember both of these options. - * - * Query of the form `SELECT ... from t1 ANY LEFT JOIN (SELECT ... from t2) ON expr(t1 columns) = expr(t2 columns)` - * to the subquery will be added expression `expr(t2 columns)`. - * It's possible to use name `expr(t2 columns)`. - */ - - friend class SyntaxAnalyzer; - - const SizeLimits size_limits; - const size_t default_max_bytes; - const bool join_use_nulls; - const size_t max_joined_block_rows = 0; - JoinAlgorithm join_algorithm; - const bool partial_merge_join_optimizations = false; - const size_t partial_merge_join_rows_in_right_blocks = 0; - - Names key_names_left; - Names key_names_right; /// Duplicating names are qualified. 
- ASTs key_asts_left; - ASTs key_asts_right; - ASTTableJoin table_join; - ASOF::Inequality asof_inequality = ASOF::Inequality::GreaterOrEquals; - - /// All columns which can be read from joined table. Duplicating names are qualified. - NamesAndTypesList columns_from_joined_table; - /// Columns will be added to block by JOIN. It's a subset of columns_from_joined_table with corrected Nullability - NamesAndTypesList columns_added_by_join; - - /// Name -> original name. Names are the same as in columns_from_joined_table list. - std::unordered_map original_names; - /// Original name -> name. Only renamed columns. - std::unordered_map renames; - - VolumePtr tmp_volume; - -public: - AnalyzedJoin(const Settings &, VolumePtr tmp_volume); - - /// for StorageJoin - AnalyzedJoin(SizeLimits limits, bool use_nulls, ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, - const Names & key_names_right_) - : size_limits(limits) - , default_max_bytes(0) - , join_use_nulls(use_nulls) - , join_algorithm(JoinAlgorithm::HASH) - , key_names_right(key_names_right_) - { - table_join.kind = kind; - table_join.strictness = strictness; - } - - ASTTableJoin::Kind kind() const { return table_join.kind; } - ASTTableJoin::Strictness strictness() const { return table_join.strictness; } - bool sameStrictnessAndKind(ASTTableJoin::Strictness, ASTTableJoin::Kind) const; - const SizeLimits & sizeLimits() const { return size_limits; } - VolumePtr getTemporaryVolume() { return tmp_volume; } - bool allowMergeJoin() const; - bool preferMergeJoin() const { return join_algorithm == JoinAlgorithm::PREFER_PARTIAL_MERGE; } - bool forceMergeJoin() const { return join_algorithm == JoinAlgorithm::PARTIAL_MERGE; } - bool forceHashJoin() const { return join_algorithm == JoinAlgorithm::HASH; } - - bool forceNullableRight() const { return join_use_nulls && isLeftOrFull(table_join.kind); } - bool forceNullableLeft() const { return join_use_nulls && isRightOrFull(table_join.kind); } - size_t defaultMaxBytes() const { return default_max_bytes; } - size_t maxJoinedBlockRows() const { return max_joined_block_rows; } - size_t maxRowsInRightBlock() const { return partial_merge_join_rows_in_right_blocks; } - bool enablePartialMergeJoinOptimizations() const { return partial_merge_join_optimizations; } - - void addUsingKey(const ASTPtr & ast); - void addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast); - - bool hasUsing() const { return table_join.using_expression_list != nullptr; } - bool hasOn() const { return table_join.on_expression != nullptr; } - - NameSet getQualifiedColumnsSet() const; - NamesWithAliases getNamesWithAliases(const NameSet & required_columns) const; - NamesWithAliases getRequiredColumns(const Block & sample, const Names & action_required_columns) const; - - void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix); - size_t rightKeyInclusion(const String & name) const; - NameSet requiredRightKeys() const; - - void addJoinedColumn(const NameAndTypePair & joined_column); - void addJoinedColumnsAndCorrectNullability(Block & sample_block) const; - - void setAsofInequality(ASOF::Inequality inequality) { asof_inequality = inequality; } - ASOF::Inequality getAsofInequality() { return asof_inequality; } - - ASTPtr leftKeysList() const; - ASTPtr rightKeysList() const; /// For ON syntax only - - Names requiredJoinedNames() const; - const Names & keyNamesLeft() const { return key_names_left; } - const Names & keyNamesRight() const { return key_names_right; }
- const NamesAndTypesList & columnsFromJoinedTable() const { return columns_from_joined_table; } - const NamesAndTypesList & columnsAddedByJoin() const { return columns_added_by_join; } - - /// StorageJoin overrides key names (because of different name qualification) - void setRightKeys(const Names & keys) { key_names_right = keys; } - - static bool sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y); -}; - -} diff --git a/dbms/src/Interpreters/ColumnNamesContext.cpp b/dbms/src/Interpreters/ColumnNamesContext.cpp deleted file mode 100644 index 4d23c6f0e8b..00000000000 --- a/dbms/src/Interpreters/ColumnNamesContext.cpp +++ /dev/null @@ -1,118 +0,0 @@ -#include -#include -#include - -namespace DB -{ - -bool ColumnNamesContext::addTableAliasIfAny(const IAST & ast) -{ - String alias = ast.tryGetAlias(); - if (alias.empty()) - return false; - - table_aliases.insert(alias); - return true; -} - -bool ColumnNamesContext::addColumnAliasIfAny(const IAST & ast) -{ - String alias = ast.tryGetAlias(); - if (alias.empty()) - return false; - - if (required_names.count(alias)) - masked_columns.insert(alias); - - complex_aliases.insert(alias); - return true; -} - -void ColumnNamesContext::addColumnIdentifier(const ASTIdentifier & node) -{ - if (!IdentifierSemantic::getColumnName(node)) - return; - - /// There should be no complex cases after query normalization. Names to aliases: one-to-many. - String alias = node.tryGetAlias(); - required_names[node.name].addInclusion(alias); -} - -bool ColumnNamesContext::addArrayJoinAliasIfAny(const IAST & ast) -{ - String alias = ast.tryGetAlias(); - if (alias.empty()) - return false; - - array_join_columns.insert(alias); - return true; -} - -void ColumnNamesContext::addArrayJoinIdentifier(const ASTIdentifier & node) -{ - array_join_columns.insert(node.name); -} - -size_t ColumnNamesContext::nameInclusion(const String & name) const -{ - auto it = required_names.find(name); - if (it != required_names.end()) - return it->second.appears; - return 0; -} - -NameSet ColumnNamesContext::requiredColumns() const -{ - NameSet required; - for (const auto & pr : required_names) - { - const auto & name = pr.first; - String table_name = Nested::extractTableName(name); - - /// Tech debt. ARRAY JOIN columns have their own logic.
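
The filtering performed by the requiredColumns() loop here reduces to a few set operations. A condensed sketch, assuming std::set<std::string> stands in for NameSet and ignoring the Nested::extractTableName() detail:

#include <set>
#include <string>

using NameSet = std::set<std::string>;

/// A referenced name is required if it is not hidden behind an alias
/// (complex_aliases), unless that alias shadows a real column of the same
/// name (masked_columns); array-joined names are handled separately.
inline NameSet requiredColumnsSketch(const NameSet & referenced, const NameSet & complex_aliases,
                                     const NameSet & masked_columns, const NameSet & array_join_columns)
{
    NameSet required;
    for (const auto & name : referenced)
    {
        if (array_join_columns.count(name))
            continue;
        if (!complex_aliases.count(name) || masked_columns.count(name))
            required.insert(name);
    }
    return required;
}
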
- if (array_join_columns.count(name) || array_join_columns.count(table_name)) - continue; - - if (!complex_aliases.count(name) || masked_columns.count(name)) - required.insert(name); - } - return required; -} - -std::ostream & operator << (std::ostream & os, const ColumnNamesContext & cols) -{ - os << "required_names: "; - for (const auto & pr : cols.required_names) - { - os << "'" << pr.first << "'"; - for (auto & alias : pr.second.aliases) - os << "/'" << alias << "'"; - } - os << " source_tables: "; - for (const auto & x : cols.tables) - { - auto alias = x.alias(); - auto name = x.name(); - if (alias && name) - os << "'" << *alias << "'/'" << *name << "' "; - else if (alias) - os << "'" << *alias << "' "; - else if (name) - os << "'" << *name << "' "; - } - os << "table_aliases: "; - for (const auto & x : cols.table_aliases) - os << "'" << x << "' "; - os << "complex_aliases: "; - for (const auto & x : cols.complex_aliases) - os << "'" << x << "' "; - os << "masked_columns: "; - for (const auto & x : cols.masked_columns) - os << "'" << x << "' "; - os << "array_join_columns: "; - for (const auto & x : cols.array_join_columns) - os << "'" << x << "' "; - return os; -} - -} diff --git a/dbms/src/Interpreters/ColumnNamesContext.h b/dbms/src/Interpreters/ColumnNamesContext.h deleted file mode 100644 index c30102cf8d7..00000000000 --- a/dbms/src/Interpreters/ColumnNamesContext.h +++ /dev/null @@ -1,89 +0,0 @@ -#pragma once - -#include -#include - -#include -#include -#include -#include - -namespace DB -{ - -/// Information about table and column names extracted from ASTSelectQuery block. Do not include info from subselects. -struct ColumnNamesContext -{ - struct JoinedTable - { - const ASTTableExpression * expr = nullptr; - const ASTTableJoin * join = nullptr; - - std::optional alias() const - { - String alias; - if (expr) - { - if (expr->database_and_table_name) - alias = expr->database_and_table_name->tryGetAlias(); - else if (expr->table_function) - alias = expr->table_function->tryGetAlias(); - else if (expr->subquery) - alias = expr->subquery->tryGetAlias(); - } - if (!alias.empty()) - return alias; - return {}; - } - - std::optional name() const - { - if (expr) - return tryGetIdentifierName(expr->database_and_table_name); - return {}; - } - - std::optional joinKind() const - { - if (join) - return join->kind; - return {}; - } - }; - - struct NameInfo - { - std::set aliases; - size_t appears = 0; - - void addInclusion(const String & alias) - { - if (!alias.empty()) - aliases.insert(alias); - ++appears; - } - }; - - std::unordered_map required_names; - NameSet table_aliases; - NameSet private_aliases; - NameSet complex_aliases; - NameSet masked_columns; - NameSet array_join_columns; - std::vector tables; /// ordered list of visited tables in FROM section with joins - bool has_table_join = false; - bool has_array_join = false; - - bool addTableAliasIfAny(const IAST & ast); - bool addColumnAliasIfAny(const IAST & ast); - void addColumnIdentifier(const ASTIdentifier & node); - bool addArrayJoinAliasIfAny(const IAST & ast); - void addArrayJoinIdentifier(const ASTIdentifier & node); - - NameSet requiredColumns() const; - size_t nameInclusion(const String & name) const; -}; - -std::ostream & operator << (std::ostream & os, const ColumnNamesContext & cols); - -} diff --git a/dbms/src/Interpreters/InDepthNodeVisitor.h b/dbms/src/Interpreters/InDepthNodeVisitor.h deleted file mode 100644 index 7bb4f5e4d54..00000000000 --- a/dbms/src/Interpreters/InDepthNodeVisitor.h +++ /dev/null @@ -1,80 +0,0 @@ 
-#pragma once - -#include -#include -#include -#include - -namespace DB -{ - -/// Visits AST tree in depth, call functions for nodes according to Matcher type data. -/// You need to define Data, visit() and needChildVisit() in Matcher class. -template -class InDepthNodeVisitor -{ -public: - using Data = typename Matcher::Data; - - InDepthNodeVisitor(Data & data_, std::ostream * ostr_ = nullptr) - : data(data_), - visit_depth(0), - ostr(ostr_) - {} - - void visit(T & ast) - { - DumpASTNode dump(*ast, ostr, visit_depth, typeid(Matcher).name()); - - if constexpr (!_top_to_bottom) - visitChildren(ast); - - Matcher::visit(ast, data); - - if constexpr (_top_to_bottom) - visitChildren(ast); - } - -private: - Data & data; - size_t visit_depth; - std::ostream * ostr; - - void visitChildren(T & ast) - { - for (auto & child : ast->children) - if (Matcher::needChildVisit(ast, child)) - visit(child); - } -}; - -template -using ConstInDepthNodeVisitor = InDepthNodeVisitor; - -/// Simple matcher for one node type without complex traversal logic. -template -class OneTypeMatcher -{ -public: - using Data = Data_; - using TypeToVisit = typename Data::TypeToVisit; - - static bool needChildVisit(const ASTPtr & node, const ASTPtr &) - { - if (node && node->as()) - return visit_children; - - return true; - } - - static void visit(T & ast, Data & data) - { - if (auto * t = typeid_cast(ast.get())) - data.visit(*t, ast); - } -}; - -template -using ConstOneTypeMatcher = OneTypeMatcher; - -} diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.cpp b/dbms/src/Interpreters/InterpreterAlterQuery.cpp deleted file mode 100644 index ddf1e27af87..00000000000 --- a/dbms/src/Interpreters/InterpreterAlterQuery.cpp +++ /dev/null @@ -1,289 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; - extern const int SUPPORT_IS_DISABLED; - extern const int INCORRECT_QUERY; -} - - -InterpreterAlterQuery::InterpreterAlterQuery(const ASTPtr & query_ptr_, const Context & context_) - : query_ptr(query_ptr_), context(context_) -{ -} - -BlockIO InterpreterAlterQuery::execute() -{ - const auto & alter = query_ptr->as(); - - if (!alter.cluster.empty()) - return executeDDLQueryOnCluster(query_ptr, context, getRequiredAccess()); - - context.checkAccess(getRequiredAccess()); - auto table_id = context.resolveStorageID(alter, Context::ResolveOrdinary); - StoragePtr table = DatabaseCatalog::instance().getTable(table_id); - - /// Add default database to table identifiers that we can encounter in e.g. default expressions, - /// mutation expression, etc. 
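
The InDepthNodeVisitor template removed above expects a Matcher that supplies Data, visit() and needChildVisit(). A toy Matcher that merely counts nodes shows the contract; Node and CountingMatcher are illustrative stand-ins, not types from the source:

#include <cstddef>
#include <memory>
#include <vector>

struct Node { std::vector<std::shared_ptr<Node>> children; };
using NodePtr = std::shared_ptr<Node>;

struct CountingMatcher
{
    struct Data { size_t visited = 0; };

    /// Visit every child; a real matcher can prune subtrees here.
    static bool needChildVisit(const NodePtr &, const NodePtr &) { return true; }

    static void visit(const NodePtr &, Data & data) { ++data.visited; }
};

/// Usage mirrors the deleted template:
///     CountingMatcher::Data data;
///     InDepthNodeVisitor<CountingMatcher, true, NodePtr>(data).visit(root);
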
- AddDefaultDatabaseVisitor visitor(table_id.getDatabaseName()); - ASTPtr command_list_ptr = alter.command_list->ptr(); - visitor.visit(command_list_ptr); - - AlterCommands alter_commands; - PartitionCommands partition_commands; - MutationCommands mutation_commands; - LiveViewCommands live_view_commands; - for (ASTAlterCommand * command_ast : alter.command_list->commands) - { - if (auto alter_command = AlterCommand::parse(command_ast)) - alter_commands.emplace_back(std::move(*alter_command)); - else if (auto partition_command = PartitionCommand::parse(command_ast)) - { - if (partition_command->type == PartitionCommand::DROP_DETACHED_PARTITION - && !context.getSettingsRef().allow_drop_detached) - throw DB::Exception("Cannot execute query: DROP DETACHED PART is disabled " - "(see allow_drop_detached setting)", ErrorCodes::SUPPORT_IS_DISABLED); - partition_commands.emplace_back(std::move(*partition_command)); - } - else if (auto mut_command = MutationCommand::parse(command_ast)) - { - if (mut_command->type == MutationCommand::MATERIALIZE_TTL && !table->hasAnyTTL()) - throw Exception("Cannot MATERIALIZE TTL as there is no TTL set for table " - + table->getStorageID().getNameForLogs(), ErrorCodes::INCORRECT_QUERY); - - mutation_commands.emplace_back(std::move(*mut_command)); - } - else if (auto live_view_command = LiveViewCommand::parse(command_ast)) - live_view_commands.emplace_back(std::move(*live_view_command)); - else - throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR); - } - - if (!mutation_commands.empty()) - { - auto table_lock_holder = table->lockStructureForShare(context.getCurrentQueryId()); - MutationsInterpreter(table, mutation_commands, context, false).validate(table_lock_holder); - table->mutate(mutation_commands, context); - } - - if (!partition_commands.empty()) - { - table->alterPartition(query_ptr, partition_commands, context); - } - - if (!live_view_commands.empty()) - { - live_view_commands.validate(*table); - for (const LiveViewCommand & command : live_view_commands) - { - auto live_view = std::dynamic_pointer_cast(table); - switch (command.type) - { - case LiveViewCommand::REFRESH: - live_view->refresh(); - break; - } - } - } - - if (!alter_commands.empty()) - { - auto table_lock_holder = table->lockAlterIntention(); - StorageInMemoryMetadata metadata = table->getInMemoryMetadata(); - alter_commands.validate(metadata, context); - alter_commands.prepare(metadata); - table->checkAlterIsPossible(alter_commands, context.getSettingsRef()); - table->alter(alter_commands, context, table_lock_holder); - } - - return {}; -} - - -AccessRightsElements InterpreterAlterQuery::getRequiredAccess() const -{ - AccessRightsElements required_access; - const auto & alter = query_ptr->as(); - for (ASTAlterCommand * command : alter.command_list->commands) - boost::range::push_back(required_access, getRequiredAccessForCommand(*command, alter.database, alter.table)); - return required_access; -} - - -AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const ASTAlterCommand & command, const String & database, const String & table) -{ - AccessRightsElements required_access; - - auto column_name = [&]() -> String { return getIdentifierName(command.column); }; - auto column_name_from_col_decl = [&]() -> std::string_view { return command.col_decl->as().name; }; - auto column_names_from_update_assignments = [&]() -> std::vector - { - std::vector column_names; - for (const ASTPtr & assignment_ast : command.update_assignments->children) - 
column_names.emplace_back(assignment_ast->as().column_name); - return column_names; - }; - - switch (command.type) - { - case ASTAlterCommand::UPDATE: - { - required_access.emplace_back(AccessType::UPDATE, database, table, column_names_from_update_assignments()); - break; - } - case ASTAlterCommand::DELETE: - { - required_access.emplace_back(AccessType::DELETE, database, table); - break; - } - case ASTAlterCommand::ADD_COLUMN: - { - required_access.emplace_back(AccessType::ADD_COLUMN, database, table, column_name_from_col_decl()); - break; - } - case ASTAlterCommand::DROP_COLUMN: - { - if (command.clear_column) - required_access.emplace_back(AccessType::CLEAR_COLUMN, database, table, column_name()); - else - required_access.emplace_back(AccessType::DROP_COLUMN, database, table, column_name()); - break; - } - case ASTAlterCommand::MODIFY_COLUMN: - { - required_access.emplace_back(AccessType::MODIFY_COLUMN, database, table, column_name_from_col_decl()); - break; - } - case ASTAlterCommand::COMMENT_COLUMN: - { - required_access.emplace_back(AccessType::COMMENT_COLUMN, database, table, column_name()); - break; - } - case ASTAlterCommand::MODIFY_ORDER_BY: - { - required_access.emplace_back(AccessType::ALTER_ORDER_BY, database, table); - break; - } - case ASTAlterCommand::ADD_INDEX: - { - required_access.emplace_back(AccessType::ADD_INDEX, database, table); - break; - } - case ASTAlterCommand::DROP_INDEX: - { - if (command.clear_index) - required_access.emplace_back(AccessType::CLEAR_INDEX, database, table); - else - required_access.emplace_back(AccessType::DROP_INDEX, database, table); - break; - } - case ASTAlterCommand::MATERIALIZE_INDEX: - { - required_access.emplace_back(AccessType::MATERIALIZE_INDEX, database, table); - break; - } - case ASTAlterCommand::ADD_CONSTRAINT: - { - required_access.emplace_back(AccessType::ADD_CONSTRAINT, database, table); - break; - } - case ASTAlterCommand::DROP_CONSTRAINT: - { - required_access.emplace_back(AccessType::DROP_CONSTRAINT, database, table); - break; - } - case ASTAlterCommand::MODIFY_TTL: - { - required_access.emplace_back(AccessType::MODIFY_TTL, database, table); - break; - } - case ASTAlterCommand::MATERIALIZE_TTL: - { - required_access.emplace_back(AccessType::MATERIALIZE_TTL, database, table); - break; - } - case ASTAlterCommand::MODIFY_SETTING: - { - required_access.emplace_back(AccessType::MODIFY_SETTING, database, table); - break; - } - case ASTAlterCommand::ATTACH_PARTITION: - { - required_access.emplace_back(AccessType::INSERT, database, table); - break; - } - case ASTAlterCommand::DROP_PARTITION: [[fallthrough]]; - case ASTAlterCommand::DROP_DETACHED_PARTITION: - { - required_access.emplace_back(AccessType::DELETE, database, table); - break; - } - case ASTAlterCommand::MOVE_PARTITION: - { - if ((command.move_destination_type == PartDestinationType::DISK) - || (command.move_destination_type == PartDestinationType::VOLUME)) - { - required_access.emplace_back(AccessType::MOVE_PARTITION, database, table); - } - else if (command.move_destination_type == PartDestinationType::TABLE) - { - required_access.emplace_back(AccessType::SELECT | AccessType::DELETE, database, table); - required_access.emplace_back(AccessType::INSERT, command.to_database, command.to_table); - } - break; - } - case ASTAlterCommand::REPLACE_PARTITION: - { - required_access.emplace_back(AccessType::SELECT, command.from_database, command.from_table); - required_access.emplace_back(AccessType::DELETE | AccessType::INSERT, database, table); - break; - } - case 
ASTAlterCommand::FETCH_PARTITION: - { - required_access.emplace_back(AccessType::FETCH_PARTITION, database, table); - break; - } - case ASTAlterCommand::FREEZE_PARTITION: [[fallthrough]]; - case ASTAlterCommand::FREEZE_ALL: - { - required_access.emplace_back(AccessType::FREEZE_PARTITION, database, table); - break; - } - case ASTAlterCommand::MODIFY_QUERY: - { - required_access.emplace_back(AccessType::MODIFY_VIEW_QUERY, database, table); - break; - } - case ASTAlterCommand::LIVE_VIEW_REFRESH: - { - required_access.emplace_back(AccessType::REFRESH_VIEW, database, table); - break; - } - case ASTAlterCommand::NO_TYPE: break; - } - - return required_access; -} - -} diff --git a/dbms/src/Interpreters/Join.cpp b/dbms/src/Interpreters/Join.cpp deleted file mode 100644 index e60f532d517..00000000000 --- a/dbms/src/Interpreters/Join.cpp +++ /dev/null @@ -1,1509 +0,0 @@ -#include - -#include - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_TYPE_OF_FIELD; - extern const int NOT_IMPLEMENTED; - extern const int UNSUPPORTED_JOIN_KEYS; - extern const int LOGICAL_ERROR; - extern const int SET_SIZE_LIMIT_EXCEEDED; - extern const int TYPE_MISMATCH; -} - - -static ColumnPtr filterWithBlanks(ColumnPtr src_column, const IColumn::Filter & filter, bool inverse_filter = false) -{ - ColumnPtr column = src_column->convertToFullColumnIfConst(); - MutableColumnPtr mut_column = column->cloneEmpty(); - mut_column->reserve(column->size()); - - if (inverse_filter) - { - for (size_t row = 0; row < filter.size(); ++row) - { - if (filter[row]) - mut_column->insertDefault(); - else - mut_column->insertFrom(*column, row); - } - } - else - { - for (size_t row = 0; row < filter.size(); ++row) - { - if (filter[row]) - mut_column->insertFrom(*column, row); - else - mut_column->insertDefault(); - } - } - - return mut_column; -} - -static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool nullable) -{ - if (nullable) - { - JoinCommon::convertColumnToNullable(column); - } - else - { - /// We have to replace values masked by NULLs with defaults. 
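
The filterWithBlanks() helper above keeps the column size unchanged: rows rejected by the filter are replaced with default values rather than dropped. A plain-vector sketch of the same behaviour, assuming int elements and 0 as the default value:

#include <cstddef>
#include <cstdint>
#include <vector>

using Filter = std::vector<uint8_t>;

/// For filter[row] == 1 keep src[row], otherwise substitute the default;
/// inverse_filter swaps the two cases (as used for the NULL map above).
inline std::vector<int> filterWithBlanksSketch(const std::vector<int> & src, const Filter & filter, bool inverse_filter = false)
{
    std::vector<int> result;
    result.reserve(src.size());
    for (size_t row = 0; row < filter.size(); ++row)
    {
        bool keep = filter[row] ? !inverse_filter : inverse_filter;
        result.push_back(keep ? src[row] : 0);
    }
    return result;
}
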
- if (column.column) - if (auto * nullable_column = checkAndGetColumn(*column.column)) - column.column = filterWithBlanks(column.column, nullable_column->getNullMapColumn().getData(), true); - - JoinCommon::removeColumnNullability(column); - } - - return std::move(column); -} - -static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool nullable, const ColumnUInt8 & negative_null_map) -{ - if (nullable) - { - JoinCommon::convertColumnToNullable(column); - if (column.type->isNullable() && !negative_null_map.empty()) - { - MutableColumnPtr mutable_column = (*std::move(column.column)).mutate(); - assert_cast(*mutable_column).applyNegatedNullMap(negative_null_map); - column.column = std::move(mutable_column); - } - } - else - JoinCommon::removeColumnNullability(column); - - return std::move(column); -} - -static void changeNullability(MutableColumnPtr & mutable_column) -{ - ColumnPtr column = std::move(mutable_column); - if (auto * nullable = checkAndGetColumn(*column)) - column = nullable->getNestedColumnPtr(); - else - column = makeNullable(column); - - mutable_column = (*std::move(column)).mutate(); -} - -static ColumnPtr emptyNotNullableClone(const ColumnPtr & column) -{ - if (column->isNullable()) - return checkAndGetColumn(*column)->getNestedColumnPtr()->cloneEmpty(); - return column->cloneEmpty(); -} - -static ColumnPtr changeLowCardinality(const ColumnPtr & column, const ColumnPtr & dst_sample) -{ - if (dst_sample->lowCardinality()) - { - MutableColumnPtr lc = dst_sample->cloneEmpty(); - typeid_cast(*lc).insertRangeFromFullColumn(*column, 0, column->size()); - return lc; - } - - return column->convertToFullColumnIfLowCardinality(); -} - -/// Change both column nullability and low cardinality -static void changeColumnRepresentation(const ColumnPtr & src_column, ColumnPtr & dst_column) -{ - bool nullable_src = src_column->isNullable(); - bool nullable_dst = dst_column->isNullable(); - - ColumnPtr dst_not_null = emptyNotNullableClone(dst_column); - bool lowcard_src = emptyNotNullableClone(src_column)->lowCardinality(); - bool lowcard_dst = dst_not_null->lowCardinality(); - bool change_lowcard = (!lowcard_src && lowcard_dst) || (lowcard_src && !lowcard_dst); - - if (nullable_src && !nullable_dst) - { - auto * nullable = checkAndGetColumn(*src_column); - if (change_lowcard) - dst_column = changeLowCardinality(nullable->getNestedColumnPtr(), dst_column); - else - dst_column = nullable->getNestedColumnPtr(); - } - else if (!nullable_src && nullable_dst) - { - if (change_lowcard) - dst_column = makeNullable(changeLowCardinality(src_column, dst_not_null)); - else - dst_column = makeNullable(src_column); - } - else /// same nullability - { - if (change_lowcard) - { - if (auto * nullable = checkAndGetColumn(*src_column)) - { - dst_column = makeNullable(changeLowCardinality(nullable->getNestedColumnPtr(), dst_not_null)); - assert_cast(*dst_column->assumeMutable()).applyNullMap(nullable->getNullMapColumn()); - } - else - dst_column = changeLowCardinality(src_column, dst_not_null); - } - else - dst_column = src_column; - } -} - - -Join::Join(std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_) - : table_join(table_join_) - , kind(table_join->kind()) - , strictness(table_join->strictness()) - , key_names_right(table_join->keyNamesRight()) - , nullable_right_side(table_join->forceNullableRight()) - , nullable_left_side(table_join->forceNullableLeft()) - , any_take_last_row(any_take_last_row_) - , 
asof_inequality(table_join->getAsofInequality()) - , data(std::make_shared()) - , log(&Logger::get("Join")) -{ - setSampleBlock(right_sample_block); -} - - -Join::Type Join::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes) -{ - size_t keys_size = key_columns.size(); - - if (keys_size == 0) - return Type::CROSS; - - bool all_fixed = true; - size_t keys_bytes = 0; - key_sizes.resize(keys_size); - for (size_t j = 0; j < keys_size; ++j) - { - if (!key_columns[j]->isFixedAndContiguous()) - { - all_fixed = false; - break; - } - key_sizes[j] = key_columns[j]->sizeOfValueIfFixed(); - keys_bytes += key_sizes[j]; - } - - /// If there is one numeric key that fits in 64 bits - if (keys_size == 1 && key_columns[0]->isNumeric()) - { - size_t size_of_field = key_columns[0]->sizeOfValueIfFixed(); - if (size_of_field == 1) - return Type::key8; - if (size_of_field == 2) - return Type::key16; - if (size_of_field == 4) - return Type::key32; - if (size_of_field == 8) - return Type::key64; - if (size_of_field == 16) - return Type::keys128; - throw Exception("Logical error: numeric column has sizeOfField not in 1, 2, 4, 8, 16.", ErrorCodes::LOGICAL_ERROR); - } - - /// If the keys fit in N bits, we will use a hash table for N-bit-packed keys - if (all_fixed && keys_bytes <= 16) - return Type::keys128; - if (all_fixed && keys_bytes <= 32) - return Type::keys256; - - /// If there is a single string key, use a hash table of its values. - if (keys_size == 1 - && (typeid_cast(key_columns[0]) - || (isColumnConst(*key_columns[0]) && typeid_cast(&assert_cast(key_columns[0])->getDataColumn())))) - return Type::key_string; - - if (keys_size == 1 && typeid_cast(key_columns[0])) - return Type::key_fixed_string; - - /// Otherwise, we will use a set of cryptographic hashes of unambiguously serialized values.
- return Type::hashed; -} - -static const IColumn * extractAsofColumn(const ColumnRawPtrs & key_columns) -{ - return key_columns.back(); -} - -template -static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes) -{ - if constexpr (is_asof_join) - { - auto key_column_copy = key_columns; - auto key_size_copy = key_sizes; - key_column_copy.pop_back(); - key_size_copy.pop_back(); - return KeyGetter(key_column_copy, key_size_copy, nullptr); - } - else - return KeyGetter(key_columns, key_sizes, nullptr); -} - -template -struct KeyGetterForTypeImpl; - -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodOneNumber; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodOneNumber; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodOneNumber; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodOneNumber; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodString; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodFixedString; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodKeysFixed; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodKeysFixed; -}; -template struct KeyGetterForTypeImpl -{ - using Type = ColumnsHashing::HashMethodHashed; -}; - -template -struct KeyGetterForType -{ - using Value = typename Data::value_type; - using Mapped_t = typename Data::mapped_type; - using Mapped = std::conditional_t, const Mapped_t, Mapped_t>; - using Type = typename KeyGetterForTypeImpl::Type; -}; - - -void Join::init(Type type_) -{ - data->type = type_; - - if (kind == ASTTableJoin::Kind::Cross) - return; - joinDispatchInit(kind, strictness, data->maps); - joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { map.create(data->type); }); -} - -size_t Join::getTotalRowCount() const -{ - size_t res = 0; - - if (data->type == Type::CROSS) - { - for (const auto & block : data->blocks) - res += block.rows(); - } - else - { - joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { res += map.getTotalRowCount(data->type); }); - } - - return res; -} - -size_t Join::getTotalByteCount() const -{ - size_t res = 0; - - if (data->type == Type::CROSS) - { - for (const auto & block : data->blocks) - res += block.bytes(); - } - else - { - joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { res += map.getTotalByteCountImpl(data->type); }); - res += data->pool.size(); - } - - return res; -} - -void Join::setSampleBlock(const Block & block) -{ - /// You have to restore this lock if you call the function outside of ctor. 
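
Join::chooseMethod() above is a pure size-based dispatch. A compact sketch of the same decision table, assuming an enum mirroring Join::Type and precomputed key properties (the FixedString case is omitted for brevity):

#include <cstddef>

enum class JoinMethod { key8, key16, key32, key64, keys128, keys256, key_string, hashed, CROSS };

/// One fixed-width numeric key: use a table keyed by that exact width.
/// All-fixed composite keys that pack into 128/256 bits: keys128/keys256.
/// A single string key: key_string. Anything else: hashed, i.e. a table
/// over hashes of the serialized key values.
inline JoinMethod chooseMethodSketch(size_t num_keys, bool single_numeric_key, size_t numeric_key_size,
                                     bool all_keys_fixed, size_t total_key_bytes, bool single_string_key)
{
    if (num_keys == 0)
        return JoinMethod::CROSS;
    if (num_keys == 1 && single_numeric_key)
    {
        switch (numeric_key_size)
        {
            case 1: return JoinMethod::key8;
            case 2: return JoinMethod::key16;
            case 4: return JoinMethod::key32;
            case 8: return JoinMethod::key64;
            case 16: return JoinMethod::keys128;
        }
    }
    if (all_keys_fixed && total_key_bytes <= 16)
        return JoinMethod::keys128;
    if (all_keys_fixed && total_key_bytes <= 32)
        return JoinMethod::keys256;
    if (num_keys == 1 && single_string_key)
        return JoinMethod::key_string;
    return JoinMethod::hashed;
}
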
- //std::unique_lock lock(rwlock); - - LOG_DEBUG(log, "setSampleBlock: " << block.dumpStructure()); - - if (!empty()) - return; - - JoinCommon::splitAdditionalColumns(block, key_names_right, right_table_keys, sample_block_with_columns_to_add); - - initRequiredRightKeys(); - - JoinCommon::removeLowCardinalityInplace(right_table_keys); - initRightBlockStructure(data->sample_block); - - ColumnRawPtrs key_columns = JoinCommon::extractKeysForJoin(right_table_keys, key_names_right); - - JoinCommon::createMissedColumns(sample_block_with_columns_to_add); - if (nullable_right_side) - JoinCommon::convertColumnsToNullable(sample_block_with_columns_to_add); - - if (strictness == ASTTableJoin::Strictness::Asof) - { - if (kind != ASTTableJoin::Kind::Left and kind != ASTTableJoin::Kind::Inner) - throw Exception("ASOF only supports LEFT and INNER as base joins", ErrorCodes::NOT_IMPLEMENTED); - - const IColumn * asof_column = key_columns.back(); - size_t asof_size; - - asof_type = AsofRowRefs::getTypeSize(asof_column, asof_size); - if (!asof_type) - { - std::string msg = "ASOF join not supported for type: "; - msg += asof_column->getFamilyName(); - throw Exception(msg, ErrorCodes::BAD_TYPE_OF_FIELD); - } - - key_columns.pop_back(); - - if (key_columns.empty()) - throw Exception("ASOF join cannot be done without a joining column", ErrorCodes::LOGICAL_ERROR); - - /// this is going to set up the appropriate hash table for the direct lookup part of the join - /// However, this does not depend on the size of the asof join key (as that goes into the BST) - /// Therefore, add it back in such that it can be extracted appropriately from the full stored - /// key_columns and key_sizes - init(chooseMethod(key_columns, key_sizes)); - key_sizes.push_back(asof_size); - } - else - { - /// Choose data structure to use for JOIN. - init(chooseMethod(key_columns, key_sizes)); - } -} - -namespace -{ - /// Inserting an element into a hash table of the form `key -> reference to a string`, which will then be used by JOIN. - template - struct Inserter - { - static ALWAYS_INLINE void insertOne(const Join & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, - Arena & pool) - { - auto emplace_result = key_getter.emplaceKey(map, i, pool); - - if (emplace_result.isInserted() || join.anyTakeLastRow()) - new (&emplace_result.getMapped()) typename Map::mapped_type(stored_block, i); - } - - static ALWAYS_INLINE void insertAll(const Join &, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool) - { - auto emplace_result = key_getter.emplaceKey(map, i, pool); - - if (emplace_result.isInserted()) - new (&emplace_result.getMapped()) typename Map::mapped_type(stored_block, i); - else - { - /// The first element of the list is stored in the value of the hash table, the rest in the pool. 
- emplace_result.getMapped().insert({stored_block, i}, pool); - } - } - - static ALWAYS_INLINE void insertAsof(Join & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool, - const IColumn * asof_column) - { - auto emplace_result = key_getter.emplaceKey(map, i, pool); - typename Map::mapped_type * time_series_map = &emplace_result.getMapped(); - - if (emplace_result.isInserted()) - time_series_map = new (time_series_map) typename Map::mapped_type(join.getAsofType()); - time_series_map->insert(join.getAsofType(), asof_column, stored_block, i); - } - }; - - - template - void NO_INLINE insertFromBlockImplTypeCase( - Join & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, - const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) - { - [[maybe_unused]] constexpr bool mapped_one = std::is_same_v || - std::is_same_v; - constexpr bool is_asof_join = STRICTNESS == ASTTableJoin::Strictness::Asof; - - const IColumn * asof_column [[maybe_unused]] = nullptr; - if constexpr (is_asof_join) - asof_column = extractAsofColumn(key_columns); - - auto key_getter = createKeyGetter(key_columns, key_sizes); - - for (size_t i = 0; i < rows; ++i) - { - if (has_null_map && (*null_map)[i]) - continue; - - if constexpr (is_asof_join) - Inserter::insertAsof(join, map, key_getter, stored_block, i, pool, asof_column); - else if constexpr (mapped_one) - Inserter::insertOne(join, map, key_getter, stored_block, i, pool); - else - Inserter::insertAll(join, map, key_getter, stored_block, i, pool); - } - } - - - template - void insertFromBlockImplType( - Join & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, - const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) - { - if (null_map) - insertFromBlockImplTypeCase(join, map, rows, key_columns, key_sizes, stored_block, null_map, pool); - else - insertFromBlockImplTypeCase(join, map, rows, key_columns, key_sizes, stored_block, null_map, pool); - } - - - template - void insertFromBlockImpl( - Join & join, Join::Type type, Maps & maps, size_t rows, const ColumnRawPtrs & key_columns, - const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) - { - switch (type) - { - case Join::Type::EMPTY: break; - case Join::Type::CROSS: break; /// Do nothing. We have already saved block, and it is enough. - - #define M(TYPE) \ - case Join::Type::TYPE: \ - insertFromBlockImplType>::Type>(\ - join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, pool); \ - break; - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - } - } -} - -void Join::initRequiredRightKeys() -{ - const Names & left_keys = table_join->keyNamesLeft(); - const Names & right_keys = table_join->keyNamesRight(); - NameSet required_keys(table_join->requiredRightKeys().begin(), table_join->requiredRightKeys().end()); - - for (size_t i = 0; i < right_keys.size(); ++i) - { - const String & right_key_name = right_keys[i]; - - if (required_keys.count(right_key_name) && !required_right_keys.has(right_key_name)) - { - const auto & right_key = right_table_keys.getByName(right_key_name); - required_right_keys.insert(right_key); - required_right_keys_sources.push_back(left_keys[i]); - } - } -} - -void Join::initRightBlockStructure(Block & saved_block_sample) -{ - /// We could remove key columns for LEFT | INNER HashJoin but we should keep them for JoinSwitcher (if any). 
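
insertFromBlockImplType() above instantiates the row loop twice, once per value of has_null_map, so the null-map check is resolved at compile time and the hot path carries no extra branch. The pattern in isolation, assuming a generic per-row callback:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

using NullMap = std::vector<uint8_t>;

template <bool has_null_map, typename F>
void forEachRowImpl(size_t rows, const NullMap * null_map, F && on_row)
{
    for (size_t i = 0; i < rows; ++i)
    {
        if constexpr (has_null_map)
            if ((*null_map)[i])
                continue; /// Keys with a NULL component never match.
        on_row(i);
    }
}

template <typename F>
void forEachRow(size_t rows, const NullMap * null_map, F && on_row)
{
    /// Dispatch once, outside the loop.
    if (null_map)
        forEachRowImpl<true>(rows, null_map, std::forward<F>(on_row));
    else
        forEachRowImpl<false>(rows, null_map, std::forward<F>(on_row));
}
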
- bool save_key_columns = !table_join->forceHashJoin() || isRightOrFull(kind); - if (save_key_columns) - { - saved_block_sample = right_table_keys.cloneEmpty(); - } - else if (strictness == ASTTableJoin::Strictness::Asof) - { - /// Save ASOF key - saved_block_sample.insert(right_table_keys.safeGetByPosition(right_table_keys.columns() - 1)); - } - - /// Save non key columns - for (auto & column : sample_block_with_columns_to_add) - saved_block_sample.insert(column); - - if (nullable_right_side) - JoinCommon::convertColumnsToNullable(saved_block_sample, (isFull(kind) ? right_table_keys.columns() : 0)); -} - -Block Join::structureRightBlock(const Block & block) const -{ - Block structured_block; - for (auto & sample_column : savedBlockSample().getColumnsWithTypeAndName()) - { - ColumnWithTypeAndName column = block.getByName(sample_column.name); - if (sample_column.column->isNullable()) - JoinCommon::convertColumnToNullable(column); - structured_block.insert(column); - } - - return structured_block; -} - -bool Join::addJoinedBlock(const Block & source_block, bool check_limits) -{ - if (empty()) - throw Exception("Logical error: Join was not initialized", ErrorCodes::LOGICAL_ERROR); - - /// There's no optimization for right side const columns. Remove constness if any. - Block block = materializeBlock(source_block); - size_t rows = block.rows(); - - ColumnRawPtrs key_columns = JoinCommon::materializeColumnsInplace(block, key_names_right); - - /// We will insert to the map only keys, where all components are not NULL. - ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); - - /// If RIGHT or FULL save blocks with nulls for NonJoinedBlockInputStream - UInt8 save_nullmap = 0; - if (isRightOrFull(kind) && null_map) - { - for (size_t i = 0; !save_nullmap && i < null_map->size(); ++i) - save_nullmap |= (*null_map)[i]; - } - - Block structured_block = structureRightBlock(block); - size_t total_rows = 0; - size_t total_bytes = 0; - - { - std::unique_lock lock(data->rwlock); - - data->blocks.emplace_back(std::move(structured_block)); - Block * stored_block = &data->blocks.back(); - - if (rows) - data->empty = false; - - if (kind != ASTTableJoin::Kind::Cross) - { - joinDispatch(kind, strictness, data->maps, [&](auto, auto strictness_, auto & map) - { - insertFromBlockImpl(*this, data->type, map, rows, key_columns, key_sizes, stored_block, null_map, data->pool); - }); - } - - if (save_nullmap) - data->blocks_nullmaps.emplace_back(stored_block, null_map_holder); - - if (!check_limits) - return true; - - /// TODO: Do not calculate them every time - total_rows = getTotalRowCount(); - total_bytes = getTotalByteCount(); - } - - return table_join->sizeLimits().check(total_rows, total_bytes, "JOIN", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED); -} - - -namespace -{ - -class AddedColumns -{ -public: - using TypeAndNames = std::vector>; - - AddedColumns(const Block & sample_block_with_columns_to_add, - const Block & block_with_columns_to_add, - const Block & block, - const Block & saved_block_sample, - const ColumnsWithTypeAndName & extras, - const Join & join_, - const ColumnRawPtrs & key_columns_, - const Sizes & key_sizes_) - : join(join_) - , key_columns(key_columns_) - , key_sizes(key_sizes_) - , rows_to_add(block.rows()) - , need_filter(false) - { - size_t num_columns_to_add = sample_block_with_columns_to_add.columns(); - - columns.reserve(num_columns_to_add); - type_name.reserve(num_columns_to_add); - right_indexes.reserve(num_columns_to_add); - - for 
(size_t i = 0; i < num_columns_to_add; ++i) - { - const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.safeGetByPosition(i); - - /// Don't insert column if it's in left block or not explicitly required. - if (!block.has(src_column.name) && block_with_columns_to_add.has(src_column.name)) - addColumn(src_column); - } - - for (auto & extra : extras) - addColumn(extra); - - for (auto & tn : type_name) - right_indexes.push_back(saved_block_sample.getPositionByName(tn.second)); - } - - size_t size() const { return columns.size(); } - - ColumnWithTypeAndName moveColumn(size_t i) - { - return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].first, type_name[i].second); - } - - template - void appendFromBlock(const Block & block, size_t row_num) - { - if constexpr (has_defaults) - applyLazyDefaults(); - - for (size_t j = 0; j < right_indexes.size(); ++j) - columns[j]->insertFrom(*block.getByPosition(right_indexes[j]).column, row_num); - } - - void appendDefaultRow() - { - ++lazy_defaults_count; - } - - void applyLazyDefaults() - { - if (lazy_defaults_count) - { - for (size_t j = 0; j < right_indexes.size(); ++j) - columns[j]->insertManyDefaults(lazy_defaults_count); - lazy_defaults_count = 0; - } - } - - const Join & join; - const ColumnRawPtrs & key_columns; - const Sizes & key_sizes; - size_t rows_to_add; - std::unique_ptr offsets_to_replicate; - bool need_filter; - -private: - TypeAndNames type_name; - MutableColumns columns; - std::vector right_indexes; - size_t lazy_defaults_count = 0; - - void addColumn(const ColumnWithTypeAndName & src_column) - { - columns.push_back(src_column.column->cloneEmpty()); - columns.back()->reserve(src_column.column->size()); - type_name.emplace_back(src_column.type, src_column.name); - } -}; - -template -void addFoundRowAll(const typename Map::mapped_type & mapped, AddedColumns & added, IColumn::Offset & current_offset) -{ - if constexpr (add_missing) - added.applyLazyDefaults(); - - for (auto it = mapped.begin(); it.ok(); ++it) - { - added.appendFromBlock(*it->block, it->row_num); - ++current_offset; - } -}; - -template -void addNotFoundRow(AddedColumns & added [[maybe_unused]], IColumn::Offset & current_offset [[maybe_unused]]) -{ - if constexpr (add_missing) - { - added.appendDefaultRow(); - if constexpr (need_offset) - ++current_offset; - } -} - -template -void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]) -{ - if constexpr (need_filter) - filter[pos] = 1; -} - - -/// Joins right table columns which indexes are present in right_indexes using specified map. -/// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). 
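
AddedColumns above batches consecutive misses: appendDefaultRow() only counts them, and applyLazyDefaults() later materializes the whole run with a single insertManyDefaults() call per column. The idea, reduced to one vector of ints (LazyDefaultsSketch is an illustrative name):

#include <cstddef>
#include <vector>

class LazyDefaultsSketch
{
public:
    void appendValue(std::vector<int> & column, int value)
    {
        flush(column); /// Materialize pending defaults before a real value.
        column.push_back(value);
    }

    /// A miss: just count it, do not touch the column yet.
    void appendDefaultRow() { ++lazy_defaults_count; }

    /// One bulk insert per run of misses instead of one call per row.
    void flush(std::vector<int> & column)
    {
        if (lazy_defaults_count)
        {
            column.insert(column.end(), lazy_defaults_count, 0);
            lazy_defaults_count = 0;
        }
    }

private:
    size_t lazy_defaults_count = 0;
};
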
-template -NO_INLINE IColumn::Filter joinRightColumns(const Map & map, AddedColumns & added_columns, const ConstNullMapPtr & null_map [[maybe_unused]]) -{ - constexpr bool is_any_join = STRICTNESS == ASTTableJoin::Strictness::Any; - constexpr bool is_all_join = STRICTNESS == ASTTableJoin::Strictness::All; - constexpr bool is_asof_join = STRICTNESS == ASTTableJoin::Strictness::Asof; - constexpr bool is_semi_join = STRICTNESS == ASTTableJoin::Strictness::Semi; - constexpr bool is_anti_join = STRICTNESS == ASTTableJoin::Strictness::Anti; - constexpr bool left = KIND == ASTTableJoin::Kind::Left; - constexpr bool right = KIND == ASTTableJoin::Kind::Right; - constexpr bool full = KIND == ASTTableJoin::Kind::Full; - - constexpr bool add_missing = (left || full) && !is_semi_join; - constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right); - - size_t rows = added_columns.rows_to_add; - IColumn::Filter filter; - if constexpr (need_filter) - filter = IColumn::Filter(rows, 0); - - Arena pool; - - if constexpr (need_replication) - added_columns.offsets_to_replicate = std::make_unique(rows); - - const IColumn * asof_column [[maybe_unused]] = nullptr; - if constexpr (is_asof_join) - asof_column = extractAsofColumn(added_columns.key_columns); - - auto key_getter = createKeyGetter(added_columns.key_columns, added_columns.key_sizes); - - IColumn::Offset current_offset = 0; - - for (size_t i = 0; i < rows; ++i) - { - if constexpr (has_null_map) - { - if ((*null_map)[i]) - { - addNotFoundRow(added_columns, current_offset); - - if constexpr (need_replication) - (*added_columns.offsets_to_replicate)[i] = current_offset; - continue; - } - } - - auto find_result = key_getter.findKey(map, i, pool); - - if (find_result.isFound()) - { - auto & mapped = find_result.getMapped(); - - if constexpr (is_asof_join) - { - const Join & join = added_columns.join; - if (const RowRef * found = mapped.findAsof(join.getAsofType(), join.getAsofInequality(), asof_column, i)) - { - setUsed(filter, i); - mapped.setUsed(); - added_columns.appendFromBlock(*found->block, found->row_num); - } - else - addNotFoundRow(added_columns, current_offset); - } - else if constexpr (is_all_join) - { - setUsed(filter, i); - mapped.setUsed(); - addFoundRowAll(mapped, added_columns, current_offset); - } - else if constexpr ((is_any_join || is_semi_join) && right) - { - /// Use first appeared left key + it needs left columns replication - if (mapped.setUsedOnce()) - { - setUsed(filter, i); - addFoundRowAll(mapped, added_columns, current_offset); - } - } - else if constexpr (is_any_join && KIND == ASTTableJoin::Kind::Inner) - { - /// Use first appeared left key only - if (mapped.setUsedOnce()) - { - setUsed(filter, i); - added_columns.appendFromBlock(*mapped.block, mapped.row_num); - } - } - else if constexpr (is_any_join && full) - { - /// TODO - } - else if constexpr (is_anti_join) - { - if constexpr (right) - mapped.setUsed(); - } - else /// ANY LEFT, SEMI LEFT, old ANY (RightAny) - { - setUsed(filter, i); - mapped.setUsed(); - added_columns.appendFromBlock(*mapped.block, mapped.row_num); - } - } - else - { - if constexpr (is_anti_join && left) - setUsed(filter, i); - addNotFoundRow(added_columns, current_offset); - } - - if constexpr (need_replication) - (*added_columns.offsets_to_replicate)[i] = current_offset; - } - - added_columns.applyLazyDefaults(); - return filter; -} - -template -IColumn::Filter joinRightColumnsSwitchNullability(const Map & map, AddedColumns & added_columns, const ConstNullMapPtr & 
null_map) -{ - if (added_columns.need_filter) - { - if (null_map) - return joinRightColumns(map, added_columns, null_map); - else - return joinRightColumns(map, added_columns, nullptr); - } - else - { - if (null_map) - return joinRightColumns(map, added_columns, null_map); - else - return joinRightColumns(map, added_columns, nullptr); - } -} - -template -IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_columns, Join::Type type, const ConstNullMapPtr & null_map) -{ - switch (type) - { - #define M(TYPE) \ - case Join::Type::TYPE: \ - return joinRightColumnsSwitchNullability>::Type>(\ - *maps_.TYPE, added_columns, null_map);\ - break; - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - - default: - throw Exception("Unsupported JOIN keys. Type: " + toString(static_cast(type)), ErrorCodes::UNSUPPORTED_JOIN_KEYS); - } -} - -} /// nameless - - -template -void Join::joinBlockImpl( - Block & block, - const Names & key_names_left, - const Block & block_with_columns_to_add, - const Maps & maps_) const -{ - constexpr bool is_any_join = STRICTNESS == ASTTableJoin::Strictness::Any; - constexpr bool is_all_join = STRICTNESS == ASTTableJoin::Strictness::All; - constexpr bool is_asof_join = STRICTNESS == ASTTableJoin::Strictness::Asof; - constexpr bool is_semi_join = STRICTNESS == ASTTableJoin::Strictness::Semi; - constexpr bool is_anti_join = STRICTNESS == ASTTableJoin::Strictness::Anti; - - constexpr bool left = KIND == ASTTableJoin::Kind::Left; - constexpr bool right = KIND == ASTTableJoin::Kind::Right; - constexpr bool inner = KIND == ASTTableJoin::Kind::Inner; - constexpr bool full = KIND == ASTTableJoin::Kind::Full; - - constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right); - constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left)); - - /// Rare case, when keys are constant or low cardinality. To avoid code bloat, simply materialize them. - Columns materialized_keys = JoinCommon::materializeColumns(block, key_names_left); - ColumnRawPtrs key_columns = JoinCommon::getRawPointers(materialized_keys); - - /// Keys with NULL value in any column won't join to anything. - ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); - - size_t existing_columns = block.columns(); - - /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. - * Because if they are constants, then in the "not joined" rows, they may have different values - * - default values, which can differ from the values of these constants. - */ - if constexpr (right || full) - { - materializeBlockInplace(block); - - if (nullable_left_side) - JoinCommon::convertColumnsToNullable(block); - } - - /** For LEFT/INNER JOIN, the saved blocks do not contain keys. - * For FULL/RIGHT JOIN, the saved blocks contain keys; - * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped. 
- * For ASOF, the last column is used as the ASOF column - */ - ColumnsWithTypeAndName extras; - if constexpr (is_asof_join) - extras.push_back(right_table_keys.getByName(key_names_right.back())); - - AddedColumns added_columns(sample_block_with_columns_to_add, block_with_columns_to_add, block, savedBlockSample(), - extras, *this, key_columns, key_sizes); - bool has_required_right_keys = (required_right_keys.columns() != 0); - added_columns.need_filter = need_filter || has_required_right_keys; - - IColumn::Filter row_filter = switchJoinRightColumns(maps_, added_columns, data->type, null_map); - - for (size_t i = 0; i < added_columns.size(); ++i) - block.insert(added_columns.moveColumn(i)); - - std::vector right_keys_to_replicate [[maybe_unused]]; - - if constexpr (need_filter) - { - /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. - for (size_t i = 0; i < existing_columns; ++i) - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(row_filter, -1); - - /// Add join key columns from right block if needed. - for (size_t i = 0; i < required_right_keys.columns(); ++i) - { - const auto & right_key = required_right_keys.getByPosition(i); - const auto & left_name = required_right_keys_sources[i]; - - const auto & col = block.getByName(left_name); - bool is_nullable = nullable_right_side || right_key.type->isNullable(); - block.insert(correctNullability({col.column, col.type, right_key.name}, is_nullable)); - } - } - else if (has_required_right_keys) - { - /// Some trash to represent IColumn::Filter as ColumnUInt8 needed for ColumnNullable::applyNullMap() - auto null_map_filter_ptr = ColumnUInt8::create(); - ColumnUInt8 & null_map_filter = assert_cast(*null_map_filter_ptr); - null_map_filter.getData().swap(row_filter); - const IColumn::Filter & filter = null_map_filter.getData(); - - /// Add join key columns from right block if needed. - for (size_t i = 0; i < required_right_keys.columns(); ++i) - { - const auto & right_key = required_right_keys.getByPosition(i); - const auto & left_name = required_right_keys_sources[i]; - - const auto & col = block.getByName(left_name); - bool is_nullable = nullable_right_side || right_key.type->isNullable(); - - ColumnPtr thin_column = filterWithBlanks(col.column, filter); - block.insert(correctNullability({thin_column, col.type, right_key.name}, is_nullable, null_map_filter)); - - if constexpr (need_replication) - right_keys_to_replicate.push_back(block.getPositionByName(right_key.name)); - } - } - - if constexpr (need_replication) - { - std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate; - - /// If ALL ... JOIN - we replicate all the columns except the new ones. - for (size_t i = 0; i < existing_columns; ++i) - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); - - /// Replicate additional right keys - for (size_t pos : right_keys_to_replicate) - block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); - } -} - - -void Join::joinBlockImplCross(Block & block) const -{ - /// Add new columns to the block. 
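-    /// Sizing illustration (assumed numbers): a left block of 2 rows crossed with saved right
-    /// blocks of 3 and 2 rows yields 2 * (3 + 2) = 10 result rows; the nested loops below
-    /// write each left row once per right row.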
- size_t num_existing_columns = block.columns(); - size_t num_columns_to_add = sample_block_with_columns_to_add.columns(); - - size_t rows_left = block.rows(); - - ColumnRawPtrs src_left_columns(num_existing_columns); - MutableColumns dst_columns(num_existing_columns + num_columns_to_add); - - for (size_t i = 0; i < num_existing_columns; ++i) - { - src_left_columns[i] = block.getByPosition(i).column.get(); - dst_columns[i] = src_left_columns[i]->cloneEmpty(); - } - - for (size_t i = 0; i < num_columns_to_add; ++i) - { - const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.getByPosition(i); - dst_columns[num_existing_columns + i] = src_column.column->cloneEmpty(); - block.insert(src_column); - } - - /// NOTE It would be better to use `reserve`, as well as `replicate` methods to duplicate the values of the left block. - - for (size_t i = 0; i < rows_left; ++i) - { - for (const Block & block_right : data->blocks) - { - size_t rows_right = block_right.rows(); - - for (size_t col_num = 0; col_num < num_existing_columns; ++col_num) - for (size_t j = 0; j < rows_right; ++j) - dst_columns[col_num]->insertFrom(*src_left_columns[col_num], i); - - for (size_t col_num = 0; col_num < num_columns_to_add; ++col_num) - { - const IColumn * column_right = block_right.getByPosition(col_num).column.get(); - - for (size_t j = 0; j < rows_right; ++j) - dst_columns[num_existing_columns + col_num]->insertFrom(*column_right, j); - } - } - } - - block = block.cloneWithColumns(std::move(dst_columns)); -} - -static void checkTypeOfKey(const Block & block_left, const Block & block_right) -{ - auto & [c1, left_type_origin, left_name] = block_left.safeGetByPosition(0); - auto & [c2, right_type_origin, right_name] = block_right.safeGetByPosition(0); - auto left_type = removeNullable(left_type_origin); - auto right_type = removeNullable(right_type_origin); - - if (!left_type->equals(*right_type)) - throw Exception("Type mismatch of columns to joinGet by: " - + left_name + " " + left_type->getName() + " at left, " - + right_name + " " + right_type->getName() + " at right", - ErrorCodes::TYPE_MISMATCH); -} - - -DataTypePtr Join::joinGetReturnType(const String & column_name) const -{ - std::shared_lock lock(data->rwlock); - - if (!sample_block_with_columns_to_add.has(column_name)) - throw Exception("StorageJoin doesn't contain column " + column_name, ErrorCodes::LOGICAL_ERROR); - return sample_block_with_columns_to_add.getByName(column_name).type; -} - - -template -void Join::joinGetImpl(Block & block, const String & column_name, const Maps & maps_) const -{ - joinBlockImpl( - block, {block.getByPosition(0).name}, {sample_block_with_columns_to_add.getByName(column_name)}, maps_); -} - - -// TODO: support composite key -// TODO: return multiple columns as named tuple -// TODO: return array of values when strictness == ASTTableJoin::Strictness::All -void Join::joinGet(Block & block, const String & column_name) const -{ - std::shared_lock lock(data->rwlock); - - if (key_names_right.size() != 1) - throw Exception("joinGet only supports StorageJoin containing exactly one key", ErrorCodes::LOGICAL_ERROR); - - checkTypeOfKey(block, right_table_keys); - - if ((strictness == ASTTableJoin::Strictness::Any || strictness == ASTTableJoin::Strictness::RightAny) && - kind == ASTTableJoin::Kind::Left) - { - joinGetImpl(block, column_name, std::get(data->maps)); - } - else - throw Exception("joinGet only supports StorageJoin of type Left Any", ErrorCodes::LOGICAL_ERROR); -} - - -void Join::joinBlock(Block & block, 
ExtraBlockPtr &) -{ - std::shared_lock lock(data->rwlock); - - const Names & key_names_left = table_join->keyNamesLeft(); - JoinCommon::checkTypesOfKeys(block, key_names_left, right_table_keys, key_names_right); - - if (joinDispatch(kind, strictness, data->maps, [&](auto kind_, auto strictness_, auto & map) - { - joinBlockImpl(block, key_names_left, sample_block_with_columns_to_add, map); - })) - { - /// Joined - } - else if (kind == ASTTableJoin::Kind::Cross) - joinBlockImplCross(block); - else - throw Exception("Logical error: unknown combination of JOIN", ErrorCodes::LOGICAL_ERROR); -} - - -void Join::joinTotals(Block & block) const -{ - JoinCommon::joinTotals(totals, sample_block_with_columns_to_add, key_names_right, block); -} - - -template -struct AdderNonJoined -{ - static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_right) - { - constexpr bool mapped_asof = std::is_same_v; - [[maybe_unused]] constexpr bool mapped_one = std::is_same_v || std::is_same_v; - - if constexpr (mapped_asof) - { - /// Do nothing - } - else if constexpr (mapped_one) - { - for (size_t j = 0; j < columns_right.size(); ++j) - { - const auto & mapped_column = mapped.block->getByPosition(j).column; - columns_right[j]->insertFrom(*mapped_column, mapped.row_num); - } - - ++rows_added; - } - else - { - for (auto it = mapped.begin(); it.ok(); ++it) - { - for (size_t j = 0; j < columns_right.size(); ++j) - { - const auto & mapped_column = it->block->getByPosition(j).column; - columns_right[j]->insertFrom(*mapped_column, it->row_num); - } - - ++rows_added; - } - } - } -}; - - -/// Stream from not joined earlier rows of the right table. -class NonJoinedBlockInputStream : public IBlockInputStream -{ -public: - NonJoinedBlockInputStream(const Join & parent_, const Block & result_sample_block_, UInt64 max_block_size_) - : parent(parent_) - , max_block_size(max_block_size_) - , result_sample_block(materializeBlock(result_sample_block_)) - { - bool remap_keys = parent.table_join->hasUsing(); - std::unordered_map left_to_right_key_remap; - - for (size_t i = 0; i < parent.table_join->keyNamesLeft().size(); ++i) - { - const String & left_key_name = parent.table_join->keyNamesLeft()[i]; - const String & right_key_name = parent.table_join->keyNamesRight()[i]; - - size_t left_key_pos = result_sample_block.getPositionByName(left_key_name); - size_t right_key_pos = parent.savedBlockSample().getPositionByName(right_key_name); - - if (remap_keys && !parent.required_right_keys.has(right_key_name)) - left_to_right_key_remap[left_key_pos] = right_key_pos; - } - - /// result_sample_block: left_sample_block + left expressions, right not key columns, required right keys - size_t left_columns_count = result_sample_block.columns() - - parent.sample_block_with_columns_to_add.columns() - parent.required_right_keys.columns(); - - for (size_t left_pos = 0; left_pos < left_columns_count; ++left_pos) - { - /// We need right 'x' for 'RIGHT JOIN ... USING(x)'. 
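-            /// Example (hypothetical query): in "SELECT x FROM t1 RIGHT JOIN t2 USING (x)",
-            /// non-joined right rows have no left-side 'x', so the result position of 'x'
-            /// is remapped to the saved right-side key column instead of being defaulted.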
- if (left_to_right_key_remap.count(left_pos)) - { - size_t right_key_pos = left_to_right_key_remap[left_pos]; - setRightIndex(right_key_pos, left_pos); - } - else - column_indices_left.emplace_back(left_pos); - } - - const auto & saved_block_sample = parent.savedBlockSample(); - for (size_t right_pos = 0; right_pos < saved_block_sample.columns(); ++right_pos) - { - const String & name = saved_block_sample.getByPosition(right_pos).name; - if (!result_sample_block.has(name)) - continue; - - size_t result_position = result_sample_block.getPositionByName(name); - - /// Don't remap left keys twice. We need only qualified right keys here - if (result_position < left_columns_count) - continue; - - setRightIndex(right_pos, result_position); - } - - if (column_indices_left.size() + column_indices_right.size() + same_result_keys.size() != result_sample_block.columns()) - throw Exception("Error in columns mapping in RIGHT|FULL JOIN. Left: " + toString(column_indices_left.size()) + - ", right: " + toString(column_indices_right.size()) + - ", same: " + toString(same_result_keys.size()) + - ", result: " + toString(result_sample_block.columns()), - ErrorCodes::LOGICAL_ERROR); - } - - String getName() const override { return "NonJoined"; } - - Block getHeader() const override { return result_sample_block; } - - -protected: - Block readImpl() override - { - if (parent.data->blocks.empty()) - return Block(); - return createBlock(); - } - -private: - const Join & parent; - UInt64 max_block_size; - - Block result_sample_block; - /// Indices of columns in result_sample_block that should be generated - std::vector column_indices_left; - /// Indices of columns that come from the right-side table: right_pos -> result_pos - std::unordered_map column_indices_right; - /// - std::unordered_map same_result_keys; - /// Which right columns (saved in parent) need nullability change before placing them in result block - std::vector right_nullability_adds; - std::vector right_nullability_removes; - /// Which right columns (saved in parent) need LowCardinality change before placing them in result block - std::vector> right_lowcard_changes; - - std::any position; - std::optional nulls_position; - - void setRightIndex(size_t right_pos, size_t result_position) - { - if (!column_indices_right.count(right_pos)) - { - column_indices_right[right_pos] = result_position; - extractColumnChanges(right_pos, result_position); - } - else - same_result_keys[result_position] = column_indices_right[right_pos]; - } - - void extractColumnChanges(size_t right_pos, size_t result_pos) - { - const auto & src = parent.savedBlockSample().getByPosition(right_pos).column; - const auto & dst = result_sample_block.getByPosition(result_pos).column; - - if (!src->isNullable() && dst->isNullable()) - right_nullability_adds.push_back(right_pos); - - if (src->isNullable() && !dst->isNullable()) - right_nullability_removes.push_back(right_pos); - - ColumnPtr src_not_null = emptyNotNullableClone(src); - ColumnPtr dst_not_null = emptyNotNullableClone(dst); - - if (src_not_null->lowCardinality() != dst_not_null->lowCardinality()) - right_lowcard_changes.push_back({right_pos, dst_not_null}); - } - - Block createBlock() - { - MutableColumns columns_right = parent.savedBlockSample().cloneEmptyColumns(); - - size_t rows_added = 0; - - auto fill_callback = [&](auto, auto strictness, auto & map) - { - rows_added = fillColumnsFromMap(map, columns_right); - }; - - if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps, fill_callback)) - throw 
Exception("Logical error: unknown JOIN strictness (must be on of: ANY, ALL, ASOF)", ErrorCodes::LOGICAL_ERROR); - - fillNullsFromBlocks(columns_right, rows_added); - - if (!rows_added) - return {}; - - for (size_t pos : right_nullability_removes) - changeNullability(columns_right[pos]); - - for (auto & [pos, dst_sample] : right_lowcard_changes) - columns_right[pos] = changeLowCardinality(std::move(columns_right[pos]), dst_sample)->assumeMutable(); - - for (size_t pos : right_nullability_adds) - changeNullability(columns_right[pos]); - - Block res = result_sample_block.cloneEmpty(); - - /// @note it's possible to make ColumnConst here and materialize it later - for (size_t pos : column_indices_left) - res.getByPosition(pos).column = res.getByPosition(pos).column->cloneResized(rows_added); - - for (auto & pr : column_indices_right) - { - auto & right_column = columns_right[pr.first]; - auto & result_column = res.getByPosition(pr.second).column; -#ifndef NDEBUG - if (result_column->getName() != right_column->getName()) - throw Exception("Wrong columns assign in RIGHT|FULL JOIN: " + result_column->getName() + - " " + right_column->getName(), ErrorCodes::LOGICAL_ERROR); -#endif - result_column = std::move(right_column); - } - - for (auto & pr : same_result_keys) - { - auto & src_column = res.getByPosition(pr.second).column; - auto & dst_column = res.getByPosition(pr.first).column; - changeColumnRepresentation(src_column, dst_column); - } - - return res; - } - - template - size_t fillColumnsFromMap(const Maps & maps, MutableColumns & columns_keys_and_right) - { - switch (parent.data->type) - { - #define M(TYPE) \ - case Join::Type::TYPE: \ - return fillColumns(*maps.TYPE, columns_keys_and_right); - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - default: - throw Exception("Unsupported JOIN keys. 
Type: " + toString(static_cast(parent.data->type)), - ErrorCodes::UNSUPPORTED_JOIN_KEYS); - } - - __builtin_unreachable(); - } - - template - size_t fillColumns(const Map & map, MutableColumns & columns_keys_and_right) - { - using Mapped = typename Map::mapped_type; - using Iterator = typename Map::const_iterator; - - size_t rows_added = 0; - - if (!position.has_value()) - position = std::make_any(map.begin()); - - Iterator & it = std::any_cast(position); - auto end = map.end(); - - for (; it != end; ++it) - { - const Mapped & mapped = it->getMapped(); - - if (mapped.getUsed()) - continue; - - AdderNonJoined::add(mapped, rows_added, columns_keys_and_right); - - if (rows_added >= max_block_size) - { - ++it; - break; - } - } - - return rows_added; - } - - void fillNullsFromBlocks(MutableColumns & columns_keys_and_right, size_t & rows_added) - { - if (!nulls_position.has_value()) - nulls_position = parent.data->blocks_nullmaps.begin(); - - auto end = parent.data->blocks_nullmaps.end(); - - for (auto & it = *nulls_position; it != end && rows_added < max_block_size; ++it) - { - const Block * block = it->first; - const NullMap & nullmap = assert_cast(*it->second).getData(); - - for (size_t row = 0; row < nullmap.size(); ++row) - { - if (nullmap[row]) - { - for (size_t col = 0; col < columns_keys_and_right.size(); ++col) - columns_keys_and_right[col]->insertFrom(*block->getByPosition(col).column, row); - ++rows_added; - } - } - } - } -}; - - -BlockInputStreamPtr Join::createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const -{ - if (table_join->strictness() == ASTTableJoin::Strictness::Asof || - table_join->strictness() == ASTTableJoin::Strictness::Semi) - return {}; - - if (isRightOrFull(table_join->kind())) - return std::make_shared(*this, result_sample_block, max_block_size); - return {}; -} - - -bool Join::hasStreamWithNonJoinedRows() const -{ - if (table_join->strictness() == ASTTableJoin::Strictness::Asof || - table_join->strictness() == ASTTableJoin::Strictness::Semi) - return false; - - return isRightOrFull(table_join->kind()); -} - -} diff --git a/dbms/src/Interpreters/Join.h b/dbms/src/Interpreters/Join.h deleted file mode 100644 index d9f0cfb55cb..00000000000 --- a/dbms/src/Interpreters/Join.h +++ /dev/null @@ -1,390 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include -#include - - -namespace DB -{ - -class AnalyzedJoin; - -namespace JoinStuff -{ - -/// Base class with optional flag attached that's needed to implement RIGHT and FULL JOINs. -template -struct WithFlags; - -template -struct WithFlags : T -{ - using Base = T; - using T::T; - - mutable std::atomic used {}; - void setUsed() const { used.store(true, std::memory_order_relaxed); } /// Could be set simultaneously from different threads. 
- bool getUsed() const { return used; }
-
-    bool setUsedOnce() const
-    {
-        /// fast check to prevent heavy CAS with seq_cst order
-        if (used.load(std::memory_order_relaxed))
-            return false;
-
-        bool expected = false;
-        return used.compare_exchange_strong(expected, true);
-    }
-};
-
-template <typename T>
-struct WithFlags<T, false> : T
-{
-    using Base = T;
-    using T::T;
-
-    void setUsed() const {}
-    bool getUsed() const { return true; }
-    bool setUsedOnce() const { return true; }
-};
-
-using MappedOne = WithFlags<RowRef, false>;
-using MappedAll = WithFlags<RowRefList, false>;
-using MappedOneFlagged = WithFlags<RowRef, true>;
-using MappedAllFlagged = WithFlags<RowRefList, true>;
-using MappedAsof = WithFlags<AsofRowRefs, false>;
-
-}
-
-/** Data structure for implementation of JOIN.
- * It is just a hash table: keys -> rows of the joined ("right") table.
- * Additionally, CROSS JOIN is supported: instead of a hash table, it uses just a set of blocks without keys.
- *
- * JOINs could be of these types:
- * - ALL × LEFT/INNER/RIGHT/FULL
- * - ANY × LEFT/INNER/RIGHT
- * - SEMI/ANTI × LEFT/RIGHT
- * - ASOF × LEFT/INNER
- * - CROSS
- *
- * ALL means the usual JOIN: rows are multiplied by the number of matching rows from the "right" table.
- * ANY uses one row per unique key from the right table. For LEFT JOIN it is any row (with the needed join key) from the right table;
- * for RIGHT JOIN it is any row from the left table; and for INNER it is any row from the right and any row from the left.
- * SEMI JOIN filters the left table by keys that are present in the right table (for LEFT JOIN), and filters the right table
- * by keys from the left table (for RIGHT JOIN). In other words, SEMI JOIN returns only rows whose join keys are present in the other table.
- * ANTI JOIN is the same as SEMI JOIN, but returns rows with join keys that are NOT present in the other table.
- * SEMI/ANTI JOINs allow getting values from both tables: for the filtered table, any row with the same join key is taken;
- * for ANTI JOIN, defaults are returned for the other table's columns.
- * ASOF JOIN is a non-equi join: for one key column it finds the nearest value to join according to the join inequality.
- * It's expected that ANY|SEMI LEFT JOIN is more efficient than the ALL one.
- *
- * If INNER is specified - leave only rows that have matching rows from the "right" table.
- * If LEFT is specified - when there is no matching row in the "right" table, fill it with default values instead.
- * If RIGHT is specified - first process as INNER, but track which rows from the right table were joined,
- * and at the end, add rows from the right table that were not joined and substitute default values for columns of the left table.
- * If FULL is specified - first process as LEFT, but track which rows from the right table were joined,
- * and at the end, add rows from the right table that were not joined and substitute default values for columns of the left table.
- *
- * Thus, LEFT and RIGHT JOINs are not symmetric in terms of implementation.
- *
- * All JOINs (except CROSS) are done by an equality condition on keys (equijoin).
- * Non-equality and other conditions are not supported.
- *
- * Implementation:
- *
- * 1. Build a hash table in memory from the "right" table.
- * This hash table is in the form of keys -> row in case of ANY or keys -> [rows...] in case of ALL.
- * This is done in the insertFromBlock method.
- *
- * 2. Process the "left" table and join corresponding rows from the "right" table by lookups in the map.
- * This is done in the joinBlock methods.
- *
- * In case of ANY LEFT JOIN - form new columns with found values or default values.
- * This is the simplest case; the number of rows in the left table does not change.
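- *
- * Illustrative queries (tables t1(k, x) and t2(k, y) here are assumed examples, not from this file):
- *     SELECT x, y FROM t1 ANY LEFT JOIN t2 USING (k)   -- at most one right row per left row
- *     SELECT x, y FROM t1 ALL INNER JOIN t2 USING (k)  -- left rows multiplied by the match count
- *     SELECT x FROM t1 SEMI LEFT JOIN t2 USING (k)     -- left rows whose key exists in t2
- *     SELECT x FROM t1 ANTI LEFT JOIN t2 USING (k)     -- left rows whose key is absent from t2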
- *
- * In case of ANY INNER JOIN - form new columns with found values,
- * and also build a filter marking the rows where nothing was found.
- * Then filter columns of the "left" table.
- *
- * In case of ALL ... JOIN - form new columns with all found rows,
- * and also fill the 'offsets' array, describing how many times we need to replicate values of the "left" table.
- * Then replicate columns of the "left" table.
- *
- * How Nullable keys are processed:
- *
- * NULLs never join to anything, even to each other.
- * While building the map, we just skip keys with a NULL value in any component.
- * During joining, we simply treat rows with any NULLs in the key as non-joined.
- *
- * Default values for outer joins (LEFT, RIGHT, FULL):
- *
- * Behaviour is controlled by the 'join_use_nulls' setting.
- * If it is false, we substitute the (global) default value for the data type for non-joined rows
- * (zero, empty string, etc., and NULL for Nullable data types).
- * If it is true, we always generate a Nullable column and substitute NULLs for non-joined rows,
- * as in standard SQL.
- */
-class Join : public IJoin
-{
-public:
-    Join(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample_block, bool any_take_last_row_ = false);
-
-    bool empty() { return data->type == Type::EMPTY; }
-
-    /** Add a block of data from the right hand of the JOIN to the map.
-      * Returns false if some limit was exceeded and you should not insert more data.
-      */
-    bool addJoinedBlock(const Block & block, bool check_limits) override;
-
-    /** Join data from the map (that was previously built by calls to addJoinedBlock) to the block with data from the "left" table.
-      * Could be called from different threads in parallel.
-      */
-    void joinBlock(Block & block, ExtraBlockPtr & not_processed) override;
-
-    /// Infer the return type for the joinGet function
-    DataTypePtr joinGetReturnType(const String & column_name) const;
-
-    /// Used by the joinGet function that turns StorageJoin into a dictionary
-    void joinGet(Block & block, const String & column_name) const;
-
-    /** Keep "totals" (a separate part of the dataset, see WITH TOTALS) to use later.
-      */
-    void setTotals(const Block & block) override { totals = block; }
-    bool hasTotals() const override { return totals; }
-
-    void joinTotals(Block & block) const override;
-
-    /** For RIGHT and FULL JOINs.
-      * A stream that will contain default values from the left table, joined with rows from the right table that were not joined before.
-      * Use only after all calls to joinBlock were done.
-      * left_sample_block is passed without accounting for the 'use_nulls' setting (columns will be converted to Nullable inside).
-      */
-    BlockInputStreamPtr createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const override;
-    bool hasStreamWithNonJoinedRows() const override;
-
-    /// Number of keys in all built JOIN maps.
-    size_t getTotalRowCount() const final;
-    /// Sum of sizes in bytes of all buffers used for JOIN maps and for all memory pools.
-    size_t getTotalByteCount() const final;
-
-    bool alwaysReturnsEmptySet() const final { return isInnerOrRight(getKind()) && data->empty; }
-
-    ASTTableJoin::Kind getKind() const { return kind; }
-    ASTTableJoin::Strictness getStrictness() const { return strictness; }
-    AsofRowRefs::Type getAsofType() const { return *asof_type; }
-    ASOF::Inequality getAsofInequality() const { return asof_inequality; }
-    bool anyTakeLastRow() const { return any_take_last_row; }
-
-    /// Different types of keys for maps.
- #define APPLY_FOR_JOIN_VARIANTS(M) \ - M(key8) \ - M(key16) \ - M(key32) \ - M(key64) \ - M(key_string) \ - M(key_fixed_string) \ - M(keys128) \ - M(keys256) \ - M(hashed) - - - /// Used for reading from StorageJoin and applying joinGet function - #define APPLY_FOR_JOIN_VARIANTS_LIMITED(M) \ - M(key8) \ - M(key16) \ - M(key32) \ - M(key64) \ - M(key_string) \ - M(key_fixed_string) - - enum class Type - { - EMPTY, - CROSS, - #define M(NAME) NAME, - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - }; - - - /** Different data structures, that are used to perform JOIN. - */ - template - struct MapsTemplate - { - std::unique_ptr> key8; - std::unique_ptr> key16; - std::unique_ptr>> key32; - std::unique_ptr>> key64; - std::unique_ptr> key_string; - std::unique_ptr> key_fixed_string; - std::unique_ptr> keys128; - std::unique_ptr> keys256; - std::unique_ptr> hashed; - - void create(Type which) - { - switch (which) - { - case Type::EMPTY: break; - case Type::CROSS: break; - - #define M(NAME) \ - case Type::NAME: NAME = std::make_unique(); break; - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - } - } - - size_t getTotalRowCount(Type which) const - { - switch (which) - { - case Type::EMPTY: return 0; - case Type::CROSS: return 0; - - #define M(NAME) \ - case Type::NAME: return NAME ? NAME->size() : 0; - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - } - - __builtin_unreachable(); - } - - size_t getTotalByteCountImpl(Type which) const - { - switch (which) - { - case Type::EMPTY: return 0; - case Type::CROSS: return 0; - - #define M(NAME) \ - case Type::NAME: return NAME ? NAME->getBufferSizeInBytes() : 0; - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - } - - __builtin_unreachable(); - } - }; - - using MapsOne = MapsTemplate; - using MapsAll = MapsTemplate; - using MapsOneFlagged = MapsTemplate; - using MapsAllFlagged = MapsTemplate; - using MapsAsof = MapsTemplate; - - using MapsVariant = std::variant; - using BlockNullmapList = std::deque>; - - struct RightTableData - { - /// Protect state for concurrent use in insertFromBlock and joinBlock. - /// @note that these methods could be called simultaneously only while use of StorageJoin. - mutable std::shared_mutex rwlock; - - Type type = Type::EMPTY; - bool empty = true; - - MapsVariant maps; - Block sample_block; /// Block as it would appear in the BlockList - BlocksList blocks; /// Blocks of "right" table. - BlockNullmapList blocks_nullmaps; /// Nullmaps for blocks of "right" table (if needed) - - /// Additional data - strings for string keys and continuation elements of single-linked lists of references to rows. - Arena pool; - }; - - void reuseJoinedData(const Join & join) - { - data = join.data; - } - - std::shared_ptr getJoinedData() const - { - return data; - } - -private: - friend class NonJoinedBlockInputStream; - friend class JoinSource; - - std::shared_ptr table_join; - ASTTableJoin::Kind kind; - ASTTableJoin::Strictness strictness; - - /// Names of key columns in right-side table (in the order they appear in ON/USING clause). @note It could contain duplicates. - const Names & key_names_right; - - bool nullable_right_side; /// In case of LEFT and FULL joins, if use_nulls, convert right-side columns to Nullable. - bool nullable_left_side; /// In case of RIGHT and FULL joins, if use_nulls, convert left-side columns to Nullable. - bool any_take_last_row; /// Overwrite existing values when encountering the same key again - std::optional asof_type; - ASOF::Inequality asof_inequality; - - /// Right table data. StorageJoin shares it between many Join objects. 
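-    /// Added note: the sharing happens via reuseJoinedData() above, so several Join instances
-    /// backed by one StorageJoin reuse a single prepared hash table instead of rebuilding it.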
- std::shared_ptr data; - Sizes key_sizes; - - /// Block with columns from the right-side table except key columns. - Block sample_block_with_columns_to_add; - /// Block with key columns in the same order they appear in the right-side table (duplicates appear once). - Block right_table_keys; - /// Block with key columns right-side table keys that are needed in result (would be attached after joined columns). - Block required_right_keys; - /// Left table column names that are sources for required_right_keys columns - std::vector required_right_keys_sources; - - Poco::Logger * log; - - Block totals; - - void init(Type type_); - - /** Set information about structure of right hand of JOIN (joined data). - */ - void setSampleBlock(const Block & block); - - const Block & savedBlockSample() const { return data->sample_block; } - - /// Modify (structure) right block to save it in block list - Block structureRightBlock(const Block & stored_block) const; - void initRightBlockStructure(Block & saved_block_sample); - void initRequiredRightKeys(); - - template - void joinBlockImpl( - Block & block, - const Names & key_names_left, - const Block & block_with_columns_to_add, - const Maps & maps) const; - - void joinBlockImplCross(Block & block) const; - - template - void joinGetImpl(Block & block, const String & column_name, const Maps & maps) const; - - static Type chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes); -}; - -} diff --git a/dbms/src/Interpreters/castColumn.h b/dbms/src/Interpreters/castColumn.h deleted file mode 100644 index 28914f34977..00000000000 --- a/dbms/src/Interpreters/castColumn.h +++ /dev/null @@ -1,12 +0,0 @@ -#pragma once - -#include -#include - - -namespace DB -{ -ColumnPtr castColumn(const ColumnWithTypeAndName & arg, const DataTypePtr & type); -ColumnPtr castColumn(const ColumnWithTypeAndName & arg, const DataTypePtr & type, const Context & context); - -} diff --git a/dbms/src/Interpreters/joinDispatch.h b/dbms/src/Interpreters/joinDispatch.h deleted file mode 100644 index 840b9b91a66..00000000000 --- a/dbms/src/Interpreters/joinDispatch.h +++ /dev/null @@ -1,106 +0,0 @@ -#pragma once - -#include -#include - -#include - - -/** Used in implementation of Join to process different data structures. - */ - -namespace DB -{ - -template -struct MapGetter; - -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOneFlagged; }; -template <> struct MapGetter { using Map = Join::MapsOneFlagged; }; - -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOneFlagged; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; - -template <> struct MapGetter { using Map = Join::MapsAll; }; -template <> struct MapGetter { using Map = Join::MapsAll; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; - -/// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; - -/// Only SEMI LEFT and SEMI RIGHT are valid. 
INNER and FULL are here for templates instantiation. -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; - -template -struct MapGetter -{ - using Map = Join::MapsAsof; -}; - - -static constexpr std::array STRICTNESSES = { - ASTTableJoin::Strictness::RightAny, - ASTTableJoin::Strictness::Any, - ASTTableJoin::Strictness::All, - ASTTableJoin::Strictness::Asof, - ASTTableJoin::Strictness::Semi, - ASTTableJoin::Strictness::Anti, -}; - -static constexpr std::array KINDS = { - ASTTableJoin::Kind::Left, - ASTTableJoin::Kind::Inner, - ASTTableJoin::Kind::Full, - ASTTableJoin::Kind::Right -}; - -/// Init specified join map -inline bool joinDispatchInit(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, Join::MapsVariant & maps) -{ - return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) - { - constexpr auto i = ij / STRICTNESSES.size(); - constexpr auto j = ij % STRICTNESSES.size(); - if (kind == KINDS[i] && strictness == STRICTNESSES[j]) - { - maps = typename MapGetter::Map(); - return true; - } - return false; - }); -} - -/// Call function on specified join map -template -inline bool joinDispatch(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, MapsVariant & maps, Func && func) -{ - return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) - { - // NOTE: Avoid using nested static loop as GCC and CLANG have bugs in different ways - // See https://stackoverflow.com/questions/44386415/gcc-and-clang-disagree-about-c17-constexpr-lambda-captures - constexpr auto i = ij / STRICTNESSES.size(); - constexpr auto j = ij % STRICTNESSES.size(); - if (kind == KINDS[i] && strictness == STRICTNESSES[j]) - { - func( - std::integral_constant(), - std::integral_constant(), - std::get::Map>(maps)); - return true; - } - return false; - }); -} - -} diff --git a/dbms/src/Interpreters/misc.h b/dbms/src/Interpreters/misc.h deleted file mode 100644 index e2f34375dc0..00000000000 --- a/dbms/src/Interpreters/misc.h +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -namespace DB -{ - -inline bool functionIsInOperator(const std::string & name) -{ - return name == "in" || name == "notIn"; -} - -inline bool functionIsInOrGlobalInOperator(const std::string & name) -{ - return functionIsInOperator(name) || name == "globalIn" || name == "globalNotIn"; -} - -inline bool functionIsLikeOperator(const std::string & name) -{ - return name == "like" || name == "notLike"; -} - -} diff --git a/dbms/src/Interpreters/tests/CMakeLists.txt b/dbms/src/Interpreters/tests/CMakeLists.txt deleted file mode 100644 index da45c1a5153..00000000000 --- a/dbms/src/Interpreters/tests/CMakeLists.txt +++ /dev/null @@ -1,67 +0,0 @@ -add_executable (expression expression.cpp) -target_link_libraries (expression PRIVATE dbms clickhouse_parsers) - -add_executable (create_query create_query.cpp) -target_link_libraries (create_query PRIVATE dbms clickhouse_parsers) - -add_executable (select_query select_query.cpp) -target_link_libraries (select_query PRIVATE clickhouse_storages_system dbms clickhouse_common_io) - -add_executable (aggregate aggregate.cpp) -target_link_libraries (aggregate PRIVATE dbms) - -add_executable (hash_map hash_map.cpp) -target_include_directories (hash_map SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) -target_link_libraries (hash_map PRIVATE dbms) - -add_executable (hash_map_lookup 
hash_map_lookup.cpp) -target_include_directories (hash_map_lookup SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) -target_link_libraries (hash_map_lookup PRIVATE dbms) - -add_executable (hash_map3 hash_map3.cpp) -target_include_directories(hash_map3 SYSTEM BEFORE PRIVATE ${METROHASH_INCLUDE_DIR}) -target_link_libraries (hash_map3 PRIVATE dbms ${FARMHASH_LIBRARIES} ${METROHASH_LIBRARIES}) - -add_executable (hash_map_string hash_map_string.cpp) -target_include_directories (hash_map_string SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) -target_link_libraries (hash_map_string PRIVATE dbms) - -add_executable (hash_map_string_2 hash_map_string_2.cpp) -target_link_libraries (hash_map_string_2 PRIVATE dbms) - -add_executable (hash_map_string_3 hash_map_string_3.cpp) -target_include_directories(hash_map_string_3 SYSTEM BEFORE PRIVATE ${METROHASH_INCLUDE_DIR}) -target_link_libraries (hash_map_string_3 PRIVATE dbms ${FARMHASH_LIBRARIES} ${METROHASH_LIBRARIES}) - -add_executable (hash_map_string_small hash_map_string_small.cpp) -target_include_directories (hash_map_string_small SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) -target_link_libraries (hash_map_string_small PRIVATE dbms) - -add_executable (string_hash_map string_hash_map.cpp) -target_link_libraries (string_hash_map PRIVATE dbms) - -add_executable (string_hash_map_aggregation string_hash_map.cpp) -target_link_libraries (string_hash_map_aggregation PRIVATE dbms) - -add_executable (two_level_hash_map two_level_hash_map.cpp) -target_include_directories (two_level_hash_map SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) -target_link_libraries (two_level_hash_map PRIVATE dbms) - -add_executable (logical_expressions_optimizer logical_expressions_optimizer.cpp) -target_link_libraries (logical_expressions_optimizer PRIVATE dbms clickhouse_parsers) - -add_executable (in_join_subqueries_preprocessor in_join_subqueries_preprocessor.cpp) -target_link_libraries (in_join_subqueries_preprocessor PRIVATE dbms clickhouse_parsers) -add_check(in_join_subqueries_preprocessor) - -add_executable (expression_analyzer expression_analyzer.cpp) -target_link_libraries (expression_analyzer PRIVATE dbms clickhouse_storages_system clickhouse_parsers clickhouse_common_io) -add_check(expression_analyzer) - -add_executable (users users.cpp) -target_link_libraries (users PRIVATE dbms clickhouse_common_config) - -if (OS_LINUX) - add_executable (internal_iotop internal_iotop.cpp) - target_link_libraries (internal_iotop PRIVATE dbms) -endif () diff --git a/dbms/src/Interpreters/tests/gtest_cycle_aliases.cpp b/dbms/src/Interpreters/tests/gtest_cycle_aliases.cpp deleted file mode 100644 index c8037b23d84..00000000000 --- a/dbms/src/Interpreters/tests/gtest_cycle_aliases.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -using namespace DB; - -TEST(QueryNormalizer, SimpleCycleAlias) -{ - String query = "a as b, b as a"; - ParserExpressionList parser(false); - ASTPtr ast = parseQuery(parser, query, 0); - - Aliases aliases; - aliases["a"] = parseQuery(parser, "b as a", 0)->children[0]; - aliases["b"] = parseQuery(parser, "a as b", 0)->children[0]; - - Settings settings; - QueryNormalizer::Data normalizer_data(aliases, settings); - EXPECT_THROW(QueryNormalizer(normalizer_data).visit(ast), Exception); -} diff --git a/dbms/src/Interpreters/tests/logical_expressions_optimizer.cpp b/dbms/src/Interpreters/tests/logical_expressions_optimizer.cpp deleted file mode 100644 index c21c4dda299..00000000000 --- 
a/dbms/src/Interpreters/tests/logical_expressions_optimizer.cpp +++ /dev/null @@ -1,295 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - - -namespace -{ - -struct TestEntry -{ - std::string input; - std::string expected_output; - UInt64 limit; -}; - -using TestEntries = std::vector; -using TestResult = std::pair; - -void run(); -void performTests(const TestEntries & entries); -TestResult check(const TestEntry & entry); -bool parse(DB::ASTPtr & ast, const std::string & query); -bool equals(const DB::ASTPtr & lhs, const DB::ASTPtr & rhs); -void reorder(DB::IAST * ast); - - -void run() -{ - /// NOTE: Queries are not always realistic, but we are only interested in the syntax. - TestEntries entries = - { - { - "SELECT 1", - "SELECT 1", - 3 - }, - - // WHERE - - { - "SELECT name, value FROM report WHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report WHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - 4 - }, - - { - "SELECT name, value FROM report WHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report WHERE name IN ('Alice', 'Bob', 'Carol')", - 3 - }, - - { - "SELECT name, value FROM report WHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report WHERE name IN ('Alice', 'Bob', 'Carol')", - 2 - }, - - { - "SELECT name, value FROM report WHERE (name = 'Alice') OR (value = 1000) OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report WHERE (value = 1000) OR name IN ('Alice', 'Bob', 'Carol')", - 2 - }, - - { - "SELECT name, value FROM report WHERE (name = 'Alice') OR (value = 1000) OR (name = 'Bob') OR (name = 'Carol') OR (value = 2000)", - "SELECT name, value FROM report WHERE name IN ('Alice', 'Bob', 'Carol') OR value IN (1000, 2000)", - 2 - }, - - { - "SELECT value FROM report WHERE ((value + 1) = 1000) OR ((2 * value) = 2000) OR ((2 * value) = 4000) OR ((value + 1) = 3000)", - "SELECT value FROM report WHERE ((value + 1) IN (1000, 3000)) OR ((2 * value) IN (2000, 4000))", - 2 - }, - - { - "SELECT name, value FROM report WHERE ((name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')) AND ((value = 1000) OR (value = 2000))", - "SELECT name, value FROM report WHERE name IN ('Alice', 'Bob', 'Carol') AND ((value = 1000) OR (value = 2000))", - 3 - }, - - // PREWHERE - - { - "SELECT name, value FROM report PREWHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report PREWHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - 4 - }, - - { - "SELECT name, value FROM report PREWHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report PREWHERE name IN ('Alice', 'Bob', 'Carol')", - 3 - }, - - { - "SELECT name, value FROM report PREWHERE (name = 'Alice') OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report PREWHERE name IN ('Alice', 'Bob', 'Carol')", - 2 - }, - - { - "SELECT name, value FROM report PREWHERE (name = 'Alice') OR (value = 1000) OR (name = 'Bob') OR (name = 'Carol')", - "SELECT name, value FROM report PREWHERE (value = 1000) OR name IN ('Alice', 'Bob', 'Carol')", - 2 - }, - - { - "SELECT name, value FROM report PREWHERE (name = 'Alice') OR (value = 1000) OR (name = 'Bob') OR (name = 'Carol') OR (value = 2000)", - "SELECT name, value FROM report PREWHERE name IN ('Alice', 'Bob', 'Carol') OR value IN (1000, 2000)", - 2 - }, - - { - "SELECT value FROM report 
PREWHERE ((value + 1) = 1000) OR ((2 * value) = 2000) OR ((2 * value) = 4000) OR ((value + 1) = 3000)", - "SELECT value FROM report PREWHERE (value + 1) IN (1000, 3000) OR (2 * value) IN (2000, 4000)", - 2 - }, - - // HAVING - - { - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING number = 1", - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING number = 1", - 2 - }, - - { - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING (number = 1) OR (number = 2)", - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING number IN (1, 2)", - 2 - }, - - { - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING (number = 1) OR (number = 2)", - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING (number = 1) OR (number = 2)", - 3 - }, - - { - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING ((number + 1) = 1) OR ((number + 1) = 2) OR ((number + 3) = 7)", - "SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number HAVING ((number + 3) = 7) OR (number + 1) IN (1, 2)", - 2 - }, - - // PREWHERE + WHERE + HAVING - - { - "SELECT number, count(), 1 AS T, 2 AS U FROM (SELECT * FROM system.numbers LIMIT 10) PREWHERE (U = 1) OR (U = 2) " - "WHERE (T = 1) OR (T = 2) GROUP BY number HAVING (number = 1) OR (number = 2)", - "SELECT number, count(), 1 AS T, 2 AS U FROM (SELECT * FROM system.numbers LIMIT 10) PREWHERE U IN (1, 2) " - "WHERE T IN (1, 2) GROUP BY number HAVING number IN (1, 2)", - 2 - }, - - { - "SELECT number, count(), 1 AS T, 2 AS U FROM (SELECT * FROM system.numbers LIMIT 10) PREWHERE (U = 1) OR (U = 2) OR (U = 3) " - "WHERE (T = 1) OR (T = 2) GROUP BY number HAVING (number = 1) OR (number = 2)", - "SELECT number, count(), 1 AS T, 2 AS U FROM (SELECT * FROM system.numbers LIMIT 10) PREWHERE U IN (1, 2, 3) " - "WHERE (T = 1) OR (T = 2) GROUP BY number HAVING (number = 1) OR (number = 2)", - 3 - }, - - { - "SELECT x = 1 OR x=2 OR (x = 3 AS x3) AS y, 4 AS x", - "SELECT x IN (1, 2, 3) AS y, 4 AS x", - 2 - } - }; - - performTests(entries); -} - -void performTests(const TestEntries & entries) -{ - unsigned int count = 0; - unsigned int i = 1; - - for (const auto & entry : entries) - { - auto res = check(entry); - if (res.first) - { - ++count; - std::cout << "Test " << i << " passed.\n"; - } - else - std::cout << "Test " << i << " failed. Expected: " << entry.expected_output << ". Received: " << res.second << "\n"; - - ++i; - } - std::cout << count << " out of " << entries.size() << " test(s) passed.\n"; -} - -TestResult check(const TestEntry & entry) -{ - try - { - /// Parse and optimize the incoming query. - DB::ASTPtr ast_input; - if (!parse(ast_input, entry.input)) - return TestResult(false, "parse error"); - - auto select_query = typeid_cast(&*ast_input); - - DB::LogicalExpressionsOptimizer optimizer(select_query, entry.limit); - optimizer.perform(); - - /// Parse the expected result. - DB::ASTPtr ast_expected; - if (!parse(ast_expected, entry.expected_output)) - return TestResult(false, "parse error"); - - /// Compare the optimized query and the expected result. 
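-        /// Added note: equals() (defined below) compares getTreeHash() after recursively sorting
-        /// children, so semantically equal OR chains that were only reordered still match.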
- bool res = equals(ast_input, ast_expected); - std::string output = DB::queryToString(ast_input); - - return TestResult(res, output); - } - catch (DB::Exception & e) - { - return TestResult(false, e.displayText()); - } -} - -bool parse(DB::ASTPtr & ast, const std::string & query) -{ - DB::ParserSelectQuery parser; - std::string message; - auto begin = query.data(); - auto end = begin + query.size(); - ast = DB::tryParseQuery(parser, begin, end, message, false, "", false, 0); - return ast != nullptr; -} - -bool equals(const DB::ASTPtr & lhs, const DB::ASTPtr & rhs) -{ - DB::ASTPtr lhs_reordered = lhs->clone(); - reorder(&*lhs_reordered); - - DB::ASTPtr rhs_reordered = rhs->clone(); - reorder(&*rhs_reordered); - - return lhs_reordered->getTreeHash() == rhs_reordered->getTreeHash(); -} - -void reorderImpl(DB::IAST * ast) -{ - if (ast == nullptr) - return; - - auto & children = ast->children; - if (children.empty()) - return; - - for (auto & child : children) - reorderImpl(&*child); - - std::sort(children.begin(), children.end(), [](const DB::ASTPtr & lhs, const DB::ASTPtr & rhs) - { - return lhs->getTreeHash() < rhs->getTreeHash(); - }); -} - -void reorder(DB::IAST * ast) -{ - if (ast == nullptr) - return; - - auto select_query = typeid_cast(ast); - if (select_query == nullptr) - return; - - reorderImpl(select_query->where().get()); - reorderImpl(select_query->prewhere().get()); - reorderImpl(select_query->having().get()); -} - -} - -int main() -{ - run(); - return 0; -} diff --git a/dbms/src/Parsers/ASTCreateQuotaQuery.h b/dbms/src/Parsers/ASTCreateQuotaQuery.h deleted file mode 100644 index 2968c2cc607..00000000000 --- a/dbms/src/Parsers/ASTCreateQuotaQuery.h +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include -#include - - -namespace DB -{ -class ASTExtendedRoleSet; - - -/** CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name - * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] - * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING} [,...]] - * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] - * - * ALTER QUOTA [IF EXISTS] name - * [RENAME TO new_name] - * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] - * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING | - * UNSET TRACKING} [,...]] - * [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] - */ -class ASTCreateQuotaQuery : public IAST -{ -public: - bool alter = false; - bool attach = false; - - bool if_exists = false; - bool if_not_exists = false; - bool or_replace = false; - - String name; - String new_name; - using KeyType = Quota::KeyType; - std::optional key_type; - - using ResourceType = Quota::ResourceType; - using ResourceAmount = Quota::ResourceAmount; - static constexpr size_t MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; - - struct Limits - { - std::optional max[MAX_RESOURCE_TYPE]; - bool unset_tracking = false; - std::chrono::seconds duration = std::chrono::seconds::zero(); - bool randomize_interval = false; - }; - std::vector all_limits; - - std::shared_ptr roles; - - String getID(char) const override; - ASTPtr clone() const override; - void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; -}; -} diff --git a/dbms/src/Parsers/ASTCreateSettingsProfileQuery.h b/dbms/src/Parsers/ASTCreateSettingsProfileQuery.h deleted file mode 100644 index b3a60853e57..00000000000 --- a/dbms/src/Parsers/ASTCreateSettingsProfileQuery.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ -class ASTSettingsProfileElements; -class ASTExtendedRoleSet; - - -/** CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name - * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] - * - * ALTER SETTINGS PROFILE [IF EXISTS] name - * [RENAME TO new_name] - * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] - */ -class ASTCreateSettingsProfileQuery : public IAST -{ -public: - bool alter = false; - bool attach = false; - - bool if_exists = false; - bool if_not_exists = false; - bool or_replace = false; - - String name; - String new_name; - - std::shared_ptr settings; - - std::shared_ptr to_roles; - - String getID(char) const override; - ASTPtr clone() const override; - void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; -}; -} diff --git a/dbms/src/Parsers/CMakeLists.txt b/dbms/src/Parsers/CMakeLists.txt deleted file mode 100644 index 086384196aa..00000000000 --- a/dbms/src/Parsers/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) -add_headers_and_sources(clickhouse_parsers .) 
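-# Added note (assumed helper behavior): add_headers_and_sources() from dbms_glob_sources.cmake
-# globs the headers and sources of the given directory into the clickhouse_parsers_headers and
-# clickhouse_parsers_sources variables consumed by add_library() below.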
-add_library(clickhouse_parsers ${clickhouse_parsers_headers} ${clickhouse_parsers_sources}) -target_link_libraries(clickhouse_parsers PUBLIC clickhouse_common_io) -target_include_directories(clickhouse_parsers PUBLIC ${DBMS_INCLUDE_DIR}) - -if (USE_DEBUG_HELPERS) - set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/src/Parsers/iostream_debug_helpers.h") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") -endif () - -if(ENABLE_TESTS) - add_subdirectory(tests) -endif() diff --git a/dbms/src/Parsers/ParserCreateQuotaQuery.h b/dbms/src/Parsers/ParserCreateQuotaQuery.h deleted file mode 100644 index 18e6ef6f9f7..00000000000 --- a/dbms/src/Parsers/ParserCreateQuotaQuery.h +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ -/** Parses queries like - * CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name - * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] - * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING} [,...]] - * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] - * - * ALTER QUOTA [IF EXISTS] name - * [RENAME TO new_name] - * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] - * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING | - * UNSET TRACKING} [,...]] - * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] - */ -class ParserCreateQuotaQuery : public IParserBase -{ -public: - ParserCreateQuotaQuery & enableAttachMode(bool enable_) { attach_mode = enable_; return *this; } - -protected: - const char * getName() const override { return "CREATE QUOTA or ALTER QUOTA query"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; - -private: - bool attach_mode = false; -}; -} diff --git a/dbms/src/Parsers/ParserSettingsProfileElement.h b/dbms/src/Parsers/ParserSettingsProfileElement.h deleted file mode 100644 index ec8e1abb5b5..00000000000 --- a/dbms/src/Parsers/ParserSettingsProfileElement.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ -/** Parses a string like this: - * {variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE]} | PROFILE 'profile_name' - */ -class ParserSettingsProfileElement : public IParserBase -{ -public: - ParserSettingsProfileElement & useIDMode(bool enable_) { id_mode = enable_; return *this; } - -protected: - const char * getName() const override { return "SettingsProfileElement"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; - -private: - bool id_mode = false; -}; - - -class ParserSettingsProfileElements : public IParserBase -{ -public: - ParserSettingsProfileElements & useIDMode(bool enable_) { id_mode = enable_; return *this; } - -protected: - const char * getName() const override { return "SettingsProfileElements"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; - -private: - bool id_mode = false; -};} diff --git a/dbms/src/Parsers/parseUserName.cpp b/dbms/src/Parsers/parseUserName.cpp deleted file mode 100644 index 3993935e386..00000000000 --- 
a/dbms/src/Parsers/parseUserName.cpp +++ /dev/null @@ -1,73 +0,0 @@ -#include <Parsers/parseUserName.h> -#include <Parsers/parseIdentifierOrStringLiteral.h> -#include <Parsers/CommonParsers.h> -#include <boost/algorithm/string.hpp> - -namespace DB -{ -namespace -{ - bool parseUserNameImpl(IParser::Pos & pos, Expected & expected, String & user_name, String * host_like_pattern) - { - String name; - if (!parseIdentifierOrStringLiteral(pos, expected, name)) - return false; - - boost::algorithm::trim(name); - - String pattern = "@"; - - if (ParserToken{TokenType::At}.ignore(pos, expected)) - { - if (!parseIdentifierOrStringLiteral(pos, expected, pattern)) - return false; - - boost::algorithm::trim(pattern); - } - - if (pattern != "@") - name += '@' + pattern; - - user_name = std::move(name); - if (host_like_pattern) - *host_like_pattern = std::move(pattern); - return true; - } -} - - -bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name) -{ - return parseUserNameImpl(pos, expected, user_name, nullptr); -} - - -bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name, String & host_like_pattern) -{ - return parseUserNameImpl(pos, expected, user_name, &host_like_pattern); -} - - -bool parseUserNameOrCurrentUserTag(IParser::Pos & pos, Expected & expected, String & user_name, bool & current_user) -{ - if (ParserKeyword{"CURRENT_USER"}.ignore(pos, expected) || ParserKeyword{"currentUser"}.ignore(pos, expected)) - { - if (ParserToken{TokenType::OpeningRoundBracket}.ignore(pos, expected)) - { - if (!ParserToken{TokenType::ClosingRoundBracket}.ignore(pos, expected)) - return false; - } - current_user = true; - return true; - } - - if (parseUserName(pos, expected, user_name)) - { - current_user = false; - return true; - } - - return false; -} - -} diff --git a/dbms/src/Processors/NullSink.h b/dbms/src/Processors/NullSink.h deleted file mode 100644 index e4968daee29..00000000000 --- a/dbms/src/Processors/NullSink.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once -#include <Processors/IProcessor.h> - -namespace DB -{ - -class NullSink : public IProcessor -{ -public: - explicit NullSink(Block header) : IProcessor({std::move(header)}, {}) {} - String getName() const override { return "NullSink"; } - - Status prepare() override - { - inputs.front().close(); - return Status::Finished; - } - - InputPort & getPort() { return inputs.front(); } -}; - -} diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.cpp b/dbms/src/Processors/Sources/SourceFromInputStream.cpp deleted file mode 100644 index 6f2a7eeb28a..00000000000 --- a/dbms/src/Processors/Sources/SourceFromInputStream.cpp +++ /dev/null @@ -1,170 +0,0 @@ -#include <Processors/Sources/SourceFromInputStream.h> -#include <Processors/Transforms/AggregatingTransform.h> -#include <DataTypes/DataTypeAggregateFunction.h> -#include <DataStreams/RemoteBlockInputStream.h> - -namespace DB -{ -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - -SourceFromInputStream::SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info_) - : ISourceWithProgress(stream_->getHeader()) - , force_add_aggregating_info(force_add_aggregating_info_) - , stream(std::move(stream_)) -{ - init(); -} - -void SourceFromInputStream::init() -{ - auto & sample = getPort().getHeader(); - for (auto & type : sample.getDataTypes()) - if (typeid_cast<const DataTypeAggregateFunction *>(type.get())) - has_aggregate_functions = true; -} - -void SourceFromInputStream::addTotalsPort() -{ - if (has_totals_port) - throw Exception("Totals port was already added for SourceFromInputStream.", ErrorCodes::LOGICAL_ERROR); - - outputs.emplace_back(outputs.front().getHeader(), this); - has_totals_port = true; -} - -IProcessor::Status SourceFromInputStream::prepare() -{ - auto status = ISource::prepare(); - - if (status == Status::Finished) - { - is_generating_finished = true; -
- /// Read postfix and get totals if needed. - if (!is_stream_finished && !isCancelled()) - return Status::Ready; - - if (has_totals_port) - { - auto & totals_out = outputs.back(); - - if (totals_out.isFinished()) - return Status::Finished; - - if (has_totals) - { - if (!totals_out.canPush()) - return Status::PortFull; - - totals_out.push(std::move(totals)); - has_totals = false; - } - - totals_out.finish(); - } - } - - return status; -} - -void SourceFromInputStream::work() -{ - if (!is_generating_finished) - { - try - { - ISource::work(); - } - catch (...) - { - /// Won't read suffix in case of exception. - is_stream_finished = true; - throw; - } - - return; - } - - if (is_stream_finished) - return; - - /// Don't cancel for RemoteBlockInputStream (otherwise readSuffix can get stuck) - if (!typeid_cast<RemoteBlockInputStream *>(stream.get())) - stream->cancel(false); - - if (rows_before_limit) - { - auto & info = stream->getProfileInfo(); - if (info.hasAppliedLimit()) - rows_before_limit->add(info.getRowsBeforeLimit()); - } - - stream->readSuffix(); - - if (auto totals_block = stream->getTotals()) - { - totals.setColumns(totals_block.getColumns(), 1); - has_totals = true; - } - - is_stream_finished = true; -} - -Chunk SourceFromInputStream::generate() -{ - if (is_stream_finished) - return {}; - - if (!is_stream_started) - { - stream->readPrefix(); - is_stream_started = true; - } - - auto block = stream->read(); - if (!block && !isCancelled()) - { - if (rows_before_limit) - { - auto & info = stream->getProfileInfo(); - if (info.hasAppliedLimit()) - rows_before_limit->add(info.getRowsBeforeLimit()); - } - - stream->readSuffix(); - - if (auto totals_block = stream->getTotals()) - { - if (totals_block.rows() == 1) /// Sometimes we can get empty totals. Skip it. - { - totals.setColumns(totals_block.getColumns(), 1); - has_totals = true; - } - } - - is_stream_finished = true; - return {}; - } - -#ifndef NDEBUG - assertBlocksHaveEqualStructure(getPort().getHeader(), block, "SourceFromInputStream"); -#endif - - UInt64 num_rows = block.rows(); - Chunk chunk(block.getColumns(), num_rows); - - if (force_add_aggregating_info || has_aggregate_functions) - { - auto info = std::make_shared<AggregatedChunkInfo>(); - info->bucket_num = block.info.bucket_num; - info->is_overflows = block.info.is_overflows; - chunk.setChunkInfo(std::move(info)); - } - - return chunk; -} - -} diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.h b/dbms/src/Processors/Transforms/AggregatingTransform.h deleted file mode 100644 index 469392e5840..00000000000 --- a/dbms/src/Processors/Transforms/AggregatingTransform.h +++ /dev/null @@ -1,105 +0,0 @@ -#pragma once -#include <Processors/IProcessor.h> -#include <Interpreters/Aggregator.h> -#include <IO/ReadBufferFromFile.h> -#include <Compression/CompressedReadBuffer.h> -#include <Common/Stopwatch.h> - -namespace DB -{ - -class AggregatedChunkInfo : public ChunkInfo -{ -public: - bool is_overflows = false; - Int32 bucket_num = -1; -}; - -class IBlockInputStream; -using BlockInputStreamPtr = std::shared_ptr<IBlockInputStream>; - -struct AggregatingTransformParams -{ - Aggregator::Params params; - Aggregator aggregator; - bool final; - - AggregatingTransformParams(const Aggregator::Params & params_, bool final_) - : params(params_), aggregator(params), final(final_) {} - - Block getHeader() const { return aggregator.getHeader(final); } -}; - -struct ManyAggregatedData -{ - ManyAggregatedDataVariants variants; - std::vector<std::unique_ptr<std::mutex>> mutexes; - std::atomic<UInt32> num_finished = 0; - - explicit ManyAggregatedData(size_t num_threads = 0) : variants(num_threads), mutexes(num_threads) - { - for (auto & elem : variants) - elem = std::make_shared<AggregatedDataVariants>(); - - for (auto & mut : mutexes) - mut = 
std::make_unique<std::mutex>(); - } -}; - -using AggregatingTransformParamsPtr = std::shared_ptr<AggregatingTransformParams>; -using ManyAggregatedDataPtr = std::shared_ptr<ManyAggregatedData>; - -class AggregatingTransform : public IProcessor -{ -public: - AggregatingTransform(Block header, AggregatingTransformParamsPtr params_); - - /// For Parallel aggregating. - AggregatingTransform(Block header, AggregatingTransformParamsPtr params_, - ManyAggregatedDataPtr many_data, size_t current_variant, - size_t temporary_data_merge_threads, size_t max_threads); - ~AggregatingTransform() override; - - String getName() const override { return "AggregatingTransform"; } - Status prepare() override; - void work() override; - Processors expandPipeline() override; - -protected: - void consume(Chunk chunk); - -private: - /// To read the data that was flushed into the temporary data file. - Processors processors; - - AggregatingTransformParamsPtr params; - Logger * log = &Logger::get("AggregatingTransform"); - - ColumnRawPtrs key_columns; - Aggregator::AggregateColumns aggregate_columns; - bool no_more_keys = false; - - ManyAggregatedDataPtr many_data; - AggregatedDataVariants & variants; - size_t max_threads = 1; - size_t temporary_data_merge_threads = 1; - - /// TODO: calculate time only for aggregation. - Stopwatch watch; - - UInt64 src_rows = 0; - UInt64 src_bytes = 0; - - bool is_generate_initialized = false; - bool is_consume_finished = false; - bool is_pipeline_created = false; - - Chunk current_chunk; - bool read_current_chunk = false; - - bool is_consume_started = false; - - void initGenerate(); -}; - -} diff --git a/dbms/src/Storages/TableStructureLockHolder.h b/dbms/src/Storages/TableStructureLockHolder.h deleted file mode 100644 index 50f196517e3..00000000000 --- a/dbms/src/Storages/TableStructureLockHolder.h +++ /dev/null @@ -1,45 +0,0 @@ -#pragma once - -#include <Common/RWLock.h> - -namespace DB -{ - -/// Structs that hold table structure (columns, their types, default values etc.) locks when executing queries. -/// See IStorage::lock* methods for comments. - -struct TableStructureWriteLockHolder -{ - void release() - { - *this = {}; - } - - void releaseAllExceptAlterIntention() - { - structure_lock.reset(); - } - -private: - friend class IStorage; - - /// Order is important. - std::unique_lock<std::mutex> alter_lock; - RWLockImpl::LockHolder structure_lock; -}; - -struct TableStructureReadLockHolder -{ - void release() - { - *this = {}; - } - -private: - friend class IStorage; - - /// Order is important. - RWLockImpl::LockHolder structure_lock; -}; - -} diff --git a/dbms/src/TableFunctions/ITableFunction.cpp b/dbms/src/TableFunctions/ITableFunction.cpp deleted file mode 100644 index 233da7495d8..00000000000 --- a/dbms/src/TableFunctions/ITableFunction.cpp +++ /dev/null @@ -1,19 +0,0 @@ -#include <TableFunctions/ITableFunction.h> -#include <Common/ProfileEvents.h> - - -namespace ProfileEvents -{ - extern const Event TableFunctionExecute; -} - -namespace DB -{ - -StoragePtr ITableFunction::execute(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const -{ - ProfileEvents::increment(ProfileEvents::TableFunctionExecute); - return executeImpl(ast_function, context, table_name); -} - -} diff --git a/dbms/tests/config/decimals_dictionary.xml b/dbms/tests/config/decimals_dictionary.xml deleted file mode 100644 index ff465b91b85..00000000000 --- a/dbms/tests/config/decimals_dictionary.xml +++ /dev/null @@ -1,197 +0,0 @@ [197 deleted lines of XML whose markup was lost in extraction: definitions of the dictionaries flat_decimals, hashed_decimals, cache_decimals, complex_hashed_decimals and complex_cache_decimals, each reading table test_00950.decimals from localhost:9000 as the default user and exposing the attributes d32 Decimal32(4), d64 Decimal64(6) and d128 Decimal128(1) keyed by key (UInt64 for the complex layouts).] diff --git a/dbms/tests/config/ints_dictionary.xml b/dbms/tests/config/ints_dictionary.xml deleted file mode 100644 index 5cf8419ad77..00000000000 --- a/dbms/tests/config/ints_dictionary.xml +++ /dev/null @@ -1,514 +0,0 @@ [514 deleted lines of XML whose markup was lost in extraction: definitions of the dictionaries flat_ints, hashed_ints, hashed_sparse_ints, cache_ints, complex_hashed_ints and complex_cache_ints over table test_00950.ints, plus one_cell_cache_ints over test_01054.ints and one_cell_cache_ints_overflow over test_01054_overflow.ints, each exposing the attributes i8, i16, i32, i64, u8, u16, u32 and u64.] \ No newline at end of file diff --git a/dbms/tests/config/strings_dictionary.xml b/dbms/tests/config/strings_dictionary.xml deleted file mode 100644 index 88fad6ae2d7..00000000000 --- a/dbms/tests/config/strings_dictionary.xml +++ /dev/null @@ -1,209 +0,0 @@ [209 deleted lines of XML whose markup was lost in extraction: definitions of the dictionaries flat_strings, hashed_strings, cache_strings, complex_hashed_strings, complex_cache_strings, complex_hashed_strings_key and complex_cache_strings_key over table test_00950.strings, exposing a str String attribute keyed by key UInt64.]
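Although the markup of the three dictionary configs above did not survive extraction, the dictionaries they define read from the `test_00950` tables and are queried through `dictGet`. A hedged sketch of such a lookup, assuming an `instance` fixture like the ones used in the integration tests later in this diff (the expected value is an assumption about the test data):
```python
def check_flat_ints_dictionary(instance):
    # dictGet(dictionary_name, attribute_name, key): looks up attribute `i8`
    # for key 1 in the flat_ints dictionary defined in ints_dictionary.xml.
    result = instance.query("SELECT dictGet('flat_ints', 'i8', toUInt64(1))")
    assert result.strip() == '1'  # assumes rows where each attribute equals the key
```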
diff --git a/dbms/tests/decimals_dictionary.xml b/dbms/tests/decimals_dictionary.xml deleted file mode 120000 index a6661ee64e5..00000000000 --- a/dbms/tests/decimals_dictionary.xml +++ /dev/null @@ -1 +0,0 @@ -../../dbms/tests/config/decimals_dictionary.xml \ No newline at end of file diff --git a/dbms/tests/instructions/developer_instruction_en.md b/dbms/tests/instructions/developer_instruction_en.md deleted file mode 120000 index 81ffff89507..00000000000 --- a/dbms/tests/instructions/developer_instruction_en.md +++ /dev/null @@ -1 +0,0 @@ -../../../docs/en/development/developer_instruction.md \ No newline at end of file diff --git a/dbms/tests/instructions/developer_instruction_ru.md b/dbms/tests/instructions/developer_instruction_ru.md deleted file mode 120000 index 9f912ebfec9..00000000000 --- a/dbms/tests/instructions/developer_instruction_ru.md +++ /dev/null @@ -1 +0,0 @@ -../../../docs/ru/development/developer_instruction.md \ No newline at end of file diff --git a/dbms/tests/integration/CMakeLists.txt b/dbms/tests/integration/CMakeLists.txt deleted file mode 100644 index 54d5f5e727a..00000000000 --- a/dbms/tests/integration/CMakeLists.txt +++ /dev/null @@ -1,24 +0,0 @@ -if(CLICKHOUSE_SPLIT_BINARY) - set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse-server CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse-client) -else() - set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse) -endif() - -find_program(DOCKER_CMD docker) -find_program(DOCKER_COMPOSE_CMD docker-compose) -find_program(PYTEST_CMD pytest) -find_program(SUDO_CMD sudo) - -# will mount only one binary into the docker container - a build with .so can't work -if(MAKE_STATIC_LIBRARIES AND DOCKER_CMD) - if(INTEGRATION_USE_RUNNER AND SUDO_CMD) - add_test(NAME integration-runner WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND ${SUDO_CMD} ${CMAKE_CURRENT_SOURCE_DIR}/runner --binary ${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse --configs-dir ${ClickHouse_SOURCE_DIR}/dbms/programs/server/) - message(STATUS "Using tests in docker with runner SUDO=${SUDO_CMD}; DOCKER=${DOCKER_CMD};") - endif() - if(NOT INTEGRATION_USE_RUNNER AND DOCKER_COMPOSE_CMD AND PYTEST_CMD) - # To run one test with debug: - # cmake . -DPYTEST_OPT="-ss;test_cluster_copier" - add_test(NAME integration-pytest WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND env ${TEST_USE_BINARIES} "CLICKHOUSE_TESTS_BASE_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/dbms/programs/server/" ${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}) - message(STATUS "Using tests in docker DOCKER=${DOCKER_CMD}; DOCKER_COMPOSE=${DOCKER_COMPOSE_CMD}; PYTEST=${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}") - endif() -endif() diff --git a/dbms/tests/integration/README.md b/dbms/tests/integration/README.md deleted file mode 100644 index 64d8b29e35a..00000000000 --- a/dbms/tests/integration/README.md +++ /dev/null @@ -1,111 +0,0 @@ -## ClickHouse integration tests - -This directory contains tests that involve several ClickHouse instances, custom configs, ZooKeeper, etc. - -### Running natively - -Prerequisites: -* Ubuntu 14.04 (Trusty) or higher. -* [docker](https://www.docker.com/community-edition#/download). Minimum required API version: 1.25, check with `docker version`.
- -You must install the latest Docker from -https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#set-up-the-repository -Don't use Docker from your system repository. - -* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python-pip libpq-dev` -* [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest` -* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install docker-compose docker dicttoxml kazoo PyMySQL psycopg2 pymongo tzlocal kafka-python protobuf pytest-timeout minio rpm-confluent-schemaregistry` - -(strongly discouraged) If you really want to use OS packages on modern Debian/Ubuntu instead of pip: `sudo apt install -y docker docker-compose python-pytest python-dicttoxml python-docker python-pymysql python-pymongo python-tzlocal python-kazoo python-psycopg2 python-kafka python-pytest-timeout python-minio` - -If you want to run the tests under a non-privileged user, you must add this user to the `docker` group (`sudo usermod -aG docker $USER`) and log in again; -you must close all your sessions, for example by restarting your computer. -To check that you have access to Docker, run `docker ps`. - -Run the tests with the `pytest` command. To select which tests to run, use: `pytest -k <test_name_pattern>` - -By default, tests are run with the system-wide client binary, server binary and base configs. To change that, -set the following environment variables: -* `CLICKHOUSE_TESTS_SERVER_BIN_PATH` to choose the server binary. -* `CLICKHOUSE_TESTS_CLIENT_BIN_PATH` to choose the client binary. -* `CLICKHOUSE_TESTS_BASE_CONFIG_DIR` to choose the directory from which base configs (`config.xml` and - `users.xml`) are taken. - - -### Running with runner script - -The only requirements are a freshly configured Docker and the runner image: -`docker pull yandex/clickhouse-integration-tests-runner` - -Notes: -* If you want to run integration tests without `sudo`, you have to add your user to the docker group: `sudo usermod -aG docker $USER`. [More information](https://docs.docker.com/install/linux/linux-postinstall/) about docker configuration. -* If you have already run these tests without the `./runner` script, you may have problems with the pytest cache. It can be removed with `rm -r __pycache__ .pytest_cache/`. -* Some tests may require a lot of resources (CPU, RAM, etc.). It is better not to run large tests such as `test_cluster_copier` or `test_distributed_ddl*` on your laptop. - -You can run tests via the `./runner` script, passing pytest arguments as the last argument: -``` -$ ./runner --binary $HOME/ClickHouse/dbms/programs/clickhouse --bridge-binary $HOME/ClickHouse/dbms/programs/clickhouse-odbc-bridge --configs-dir $HOME/ClickHouse/dbms/programs/server/ 'test_odbc_interaction -ss' -Start tests -============================= test session starts ============================== -platform linux2 -- Python 2.7.15rc1, pytest-4.0.0, py-1.7.0, pluggy-0.8.0 -rootdir: /ClickHouse/dbms/tests/integration, inifile: pytest.ini -collected 6 items - -test_odbc_interaction/test.py Removing network clickhouse_default -... - -Killing roottestodbcinteraction_node1_1 ... done -Killing roottestodbcinteraction_mysql1_1 ... done -Killing roottestodbcinteraction_postgres1_1 ... done -Removing roottestodbcinteraction_node1_1 ... done -Removing roottestodbcinteraction_mysql1_1 ... done -Removing roottestodbcinteraction_postgres1_1 ...
done -Removing network roottestodbcinteraction_default - -==================== 6 passed, 1 warnings in 95.21 seconds ===================== - -``` - -Paths to the binary and configs may be specified via environment variables: -``` -$ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/dbms/programs/server/ -$ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/dbms/programs/clickhouse -$ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/dbms/programs/clickhouse-odbc-bridge -$ ./runner 'test_odbc_interaction' -$ # or ./runner '-v -ss' -Start tests -============================= test session starts ============================== -platform linux2 -- Python 2.7.15rc1, pytest-4.0.0, py-1.7.0, pluggy-0.8.0 -rootdir: /ClickHouse/dbms/tests/integration, inifile: pytest.ini -collected 6 items - -test_odbc_interaction/test.py ...... [100%] -==================== 6 passed, 1 warnings in 96.33 seconds ===================== -``` - -You can also open a shell inside a container by overriding the command: -`./runner --command=bash` - -### Rebuilding the docker containers - -The main container used for integration tests lives in `docker/test/integration/Dockerfile`. Rebuild it with -``` -cd docker/test/integration -docker build -t yandex/clickhouse-integration-test . -``` - -The helper container used by the `runner` script is in `dbms/tests/integration/image/Dockerfile`. - -### Adding new tests - -To add a new test named `foo`, create a directory `test_foo` with an empty `__init__.py` and a file -named `test.py` containing the tests (see the sketch below). All functions with names starting with `test` will become test cases. - -The `helpers` directory contains utilities for: -* Launching a ClickHouse cluster, with or without ZooKeeper, in docker containers. -* Sending queries to launched instances. -* Introducing network failures, such as severing the network link between two instances. - -To assert that two TSV files are equal, wrap them in the `TSV` class and use the regular `assert` -statement. Example: `assert TSV(result) == TSV(reference)`. If the assertion fails, `pytest` -will automagically detect the types of the variables and print only a small diff of the two files.
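To make that concrete, here is a minimal sketch of such a `test_foo/test.py`; the table name and queries are illustrative assumptions, while `ClickHouseCluster` and `TSV` are the helpers described above:
```python
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node')


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_foo(started_cluster):
    # Collected as a test case because the name starts with `test`.
    node.query("CREATE TABLE t (x UInt32) ENGINE = Memory")
    node.query("INSERT INTO t VALUES (1), (2)")
    assert TSV(node.query("SELECT x FROM t ORDER BY x")) == TSV("1\n2")
```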
diff --git a/dbms/tests/integration/pytest.ini b/dbms/tests/integration/pytest.ini deleted file mode 100644 index adb431deaa1..00000000000 --- a/dbms/tests/integration/pytest.ini +++ /dev/null @@ -1,4 +0,0 @@ -[pytest] -python_files = test*.py -norecursedirs = _instances -timeout = 300 diff --git a/dbms/tests/integration/test_allowed_client_hosts/configs/users.xml b/dbms/tests/integration/test_allowed_client_hosts/configs/users.xml deleted file mode 100644 index 3142ec5355a..00000000000 --- a/dbms/tests/integration/test_allowed_client_hosts/configs/users.xml +++ /dev/null @@ -1,13 +0,0 @@ [13 deleted lines of XML whose markup was lost in extraction: a minimal users.xml for the test; the only surviving text is "default".] diff --git a/dbms/tests/integration/test_config_corresponding_root/configs/config.xml b/dbms/tests/integration/test_config_corresponding_root/configs/config.xml deleted file mode 100644 index 154ebf6c35e..00000000000 --- a/dbms/tests/integration/test_config_corresponding_root/configs/config.xml +++ /dev/null @@ -1,415 +0,0 @@ [415 deleted lines of XML whose markup was lost in extraction: a full clickhouse-server config.xml covering trace-level logging to /var/log/clickhouse-server/clickhouse-server.log, ports 8123/9000/9009, TLS settings (/etc/clickhouse-server/server.crt, server.key, dhparam.pem, RejectCertificateHandler), data paths under /var/lib/clickhouse/ (tmp/, user_files/, format_schemas/), users.xml with a default profile and database, sample remote_servers cluster definitions on localhost:9000 and localhost:9440, the system.query_log and system.query_thread_log tables partitioned by toYYYYMM(event_date) with a 7500 ms flush interval, *_dictionary.xml dictionary configs, the /clickhouse/task_queue/ddl ZooKeeper path, and graphite rollup examples (click_cost, max).]
diff --git a/dbms/tests/integration/test_dictionaries_mysql/test.py b/dbms/tests/integration/test_dictionaries_mysql/test.py deleted file mode 100644 index 80424a3471a..00000000000 --- a/dbms/tests/integration/test_dictionaries_mysql/test.py +++ /dev/null @@ -1,95 +0,0 @@ -import pytest -import os -import time - -## sudo -H pip install PyMySQL -import pymysql.cursors - -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry - -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CONFIG_FILES = ['configs/dictionaries/mysql_dict1.xml', 'configs/dictionaries/mysql_dict2.xml', 'configs/remote_servers.xml'] - -cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs')) -instance = cluster.add_instance('instance', main_configs=CONFIG_FILES, with_mysql = True) - -create_table_mysql_template = """ - CREATE TABLE IF NOT EXISTS `test`.`{}` ( - `id` int(11) NOT NULL, - `value` varchar(50) NOT NULL, - PRIMARY KEY (`id`) - ) ENGINE=InnoDB; - """ - -create_clickhouse_dictionary_table_template = """ - CREATE TABLE IF NOT EXISTS `test`.`dict_table_{}` (`id` Int32, `value` String) ENGINE = Dictionary({}) - """ - -@pytest.fixture(scope="module") -def started_cluster(): - try: - #time.sleep(30) - cluster.start() - - # Create a MySQL database - mysql_connection = get_mysql_conn() - create_mysql_db(mysql_connection, 'test') - mysql_connection.close() - - # Create a database in ClickHouse - instance.query("CREATE DATABASE IF NOT EXISTS test") - - # Create a database in ClickHouse using the MySQL protocol (will be used for data insertion) - instance.query("CREATE DATABASE clickhouse_mysql ENGINE = MySQL('mysql1:3306', 'test', 'root', 'clickhouse')") - - yield cluster - - finally: - cluster.shutdown() - - -def test_load_mysql_dictionaries(started_cluster): - # Load dictionaries - query = instance.query - query("SYSTEM RELOAD DICTIONARIES") - - for n in range(0, 5): - # Create MySQL tables, fill them and create CH dict tables - prepare_mysql_table('test', str(n)) - - # Check that the dictionaries are loaded and have the correct number of elements - for n in range(0, 100): - # Force a reload of the dictionaries (every 10th iteration) - if (n % 10) == 0: - query("SYSTEM RELOAD DICTIONARIES") - - # Check the number of rows - assert query("SELECT count() FROM `test`.`dict_table_{}`".format('test' + str(n % 5))).rstrip() == '10000' - -def create_mysql_db(mysql_connection, name): - with mysql_connection.cursor() as cursor: - cursor.execute("CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET 'utf8'".format(name)) - -def prepare_mysql_table(table_name, index): - mysql_connection = get_mysql_conn() - - # Create the table - create_mysql_table(mysql_connection, table_name + str(index)) - - # Insert rows using CH - query = instance.query - query("INSERT INTO `clickhouse_mysql`.{}(id, value) select number, concat('{} value ', toString(number)) from numbers(10000) ".format(table_name + str(index), table_name + str(index))) - assert query("SELECT count() FROM `clickhouse_mysql`.{}".format(table_name + str(index))).rstrip() == '10000' - mysql_connection.close() - - # Create CH dictionary tables based on the MySQL tables - query(create_clickhouse_dictionary_table_template.format(table_name + str(index), 'dict' + str(index))) - -def get_mysql_conn(): - conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.10', port=3308) - return conn - -def create_mysql_table(conn, table_name): - with conn.cursor() as cursor: -
cursor.execute(create_table_mysql_template.format(table_name)) diff --git a/dbms/tests/integration/test_disk_access_storage/configs/access_control_path.xml b/dbms/tests/integration/test_disk_access_storage/configs/access_control_path.xml deleted file mode 100644 index 7814472ee9b..00000000000 --- a/dbms/tests/integration/test_disk_access_storage/configs/access_control_path.xml +++ /dev/null @@ -1,4 +0,0 @@ -<?xml version="1.0"?> -<yandex> -<access_control_path>/var/lib/clickhouse/access</access_control_path> -</yandex> diff --git a/dbms/tests/integration/test_disk_access_storage/test.py b/dbms/tests/integration/test_disk_access_storage/test.py deleted file mode 100644 index d5e1f283167..00000000000 --- a/dbms/tests/integration/test_disk_access_storage/test.py +++ /dev/null @@ -1,106 +0,0 @@ -import pytest -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', config_dir='configs', main_configs=['configs/access_control_path.xml'], stay_alive=True) - - -@pytest.fixture(scope="module", autouse=True) -def started_cluster(): - try: - cluster.start() - yield cluster - - finally: - cluster.shutdown() - - -def create_entities(): - instance.query("CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000") - instance.query("CREATE USER u1 SETTINGS PROFILE s1") - instance.query("CREATE ROLE rx SETTINGS PROFILE s1") - instance.query("CREATE USER u2 IDENTIFIED BY 'qwerty' HOST LOCAL DEFAULT ROLE rx") - instance.query("CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2") - instance.query("CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a<1000 TO u1, u2") - instance.query("CREATE QUOTA q FOR INTERVAL 1 HOUR SET MAX QUERIES = 100 TO ALL EXCEPT rx") - - -@pytest.fixture(autouse=True) -def drop_entities(): - instance.query("DROP USER IF EXISTS u1, u2") - instance.query("DROP ROLE IF EXISTS rx, ry") - instance.query("DROP ROW POLICY IF EXISTS p ON mydb.mytable") - instance.query("DROP QUOTA IF EXISTS q") - instance.query("DROP SETTINGS PROFILE IF EXISTS s1, s2") - - -def test_create(): - create_entities() - - def check(): - assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" - assert instance.query("SHOW CREATE USER u2") == "CREATE USER u2 HOST LOCAL DEFAULT ROLE rx\n" - assert instance.query("SHOW CREATE ROW POLICY p ON mydb.mytable") == "CREATE POLICY p ON mydb.mytable FOR SELECT USING a < 1000 TO u1, u2\n" - assert instance.query("SHOW CREATE QUOTA q") == "CREATE QUOTA q KEYED BY \\'none\\' FOR INTERVAL 1 HOUR MAX QUERIES = 100 TO ALL EXCEPT rx\n" - assert instance.query("SHOW GRANTS FOR u1") == "" - assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx TO u2\n" - assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s1\n" - assert instance.query("SHOW GRANTS FOR rx") == "" - assert instance.query("SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000\n" - assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2\n" - - check() - instance.restart_clickhouse() # Check persistency - check() - - -def test_alter(): - create_entities() - instance.restart_clickhouse() - - instance.query("CREATE ROLE ry") - instance.query("GRANT ry TO u2") - instance.query("ALTER USER u2 DEFAULT ROLE ry") - instance.query("GRANT rx TO ry WITH ADMIN OPTION") - instance.query("ALTER ROLE rx SETTINGS PROFILE s2") - instance.query("GRANT SELECT ON mydb.mytable TO u1") - instance.query("GRANT SELECT 
ON mydb.* TO rx WITH GRANT OPTION") - instance.query("ALTER SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY") - - def check(): - assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" - assert instance.query("SHOW CREATE USER u2") == "CREATE USER u2 HOST LOCAL DEFAULT ROLE ry\n" - assert instance.query("SHOW GRANTS FOR u1") == "GRANT SELECT ON mydb.mytable TO u1\n" - assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx, ry TO u2\n" - assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s2\n" - assert instance.query("SHOW CREATE ROLE ry") == "CREATE ROLE ry\n" - assert instance.query("SHOW GRANTS FOR rx") == "GRANT SELECT ON mydb.* TO rx WITH GRANT OPTION\n" - assert instance.query("SHOW GRANTS FOR ry") == "GRANT rx TO ry WITH ADMIN OPTION\n" - assert instance.query("SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY\n" - assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2\n" - - check() - instance.restart_clickhouse() # Check persistency - check() - - -def test_drop(): - create_entities() - instance.restart_clickhouse() - - instance.query("DROP USER u2") - instance.query("DROP ROLE rx") - instance.query("DROP ROW POLICY p ON mydb.mytable") - instance.query("DROP QUOTA q") - instance.query("DROP SETTINGS PROFILE s1") - - def check(): - assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1\n" - assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2\n" - assert "User `u2` not found" in instance.query_and_get_error("SHOW CREATE USER u2") - assert "Row policy `p ON mydb.mytable` not found" in instance.query_and_get_error("SHOW CREATE ROW POLICY p ON mydb.mytable") - assert "Quota `q` not found" in instance.query_and_get_error("SHOW CREATE QUOTA q") - - check() - instance.restart_clickhouse() # Check persistency - check() diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py b/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py deleted file mode 100644 index 72c3001ee91..00000000000 --- a/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py +++ /dev/null @@ -1,157 +0,0 @@ -import itertools -import timeit - -import pytest - -from helpers.cluster import ClickHouseCluster -from helpers.network import PartitionManager -from helpers.test_tools import TSV - - -cluster = ClickHouseCluster(__file__) - -NODES = {'node' + str(i): None for i in (1, 2)} - -CREATE_TABLES_SQL = ''' -CREATE DATABASE test; - -CREATE TABLE base_table( - node String -) -ENGINE = MergeTree -PARTITION BY node -ORDER BY node; - -CREATE TABLE distributed_table -ENGINE = Distributed(test_cluster, default, base_table) AS base_table; -''' - -INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}')" - -SELECTS_SQL = { - 'distributed': 'SELECT node FROM distributed_table ORDER BY node', - 'remote': ("SELECT node FROM remote('node1,node2', default.base_table) " - "ORDER BY node"), -} - -EXCEPTION_NETWORK = 'e.displayText() = DB::NetException: ' -EXCEPTION_TIMEOUT = 'Timeout exceeded while reading from socket (' -EXCEPTION_CONNECT = 'Timeout: connect timed out: ' - -TIMEOUT_MEASUREMENT_EPS = 0.01 - -EXPECTED_BEHAVIOR = { - 'default': { - 'times': 3, - 'timeout': 1, - }, - 'ready_to_wait': { - 'times': 5, - 'timeout': 3, - }, -} - -TIMEOUT_DIFF_UPPER_BOUND = { - 'default': { - 'distributed': 5.5, - 'remote': 2.5, - }, - 
'ready_to_wait': { - 'distributed': 3, - 'remote': 1.5, - }, -} - -def _check_exception(exception, expected_tries=3): - lines = exception.split('\n') - - assert len(lines) > 4, "Unexpected exception (expected: timeout info)" - - assert lines[0].startswith('Received exception from server (version') - - assert lines[1].startswith('Code: 279') - assert lines[1].endswith('All connection tries failed. Log: ') - - assert lines[2] == '', "Unexpected exception text (expected: empty line)" - - for i, line in enumerate(lines[3:3 + expected_tries]): - expected_lines = ( - 'Code: 209, ' + EXCEPTION_NETWORK + EXCEPTION_TIMEOUT, - 'Code: 209, ' + EXCEPTION_NETWORK + EXCEPTION_CONNECT, - ) - - assert any(line.startswith(expected) for expected in expected_lines), \ - 'Unexpected exception at one of the connection attempts' - - assert lines[3 + expected_tries] == '', 'Wrong number of connect attempts' - - -@pytest.fixture(scope="module", params=["configs", "configs_secure"]) -def started_cluster(request): - - cluster = ClickHouseCluster(__file__) - cluster.__with_ssl_config = request.param == "configs_secure" - for name in NODES: - NODES[name] = cluster.add_instance(name, config_dir=request.param) - try: - cluster.start() - - for node_id, node in NODES.items(): - node.query(CREATE_TABLES_SQL) - node.query(INSERT_SQL_TEMPLATE.format(node_id=node_id)) - - yield cluster - - finally: - cluster.shutdown() - - -def _check_timeout_and_exception(node, user, query_base, query): - repeats = EXPECTED_BEHAVIOR[user]['times'] - expected_timeout = EXPECTED_BEHAVIOR[user]['timeout'] * repeats - - start = timeit.default_timer() - exception = node.query_and_get_error(query, user=user) - - # And it should timeout no faster than: - measured_timeout = timeit.default_timer() - start - - assert expected_timeout - measured_timeout <= TIMEOUT_MEASUREMENT_EPS - assert measured_timeout - expected_timeout <= TIMEOUT_DIFF_UPPER_BOUND[user][query_base] - - # And exception should reflect connection attempts: - _check_exception(exception, repeats) - - -@pytest.mark.parametrize( - ('first_user', 'node_name', 'query_base'), - tuple(itertools.product(EXPECTED_BEHAVIOR, NODES, SELECTS_SQL)), -) -def test_reconnect(started_cluster, node_name, first_user, query_base): - node = NODES[node_name] - query = SELECTS_SQL[query_base] - if started_cluster.__with_ssl_config: - query = query.replace('remote(', 'remoteSecure(') - - # Everything is up, select should work: - assert TSV(node.query(query, - user=first_user)) == TSV('node1\nnode2') - - with PartitionManager() as pm: - # Break the connection. 
- pm.partition_instances(*NODES.values()) - - # Now it shouldn't: - _check_timeout_and_exception(node, first_user, query_base, query) - - # Other user should have different timeout and exception - _check_timeout_and_exception( - node, - 'default' if first_user != 'default' else 'ready_to_wait', - query_base, - query, - ) - - # select should work again: - assert TSV(node.query(query, - user=first_user)) == TSV('node1\nnode2') diff --git a/dbms/tests/integration/test_globs_in_filepath/test.py b/dbms/tests/integration/test_globs_in_filepath/test.py deleted file mode 100644 index 70bdb7777fb..00000000000 --- a/dbms/tests/integration/test_globs_in_filepath/test.py +++ /dev/null @@ -1,133 +0,0 @@ -import pytest - -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node') -path_to_userfiles_from_defaut_config = "/var/lib/clickhouse/user_files/" # should be the same as in config file - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - - yield cluster - - except Exception as ex: - print(ex) - raise ex - finally: - cluster.shutdown() - -def test_strange_filenames(start_cluster): - # 2 rows data - some_data = "\t111.222\nData\t333.444" - - node.exec_in_container(['bash', '-c', 'mkdir {}strange_names/'.format(path_to_userfiles_from_defaut_config)], privileged=True, user='root') - - files = ["p.o.i.n.t.s", - "b}{ra{ces", - "b}.o{t.h"] - - # filename inside testing data for debug simplicity - for filename in files: - node.exec_in_container(['bash', '-c', 'echo "{}{}" > {}strange_names/{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, filename)], privileged=True, user='root') - - test_requests = [("p.o.??n.t.s", "2"), - ("p.o.*t.s", "2"), - ("b}{r?{ces", "2"), - ("b}*ces", "2"), - ("b}.?{t.h", "2")] - - for pattern, value in test_requests: - assert node.query(''' - select count(*) from file('strange_names/{}', 'TSV', 'text String, number Float64') - '''.format(pattern)) == '{}\n'.format(value) - assert node.query(''' - select count(*) from file('{}strange_names/{}', 'TSV', 'text String, number Float64') - '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) - -def test_linear_structure(start_cluster): - # 2 rows data - some_data = "\t123.456\nData\t789.012" - - files = ["file1", "file2", "file3", "file4", "file5", - "file000", "file111", "file222", "file333", "file444", - "a_file", "b_file", "c_file", "d_file", "e_file", - "a_data", "b_data", "c_data", "d_data", "e_data"] - - # filename inside testing data for debug simplicity - for filename in files: - node.exec_in_container(['bash', '-c', 'echo "{}{}" > {}{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, filename)], privileged=True, user='root') - - test_requests = [("file{0..9}", "10"), - ("file?", "10"), - ("nothing*", "0"), - ("file{0..9}{0..9}{0..9}", "10"), - ("file???", "10"), - ("file*", "20"), - ("a_{file,data}", "4"), - ("?_{file,data}", "20"), - ("{a,b,c,d,e}_{file,data}", "20"), - ("{a,b,c,d,e}?{file,data}", "20"), - ("*", "40")] - - for pattern, value in test_requests: - assert node.query(''' - select count(*) from file('{}', 'TSV', 'text String, number Float64') - '''.format(pattern)) == '{}\n'.format(value) - assert node.query(''' - select count(*) from file('{}{}', 'TSV', 'text String, number Float64') - '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) - -def test_deep_structure(start_cluster): - # 2 rows data - some_data = 
"\t135.791\nData\t246.802" - dirs = ["directory1/", "directory2/", "some_more_dir/", "we/", - "directory1/big_dir/", - "directory1/dir1/", "directory1/dir2/", "directory1/dir3/", - "directory2/dir1/", "directory2/dir2/", "directory2/one_more_dir/", - "some_more_dir/yet_another_dir/", - "we/need/", "we/need/to/", "we/need/to/go/", "we/need/to/go/deeper/"] - - for dir in dirs: - node.exec_in_container(['bash', '-c', 'mkdir {}{}'.format(path_to_userfiles_from_defaut_config, dir)], privileged=True, user='root') - - # all directories appeared in files must be listed in dirs - files = [] - for i in range(10): - for j in range(10): - for k in range(10): - files.append("directory1/big_dir/file" + str(i) + str(j) + str(k)) - - for dir in dirs: - files.append(dir+"file") - - # filename inside testing data for debug simplicity - for filename in files: - node.exec_in_container(['bash', '-c', 'echo "{}{}" > {}{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, filename)], privileged=True, user='root') - - test_requests = [ ("directory{1..5}/big_dir/*", "2002"), ("directory{0..6}/big_dir/*{0..9}{0..9}{0..9}", "2000"), - ("?", "0"), - ("directory{0..5}/dir{1..3}/file", "10"), ("directory{0..5}/dir?/file", "10"), - ("we/need/to/go/deeper/file", "2"), ("*/*/*/*/*/*", "2"), ("we/need/??/go/deeper/*?*?*?*?*", "2")] - - for pattern, value in test_requests: - assert node.query(''' - select count(*) from file('{}', 'TSV', 'text String, number Float64') - '''.format(pattern)) == '{}\n'.format(value) - assert node.query(''' - select count(*) from file('{}{}', 'TSV', 'text String, number Float64') - '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) - -def test_table_function_and_virtual_columns(start_cluster): - node.exec_in_container(['bash', '-c', 'mkdir -p {}some/path/to/'.format(path_to_userfiles_from_defaut_config)]) - node.exec_in_container(['bash', '-c', 'touch {}some/path/to/data.CSV'.format(path_to_userfiles_from_defaut_config)]) - node.query("insert into table function file('some/path/to/data.CSV', CSV, 'n UInt8, s String') select number, concat('str_', toString(number)) from numbers(100000)") - assert node.query("select count() from file('some/path/to/data.CSV', CSV, 'n UInt8, s String')").rstrip() == '100000' - node.query("insert into table function file('nonexist.csv', 'CSV', 'val1 UInt32') values (1)") - assert node.query("select * from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip()== '1' - assert "nonexist.csv" in node.query("select _path from file('nonexis?.csv', 'CSV', 'val1 UInt32')").rstrip() - assert "nonexist.csv" in node.query("select _path from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() - assert "nonexist.csv" == node.query("select _file from file('nonexis?.csv', 'CSV', 'val1 UInt32')").rstrip() - assert "nonexist.csv" == node.query("select _file from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() \ No newline at end of file diff --git a/dbms/tests/integration/test_grant_and_revoke/configs/users.xml b/dbms/tests/integration/test_grant_and_revoke/configs/users.xml deleted file mode 100644 index fd40c6a4003..00000000000 --- a/dbms/tests/integration/test_grant_and_revoke/configs/users.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - ::/0 - - default - - - diff --git a/dbms/tests/integration/test_grant_and_revoke/test.py b/dbms/tests/integration/test_grant_and_revoke/test.py deleted file mode 100644 index 25e0e9882de..00000000000 --- a/dbms/tests/integration/test_grant_and_revoke/test.py +++ /dev/null @@ -1,129 
+0,0 @@ -import pytest -from helpers.cluster import ClickHouseCluster -import re - -cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', config_dir="configs") - - -@pytest.fixture(scope="module", autouse=True) -def started_cluster(): - try: - cluster.start() - - instance.query("CREATE TABLE test_table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()") - instance.query("INSERT INTO test_table VALUES (1,5), (2,10)") - - yield cluster - - finally: - cluster.shutdown() - - -@pytest.fixture(autouse=True) -def reset_users_and_roles(): - try: - yield - finally: - instance.query("DROP USER IF EXISTS A, B") - instance.query("DROP ROLE IF EXISTS R1, R2") - - -def test_login(): - instance.query("CREATE USER A") - instance.query("CREATE USER B") - assert instance.query("SELECT 1", user='A') == "1\n" - assert instance.query("SELECT 1", user='B') == "1\n" - - -def test_grant_and_revoke(): - instance.query("CREATE USER A") - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - instance.query('GRANT SELECT ON test_table TO A') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" - - instance.query('REVOKE SELECT ON test_table FROM A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - -def test_grant_option(): - instance.query("CREATE USER A") - instance.query("CREATE USER B") - - instance.query('GRANT SELECT ON test_table TO A') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" - assert "Not enough privileges" in instance.query_and_get_error("GRANT SELECT ON test_table TO B", user='A') - - instance.query('GRANT SELECT ON test_table TO A WITH GRANT OPTION') - instance.query("GRANT SELECT ON test_table TO B", user='A') - assert instance.query("SELECT * FROM test_table", user='B') == "1\t5\n2\t10\n" - - instance.query('REVOKE SELECT ON test_table FROM A, B') - - -def test_create_role(): - instance.query("CREATE USER A") - instance.query('CREATE ROLE R1') - - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - instance.query('GRANT SELECT ON test_table TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - instance.query('GRANT R1 TO A') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" - - instance.query('REVOKE R1 FROM A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - -def test_grant_role_to_role(): - instance.query("CREATE USER A") - instance.query('CREATE ROLE R1') - instance.query('CREATE ROLE R2') - - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - instance.query('GRANT R1 TO A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - instance.query('GRANT R2 TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - instance.query('GRANT SELECT ON test_table TO R2') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" - - -def test_combine_privileges(): - instance.query("CREATE USER A ") - instance.query('CREATE ROLE R1') - instance.query('CREATE ROLE R2') - - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - - instance.query('GRANT R1 TO A') - 
instance.query('GRANT SELECT(x) ON test_table TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - assert instance.query("SELECT x FROM test_table", user='A') == "1\n2\n" - - instance.query('GRANT SELECT(y) ON test_table TO R2') - instance.query('GRANT R2 TO A') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" - - -def test_admin_option(): - instance.query("CREATE USER A") - instance.query("CREATE USER B") - instance.query('CREATE ROLE R1') - - instance.query('GRANT SELECT ON test_table TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B') - - instance.query('GRANT R1 TO A') - assert "Not enough privileges" in instance.query_and_get_error("GRANT R1 TO B", user='A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B') - - instance.query('GRANT R1 TO A WITH ADMIN OPTION') - instance.query("GRANT R1 TO B", user='A') - assert instance.query("SELECT * FROM test_table", user='B') == "1\t5\n2\t10\n" diff --git a/dbms/tests/integration/test_mysql_database_engine/test.py b/dbms/tests/integration/test_mysql_database_engine/test.py deleted file mode 100644 index 86e0b9df5fd..00000000000 --- a/dbms/tests/integration/test_mysql_database_engine/test.py +++ /dev/null @@ -1,122 +0,0 @@ -import time -import contextlib - -import pymysql.cursors -import pytest - -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -clickhouse_node = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql=True) - - -@pytest.fixture(scope="module") -def started_cluster(): - try: - cluster.start() - yield cluster - finally: - cluster.shutdown() - - -class MySQLNodeInstance: - def __init__(self, user='root', password='clickhouse', hostname='127.0.0.1', port=3308): - self.user = user - self.port = port - self.hostname = hostname - self.password = password - self.mysql_connection = None # lazy init - - def query(self, execution_query): - if self.mysql_connection is None: - self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.hostname, port=self.port) - with self.mysql_connection.cursor() as cursor: - cursor.execute(execution_query) - - def close(self): - if self.mysql_connection is not None: - self.mysql_connection.close() - - -def test_mysql_ddl_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: - mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") - - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', 'test_database', 'root', 'clickhouse')") - assert 'test_database' in clickhouse_node.query('SHOW DATABASES') - - mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;') - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') - - time.sleep(3) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained - mysql_node.query('ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11)') - assert 'add_column' in clickhouse_node.query("SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") - - time.sleep(3) # Because the unit of MySQL modification time is seconds, modifications made in the same second 
cannot be obtained - mysql_node.query('ALTER TABLE `test_database`.`test_table` DROP COLUMN `add_column`') - assert 'add_column' not in clickhouse_node.query("SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") - - mysql_node.query('DROP TABLE `test_database`.`test_table`;') - assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') - - clickhouse_node.query("DROP DATABASE test_database") - assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') - - mysql_node.query("DROP DATABASE test_database") - - -def test_clickhouse_ddl_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: - mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") - mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;') - - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', 'test_database', 'root', 'clickhouse')") - - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') - clickhouse_node.query("DROP TABLE test_database.test_table") - assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') - clickhouse_node.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') - clickhouse_node.query("DETACH TABLE test_database.test_table") - assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') - clickhouse_node.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') - - clickhouse_node.query("DROP DATABASE test_database") - assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') - - mysql_node.query("DROP DATABASE test_database") - - -def test_clickhouse_dml_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: - mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") - mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `i``d` int(11) NOT NULL, PRIMARY KEY (`i``d`)) ENGINE=InnoDB;') - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', 'test_database', 'root', 'clickhouse')") - - assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '0' - clickhouse_node.query("INSERT INTO `test_database`.`test_table`(`i\`d`) select number from numbers(10000)") - assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '10000' - - mysql_node.query("DROP DATABASE test_database") - - -def test_clickhouse_join_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: - mysql_node.query("CREATE DATABASE IF NOT EXISTS test DEFAULT CHARACTER SET 'utf8'") - mysql_node.query("CREATE TABLE test.t1_mysql_local (" - "pays VARCHAR(55) DEFAULT 'FRA' NOT NULL," - "service VARCHAR(5) DEFAULT '' NOT NULL," - "opco CHAR(3) DEFAULT '' NOT NULL" - ")") - mysql_node.query("CREATE TABLE test.t2_mysql_local (" - "service VARCHAR(5) DEFAULT '' NOT NULL," - "opco VARCHAR(5) DEFAULT ''" - ")") - clickhouse_node.query("CREATE TABLE default.t1_remote_mysql AS mysql('mysql1:3306','test','t1_mysql_local','root','clickhouse')") - 
clickhouse_node.query("CREATE TABLE default.t2_remote_mysql AS mysql('mysql1:3306','test','t2_mysql_local','root','clickhouse')") - assert clickhouse_node.query("SELECT s.pays " - "FROM default.t1_remote_mysql AS s " - "LEFT JOIN default.t1_remote_mysql AS s_ref " - "ON (s_ref.opco = s.opco AND s_ref.service = s.service)") == '' - mysql_node.query("DROP DATABASE test") diff --git a/dbms/tests/integration/test_mysql_protocol/test.py b/dbms/tests/integration/test_mysql_protocol/test.py deleted file mode 100644 index 7987076c29a..00000000000 --- a/dbms/tests/integration/test_mysql_protocol/test.py +++ /dev/null @@ -1,321 +0,0 @@ -# coding: utf-8 - -import docker -import datetime -import math -import os -import pytest -import subprocess -import time -import pymysql.connections - -from docker.models.containers import Container - -from helpers.cluster import ClickHouseCluster - - -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - -config_dir = os.path.join(SCRIPT_DIR, './configs') -cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', config_dir=config_dir, env_variables={'UBSAN_OPTIONS': 'print_stacktrace=1'}) - -server_port = 9001 - - -@pytest.fixture(scope="module") -def server_address(): - cluster.start() - try: - yield cluster.get_instance_ip('node') - finally: - cluster.shutdown() - - -@pytest.fixture(scope='module') -def mysql_client(): - docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'mysql', 'docker_compose.yml') - subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) - yield docker.from_env().containers.get(cluster.project_name + '_mysql1_1') - - -@pytest.fixture(scope='module') -def mysql_server(mysql_client): - """Return MySQL container when it is healthy. - - :type mysql_client: Container - :rtype: Container - """ - retries = 30 - for i in range(retries): - info = mysql_client.client.api.inspect_container(mysql_client.name) - if info['State']['Health']['Status'] == 'healthy': - break - time.sleep(1) - else: - raise Exception('Mysql server has not started in %d seconds.' 
% retries) - - return mysql_client - - -@pytest.fixture(scope='module') -def golang_container(): - docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'golang', 'docker_compose.yml') - subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) - yield docker.from_env().containers.get(cluster.project_name + '_golang1_1') - - -@pytest.fixture(scope='module') -def php_container(): - docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'php-mysqlnd', 'docker_compose.yml') - subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) - yield docker.from_env().containers.get(cluster.project_name + '_php1_1') - - -@pytest.fixture(scope='module') -def nodejs_container(): - docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'mysqljs', 'docker_compose.yml') - subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) - yield docker.from_env().containers.get(cluster.project_name + '_mysqljs1_1') - - -def test_mysql_client(mysql_client, server_address): - # type: (Container, str) -> None - code, (stdout, stderr) = mysql_client.exec_run(''' - mysql --protocol tcp -h {host} -P {port} default -u user_with_double_sha1 --password=abacaba - -e "SELECT 1;" - '''.format(host=server_address, port=server_port), demux=True) - - assert stdout == '\n'.join(['1', '1', '']) - - code, (stdout, stderr) = mysql_client.exec_run(''' - mysql --protocol tcp -h {host} -P {port} default -u default --password=123 - -e "SELECT 1 as a;" - -e "SELECT 'тест' as b;" - '''.format(host=server_address, port=server_port), demux=True) - - assert stdout == '\n'.join(['a', '1', 'b', 'тест', '']) - - code, (stdout, stderr) = mysql_client.exec_run(''' - mysql --protocol tcp -h {host} -P {port} default -u default --password=abc -e "select 1 as a;" - '''.format(host=server_address, port=server_port), demux=True) - - assert stderr == 'mysql: [Warning] Using a password on the command line interface can be insecure.\n' \ - 'ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n' - - code, (stdout, stderr) = mysql_client.exec_run(''' - mysql --protocol tcp -h {host} -P {port} default -u default --password=123 - -e "use system;" - -e "select count(*) from (select name from tables limit 1);" - -e "use system2;" - '''.format(host=server_address, port=server_port), demux=True) - - assert stdout == 'count()\n1\n' - assert stderr == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \ - "ERROR 81 (00000) at line 1: Database system2 doesn't exist\n" - - code, (stdout, stderr) = mysql_client.exec_run(''' - mysql --protocol tcp -h {host} -P {port} default -u default --password=123 - -e "CREATE DATABASE x;" - -e "USE x;" - -e "CREATE TABLE table1 (column UInt32) ENGINE = Memory;" - -e "INSERT INTO table1 VALUES (0), (1), (5);" - -e "INSERT INTO table1 VALUES (0), (1), (5);" - -e "SELECT * FROM table1 ORDER BY column;" - -e "DROP DATABASE x;" - -e "CREATE TEMPORARY TABLE tmp (tmp_column UInt32);" - -e "INSERT INTO tmp VALUES (0), (1);" - -e "SELECT * FROM tmp ORDER BY tmp_column;" - '''.format(host=server_address, port=server_port), demux=True) - - assert stdout == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', '']) - - -def test_mysql_federated(mysql_server, server_address): - node.query('''DROP DATABASE IF EXISTS 
mysql_federated''', settings={"password": "123"}) - node.query('''CREATE DATABASE mysql_federated''', settings={"password": "123"}) - node.query('''CREATE TABLE mysql_federated.test (col UInt32) ENGINE = Log''', settings={"password": "123"}) - node.query('''INSERT INTO mysql_federated.test VALUES (0), (1), (5)''', settings={"password": "123"}) - - code, (_, stderr) = mysql_server.exec_run(''' - mysql - -e "DROP SERVER IF EXISTS clickhouse;" - -e "CREATE SERVER clickhouse FOREIGN DATA WRAPPER mysql OPTIONS (USER 'default', PASSWORD '123', HOST '{host}', PORT {port}, DATABASE 'mysql_federated');" - -e "DROP DATABASE IF EXISTS mysql_federated;" - -e "CREATE DATABASE mysql_federated;" - '''.format(host=server_address, port=server_port), demux=True) - - assert code == 0 - - code, (stdout, stderr) = mysql_server.exec_run(''' - mysql - -e "CREATE TABLE mysql_federated.test(`col` int UNSIGNED) ENGINE=FEDERATED CONNECTION='clickhouse';" - -e "SELECT * FROM mysql_federated.test ORDER BY col;" - '''.format(host=server_address, port=server_port), demux=True) - - assert stdout == '\n'.join(['col', '0', '1', '5', '']) - - code, (stdout, stderr) = mysql_server.exec_run(''' - mysql - -e "INSERT INTO mysql_federated.test VALUES (0), (1), (5);" - -e "SELECT * FROM mysql_federated.test ORDER BY col;" - '''.format(host=server_address, port=server_port), demux=True) - - assert stdout == '\n'.join(['col', '0', '0', '1', '1', '5', '5', '']) - - -def test_python_client(server_address): - client = pymysql.connections.Connection(host=server_address, user='user_with_double_sha1', password='abacaba', database='default', port=server_port) - - with pytest.raises(pymysql.InternalError) as exc_info: - client.query('select name from tables') - - assert exc_info.value.args == (60, "Table default.tables doesn't exist.") - - cursor = client.cursor(pymysql.cursors.DictCursor) - cursor.execute("select 1 as a, 'тест' as b") - assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] - - with pytest.raises(pymysql.InternalError) as exc_info: - pymysql.connections.Connection(host=server_address, user='default', password='abacab', database='default', port=server_port) - - assert exc_info.value.args == (516, 'default: Authentication failed: password is incorrect or there is no user with such name') - - client = pymysql.connections.Connection(host=server_address, user='default', password='123', database='default', port=server_port) - - with pytest.raises(pymysql.InternalError) as exc_info: - client.query('select name from tables') - - assert exc_info.value.args == (60, "Table default.tables doesn't exist.") - - cursor = client.cursor(pymysql.cursors.DictCursor) - cursor.execute("select 1 as a, 'тест' as b") - assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] - - client.select_db('system') - - with pytest.raises(pymysql.InternalError) as exc_info: - client.select_db('system2') - - assert exc_info.value.args == (81, "Database system2 doesn't exist") - - cursor = client.cursor(pymysql.cursors.DictCursor) - cursor.execute('CREATE DATABASE x') - client.select_db('x') - cursor.execute("CREATE TABLE table1 (a UInt32) ENGINE = Memory") - cursor.execute("INSERT INTO table1 VALUES (1), (3)") - cursor.execute("INSERT INTO table1 VALUES (1), (4)") - cursor.execute("SELECT * FROM table1 ORDER BY a") - assert cursor.fetchall() == [{'a': 1}, {'a': 1}, {'a': 3}, {'a': 4}] - - -def test_golang_client(server_address, golang_container): - # type: (str, Container) -> None - with open(os.path.join(SCRIPT_DIR, 'clients', 'golang', '0.reference')) as fp: - 
reference = fp.read() - - code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user default --password 123 --database ' - 'abc'.format(host=server_address, port=server_port), demux=True) - - assert code == 1 - assert stderr == "Error 81: Database abc doesn't exist\n" - - code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user default --password 123 --database ' - 'default'.format(host=server_address, port=server_port), demux=True) - - assert code == 0 - assert stdout == reference - - code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database ' - 'default'.format(host=server_address, port=server_port), demux=True) - assert code == 0 - assert stdout == reference - - -def test_php_client(server_address, php_container): - # type: (str, Container) -> None - code, (stdout, stderr) = php_container.exec_run('php -f test.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True) - assert code == 0 - assert stdout == 'tables\n' - - code, (stdout, stderr) = php_container.exec_run('php -f test_ssl.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True) - assert code == 0 - assert stdout == 'tables\n' - - code, (stdout, stderr) = php_container.exec_run('php -f test.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) - assert code == 0 - assert stdout == 'tables\n' - - code, (stdout, stderr) = php_container.exec_run('php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) - assert code == 0 - assert stdout == 'tables\n' - - -def test_mysqljs_client(server_address, nodejs_container): - code, (_, stderr) = nodejs_container.exec_run('node test.js {host} {port} user_with_sha256 abacaba'.format(host=server_address, port=server_port), demux=True) - assert code == 1 - assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' 
in stderr - - code, (_, stderr) = nodejs_container.exec_run('node test.js {host} {port} user_with_empty_password ""'.format(host=server_address, port=server_port), demux=True) - assert code == 0 - - code, (_, _) = nodejs_container.exec_run('node test.js {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) - assert code == 0 - - code, (_, _) = nodejs_container.exec_run('node test.js {host} {port} user_with_empty_password 123'.format(host=server_address, port=server_port), demux=True) - assert code == 1 - - -def test_types(server_address): - client = pymysql.connections.Connection(host=server_address, user='default', password='123', database='default', port=server_port) - - cursor = client.cursor(pymysql.cursors.DictCursor) - cursor.execute( - "select " - "toInt8(-pow(2, 7)) as Int8_column, " - "toUInt8(pow(2, 8) - 1) as UInt8_column, " - "toInt16(-pow(2, 15)) as Int16_column, " - "toUInt16(pow(2, 16) - 1) as UInt16_column, " - "toInt32(-pow(2, 31)) as Int32_column, " - "toUInt32(pow(2, 32) - 1) as UInt32_column, " - "toInt64('-9223372036854775808') as Int64_column, " # -2^63 - "toUInt64('18446744073709551615') as UInt64_column, " # 2^64 - 1 - "'тест' as String_column, " - "toFixedString('тест', 8) as FixedString_column, " - "toFloat32(1.5) as Float32_column, " - "toFloat64(1.5) as Float64_column, " - "toFloat32(NaN) as Float32_NaN_column, " - "-Inf as Float64_Inf_column, " - "toDate('2019-12-08') as Date_column, " - "toDate('1970-01-01') as Date_min_column, " - "toDate('1970-01-02') as Date_after_min_column, " - "toDateTime('2019-12-08 08:24:03') as DateTime_column" - ) - - result = cursor.fetchall()[0] - expected = [ - ('Int8_column', -2 ** 7), - ('UInt8_column', 2 ** 8 - 1), - ('Int16_column', -2 ** 15), - ('UInt16_column', 2 ** 16 - 1), - ('Int32_column', -2 ** 31), - ('UInt32_column', 2 ** 32 - 1), - ('Int64_column', -2 ** 63), - ('UInt64_column', 2 ** 64 - 1), - ('String_column', 'тест'), - ('FixedString_column', 'тест'), - ('Float32_column', 1.5), - ('Float64_column', 1.5), - ('Float32_NaN_column', float('nan')), - ('Float64_Inf_column', float('-inf')), - ('Date_column', datetime.date(2019, 12, 8)), - ('Date_min_column', '0000-00-00'), - ('Date_after_min_column', datetime.date(1970, 1, 2)), - ('DateTime_column', datetime.datetime(2019, 12, 8, 8, 24, 3)), - ] - - for key, value in expected: - if isinstance(value, float) and math.isnan(value): - assert math.isnan(result[key]) - else: - assert result[key] == value diff --git a/dbms/tests/integration/test_odbc_interaction/test.py b/dbms/tests/integration/test_odbc_interaction/test.py deleted file mode 100644 index 1e876e507f4..00000000000 --- a/dbms/tests/integration/test_odbc_interaction/test.py +++ /dev/null @@ -1,228 +0,0 @@ -import time -import pytest - -import os -import pymysql.cursors -import psycopg2 -from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT -from helpers.cluster import ClickHouseCluster - -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - -cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs')) -node1 = cluster.add_instance('node1', with_odbc_drivers=True, with_mysql=True, image='yandex/clickhouse-integration-test', main_configs=['configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml', 'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml', 'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'], stay_alive=True) - -create_table_sql_template = """ - CREATE TABLE `clickhouse`.`{}` ( - `id` int(11) NOT NULL, - 
`name` varchar(50) NOT NULL, - `age` int NOT NULL default 0, - `money` int NOT NULL default 0, - `column_x` int default NULL, - PRIMARY KEY (`id`)) ENGINE=InnoDB; - """ -def get_mysql_conn(): - conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.1', port=3308) - return conn - -def create_mysql_db(conn, name): - with conn.cursor() as cursor: - cursor.execute( - "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name)) - -def create_mysql_table(conn, table_name): - with conn.cursor() as cursor: - cursor.execute(create_table_sql_template.format(table_name)) - -def get_postgres_conn(): - conn_string = "host='localhost' user='postgres' password='mysecretpassword'" - conn = psycopg2.connect(conn_string) - conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) - conn.autocommit = True - return conn - -def create_postgres_db(conn, name): - cursor = conn.cursor() - cursor.execute("CREATE SCHEMA {}".format(name)) - -@pytest.fixture(scope="module") -def started_cluster(): - try: - cluster.start() - sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] - - print "sqlite data received" - node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t1(x INTEGER PRIMARY KEY ASC, y, z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t2(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t3(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t4(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - print "sqlite tables created" - mysql_conn = get_mysql_conn() - print "mysql connection received" - ## create mysql db and table - create_mysql_db(mysql_conn, 'clickhouse') - print "mysql database created" - - postgres_conn = get_postgres_conn() - print "postgres connection received" - - create_postgres_db(postgres_conn, 'clickhouse') - print "postgres db created" - - cursor = postgres_conn.cursor() - cursor.execute("create table if not exists clickhouse.test_table (column1 int primary key, column2 varchar(40) not null)") - - yield cluster - - except Exception as ex: - print(ex) - raise ex - finally: - cluster.shutdown() - -def test_mysql_simple_select_works(started_cluster): - mysql_setup = node1.odbc_drivers["MySQL"] - - table_name = 'test_insert_select' - conn = get_mysql_conn() - create_mysql_table(conn, table_name) - - # Check that NULL-values are handled correctly by the ODBC-bridge - with conn.cursor() as cursor: - cursor.execute("INSERT INTO clickhouse.{} VALUES(50, 'null-guy', 127, 255, NULL), (100, 'non-null-guy', 127, 255, 511);".format(table_name)) - conn.commit() - assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}') SETTINGS external_table_functions_use_nulls=1".format(mysql_setup["DSN"], table_name)) == '\\N\n511\n' - assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}') SETTINGS external_table_functions_use_nulls=0".format(mysql_setup["DSN"], table_name)) == '0\n511\n' - - node1.query(''' -CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nullable(UInt32)) ENGINE = MySQL('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse'); -'''.format(table_name, table_name)) - - node1.query("INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, NULL from numbers(49) 
".format(table_name)) - node1.query("INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, 42 from numbers(51, 49) ".format(table_name)) - - assert node1.query("SELECT COUNT () FROM {} WHERE column_x IS NOT NULL".format(table_name)) == '50\n' - assert node1.query("SELECT COUNT () FROM {} WHERE column_x IS NULL".format(table_name)) == '50\n' - assert node1.query("SELECT count(*) FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name)) == '100\n' - - # previously this test fails with segfault - # just to be sure :) - assert node1.query("select 1") == "1\n" - - conn.close() - - -def test_sqlite_simple_select_function_works(started_cluster): - sqlite_setup = node1.odbc_drivers["SQLite3"] - sqlite_db = sqlite_setup["Database"] - - node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t1 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - assert node1.query("select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\t3\n" - - assert node1.query("select y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "2\n" - assert node1.query("select z from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\n" - assert node1.query("select x from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\n" - assert node1.query("select x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\n" - assert node1.query("select z, x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\t1\t2\n" - assert node1.query("select count(), sum(x) from odbc('DSN={}', '{}') group by x".format(sqlite_setup["DSN"], 't1')) == "1\t1\n" - -def test_sqlite_simple_select_storage_works(started_cluster): - sqlite_setup = node1.odbc_drivers["SQLite3"] - sqlite_db = sqlite_setup["Database"] - - node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t4 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - node1.query("create table SqliteODBC (x Int32, y String, z String) engine = ODBC('DSN={}', '', 't4')".format(sqlite_setup["DSN"])) - - assert node1.query("select * from SqliteODBC") == "1\t2\t3\n" - assert node1.query("select y from SqliteODBC") == "2\n" - assert node1.query("select z from SqliteODBC") == "3\n" - assert node1.query("select x from SqliteODBC") == "1\n" - assert node1.query("select x, y from SqliteODBC") == "1\t2\n" - assert node1.query("select z, x, y from SqliteODBC") == "3\t1\t2\n" - assert node1.query("select count(), sum(x) from SqliteODBC group by x") == "1\t1\n" - -def test_sqlite_odbc_hashed_dictionary(started_cluster): - sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] - node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t2 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - - assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "3\n" - assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "1\n" # default - - time.sleep(5) # first reload - node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t2 values(200, 2, 7);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - - # No reload because of invalidate query - time.sleep(5) - assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "3\n" - assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "1\n" # still default - - node1.exec_in_container(["bash", "-c", "echo 
'REPLACE INTO t2 values(1, 2, 5);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - - # waiting for reload - time.sleep(5) - - assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "5\n" - assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "7\n" # new value - -def test_sqlite_odbc_cached_dictionary(started_cluster): - sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] - node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t3 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - - assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "3\n" - - node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t3 values(200, 2, 7);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - - assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(200))") == "7\n" # new value - - node1.exec_in_container(["bash", "-c", "echo 'REPLACE INTO t3 values(1, 2, 12);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') - - time.sleep(5) - - assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "12\n" - -def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): - conn = get_postgres_conn() - cursor = conn.cursor() - cursor.execute("insert into clickhouse.test_table values(1, 'hello'),(2, 'world')") - time.sleep(5) - assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))") == "hello\n" - assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))") == "world\n" - -def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): - conn = get_postgres_conn() - cursor = conn.cursor() - cursor.execute("insert into clickhouse.test_table values(3, 'xxx')") - for i in xrange(100): - try: - node1.query("system reload dictionary postgres_odbc_hashed", timeout=5) - except Exception as ex: - assert False, "Exception occurred -- odbc-bridge hangs: " + str(ex) - - assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))") == "xxx\n" - -def test_bridge_dies_with_parent(started_cluster): - node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))") - - clickhouse_pid = node1.get_process_pid("clickhouse server") - bridge_pid = node1.get_process_pid("odbc-bridge") - assert clickhouse_pid is not None - assert bridge_pid is not None - - while clickhouse_pid is not None: - try: - node1.exec_in_container(["bash", "-c", "kill {}".format(clickhouse_pid)], privileged=True, user='root') - except: - pass - clickhouse_pid = node1.get_process_pid("clickhouse server") - time.sleep(1) - - time.sleep(1) # just to be sure that odbc-bridge caught the signal - bridge_pid = node1.get_process_pid("odbc-bridge") - - if bridge_pid: - out = node1.exec_in_container(["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"], privileged=True, user='root') - print("Bridge is running, gdb output:") - print(out) - - assert clickhouse_pid is None - assert bridge_pid is None diff --git a/dbms/tests/integration/test_quota/configs/users.xml b/dbms/tests/integration/test_quota/configs/users.xml deleted file mode 100644 index 15a5364449b..00000000000 --- a/dbms/tests/integration/test_quota/configs/users.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - ::/0 - - default - myQuota - true - - - diff --git a/dbms/tests/integration/test_quota/test.py
b/dbms/tests/integration/test_quota/test.py deleted file mode 100644 index 85d2ded16c1..00000000000 --- a/dbms/tests/integration/test_quota/test.py +++ /dev/null @@ -1,251 +0,0 @@ -import pytest -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry -import os -import re -import time - -cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - config_dir="configs") - -query_from_system_quotas = "SELECT * FROM system.quotas ORDER BY name"; - -query_from_system_quota_usage = "SELECT id, key, duration, "\ - "queries, errors, result_rows, result_bytes, read_rows, read_bytes "\ - "FROM system.quota_usage ORDER BY id, key, duration"; - -def system_quotas(): - return instance.query(query_from_system_quotas).rstrip('\n') - -def system_quota_usage(): - return instance.query(query_from_system_quota_usage).rstrip('\n') - - -def copy_quota_xml(local_file_name, reload_immediately = True): - script_dir = os.path.dirname(os.path.realpath(__file__)) - instance.copy_file_to_container(os.path.join(script_dir, local_file_name), '/etc/clickhouse-server/users.d/quota.xml') - if reload_immediately: - instance.query("SYSTEM RELOAD CONFIG") - - -@pytest.fixture(scope="module", autouse=True) -def started_cluster(): - try: - cluster.start() - - instance.query("CREATE TABLE test_table(x UInt32) ENGINE = MergeTree ORDER BY tuple()") - instance.query("INSERT INTO test_table SELECT number FROM numbers(50)") - - yield cluster - - finally: - cluster.shutdown() - - -@pytest.fixture(autouse=True) -def reset_quotas_and_usage_info(): - try: - yield - finally: - instance.query("DROP QUOTA IF EXISTS qA, qB") - copy_quota_xml('simpliest.xml') # To reset usage info. - copy_quota_xml('normal_limits.xml') - - -def test_quota_from_users_xml(): - assert instance.query("SELECT currentQuota()") == "myQuota\n" - assert instance.query("SELECT currentQuotaID()") == "e651da9c-a748-8703-061a-7e5e5096dae7\n" - assert instance.query("SELECT currentQuotaKey()") == "default\n" - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" - - instance.query("SELECT * from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" - - instance.query("SELECT COUNT() from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t51\t208\t50\t200" - - -def test_simpliest_quota(): - # Simpliest quota doesn't even track usage. - copy_quota_xml('simpliest.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" - - instance.query("SELECT * from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" - - -def test_tracking_quota(): - # Now we're tracking usage. 
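- # tracking.xml keeps the year-long interval but sets every limit to zero, so the usage counters below advance on each query without anything being enforced.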
- copy_quota_xml('tracking.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" - - instance.query("SELECT * from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" - - instance.query("SELECT COUNT() from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t51\t208\t50\t200" - - -def test_exceed_quota(): - # Change quota, now the limits are tiny so we will exceed the quota. - copy_quota_xml('tiny_limits.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1]\t[1]\t[1]\t[0]\t[1]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" - - assert re.search("Quota.*has\ been\ exceeded", instance.query_and_get_error("SELECT * from test_table")) - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t1\t0\t0\t50\t0" - - # Change quota, now the limits are enough to execute queries. - copy_quota_xml('normal_limits.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t1\t0\t0\t50\t0" - - instance.query("SELECT * from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t1\t50\t200\t100\t200" - - -def test_add_remove_interval(): - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" - - # Add interval. - copy_quota_xml('two_intervals.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952,63113904]\t[0,1]\t[1000,0]\t[0,0]\t[0,0]\t[0,30000]\t[1000,0]\t[0,20000]\t[0,120]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0\n"\ - "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t63113904\t0\t0\t0\t0\t0\t0" - - instance.query("SELECT * from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200\n"\ - "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t63113904\t1\t0\t50\t200\t50\t200" - - # Remove interval. - copy_quota_xml('normal_limits.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" - - instance.query("SELECT * from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t100\t400\t100\t400" - - # Remove all intervals. 
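- # With no intervals left, the quota neither enforces nor tracks anything; the usage columns come back as NULLs.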
- copy_quota_xml('simpliest.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" - - instance.query("SELECT * from test_table") - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" - - # Add one interval back. - copy_quota_xml('normal_limits.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" - - -def test_add_remove_quota(): - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" - - # Add quota. - copy_quota_xml('two_quotas.xml') - assert system_quotas() ==\ - "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]\n"\ - "myQuota2\t4590510c-4d13-bf21-ec8a-c2187b092e73\tusers.xml\tclient key or user name\t[]\t[3600,2629746]\t[1,0]\t[0,0]\t[0,0]\t[4000,0]\t[400000,0]\t[4000,0]\t[400000,0]\t[60,1800]" - - # Drop quota. - copy_quota_xml('normal_limits.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - - # Drop all quotas. - copy_quota_xml('no_quotas.xml') - assert system_quotas() == "" - assert system_quota_usage() == "" - - # Add one quota back. - copy_quota_xml('normal_limits.xml') - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" - - -def test_reload_users_xml_by_timer(): - assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" - - time.sleep(1) # The modification time of the 'quota.xml' file should be different, - # because config files are reload by timer only when the modification time is changed. 
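- # Copy the new limits without issuing SYSTEM RELOAD CONFIG and rely on the periodic reload to pick them up.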
- copy_quota_xml('tiny_limits.xml', reload_immediately=False) - assert_eq_with_retry(instance, query_from_system_quotas, "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1]\t[1]\t[1]\t[0]\t[1]\t[0]\t[0]") - - -def test_dcl_introspection(): - assert instance.query("SHOW QUOTAS") == "myQuota\n" - assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000 TO default\n" - expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0/1000 errors=0 result_rows=0 result_bytes=0 read_rows=0/1000 read_bytes=0 execution_time=0" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE CURRENT")) - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE ALL")) - - instance.query("SELECT * from test_table") - expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - # Add interval. - copy_quota_xml('two_intervals.xml') - assert instance.query("SHOW QUOTAS") == "myQuota\n" - assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000, FOR RANDOMIZED INTERVAL 2 YEAR MAX RESULT BYTES = 30000, MAX READ BYTES = 20000, MAX EXECUTION TIME = 120 TO default\n" - expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*\n"\ - "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0/30000 read_rows=0 read_bytes=0/20000 execution_time=0/120" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - # Drop interval, add quota. 
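- # two_quotas.xml returns myQuota to a single interval and introduces a second quota, myQuota2, keyed by client key or user name.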
- copy_quota_xml('two_quotas.xml') - assert instance.query("SHOW QUOTAS") == "myQuota\nmyQuota2\n" - assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000 TO default\n" - assert instance.query("SHOW CREATE QUOTA myQuota2") == "CREATE QUOTA myQuota2 KEYED BY \\'client key or user name\\' FOR RANDOMIZED INTERVAL 1 HOUR MAX RESULT ROWS = 4000, MAX RESULT BYTES = 400000, MAX READ ROWS = 4000, MAX READ BYTES = 400000, MAX EXECUTION TIME = 60, FOR INTERVAL 1 MONTH MAX EXECUTION TIME = 1800\n" - expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - -def test_dcl_management(): - copy_quota_xml('no_quotas.xml') - assert instance.query("SHOW QUOTAS") == "" - assert instance.query("SHOW QUOTA USAGE") == "" - - instance.query("CREATE QUOTA qA FOR INTERVAL 15 MONTH SET MAX QUERIES = 123 TO CURRENT_USER") - assert instance.query("SHOW QUOTAS") == "qA\n" - assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 5 QUARTER MAX QUERIES = 123 TO default\n" - expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0/123 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - instance.query("SELECT * from test_table") - expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=1/123 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES = 321, MAX ERRORS = 10, FOR INTERVAL 0.5 HOUR MAX EXECUTION TIME = 0.5") - assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 30 MINUTE MAX EXECUTION TIME = 0.5, FOR INTERVAL 5 QUARTER MAX QUERIES = 321, MAX ERRORS = 10 TO default\n" - expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*/0.5\n"\ - "qA key=\\\\'\\\\' interval=\[.*\] queries=1/321 errors=0/10 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH UNSET TRACKING, FOR RANDOMIZED INTERVAL 16 MONTH SET TRACKING, FOR INTERVAL 1800 SECOND UNSET TRACKING") - assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING TO default\n" - expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - instance.query("SELECT * from test_table") - expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=1 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - instance.query("ALTER QUOTA qA RENAME TO qB") - assert instance.query("SHOW CREATE QUOTA qB") == "CREATE QUOTA qB KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING TO default\n" - expected_usage = "qB key=\\\\'\\\\' interval=\[.*\] queries=1 
errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" - assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - - instance.query("DROP QUOTA qB") - assert instance.query("SHOW QUOTAS") == "" - assert instance.query("SHOW QUOTA USAGE") == "" - - -def test_users_xml_is_readonly(): - assert re.search("storage is readonly", instance.query_and_get_error("DROP QUOTA myQuota")) diff --git a/dbms/tests/integration/test_row_policy/configs/users.xml b/dbms/tests/integration/test_row_policy/configs/users.xml deleted file mode 100644 index 313d8084884..00000000000 --- a/dbms/tests/integration/test_row_policy/configs/users.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - 1 - - - - - - - ::/0 - - default - default - true - - - - - ::/0 - - default - default - - - - - - diff --git a/dbms/tests/integration/test_row_policy/test.py b/dbms/tests/integration/test_row_policy/test.py deleted file mode 100644 index 6db24f5799e..00000000000 --- a/dbms/tests/integration/test_row_policy/test.py +++ /dev/null @@ -1,311 +0,0 @@ -import pytest -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry -import os -import re -import time - -cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance1', config_dir="configs", with_zookeeper=True) -instance2 = cluster.add_instance('instance2', config_dir="configs", with_zookeeper=True) - - -def copy_policy_xml(local_file_name, reload_immediately = True): - script_dir = os.path.dirname(os.path.realpath(__file__)) - instance.copy_file_to_container(os.path.join(script_dir, local_file_name), '/etc/clickhouse-server/users.d/row_policy.xml') - instance2.copy_file_to_container(os.path.join(script_dir, local_file_name), '/etc/clickhouse-server/users.d/row_policy.xml') - if reload_immediately: - instance.query("SYSTEM RELOAD CONFIG") - - -@pytest.fixture(scope="module", autouse=True) -def started_cluster(): - try: - cluster.start() - - instance.query(''' - CREATE DATABASE mydb; - - CREATE TABLE mydb.filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1); - - CREATE TABLE mydb.table (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.table values (0, 0), (0, 1), (1, 0), (1, 1); - - CREATE TABLE mydb.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0); - - CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1); - - CREATE TABLE mydb.`.filtered_table4` (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.`.filtered_table4` values (0, 0), (0, 1), (1, 0), (1, 1); - ''') - instance2.query(''' - CREATE DATABASE mydb; - - CREATE TABLE mydb.filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1); - - CREATE TABLE mydb.table (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.table values (0, 0), (0, 1), (1, 0), (1, 1); - - CREATE TABLE mydb.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0); - - CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER 
BY a; - INSERT INTO mydb.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1); - - CREATE TABLE mydb.`.filtered_table4` (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a; - INSERT INTO mydb.`.filtered_table4` values (0, 0), (0, 1), (1, 0), (1, 1); - ''') - - yield cluster - - finally: - cluster.shutdown() - - -@pytest.fixture(autouse=True) -def reset_policies(): - try: - yield - finally: - copy_policy_xml('normal_filters.xml') - instance.query("DROP POLICY IF EXISTS pA, pB ON mydb.filtered_table1") - - -def test_smoke(): - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n" - - assert instance.query("SELECT a FROM mydb.filtered_table1") == "1\n1\n" - assert instance.query("SELECT b FROM mydb.filtered_table1") == "0\n1\n" - assert instance.query("SELECT a FROM mydb.filtered_table1 WHERE a = 1") == "1\n1\n" - assert instance.query("SELECT a FROM mydb.filtered_table1 WHERE a IN (1)") == "1\n1\n" - assert instance.query("SELECT a = 1 FROM mydb.filtered_table1") == "1\n1\n" - - assert instance.query("SELECT a FROM mydb.filtered_table3") == "0\n1\n" - assert instance.query("SELECT b FROM mydb.filtered_table3") == "1\n0\n" - assert instance.query("SELECT c FROM mydb.filtered_table3") == "1\n1\n" - assert instance.query("SELECT a + b FROM mydb.filtered_table3") == "1\n1\n" - assert instance.query("SELECT a FROM mydb.filtered_table3 WHERE c = 1") == "0\n1\n" - assert instance.query("SELECT c = 1 FROM mydb.filtered_table3") == "1\n1\n" - assert instance.query("SELECT a + b = 1 FROM mydb.filtered_table3") == "1\n1\n" - - -def test_join(): - assert instance.query("SELECT * FROM mydb.filtered_table1 as t1 ANY LEFT JOIN mydb.filtered_table1 as t2 ON t1.a = t2.b") == "1\t0\t1\t1\n1\t1\t1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table1 as t2 ANY RIGHT JOIN mydb.filtered_table1 as t1 ON t2.b = t1.a") == "1\t1\t1\t0\n" - - -def test_cannot_trick_row_policy_with_keyword_with(): - assert instance.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - assert instance.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - assert instance.query("WITH 0 AS a SELECT a FROM mydb.filtered_table1") == "1\n1\n" - assert instance.query("WITH 0 AS a SELECT b FROM mydb.filtered_table1") == "0\n1\n" - - -def test_prewhere_not_supported(): - expected_error = "PREWHERE is not supported if the table is filtered by row-level security" - assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table1 PREWHERE 1") - assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table2 PREWHERE 1") - assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table3 PREWHERE 1") - - -def test_single_table_name(): - copy_policy_xml('tag_with_table_name.xml') - assert instance.query("SELECT * FROM mydb.table") == "1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n" - - assert instance.query("SELECT a FROM mydb.table") == "1\n1\n" - assert instance.query("SELECT b FROM mydb.table") == "0\n1\n" - assert instance.query("SELECT a FROM mydb.table WHERE a = 1") == "1\n1\n" - assert instance.query("SELECT a = 1 FROM mydb.table") == "1\n1\n" - - assert 
instance.query("SELECT a FROM mydb.filtered_table3") == "0\n1\n" - assert instance.query("SELECT b FROM mydb.filtered_table3") == "1\n0\n" - assert instance.query("SELECT c FROM mydb.filtered_table3") == "1\n1\n" - assert instance.query("SELECT a + b FROM mydb.filtered_table3") == "1\n1\n" - assert instance.query("SELECT a FROM mydb.filtered_table3 WHERE c = 1") == "0\n1\n" - assert instance.query("SELECT c = 1 FROM mydb.filtered_table3") == "1\n1\n" - assert instance.query("SELECT a + b = 1 FROM mydb.filtered_table3") == "1\n1\n" - - -def test_custom_table_name(): - copy_policy_xml('multiple_tags_with_table_names.xml') - assert instance.query("SELECT * FROM mydb.table") == "1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n" - assert instance.query("SELECT * FROM mydb.`.filtered_table4`") == "0\t1\n1\t0\n" - - assert instance.query("SELECT a FROM mydb.table") == "1\n1\n" - assert instance.query("SELECT b FROM mydb.table") == "0\n1\n" - assert instance.query("SELECT a FROM mydb.table WHERE a = 1") == "1\n1\n" - assert instance.query("SELECT a = 1 FROM mydb.table") == "1\n1\n" - - assert instance.query("SELECT a FROM mydb.`.filtered_table4`") == "0\n1\n" - assert instance.query("SELECT b FROM mydb.`.filtered_table4`") == "1\n0\n" - assert instance.query("SELECT c FROM mydb.`.filtered_table4`") == "1\n1\n" - assert instance.query("SELECT a + b FROM mydb.`.filtered_table4`") == "1\n1\n" - assert instance.query("SELECT a FROM mydb.`.filtered_table4` WHERE c = 1") == "0\n1\n" - assert instance.query("SELECT c = 1 FROM mydb.`.filtered_table4`") == "1\n1\n" - assert instance.query("SELECT a + b = 1 FROM mydb.`.filtered_table4`") == "1\n1\n" - - -def test_change_of_users_xml_changes_row_policies(): - copy_policy_xml('normal_filters.xml') - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n" - - copy_policy_xml('all_rows.xml') - assert instance.query("SELECT * FROM mydb.filtered_table1") == "0\t0\n0\t1\n1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n1\t2\t3\t4\n4\t3\t2\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t0\n0\t1\n1\t0\n1\t1\n" - - copy_policy_xml('no_rows.xml') - assert instance.query("SELECT * FROM mydb.filtered_table1") == "" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "" - - copy_policy_xml('normal_filters.xml') - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n" - - copy_policy_xml('no_filters.xml') - assert instance.query("SELECT * FROM mydb.filtered_table1") == "0\t0\n0\t1\n1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n1\t2\t3\t4\n4\t3\t2\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t0\n0\t1\n1\t0\n1\t1\n" - - copy_policy_xml('normal_filters.xml') - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == 
"0\t1\n1\t0\n" - - -def test_reload_users_xml_by_timer(): - copy_policy_xml('normal_filters.xml') - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n" - assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n" - - time.sleep(1) # The modification time of the 'row_policy.xml' file should be different. - copy_policy_xml('all_rows.xml', False) - assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table1", "0\t0\n0\t1\n1\t0\n1\t1") - assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table2", "0\t0\t0\t0\n0\t0\t6\t0\n1\t2\t3\t4\n4\t3\t2\t1") - assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table3", "0\t0\n0\t1\n1\t0\n1\t1") - - time.sleep(1) # The modification time of the 'row_policy.xml' file should be different. - copy_policy_xml('normal_filters.xml', False) - assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table1", "1\t0\n1\t1") - assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table2", "0\t0\t0\t0\n0\t0\t6\t0") - assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table3", "0\t1\n1\t0") - - -def test_introspection(): - assert instance.query("SELECT currentRowPolicies('mydb', 'filtered_table1')") == "['default']\n" - assert instance.query("SELECT currentRowPolicies('mydb', 'filtered_table2')") == "['default']\n" - assert instance.query("SELECT currentRowPolicies('mydb', 'filtered_table3')") == "['default']\n" - assert instance.query("SELECT arraySort(currentRowPolicies())") == "[('mydb','filtered_table1','default'),('mydb','filtered_table2','default'),('mydb','filtered_table3','default'),('mydb','local','default')]\n" - - policy1 = "mydb\tfiltered_table1\tdefault\tdefault ON mydb.filtered_table1\t9e8a8f62-4965-2b5e-8599-57c7b99b3549\tusers.xml\t0\ta = 1\t\t\t\t\n" - policy2 = "mydb\tfiltered_table2\tdefault\tdefault ON mydb.filtered_table2\tcffae79d-b9bf-a2ef-b798-019c18470b25\tusers.xml\t0\ta + b < 1 or c - d > 5\t\t\t\t\n" - policy3 = "mydb\tfiltered_table3\tdefault\tdefault ON mydb.filtered_table3\t12fc5cef-e3da-3940-ec79-d8be3911f42b\tusers.xml\t0\tc = 1\t\t\t\t\n" - policy4 = "mydb\tlocal\tdefault\tdefault ON mydb.local\tcdacaeb5-1d97-f99d-2bb0-4574f290629c\tusers.xml\t0\t1\t\t\t\t\n" - assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'filtered_table1'), id) ORDER BY table, name") == policy1 - assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'filtered_table2'), id) ORDER BY table, name") == policy2 - assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'filtered_table3'), id) ORDER BY table, name") == policy3 - assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'local'), id) ORDER BY table, name") == policy4 - assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs(), id) ORDER BY table, name") == policy1 + policy2 + policy3 + policy4 - - -def test_dcl_introspection(): - assert instance.query("SHOW POLICIES ON mydb.filtered_table1") == "another\ndefault\n" - assert instance.query("SHOW POLICIES CURRENT ON mydb.filtered_table2") == "default\n" - assert instance.query("SHOW POLICIES") == "another ON mydb.filtered_table1\nanother ON mydb.filtered_table2\nanother ON mydb.filtered_table3\nanother ON mydb.local\ndefault ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON 
mydb.filtered_table3\ndefault ON mydb.local\n" - assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\ndefault ON mydb.local\n" - - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.local") == "CREATE POLICY default ON mydb.local FOR SELECT USING 1 TO default\n" - - copy_policy_xml('all_rows.xml') - assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE POLICY default ON mydb.filtered_table1 FOR SELECT USING 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE POLICY default ON mydb.filtered_table2 FOR SELECT USING 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE POLICY default ON mydb.filtered_table3 FOR SELECT USING 1 TO default\n" - - copy_policy_xml('no_rows.xml') - assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE POLICY default ON mydb.filtered_table1 FOR SELECT USING NULL TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE POLICY default ON mydb.filtered_table2 FOR SELECT USING NULL TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE POLICY default ON mydb.filtered_table3 FOR SELECT USING NULL TO default\n" - - copy_policy_xml('no_filters.xml') - assert instance.query("SHOW POLICIES") == "" - - -def test_dcl_management(): - copy_policy_xml('no_filters.xml') - assert instance.query("SHOW POLICIES") == "" - - instance.query("CREATE POLICY pA ON mydb.filtered_table1 FOR SELECT USING ab") - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n" - - instance.query("ALTER POLICY pA ON mydb.filtered_table1 RENAME TO pB") - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n" - assert instance.query("SHOW POLICIES CURRENT ON mydb.filtered_table1") == "pB\n" - assert instance.query("SHOW CREATE POLICY pB ON mydb.filtered_table1") == "CREATE POLICY pB ON mydb.filtered_table1 FOR SELECT USING a > b TO default\n" - - instance.query("DROP POLICY pB ON mydb.filtered_table1") - assert instance.query("SELECT * FROM mydb.filtered_table1") == "0\t0\n0\t1\n1\t0\n1\t1\n" - assert instance.query("SHOW POLICIES") == "" - - -def test_users_xml_is_readonly(): - assert re.search("storage is readonly", instance.query_and_get_error("DROP POLICY default ON mydb.filtered_table1")) - - -def test_miscellaneous_engines(): - copy_policy_xml('normal_filters.xml') - - # ReplicatedMergeTree - instance.query("DROP TABLE mydb.filtered_table1") - instance.query("CREATE TABLE mydb.filtered_table1 (a 
UInt8, b UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/00-00/filtered_table1', 'replica1') ORDER BY a") - instance.query("INSERT INTO mydb.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1)") - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n" - - # CollapsingMergeTree - instance.query("DROP TABLE mydb.filtered_table1") - instance.query("CREATE TABLE mydb.filtered_table1 (a UInt8, b Int8) ENGINE CollapsingMergeTree(b) ORDER BY a") - instance.query("INSERT INTO mydb.filtered_table1 values (0, 1), (0, 1), (1, 1), (1, 1)") - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t1\n1\t1\n" - - # ReplicatedCollapsingMergeTree - instance.query("DROP TABLE mydb.filtered_table1") - instance.query("CREATE TABLE mydb.filtered_table1 (a UInt8, b Int8) ENGINE ReplicatedCollapsingMergeTree('/clickhouse/tables/00-00/filtered_table1', 'replica1', b) ORDER BY a") - instance.query("INSERT INTO mydb.filtered_table1 values (0, 1), (0, 1), (1, 1), (1, 1)") - assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t1\n1\t1\n" - - # DistributedMergeTree - instance.query("DROP TABLE IF EXISTS mydb.not_filtered_table") - instance.query("CREATE TABLE mydb.not_filtered_table (a UInt8, b UInt8) ENGINE Distributed('test_local_cluster', mydb, local)") - instance.query("CREATE TABLE mydb.local (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a") - instance2.query("CREATE TABLE mydb.local (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a") - instance.query("INSERT INTO mydb.local values (2, 0), (2, 1), (1, 0), (1, 1)") - instance2.query("INSERT INTO mydb.local values (3, 0), (3, 1), (1, 0), (1, 1)") - assert instance.query("SELECT * FROM mydb.not_filtered_table", user="another") == "1\t0\n1\t1\n1\t0\n1\t1\n" - assert instance.query("SELECT sum(a), b FROM mydb.not_filtered_table GROUP BY b ORDER BY b", user="another") == "2\t0\n2\t1\n" diff --git a/dbms/tests/integration/test_settings_constraints_distributed/test.py b/dbms/tests/integration/test_settings_constraints_distributed/test.py deleted file mode 100644 index b23b130b270..00000000000 --- a/dbms/tests/integration/test_settings_constraints_distributed/test.py +++ /dev/null @@ -1,98 +0,0 @@ -import time - -import pytest - -from helpers.client import QueryRuntimeException -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry - -cluster = ClickHouseCluster(__file__) - -node1 = cluster.add_instance('node1') -node2 = cluster.add_instance('node2') -distributed = cluster.add_instance('distributed', main_configs=['configs/remote_servers.xml']) - - -@pytest.fixture(scope="module") -def started_cluster(): - try: - cluster.start() - - for node in [node1, node2]: - node.query("CREATE TABLE sometable (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;") - node.query("INSERT INTO sometable VALUES (toDate('2010-01-10'), 1, 1)") - node.query("CREATE USER shard") - node.query("GRANT ALL ON *.* TO shard") - - distributed.query("CREATE TABLE proxy (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable, toUInt64(date));") - distributed.query("CREATE TABLE shard_settings (name String, value String) ENGINE = Distributed(test_cluster, system, settings);") - distributed.query("CREATE ROLE admin") - distributed.query("GRANT ALL ON *.* TO admin") - - yield cluster - - finally: - cluster.shutdown() - - -def test_select_clamps_settings(started_cluster): - distributed.query("CREATE USER normal DEFAULT ROLE admin SETTINGS max_memory_usage = 
80000000") - distributed.query("CREATE USER wasteful DEFAULT ROLE admin SETTINGS max_memory_usage = 2000000000") - distributed.query("CREATE USER readonly DEFAULT ROLE admin SETTINGS readonly = 1") - node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999") - node2.query("ALTER USER shard SETTINGS readonly = 1") - - # Check that shards doesn't throw exceptions on constraints violation - query = "SELECT COUNT() FROM proxy" - assert distributed.query(query) == '2\n' - assert distributed.query(query, user = 'normal') == '2\n' - assert distributed.query(query, user = 'wasteful') == '2\n' - assert distributed.query(query, user = 'readonly') == '2\n' - - assert distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) == '2\n' - assert distributed.query(query, settings={"max_memory_usage": 3000000000, "readonly": 2}) == '2\n' - - query = "SELECT COUNT() FROM remote('node{1,2}', 'default', 'sometable')" - assert distributed.query(query) == '2\n' - assert distributed.query(query, user = 'normal') == '2\n' - assert distributed.query(query, user = 'wasteful') == '2\n' - - # Check that shards clamp passed settings. - query = "SELECT hostName() as host, name, value FROM shard_settings WHERE name = 'max_memory_usage' OR name = 'readonly' ORDER BY host, name, value" - assert distributed.query(query) == 'node1\tmax_memory_usage\t99999999\n'\ - 'node1\treadonly\t0\n'\ - 'node2\tmax_memory_usage\t10000000000\n'\ - 'node2\treadonly\t1\n' - assert distributed.query(query, user = 'normal') == 'node1\tmax_memory_usage\t80000000\n'\ - 'node1\treadonly\t0\n'\ - 'node2\tmax_memory_usage\t10000000000\n'\ - 'node2\treadonly\t1\n' - assert distributed.query(query, user = 'wasteful') == 'node1\tmax_memory_usage\t99999999\n'\ - 'node1\treadonly\t0\n'\ - 'node2\tmax_memory_usage\t10000000000\n'\ - 'node2\treadonly\t1\n' - assert distributed.query(query, user = 'readonly') == 'node1\tmax_memory_usage\t99999999\n'\ - 'node1\treadonly\t1\n'\ - 'node2\tmax_memory_usage\t10000000000\n'\ - 'node2\treadonly\t1\n' - - assert distributed.query(query, settings={"max_memory_usage": 1}) == 'node1\tmax_memory_usage\t11111111\n'\ - 'node1\treadonly\t0\n'\ - 'node2\tmax_memory_usage\t10000000000\n'\ - 'node2\treadonly\t1\n' - assert distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) == 'node1\tmax_memory_usage\t40000000\n'\ - 'node1\treadonly\t2\n'\ - 'node2\tmax_memory_usage\t10000000000\n'\ - 'node2\treadonly\t1\n' - assert distributed.query(query, settings={"max_memory_usage": 3000000000, "readonly": 2}) == 'node1\tmax_memory_usage\t99999999\n'\ - 'node1\treadonly\t2\n'\ - 'node2\tmax_memory_usage\t10000000000\n'\ - 'node2\treadonly\t1\n' - -def test_insert_clamps_settings(started_cluster): - node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999") - node2.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999") - - distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-20'), 2, 2)") - distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-21'), 2, 2)", settings={"max_memory_usage": 5000000}) - assert distributed.query("SELECT COUNT() FROM proxy") == "4\n" diff --git a/dbms/tests/integration/test_settings_profile/test.py b/dbms/tests/integration/test_settings_profile/test.py deleted file mode 100644 index 7f18327c66a..00000000000 --- a/dbms/tests/integration/test_settings_profile/test.py +++ /dev/null @@ -1,128 +0,0 @@ -import pytest -from helpers.cluster import 
ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') - - -@pytest.fixture(scope="module", autouse=True) -def setup_nodes(): - try: - cluster.start() - - instance.query("CREATE USER robin") - - yield cluster - - finally: - cluster.shutdown() - - -@pytest.fixture(autouse=True) -def reset_after_test(): - try: - yield - finally: - instance.query("CREATE USER OR REPLACE robin") - instance.query("DROP ROLE IF EXISTS worker") - instance.query("DROP SETTINGS PROFILE IF EXISTS xyz, alpha") - - -def test_settings_profile(): - # Set settings and constraints via CREATE SETTINGS PROFILE ... TO user - instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") - - instance.query("ALTER SETTINGS PROFILE xyz TO NONE") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" - instance.query("SET max_memory_usage = 80000000", user="robin") - instance.query("SET max_memory_usage = 120000000", user="robin") - - # Set settings and constraints via CREATE USER ... SETTINGS PROFILE - instance.query("ALTER USER robin SETTINGS PROFILE xyz") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") - - instance.query("ALTER USER robin SETTINGS NONE") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" - instance.query("SET max_memory_usage = 80000000", user="robin") - instance.query("SET max_memory_usage = 120000000", user="robin") - - -def test_settings_profile_from_granted_role(): - # Set settings and constraints via granted role - instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000") - instance.query("CREATE ROLE worker SETTINGS PROFILE xyz") - instance.query("GRANT worker TO robin") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") - - instance.query("REVOKE worker FROM robin") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" - instance.query("SET max_memory_usage = 80000000", user="robin") - instance.query("SET max_memory_usage = 120000000", user="robin") - - instance.query("ALTER ROLE worker SETTINGS NONE") - instance.query("GRANT worker TO robin") - assert 
instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" - instance.query("SET max_memory_usage = 80000000", user="robin") - instance.query("SET max_memory_usage = 120000000", user="robin") - - # Set settings and constraints via CREATE SETTINGS PROFILE ... TO granted role - instance.query("ALTER SETTINGS PROFILE xyz TO worker") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") - - instance.query("ALTER SETTINGS PROFILE xyz TO NONE") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" - instance.query("SET max_memory_usage = 80000000", user="robin") - instance.query("SET max_memory_usage = 120000000", user="robin") - - -def test_inheritance_of_settings_profile(): - instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY") - instance.query("CREATE SETTINGS PROFILE alpha SETTINGS PROFILE xyz TO robin") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000002\n" - assert "Setting max_memory_usage should not be changed" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") - - -def test_alter_and_drop(): - instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000003 MIN 90000000 MAX 110000000 TO robin") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000003\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") - - instance.query("ALTER SETTINGS PROFILE xyz SETTINGS readonly=1") - assert "Cannot modify 'max_memory_usage' setting in readonly mode" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") - - instance.query("DROP SETTINGS PROFILE xyz") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" - instance.query("SET max_memory_usage = 80000000", user="robin") - instance.query("SET max_memory_usage = 120000000", user="robin") - - -def test_allow_introspection(): - assert "Not enough privileges" in instance.query_and_get_error("SELECT demangle('a')", user="robin") - - instance.query("GRANT ALL ON *.* TO robin") - assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin") - - instance.query("ALTER USER robin SETTINGS allow_introspection_functions=1") - assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n" - - instance.query("ALTER USER robin SETTINGS NONE") - assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin") - - instance.query("CREATE SETTINGS PROFILE xyz SETTINGS allow_introspection_functions=1 TO robin") - assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n" - - 
instance.query("DROP SETTINGS PROFILE xyz") - assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin") - - instance.query("REVOKE ALL ON *.* FROM robin") - assert "Not enough privileges" in instance.query_and_get_error("SELECT demangle('a')", user="robin") diff --git a/dbms/tests/integration/test_zookeeper_config/test.py b/dbms/tests/integration/test_zookeeper_config/test.py deleted file mode 100644 index d9323ae16f3..00000000000 --- a/dbms/tests/integration/test_zookeeper_config/test.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import print_function -from helpers.cluster import ClickHouseCluster -import pytest -import time - -def test_chroot_with_same_root(): - - cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml') - cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml') - - node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True) - node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True) - nodes = [node1, node2] - - def create_zk_root(zk): - zk.ensure_path('/root_a') - print(zk.get_children('/')) - cluster_1.add_zookeeper_startup_command(create_zk_root) - - try: - cluster_1.start() - - try: - cluster_2.start(destroy_dirs=False) - for i, node in enumerate(nodes): - node.query(''' - CREATE TABLE simple (date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192); - '''.format(replica=node.name)) - for j in range(2): # Second insert to test deduplication - node.query("INSERT INTO simple VALUES ({0}, {0})".format(i)) - - time.sleep(1) - - assert node1.query('select count() from simple').strip() == '2' - assert node2.query('select count() from simple').strip() == '2' - - finally: - cluster_2.shutdown() - - finally: - cluster_1.shutdown() - - -def test_chroot_with_different_root(): - - cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml') - cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_b.xml') - - node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True) - node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True) - nodes = [node1, node2] - - def create_zk_roots(zk): - zk.ensure_path('/root_a') - zk.ensure_path('/root_b') - print(zk.get_children('/')) - cluster_1.add_zookeeper_startup_command(create_zk_roots) - - try: - cluster_1.start() - - try: - cluster_2.start(destroy_dirs=False) - - for i, node in enumerate(nodes): - node.query(''' - CREATE TABLE simple (date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192); - '''.format(replica=node.name)) - for j in range(2): # Second insert to test deduplication - node.query("INSERT INTO simple VALUES ({0}, {0})".format(i)) - - assert node1.query('select count() from simple').strip() == '1' - assert node2.query('select count() from simple').strip() == '1' - - finally: - cluster_2.shutdown() - - finally: - cluster_1.shutdown() - - -def test_identity(): - - cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_password.xml') - cluster_2 = ClickHouseCluster(__file__) - - node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True) - node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True) - - try: - 
cluster_1.start() - - node1.query(''' - CREATE TABLE simple (date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192); - '''.format(replica=node1.name)) - - with pytest.raises(Exception): - cluster_2.start(destroy_dirs=False) - node2.query(''' - CREATE TABLE simple (date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192); - ''') - - finally: - cluster_1.shutdown() - cluster_2.shutdown() diff --git a/dbms/tests/ints_dictionary.xml b/dbms/tests/ints_dictionary.xml deleted file mode 120000 index bdbf0690125..00000000000 --- a/dbms/tests/ints_dictionary.xml +++ /dev/null @@ -1 +0,0 @@ -../../dbms/tests/config/ints_dictionary.xml \ No newline at end of file diff --git a/dbms/tests/performance/array_fill.xml b/dbms/tests/performance/array_fill.xml deleted file mode 100644 index ccd2c5eba7c..00000000000 --- a/dbms/tests/performance/array_fill.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - 10000 - - - - - SELECT arraySlice(arrayFill(x -> ((x % 2) >= 0), range(100000000)), 1, 10) - SELECT arraySlice(arrayFill(x -> (((x.1) % 2) >= 0), arrayMap(x -> (x, toString(x)), range(100000000))), 1, 10) - SELECT arraySlice(arrayFill(x -> ((x % 2) >= 2), range(100000000)), 1, 10) - SELECT arraySlice(arrayFill(x -> (((x.1) % 2) >= 2), arrayMap(x -> (x, toString(x)), range(100000000))), 1, 10) - SELECT arraySlice(arrayFill(x -> ((x % 2) = 0), range(100000000)), 1, 10) - SELECT arraySlice(arrayFill(x -> (((x.1) % 2) = 0), arrayMap(x -> (x, toString(x)), range(100000000))), 1, 10) - diff --git a/dbms/tests/performance/concat_hits.xml b/dbms/tests/performance/concat_hits.xml deleted file mode 100644 index 49ab27bf540..00000000000 --- a/dbms/tests/performance/concat_hits.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - 5 - 10000 - - - 50 - 60000 - - - - - - test.hits - - - SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, URL)) - SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, SearchPhrase)) - SELECT count() FROM test.hits WHERE NOT ignore(concat(MobilePhoneModel, SearchPhrase)) - SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, 'Hello')) - SELECT count() FROM test.hits WHERE NOT ignore(concat('World', SearchPhrase)) - SELECT count() FROM test.hits WHERE NOT ignore(concat(MobilePhoneModel, 'Hello')) - SELECT count() FROM test.hits WHERE NOT ignore(concat(PageCharset, 'a')) - - SELECT count() FROM test.hits WHERE NOT ignore(format('{}{}', URL, URL)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}{}', URL, SearchPhrase)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}{}', MobilePhoneModel, SearchPhrase)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}Hello', URL)) - SELECT count() FROM test.hits WHERE NOT ignore(format('World{}', SearchPhrase)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}Hello', MobilePhoneModel)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}a', PageCharset)) - - SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, URL, URL)) - SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, SearchPhrase, MobilePhoneModel)) - SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, 'Hello', URL)) - SELECT count() FROM test.hits WHERE NOT ignore(concat('Hello', SearchPhrase, 'World')) - SELECT count() FROM test.hits WHERE NOT ignore(concat(MobilePhoneModel, 'Hello', PageCharset)) - SELECT count() FROM test.hits WHERE NOT ignore(concat('a', PageCharset, 'b')) - - SELECT count() FROM test.hits WHERE NOT 
ignore(format('{}{}{}', URL, URL, URL)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}{}{}', URL, SearchPhrase, MobilePhoneModel)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}Hello{}', URL, URL)) - SELECT count() FROM test.hits WHERE NOT ignore(format('Hello{}World', SearchPhrase)) - SELECT count() FROM test.hits WHERE NOT ignore(format('{}Hello{}', MobilePhoneModel, PageCharset)) - SELECT count() FROM test.hits WHERE NOT ignore(format('a{}b', PageCharset)) - diff --git a/dbms/tests/queries/0_stateless/00061_merge_tree_alter.reference b/dbms/tests/queries/0_stateless/00061_merge_tree_alter.reference deleted file mode 100644 index 571affd7231..00000000000 --- a/dbms/tests/queries/0_stateless/00061_merge_tree_alter.reference +++ /dev/null @@ -1,101 +0,0 @@ -d Date -k UInt64 -i32 Int32 -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = MergeTree(d, k, 8192) -2015-01-01 10 42 -d Date -k UInt64 -i32 Int32 -n.ui8 Array(UInt8) -n.s Array(String) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = MergeTree(d, k, 8192) -2015-01-01 8 40 [1,2,3] ['12','13','14'] -2015-01-01 10 42 [] [] -d Date -k UInt64 -i32 Int32 -n.ui8 Array(UInt8) -n.s Array(String) -n.d Array(Date) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192) -2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] -2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 10 42 [] [] [] -d Date -k UInt64 -i32 Int32 -n.ui8 Array(UInt8) -n.s Array(String) -n.d Array(Date) -s String DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) -2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 -2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 -2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0 -2015-01-01 10 42 [] [] [] 0 -d Date -k UInt64 -i32 Int32 -n.ui8 Array(UInt8) -n.s Array(String) -s Int64 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) -2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 -2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 -2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 -2015-01-01 10 42 [] [] 0 -d Date -k UInt64 -i32 Int32 -n.ui8 Array(UInt8) -n.s Array(String) -s UInt32 DEFAULT \'0\' -n.d Array(Date) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32 DEFAULT \'0\', `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192) -2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 10 42 [] [] 0 [] -2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 
['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 10 42 [] [] 0 [] -d Date -k UInt64 -i32 Int32 -n.s Array(String) -s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.s` Array(String), `s` UInt32 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) -2015-01-01 6 38 ['asd','qwe','qwe'] 100500 -2015-01-01 7 39 ['120','130','140'] 0 -2015-01-01 8 40 ['12','13','14'] 0 -2015-01-01 10 42 [] 0 -d Date -k UInt64 -i32 Int32 -s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) -2015-01-01 6 38 100500 -2015-01-01 7 39 0 -2015-01-01 8 40 0 -2015-01-01 10 42 0 -d Date -k UInt64 -i32 Int32 -s UInt32 DEFAULT \'0\' -n.s Array(String) -n.d Array(Date) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32 DEFAULT \'0\', `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192) -2015-01-01 6 38 100500 [] [] -2015-01-01 7 39 0 [] [] -2015-01-01 8 40 0 [] [] -2015-01-01 10 42 0 [] [] -d Date -k UInt64 -i32 Int32 -s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) -2015-01-01 6 38 100500 -2015-01-01 7 39 0 -2015-01-01 8 40 0 -2015-01-01 10 42 0 diff --git a/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference b/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference deleted file mode 100644 index 6f2eb080286..00000000000 --- a/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference +++ /dev/null @@ -1,216 +0,0 @@ -d Date -k UInt64 -i32 Int32 -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 10 42 -d Date -k UInt64 -i32 Int32 -dt DateTime -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 9 41 1992-01-01 08:00:00 -2015-01-01 10 42 0000-00-00 00:00:00 -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] -2015-01-01 9 41 1992-01-01 08:00:00 [] [] -2015-01-01 10 42 0000-00-00 00:00:00 [] [] -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -n.d Array(Date) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` 
Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -n.d Array(Date) -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] -2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 9 41 1992-01-01 08:00:00 [] [] [] -2015-01-01 10 42 0000-00-00 00:00:00 [] [] [] -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -n.d Array(Date) -s String DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -n.d Array(Date) -s String DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 -2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 -2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0 -2015-01-01 9 41 1992-01-01 08:00:00 [] [] [] 0 -2015-01-01 10 42 0000-00-00 00:00:00 [] [] [] 0 -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -s Int64 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -s Int64 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 -2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 -2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 -2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0 -2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0 -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -s UInt32 DEFAULT \'0\' -n.d Array(Date) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32 DEFAULT \'0\', `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -n.ui8 Array(UInt8) -n.s Array(String) -s UInt32 DEFAULT \'0\' -n.d Array(Date) -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` 
UInt32 DEFAULT \'0\', `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00'] -2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0 [] -2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0 [] -d Date -k UInt64 -i32 Int32 -dt DateTime -n.s Array(String) -s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -n.s Array(String) -s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500 -2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0 -2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0 -2015-01-01 9 41 1992-01-01 08:00:00 [] 0 -2015-01-01 10 42 0000-00-00 00:00:00 [] 0 -d Date -k UInt64 -i32 Int32 -dt DateTime -s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 13:26:50 100500 -2015-01-01 7 39 2014-07-14 13:26:50 0 -2015-01-01 8 40 2012-12-12 12:12:12 0 -2015-01-01 9 41 1992-01-01 08:00:00 0 -2015-01-01 10 42 0000-00-00 00:00:00 0 -d Date -k UInt64 -i32 Int32 -dt DateTime -s UInt32 DEFAULT \'0\' -n.s Array(String) -n.d Array(Date) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\', `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -s UInt32 DEFAULT \'0\' -n.s Array(String) -n.d Array(Date) -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\', `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 13:26:50 100500 [] [] -2015-01-01 7 39 2014-07-14 13:26:50 0 [] [] -2015-01-01 8 40 2012-12-12 12:12:12 0 [] [] -2015-01-01 9 41 1992-01-01 08:00:00 0 [] [] -2015-01-01 10 42 0000-00-00 00:00:00 0 [] [] -d Date -k UInt64 -i32 Int32 -dt DateTime -s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt DateTime -s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = 
ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 13:26:50 100500 -2015-01-01 7 39 2014-07-14 13:26:50 0 -2015-01-01 8 40 2012-12-12 12:12:12 0 -2015-01-01 9 41 1992-01-01 08:00:00 0 -2015-01-01 10 42 0000-00-00 00:00:00 0 -d Date -k UInt64 -i32 Int32 -dt Date -s DateTime DEFAULT \'0000-00-00 00:00:00\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime DEFAULT \'0000-00-00 00:00:00\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) -d Date -k UInt64 -i32 Int32 -dt Date -s DateTime DEFAULT \'0000-00-00 00:00:00\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime DEFAULT \'0000-00-00 00:00:00\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) -2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00 -2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00 -2015-01-01 8 40 2012-12-12 0000-00-00 00:00:00 -2015-01-01 9 41 1992-01-01 0000-00-00 00:00:00 -2015-01-01 10 42 0000-00-00 0000-00-00 00:00:00 diff --git a/dbms/tests/queries/0_stateless/00564_temporary_table_management.reference b/dbms/tests/queries/0_stateless/00564_temporary_table_management.reference deleted file mode 100644 index edd17b9ea39..00000000000 --- a/dbms/tests/queries/0_stateless/00564_temporary_table_management.reference +++ /dev/null @@ -1,4 +0,0 @@ -1 -CREATE TEMPORARY TABLE temp_tab (`number` UInt64) ENGINE = Memory -temp_tab -0 diff --git a/dbms/tests/queries/0_stateless/00599_create_view_with_subquery.reference b/dbms/tests/queries/0_stateless/00599_create_view_with_subquery.reference deleted file mode 100644 index 13e0f35b075..00000000000 --- a/dbms/tests/queries/0_stateless/00599_create_view_with_subquery.reference +++ /dev/null @@ -1 +0,0 @@ -CREATE VIEW default.test_view_00599 (`id` UInt64) AS SELECT * FROM default.test_00599 WHERE id = (SELECT 1) diff --git a/dbms/tests/queries/0_stateless/00604_show_create_database.reference b/dbms/tests/queries/0_stateless/00604_show_create_database.reference deleted file mode 100644 index 1fe93a5e393..00000000000 --- a/dbms/tests/queries/0_stateless/00604_show_create_database.reference +++ /dev/null @@ -1 +0,0 @@ -CREATE DATABASE test_00604 ENGINE = Ordinary diff --git a/dbms/tests/queries/0_stateless/00642_cast.reference b/dbms/tests/queries/0_stateless/00642_cast.reference deleted file mode 100644 index f75503efffe..00000000000 --- a/dbms/tests/queries/0_stateless/00642_cast.reference +++ /dev/null @@ -1,13 +0,0 @@ -hello -hello -hello -hello -hello -hello -hello -hello -1970-01-01 00:00:01 -CREATE TABLE default.cast (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = MergeTree ORDER BY e SETTINGS index_granularity = 8192 -x UInt8 -e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\') -1 hello diff --git a/dbms/tests/queries/0_stateless/00643_cast_zookeeper.reference b/dbms/tests/queries/0_stateless/00643_cast_zookeeper.reference deleted file mode 100644 index 86a8b164844..00000000000 --- a/dbms/tests/queries/0_stateless/00643_cast_zookeeper.reference +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE test.cast1 (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') ORDER BY e SETTINGS index_granularity = 8192 -x UInt8 -e Enum8(\'hello\' = 1, \'world\' = 2) 
DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\') -1 hello -1 hello diff --git a/dbms/tests/queries/0_stateless/00725_comment_columns.reference b/dbms/tests/queries/0_stateless/00725_comment_columns.reference deleted file mode 100644 index 7204496753c..00000000000 --- a/dbms/tests/queries/0_stateless/00725_comment_columns.reference +++ /dev/null @@ -1,38 +0,0 @@ -CREATE TABLE default.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\', `fourth_column` UInt8 COMMENT \'comment 4\', `fifth_column` UInt8) ENGINE = TinyLog -first_column UInt8 DEFAULT 1 comment 1 -second_column UInt8 MATERIALIZED first_column comment 2 -third_column UInt8 ALIAS second_column comment 3 -fourth_column UInt8 comment 4 -fifth_column UInt8 -┌─table──────────────────────┬─name──────────┬─comment───┐ -│ check_query_comment_column │ first_column │ comment 1 │ -│ check_query_comment_column │ second_column │ comment 2 │ -│ check_query_comment_column │ third_column │ comment 3 │ -│ check_query_comment_column │ fourth_column │ comment 4 │ -│ check_query_comment_column │ fifth_column │ │ -└────────────────────────────┴───────────────┴───────────┘ -CREATE TABLE default.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\', `fourth_column` UInt8 COMMENT \'comment 4_1\', `fifth_column` UInt8 COMMENT \'comment 5_1\') ENGINE = TinyLog -┌─table──────────────────────┬─name──────────┬─comment─────┐ -│ check_query_comment_column │ first_column │ comment 1_2 │ -│ check_query_comment_column │ second_column │ comment 2_2 │ -│ check_query_comment_column │ third_column │ comment 3_2 │ -│ check_query_comment_column │ fourth_column │ comment 4_2 │ -│ check_query_comment_column │ fifth_column │ comment 5_2 │ -└────────────────────────────┴───────────────┴─────────────┘ -CREATE TABLE default.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\', `fourth_column` UInt8 COMMENT \'comment 4_2\', `fifth_column` UInt8 COMMENT \'comment 5_2\') ENGINE = TinyLog -CREATE TABLE default.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1\', `second_column` UInt8 COMMENT \'comment 2\', `third_column` UInt8 COMMENT \'comment 3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192 -first_column UInt8 comment 1 -second_column UInt8 comment 2 -third_column UInt8 comment 3 -┌─table──────────────────────┬─name──────────┬─comment───┐ -│ check_query_comment_column │ first_column │ comment 1 │ -│ check_query_comment_column │ second_column │ comment 2 │ -│ check_query_comment_column │ third_column │ comment 3 │ -└────────────────────────────┴───────────────┴───────────┘ -CREATE TABLE default.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1_2\', `second_column` UInt8 COMMENT \'comment 2_2\', `third_column` UInt8 COMMENT \'comment 3_2\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192 -CREATE TABLE default.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 
1_3\', `second_column` UInt8 COMMENT \'comment 2_3\', `third_column` UInt8 COMMENT \'comment 3_3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192 -┌─table──────────────────────┬─name──────────┬─comment─────┐ -│ check_query_comment_column │ first_column │ comment 1_3 │ -│ check_query_comment_column │ second_column │ comment 2_3 │ -│ check_query_comment_column │ third_column │ comment 3_3 │ -└────────────────────────────┴───────────────┴─────────────┘ diff --git a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference b/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference deleted file mode 100644 index 2873fcbee3b..00000000000 --- a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference +++ /dev/null @@ -1,7 +0,0 @@ -CREATE MATERIALIZED VIEW test_00751.t_mv_00751 (`date` Date, `platform` Enum8('a' = 0, 'b' = 1), `app` Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test_00751.t_00751 WHERE (app = (SELECT min(app) FROM test_00751.u_00751)) AND (platform = (SELECT (SELECT min(platform) FROM test_00751.v_00751))) -2000-01-01 a a -2000-01-02 b b -2000-01-03 a a -2000-01-04 b b -2000-01-02 b b -2000-01-03 a a diff --git a/dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference b/dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference deleted file mode 100644 index 8b1eeea8203..00000000000 --- a/dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192 -column_name1 UInt8 DEFAULT 1 comment -column_name2 UInt8 non default comment -CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192 -column_name1 UInt8 DEFAULT 1 another comment -column_name2 UInt8 non default comment diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference b/dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference deleted file mode 100644 index 93f15318634..00000000000 --- a/dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference +++ /dev/null @@ -1,106 +0,0 @@ -*** Check SHOW CREATE TABLE *** -CREATE TABLE default.alter_column (`x` UInt32, `y` Int32) ENGINE = MergeTree PARTITION BY x ORDER BY x SETTINGS index_granularity = 8192 -*** Check parts *** -0 0 -10 -10 -11 -11 -12 -12 -13 -13 -14 -14 -15 -15 -16 -16 -17 -17 -18 -18 -19 -19 -1 -1 -20 -20 -21 -21 -22 -22 -23 -23 -24 -24 -25 -25 -26 -26 -27 -27 -28 -28 -29 -29 -2 -2 -30 -30 -31 -31 -32 -32 -33 -33 -34 -34 -35 -35 -36 -36 -37 -37 -38 -38 -39 -39 -3 -3 -40 -40 -41 -41 -42 -42 -43 -43 -44 -44 -45 -45 -46 -46 -47 -47 -48 -48 -49 -49 -4 -4 -5 -5 -6 -6 -7 -7 -8 -8 -9 -9 -*** Check SHOW CREATE TABLE after ALTER MODIFY *** -CREATE TABLE default.alter_column (`x` UInt32, `y` Int64) ENGINE = MergeTree PARTITION BY x ORDER BY x SETTINGS index_granularity = 8192 -*** Check parts after ALTER MODIFY *** -0 0 -10 -10 -11 -11 
-12 -12 -13 -13 -14 -14 -15 -15 -16 -16 -17 -17 -18 -18 -19 -19 -1 -1 -20 -20 -21 -21 -22 -22 -23 -23 -24 -24 -25 -25 -26 -26 -27 -27 -28 -28 -29 -29 -2 -2 -30 -30 -31 -31 -32 -32 -33 -33 -34 -34 -35 -35 -36 -36 -37 -37 -38 -38 -39 -39 -3 -3 -40 -40 -41 -41 -42 -42 -43 -43 -44 -44 -45 -45 -46 -46 -47 -47 -48 -48 -49 -49 -4 -4 -5 -5 -6 -6 -7 -7 -8 -8 -9 -9 diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by.reference b/dbms/tests/queries/0_stateless/00754_alter_modify_order_by.reference deleted file mode 100644 index 1bcdae884f8..00000000000 --- a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by.reference +++ /dev/null @@ -1,12 +0,0 @@ -*** Check that the parts are sorted according to the new key. *** -1 2 0 10 -1 2 0 20 -1 2 2 40 -1 2 2 50 -1 2 1 30 -*** Check that the rows are collapsed according to the new key. *** -1 2 0 30 -1 2 1 30 -1 2 4 90 -*** Check SHOW CREATE TABLE *** -CREATE TABLE default.summing (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = SummingMergeTree PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192 diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference b/dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference deleted file mode 100644 index ebe30941f3f..00000000000 --- a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference +++ /dev/null @@ -1,14 +0,0 @@ -*** Check that the parts are sorted according to the new key. *** -1 2 0 10 -1 2 0 20 -1 2 2 40 -1 2 2 50 -1 2 1 30 -*** Check that the rows are collapsed according to the new key. *** -1 2 0 30 -1 2 1 30 -1 2 4 90 -*** Check SHOW CREATE TABLE *** -CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192 -*** Check SHOW CREATE TABLE after offline ALTER *** -CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `t` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, t * t) SETTINGS index_granularity = 8192 diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference deleted file mode 100644 index 6da97ff6091..00000000000 --- a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference +++ /dev/null @@ -1,18 +0,0 @@ -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 3.3 ccc 7 -2 -1 world 2018-10-05 1.1 -2 hello 2018-10-01 2.2 -3 buy 2018-10-11 3.3 -10003 -10003 -274972506.6 -9175437371954010821 -CREATE TABLE default.compression_codec_multiple_more_types (`id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192 -1.5555555555555 hello world! [77] ['John'] -7.1000000000000 xxxxxxxxxxxx [127] ['Henry'] -! 
-222 -!ZSTD -CREATE TABLE default.test_default_delta (`id` UInt64 CODEC(Delta(8)), `data` String CODEC(Delta(1)), `somedate` Date CODEC(Delta(2)), `somenum` Float64 CODEC(Delta(8)), `somestr` FixedString(3) CODEC(Delta(1)), `othernum` Int64 CODEC(Delta(8)), `yetothernum` Float32 CODEC(Delta(4)), `ddd.age` Array(UInt8) CODEC(Delta(1)), `ddd.Name` Array(String) CODEC(Delta(1)), `ddd.OName` Array(String) CODEC(Delta(1)), `ddd.BName` Array(String) CODEC(Delta(1))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192 diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference b/dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference deleted file mode 100644 index 322b207bf7d..00000000000 --- a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference +++ /dev/null @@ -1,26 +0,0 @@ -CREATE TABLE default.compression_codec_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = Log() -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 3.3 ccc 7 -2 -CREATE TABLE default.compression_codec_multiple_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = Log() -1 world 2018-10-05 1.1 -2 hello 2018-10-01 2.2 -3 buy 2018-10-11 3.3 -10003 -10003 -274972506.6 -9175437371954010821 -CREATE TABLE default.compression_codec_tiny_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = TinyLog() -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 
2018-12-16 3.3 ccc 7 -2 -CREATE TABLE default.compression_codec_multiple_tiny_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = TinyLog() -1 world 2018-10-05 1.1 -2 hello 2018-10-01 2.2 -3 buy 2018-10-11 3.3 -10003 -10003 -274972506.6 -9175437371954010821 diff --git a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference b/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference deleted file mode 100644 index 7967cf7837e..00000000000 --- a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference +++ /dev/null @@ -1,5 +0,0 @@ -a 2018-01-01 00:00:00 0000-00-00 00:00:00 -b 2018-01-01 00:00:00 b b 2018-01-01 00:00:00 -c 2018-01-01 00:00:00 c c 2018-01-01 00:00:00 -b 2018-01-01 00:00:00 b b 2018-01-01 00:00:00 -c 2018-01-01 00:00:00 c c 2018-01-01 00:00:00 diff --git a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql b/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql deleted file mode 100644 index b8bd6d3384c..00000000000 --- a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql +++ /dev/null @@ -1,14 +0,0 @@ -DROP TABLE IF EXISTS using1; -DROP TABLE IF EXISTS using2; - -CREATE TABLE using1(a String, b DateTime) ENGINE=MergeTree order by a; -CREATE TABLE using2(c String, a String, d DateTime) ENGINE=MergeTree order by c; - -INSERT INTO using1 VALUES ('a', '2018-01-01 00:00:00') ('b', '2018-01-01 00:00:00') ('c', '2018-01-01 00:00:00'); -INSERT INTO using2 VALUES ('d', 'd', '2018-01-01 00:00:00') ('b', 'b', '2018-01-01 00:00:00') ('c', 'c', '2018-01-01 00:00:00'); - -SELECT * FROM using1 t1 ALL LEFT JOIN (SELECT *, c as a, d as b FROM using2) t2 USING (a, b) ORDER BY d; -SELECT * FROM using1 t1 ALL INNER JOIN (SELECT *, c as a, d as b FROM using2) t2 USING (a, b) ORDER BY d; - -DROP TABLE using1; -DROP TABLE using2; diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter.reference b/dbms/tests/queries/0_stateless/00836_indices_alter.reference deleted file mode 100644 index e30c17eb673..00000000000 --- a/dbms/tests/queries/0_stateless/00836_indices_alter.reference +++ /dev/null @@ -1,28 +0,0 @@ -CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 2 -1 2 -1 2 -1 2 -1 2 -1 2 -CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 2 -1 2 -1 2 -1 2 -1 2 -CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 2 -1 2 -1 2 -1 2 -1 2 -1 2 -1 2 -CREATE TABLE default.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 2 diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference 
b/dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference deleted file mode 100644 index b2c2b41f460..00000000000 --- a/dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference +++ /dev/null @@ -1,58 +0,0 @@ -CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 2 -1 2 -1 4 -1 5 -3 2 -19 9 -65 75 -1 2 -1 4 -1 5 -3 2 -19 9 -65 75 -CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 4 -1 5 -3 2 -19 9 -65 75 -1 2 -1 4 -1 5 -3 2 -19 9 -65 75 -CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 4 -1 5 -3 2 -19 9 -65 75 -1 2 -1 4 -1 5 -3 2 -19 9 -65 75 -CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 3 -1 2 -1 3 -CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192 -1 2 -1 3 -1 2 -1 3 diff --git 
a/dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference b/dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference deleted file mode 100644 index 29bda49a8e5..00000000000 --- a/dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference +++ /dev/null @@ -1,27 +0,0 @@ -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 3.3 ccc 7 -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 3.3 ccc 7 -2 -2 -1 world 2018-10-05 1.1 -2 hello 2018-10-01 2.2 -3 buy 2018-10-11 3.3 -1 world 2018-10-05 1.1 -2 hello 2018-10-01 2.2 -3 buy 2018-10-11 3.3 -10003 -10003 -10003 -10003 -274972506.6 -274972506.6 -9175437371954010821 -9175437371954010821 -CREATE TABLE test.compression_codec_multiple_more_types_replicated (`id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\') ORDER BY tuple() SETTINGS index_granularity = 8192 -1.5555555555555 hello world! [77] ['John'] -7.1000000000000 xxxxxxxxxxxx [127] ['Henry'] -! -222 diff --git a/dbms/tests/queries/0_stateless/00916_create_or_replace_view.reference b/dbms/tests/queries/0_stateless/00916_create_or_replace_view.reference deleted file mode 100644 index 30d14bf1e41..00000000000 --- a/dbms/tests/queries/0_stateless/00916_create_or_replace_view.reference +++ /dev/null @@ -1,2 +0,0 @@ -CREATE VIEW default.t (`number` UInt64) AS SELECT number FROM system.numbers -CREATE VIEW default.t (`next_number` UInt64) AS SELECT number + 1 AS next_number FROM system.numbers diff --git a/dbms/tests/queries/0_stateless/00933_alter_ttl.reference b/dbms/tests/queries/0_stateless/00933_alter_ttl.reference deleted file mode 100644 index 44ba49026a7..00000000000 --- a/dbms/tests/queries/0_stateless/00933_alter_ttl.reference +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE default.ttl (`d` Date, `a` Int32) ENGINE = MergeTree PARTITION BY toDayOfMonth(d) ORDER BY a TTL d + toIntervalDay(1) SETTINGS index_granularity = 8192 -2100-10-10 3 -2100-10-10 4 -d Date -a Int32 d + toIntervalDay(1) diff --git a/dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference b/dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference deleted file mode 100644 index 986bc6b4a24..00000000000 --- a/dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference +++ /dev/null @@ -1,3 +0,0 @@ -200 -400 -CREATE TABLE test.ttl_repl2 (`d` Date, `x` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/ttl_repl\', \'2\') PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + toIntervalDay(1) SETTINGS index_granularity = 8192 diff --git a/dbms/tests/queries/0_stateless/00933_ttl_simple.reference b/dbms/tests/queries/0_stateless/00933_ttl_simple.reference deleted file mode 100644 index e8b0c699aec..00000000000 --- a/dbms/tests/queries/0_stateless/00933_ttl_simple.reference +++ /dev/null @@ -1,16 +0,0 @@ -0 0 -0 0 -5 6 -2000-10-10 00:00:00 0 -2000-10-10 00:00:00 0 -2000-10-10 00:00:00 0 -2100-10-10 00:00:00 3 -2100-10-10 2 -CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL now() - 1000) ENGINE = 
MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192 -1 0 -CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL now() + 1000) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192 -1 1 -CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL today() - 1) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192 -1 0 -CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL today() + 1) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192 -1 1 diff --git a/dbms/tests/queries/0_stateless/00980_merge_alter_settings.reference b/dbms/tests/queries/0_stateless/00980_merge_alter_settings.reference deleted file mode 100644 index ee3818d25dc..00000000000 --- a/dbms/tests/queries/0_stateless/00980_merge_alter_settings.reference +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096 -CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 1, parts_to_delay_insert = 1 -CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100 -2 -CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 30 -CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String, `Data2` UInt64) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 15 diff --git a/dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference b/dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference deleted file mode 100644 index 159102e1ca7..00000000000 --- a/dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference +++ /dev/null @@ -1,12 +0,0 @@ -CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192 -CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192 -4 -4 -4 -4 -6 -6 -CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1 -CREATE TABLE default.replicated_table_for_alter2 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\') ORDER BY id SETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 -CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String, `Data2` UInt64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1, check_delay_period = 15 -CREATE TABLE default.replicated_table_for_alter2 (`id` 
UInt64, `Data` String, `Data2` UInt64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\') ORDER BY id SETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 diff --git a/dbms/tests/queries/0_stateless/00990_hasToken.sh b/dbms/tests/queries/0_stateless/00990_hasToken.sh deleted file mode 100755 index 4ccb77b8ecc..00000000000 --- a/dbms/tests/queries/0_stateless/00990_hasToken.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh - -# We should have correct env vars from shell_config.sh to run this test - -python $CURDIR/00990_hasToken.python | ${CLICKHOUSE_CLIENT} -nm diff --git a/dbms/tests/queries/0_stateless/00998_constraints_all_tables.reference b/dbms/tests/queries/0_stateless/00998_constraints_all_tables.reference deleted file mode 100644 index 730df555af3..00000000000 --- a/dbms/tests/queries/0_stateless/00998_constraints_all_tables.reference +++ /dev/null @@ -1,14 +0,0 @@ -0 -0 -3 -0 -0 -3 -0 -0 -3 -0 -0 -3 -CREATE TABLE default.constrained (`URL` String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log -CREATE TABLE default.constrained2 (`URL` String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log diff --git a/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference b/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference deleted file mode 100644 index b462a5a7baa..00000000000 --- a/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference +++ /dev/null @@ -1,4 +0,0 @@ -OK -OK -OK -OK diff --git a/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh deleted file mode 100755 index ac66dbc352a..00000000000 --- a/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. 
$CURDIR/../shell_config.sh - - -R1=table_1017_1 -R2=table_1017_2 -T1=table_1017_merge - -${CLICKHOUSE_CLIENT} -n -q " - DROP TABLE IF EXISTS $R1; - DROP TABLE IF EXISTS $R2; - - CREATE TABLE $R1 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r1') ORDER BY x; - CREATE TABLE $R2 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r2') ORDER BY x; - CREATE TABLE $T1 (x UInt32, y UInt32) ENGINE MergeTree() ORDER BY x; - - INSERT INTO $R1 VALUES (0, 1)(1, 2)(2, 3)(3, 4); - INSERT INTO $T1 VALUES (0, 1)(1, 2)(2, 3)(3, 4); -" - -# Check that in mutations of replicated tables predicates do not contain non-deterministic functions -${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE ignore(rand())" 2>&1 \ -| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL' - -${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 \ -| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL' - - -# For regular tables we do not enforce deterministic functions -${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 DELETE WHERE rand() = 0" 2>&1 > /dev/null \ -&& echo 'OK' || echo 'FAIL' - -${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 > /dev/null \ -&& echo 'OK' || echo 'FAIL' - - -${CLICKHOUSE_CLIENT} -n -q " - DROP TABLE IF EXISTS $R2; - DROP TABLE IF EXISTS $R1; - DROP TABLE IF EXISTS $T1; -" diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference b/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference deleted file mode 100644 index 327c02a4b8a..00000000000 --- a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference +++ /dev/null @@ -1,19 +0,0 @@ -=DICTIONARY in Ordinary DB -CREATE DICTIONARY ordinary_db.dict1 (`key_column` UInt64 DEFAULT 0, `second_column` UInt8 DEFAULT 1, `third_column` String DEFAULT \'qqq\') PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' PASSWORD \'\' DB \'database_for_dict\')) LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT()) -dict1 -1 -ordinary_db dict1 -==DETACH DICTIONARY -0 -==ATTACH DICTIONARY -dict1 -1 -ordinary_db dict1 -==DROP DICTIONARY -0 -=DICTIONARY in Memory DB -0 -=DICTIONARY in Lazy DB -=DROP DATABASE WITH DICTIONARY -dict4 -dict4 diff --git a/dbms/tests/queries/0_stateless/01033_quota_dcl.reference b/dbms/tests/queries/0_stateless/01033_quota_dcl.reference deleted file mode 100644 index 7f92f992dd5..00000000000 --- a/dbms/tests/queries/0_stateless/01033_quota_dcl.reference +++ /dev/null @@ -1,2 +0,0 @@ -default -CREATE QUOTA default KEYED BY \'user name\' FOR INTERVAL 1 HOUR TRACKING TO default, readonly diff --git a/dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference b/dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference deleted file mode 100644 index bba4944f4a8..00000000000 --- a/dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE default.BannerDict (`BannerID` UInt64, `CompaignID` UInt64) ENGINE = ODBC(\'DSN=pgconn;Database=postgres\', \'somedb\', \'bannerdict\') diff --git a/dbms/tests/queries/0_stateless/01048_exists_query.sql b/dbms/tests/queries/0_stateless/01048_exists_query.sql deleted file mode 100644 index 6228b57fe3f..00000000000 --- 
a/dbms/tests/queries/0_stateless/01048_exists_query.sql +++ /dev/null @@ -1,44 +0,0 @@ -EXISTS database_for_dict.t; -EXISTS TABLE database_for_dict.t; -EXISTS DICTIONARY database_for_dict.t; - -DROP DATABASE IF EXISTS database_for_dict; -CREATE DATABASE database_for_dict Engine = Ordinary; - -DROP TABLE IF EXISTS database_for_dict.t; -EXISTS database_for_dict.t; -EXISTS TABLE database_for_dict.t; -EXISTS DICTIONARY database_for_dict.t; - -CREATE TABLE database_for_dict.t (x UInt8) ENGINE = Memory; -EXISTS database_for_dict.t; -EXISTS TABLE database_for_dict.t; -EXISTS DICTIONARY database_for_dict.t; - -DROP TABLE database_for_dict.t; -EXISTS database_for_dict.t; -EXISTS TABLE database_for_dict.t; -EXISTS DICTIONARY database_for_dict.t; - -DROP DICTIONARY IF EXISTS t; -CREATE TEMPORARY TABLE t (x UInt8); -EXISTS t; -- Does not work for temporary tables. Maybe have to fix. -EXISTS TABLE t; -EXISTS DICTIONARY t; - -CREATE DICTIONARY database_for_dict.t (k UInt64, v String) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT TSV)) LIFETIME(1000); -EXISTS database_for_dict.t; -EXISTS TABLE database_for_dict.t; -- Dictionaries are tables as well. But not all tables are dictionaries. -EXISTS DICTIONARY database_for_dict.t; - --- But dictionary-tables cannot be dropped as usual tables. -DROP TABLE database_for_dict.t; -- { serverError 60 } -DROP DICTIONARY database_for_dict.t; -EXISTS database_for_dict.t; -EXISTS TABLE database_for_dict.t; -EXISTS DICTIONARY database_for_dict.t; - -DROP DATABASE database_for_dict; -EXISTS database_for_dict.t; -EXISTS TABLE database_for_dict.t; -EXISTS DICTIONARY database_for_dict.t; diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_1.reference b/dbms/tests/queries/0_stateless/01055_compact_parts_1.reference deleted file mode 100644 index 7c9dd4a0ef9..00000000000 --- a/dbms/tests/queries/0_stateless/01055_compact_parts_1.reference +++ /dev/null @@ -1,2 +0,0 @@ -CREATE TABLE default.mt_compact (`a` Int32, `s` String) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity_bytes = 0, index_granularity = 8192 -CREATE TABLE default.mt_compact (`a` Int32, `s` String) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, index_granularity = 8192, parts_to_delay_insert = 300 diff --git a/dbms/tests/queries/0_stateless/01068_parens.sql b/dbms/tests/queries/0_stateless/01068_parens.sql deleted file mode 100644 index 7cb4f097b15..00000000000 --- a/dbms/tests/queries/0_stateless/01068_parens.sql +++ /dev/null @@ -1 +0,0 @@ -((((((((((((((SELECT((((((((((((((((((((((((((((((((1)))))))))))))))))))))))))))))))))))))))))))))) diff --git a/dbms/tests/queries/0_stateless/01069_database_memory.reference b/dbms/tests/queries/0_stateless/01069_database_memory.reference deleted file mode 100644 index 393c85070b9..00000000000 --- a/dbms/tests/queries/0_stateless/01069_database_memory.reference +++ /dev/null @@ -1,8 +0,0 @@ -CREATE DATABASE memory_01069 ENGINE = Memory() -1 -2 -3 -4 -3 -4 -CREATE TABLE memory_01069.file (`n` UInt8) ENGINE = File(\'CSV\') diff --git a/dbms/tests/queries/0_stateless/01070_alter_with_ttl.reference b/dbms/tests/queries/0_stateless/01070_alter_with_ttl.reference deleted file mode 100644 index 8b2bd9d1389..00000000000 --- a/dbms/tests/queries/0_stateless/01070_alter_with_ttl.reference +++ /dev/null @@ -1,2 +0,0 @@ -CREATE TABLE default.alter_ttl (`i` Int32, `s` String TTL toDate(\'2020-01-01\')) ENGINE = MergeTree ORDER BY i TTL toDate(\'2020-05-05\') 
SETTINGS index_granularity = 8192 -CREATE TABLE default.alter_ttl (`d` Date, `s` String TTL d + toIntervalDay(1)) ENGINE = MergeTree ORDER BY d TTL d + toIntervalMonth(1) SETTINGS index_granularity = 8192 diff --git a/dbms/tests/queries/0_stateless/01073_attach_if_not_exists.sql b/dbms/tests/queries/0_stateless/01073_attach_if_not_exists.sql deleted file mode 100644 index 1c7877a7c73..00000000000 --- a/dbms/tests/queries/0_stateless/01073_attach_if_not_exists.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE TABLE t (a Int) ENGINE = Log; -ATTACH TABLE t; -- { serverError 57 } -ATTACH TABLE IF NOT EXISTS t; -DETACH TABLE t; -ATTACH TABLE IF NOT EXISTS t; -EXISTS TABLE t; -DROP TABLE t; diff --git a/dbms/tests/queries/0_stateless/01073_grant_and_revoke.reference b/dbms/tests/queries/0_stateless/01073_grant_and_revoke.reference deleted file mode 100644 index 4aad0ca65f1..00000000000 --- a/dbms/tests/queries/0_stateless/01073_grant_and_revoke.reference +++ /dev/null @@ -1,11 +0,0 @@ -CREATE USER test_user_01073 -A -B -GRANT DELETE, INSERT ON *.* TO test_user_01073 -GRANT SELECT ON db1.* TO test_user_01073 -GRANT SELECT ON db2.table TO test_user_01073 -GRANT SELECT(col1) ON db3.table TO test_user_01073 -GRANT SELECT(col1, col2) ON db4.table TO test_user_01073 -C -GRANT DELETE ON *.* TO test_user_01073 -GRANT SELECT(col1) ON db4.table TO test_user_01073 diff --git a/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.reference b/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.reference deleted file mode 100644 index 0082653059c..00000000000 --- a/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.reference +++ /dev/null @@ -1,17 +0,0 @@ -CREATE USER test_user_01075 -CREATE USER test_user_01075 -CREATE USER test_user_01075 HOST NONE -CREATE USER test_user_01075 HOST LOCAL -CREATE USER test_user_01075 HOST IP \'192.168.23.15\' -CREATE USER test_user_01075 HOST IP \'2001:db8:11a3:9d7:1f34:8a2e:7a0:765d\' -CREATE USER test_user_01075 HOST LOCAL, IP \'2001:db8:11a3:9d7:1f34:8a2e:7a0:765d\' -CREATE USER test_user_01075 HOST LOCAL -CREATE USER test_user_01075 HOST NONE -CREATE USER test_user_01075 HOST LIKE \'@.somesite.com\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite.com\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite.com\', \'.*.anothersite.org\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite2.com\', \'.*.anothersite2.org\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite3.com\', \'.*.anothersite3.org\' -CREATE USER `test_user_01075_x@localhost` HOST LOCAL -CREATE USER test_user_01075_x -CREATE USER `test_user_01075_x@192.168.23.15` HOST LIKE \'192.168.23.15\' diff --git a/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.sql b/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.sql deleted file mode 100644 index 77a16a9f62a..00000000000 --- a/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.sql +++ /dev/null @@ -1,56 +0,0 @@ -DROP USER IF EXISTS test_user_01075, test_user_01075_x, test_user_01075_x@localhost, test_user_01075_x@'192.168.23.15'; - -CREATE USER test_user_01075; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST ANY; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST NONE; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST LOCAL; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST IP '192.168.23.15'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST IP 
'2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 ADD HOST IP '127.0.0.1'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 REMOVE HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 REMOVE HOST NAME 'localhost'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST LIKE '@.somesite.com'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite\.com'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite\.com', '.*\.anothersite\.org'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite2\.com', NAME REGEXP '.*\.anothersite2\.org'; -SHOW CREATE USER test_user_01075; - -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite3\.com' HOST NAME REGEXP '.*\.anothersite3\.org'; -SHOW CREATE USER test_user_01075; - -DROP USER test_user_01075; - -CREATE USER test_user_01075_x@localhost; -SHOW CREATE USER test_user_01075_x@localhost; - -ALTER USER test_user_01075_x@localhost RENAME TO test_user_01075_x@'@'; -SHOW CREATE USER test_user_01075_x; - -ALTER USER test_user_01075_x RENAME TO test_user_01075_x@'192.168.23.15'; -SHOW CREATE USER 'test_user_01075_x@192.168.23.15'; - -DROP USER 'test_user_01075_x@192.168.23.15'; diff --git a/dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.reference b/dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.reference deleted file mode 100644 index 35ba20aff3e..00000000000 --- a/dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.reference +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` String DEFAULT \'10\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -1000 -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt64 DEFAULT \'10\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt64 DEFAULT 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -1000 -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt64 DEFAULT 100) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt16 DEFAULT 100) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -10000 -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt8 DEFAULT 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt8 DEFAULT 10, `better_column` UInt8 DEFAULT \'1\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt8 DEFAULT 10, `better_column` UInt8 DEFAULT \'1\', `other_date` String DEFAULT 1) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS 
index_granularity = 8192 diff --git a/dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference b/dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference deleted file mode 100644 index 198f79cf9a4..00000000000 --- a/dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference +++ /dev/null @@ -1,8 +0,0 @@ -Wrong column name. -CREATE TABLE default.table_for_bad_alters (`key` UInt64, `value1` UInt8, `value2` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_bad_alters (`key` UInt64, `value1` UInt8, `value2` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -syntax error at begin of string. -7 -Hello -World -Wrong index name. diff --git a/dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference b/dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference deleted file mode 100644 index 5b376a0654f..00000000000 --- a/dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE test_01083.file (`n` Int8) ENGINE = File(\'TSVWithNamesAndTypes\') -CREATE TABLE test_01083.buffer (`n` Int8) ENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000) -CREATE TABLE test_01083.merge (`n` Int8) ENGINE = Merge(\'test_01083\', \'distributed\') -CREATE TABLE test_01083.merge_tf AS merge(\'test_01083\', \'.*\') -CREATE TABLE test_01083.distributed (`n` Int8) ENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\') -CREATE TABLE test_01083.distributed_tf AS cluster(\'test_shard_localhost\', \'test_01083\', \'buffer\') -CREATE TABLE test_01083.url (`n` UInt64, `col` String) ENGINE = URL(\'https://localhost:8443/?query=select+n,+_table+from+test_01083.merge+format+CSV\', \'CSV\') -CREATE TABLE test_01083.rich_syntax AS remote(\'localhos{x|y|t}\', cluster(\'test_shard_localhost\', remote(\'127.0.0.{1..4}\', \'test_01083\', \'view\'))) -CREATE VIEW test_01083.view (`n` Int64) AS SELECT toInt64(n) AS n FROM (SELECT toString(n) AS n FROM test_01083.merge WHERE _table != \'qwerty\' ORDER BY _table ASC) UNION ALL SELECT * FROM test_01083.file -CREATE DICTIONARY test_01083.dict (`n` UInt64, `col` String DEFAULT \'42\') PRIMARY KEY n SOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\')) LIFETIME(MIN 0 MAX 1) LAYOUT(CACHE(SIZE_IN_CELLS 1)) -16 diff --git a/dbms/tests/queries/0_stateless/01086_odbc_roundtrip.sql b/dbms/tests/queries/0_stateless/01086_odbc_roundtrip.sql deleted file mode 100644 index 2c31711d895..00000000000 --- a/dbms/tests/queries/0_stateless/01086_odbc_roundtrip.sql +++ /dev/null @@ -1,14 +0,0 @@ -select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (ANSI)}','system','tables')); -select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (Unicode)}','system','tables')); - -DROP DATABASE IF EXISTS test_01086; -CREATE DATABASE test_01086; -USE test_01086; - -CREATE TABLE t (x UInt8, y Float32, z String) ENGINE = Memory; -INSERT INTO t VALUES (1,0.1,'a я'),(2,0.2,'b ą'),(3,0.3,'c d'); - -select * from odbc('DSN={ClickHouse DSN (ANSI)}','test_01086','t') ORDER BY x; -select * from odbc('DSN={ClickHouse DSN (Unicode)}','test_01086','t') ORDER BY x; - -DROP DATABASE test_01086; diff --git 
a/dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.sh b/dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.sh deleted file mode 100755 index f8b9862c1c1..00000000000 --- a/dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh - -url="https://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTPS}/?session_id=test_01098" - -${CLICKHOUSE_CURL} -sSk "$url" --data "CREATE TEMPORARY TABLE tmp_table AS SELECT number AS n FROM numbers(42)" > /dev/null; - -name_expr="'\`' || database || '\`.\`' || name || '\`'" -full_tmp_name=`echo "SELECT $name_expr FROM system.tables WHERE database='_temporary_and_external_tables' AND create_table_query LIKE '%tmp_table%'" | ${CLICKHOUSE_CURL} -sSgk $url -d @-` - -echo "SELECT * FROM $full_tmp_name" | ${CLICKHOUSE_CURL} -sSgk $url -d @- | grep -F "Code: 291" > /dev/null && echo "OK" - -echo -ne '0\n1\n' | ${CLICKHOUSE_CURL} -sSkF 'file=@-' "$url&file_format=CSV&file_types=UInt64&query=SELECT+sum((number+GLOBAL+IN+(SELECT+number+AS+n+FROM+remote('127.0.0.2',+numbers(5))+WHERE+n+GLOBAL+IN+(SELECT+*+FROM+tmp_table)+AND+n+GLOBAL+NOT+IN+(SELECT+*+FROM+file)+))+AS+res),+sum(number*res)+FROM+remote('127.0.0.2',+numbers(10))"; - diff --git a/dbms/tests/queries/0_stateless/01223_dist_on_dist.sql b/dbms/tests/queries/0_stateless/01223_dist_on_dist.sql deleted file mode 100644 index 1c239d6c666..00000000000 --- a/dbms/tests/queries/0_stateless/01223_dist_on_dist.sql +++ /dev/null @@ -1,58 +0,0 @@ -create table if not exists data_01223 (key Int) Engine=Memory(); -create table if not exists dist_layer_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01223); -create table if not exists dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); - -select * from dist_01223; - -insert into data_01223 select * from numbers(3); - -select 'DISTINCT ORDER BY'; -select distinct * from dist_01223 order by key; -select 'GROUP BY ORDER BY'; -select * from dist_01223 group by key order by key; -select 'GROUP BY ORDER BY LIMIT'; -select * from dist_01223 group by key order by key limit 1; -select 'HAVING'; -select * from dist_01223 having key = 1; -select 'GROUP BY HAVING'; -select * from dist_01223 group by key having key = 1; -select 'ORDER BY'; -select * from dist_01223 order by key; -select 'ORDER BY LIMIT'; -select * from dist_01223 order by key limit 1; -select 'ORDER BY LIMIT BY'; -select * from dist_01223 order by key limit 1 by key; -select 'cluster() ORDER BY'; -select * from cluster(test_cluster_two_shards, currentDatabase(), dist_01223) order by key; -select 'cluster() GROUP BY ORDER BY'; -select * from cluster(test_cluster_two_shards, currentDatabase(), dist_01223) group by key order by key; - -select 'LEFT JOIN'; -select toInt32(number) key, b.key from numbers(2) a left join (select distinct * from dist_01223) b using key order by b.key; -select 'RIGHT JOIN'; -select toInt32(number) key, b.key from numbers(2) a right join (select distinct * from dist_01223) b using key order by b.key; - --- more data for GROUP BY -insert into data_01223 select number%3 from numbers(30); - --- group_by_two_level_threshold -select 'GROUP BY ORDER BY group_by_two_level_threshold'; -select * from dist_01223 group by key order by key settings -group_by_two_level_threshold=1, -group_by_two_level_threshold_bytes=1; - --- 
distributed_aggregation_memory_efficient -select 'GROUP BY ORDER BY distributed_aggregation_memory_efficient'; -select * from dist_01223 group by key order by key settings -distributed_aggregation_memory_efficient=1; - --- distributed_aggregation_memory_efficient/group_by_two_level_threshold -select 'GROUP BY ORDER BY distributed_aggregation_memory_efficient/group_by_two_level_threshold'; -select * from dist_01223 group by key order by key settings -group_by_two_level_threshold=1, -group_by_two_level_threshold_bytes=1, -distributed_aggregation_memory_efficient=1; - -drop table dist_01223; -drop table dist_layer_01223; -drop table data_01223; diff --git a/dbms/tests/queries/bugs/00938_client_suggestions.sh b/dbms/tests/queries/bugs/00938_client_suggestions.sh deleted file mode 100755 index b4bd9e4480d..00000000000 --- a/dbms/tests/queries/bugs/00938_client_suggestions.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh - -for i in {1..100}; do $CLICKHOUSE_CLIENT --always_load_suggestion_data --query="SELECT 1 FORMAT Null"; done diff --git a/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.reference b/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.reference deleted file mode 100644 index 7f75aa873cb..00000000000 --- a/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.reference +++ /dev/null @@ -1,4 +0,0 @@ -GLOBAL IN distributed_group_by_no_merge -1 -GLOBAL IN -1 diff --git a/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.sql b/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.sql deleted file mode 100644 index e363fef2d2b..00000000000 --- a/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.sql +++ /dev/null @@ -1,18 +0,0 @@ -create table if not exists data_01224 (key Int) Engine=Memory(); -create table if not exists dist_layer_01224 as data_01224 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01224); -create table if not exists dist_01224 as data_01224 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01224); - -select * from dist_01224; -insert into data_01224 select * from numbers(3); - --- "Table expression is undefined, Method: ExpressionAnalyzer::interpretSubquery" -select 'GLOBAL IN distributed_group_by_no_merge'; -select distinct * from dist_01224 where key global in (1) settings distributed_group_by_no_merge=1; - --- requires #9923 -select 'GLOBAL IN'; -select distinct * from dist_01224 where key global in (1); - -drop table dist_01224; -drop table dist_layer_01224; -drop table data_01224; diff --git a/dbms/tests/queries/bugs/totals_rollup_having_block_header.sql b/dbms/tests/queries/bugs/totals_rollup_having_block_header.sql deleted file mode 100644 index 4f7f9692fd0..00000000000 --- a/dbms/tests/queries/bugs/totals_rollup_having_block_header.sql +++ /dev/null @@ -1,16 +0,0 @@ --- triggers assertion in debug build - -DROP TABLE IF EXISTS test.rollup_having; -CREATE TABLE test.rollup_having ( - a Nullable(String), - b Nullable(String) -) ENGINE = Memory; - -INSERT INTO test.rollup_having VALUES (NULL, NULL); -INSERT INTO test.rollup_having VALUES ('a', NULL); -INSERT INTO test.rollup_having VALUES ('a', 'b'); - -SELECT a, b, count(*) FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL; -SELECT a, b, count(*) FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL and b IS NOT NULL; - -DROP TABLE test.rollup_having; diff --git a/dbms/tests/queries/bugs/view_bad_types.sql 
b/dbms/tests/queries/bugs/view_bad_types.sql deleted file mode 100644 index 38daabfd6b8..00000000000 --- a/dbms/tests/queries/bugs/view_bad_types.sql +++ /dev/null @@ -1,11 +0,0 @@ -DROP TABLE IF EXISTS test.table; -CREATE TABLE test.table (x UInt16) ENGINE = TinyLog; -INSERT INTO test.table SELECT * FROM system.numbers LIMIT 10; - -DROP TABLE IF EXISTS test.view; -CREATE VIEW test.view (x UInt64) AS SELECT * FROM test.table; - -SELECT x, any(x) FROM test.view GROUP BY x; - -DROP TABLE test.view; -DROP TABLE test.table; diff --git a/dbms/tests/strings_dictionary.xml b/dbms/tests/strings_dictionary.xml deleted file mode 120000 index 603d99ef4e8..00000000000 --- a/dbms/tests/strings_dictionary.xml +++ /dev/null @@ -1 +0,0 @@ -../../dbms/tests/config/strings_dictionary.xml \ No newline at end of file diff --git a/docker/builder/README.md b/docker/builder/README.md index 7fd8fe42335..cb9fb7d1b77 100644 --- a/docker/builder/README.md +++ b/docker/builder/README.md @@ -13,21 +13,21 @@ Run build: make build ``` -Before run, ensure that your user has access to docker: -To check, that you have access to Docker, run `docker ps`. -If not, you must add this user to `docker` group: `sudo usermod -aG docker $USER` and relogin. +Before running, ensure that your user has access to Docker: +To check that you have access to Docker, run `docker ps`. +If not, add your user to the `docker` group: `sudo usermod -aG docker $USER` and re-login. (You must close all your sessions; for example, restart your computer.) -Build results are available in `build_docker` directory at top level of your working copy. +Build results are available in the `build_docker` directory at the top level of your working copy. It builds only binaries, not packages. For example, run server: ``` -cd $(git rev-parse --show-toplevel)/dbms/src/Server -$(git rev-parse --show-toplevel)/docker/builder/dbms/programs/clickhouse server --config-file $(git rev-parse --show-toplevel)/dbms/programs/server/config.xml +cd $(git rev-parse --show-toplevel)/src/Server +$(git rev-parse --show-toplevel)/docker/builder/programs/clickhouse server --config-file $(git rev-parse --show-toplevel)/programs/server/config.xml ``` Run client: ``` -$(git rev-parse --show-toplevel)/docker/builder/dbms/programs/clickhouse client +$(git rev-parse --show-toplevel)/docker/builder/programs/clickhouse client ``` diff --git a/docker/images.json b/docker/images.json index d21365fd49d..434a3c7af10 100644 --- a/docker/images.json +++ b/docker/images.json @@ -1,10 +1,10 @@ { "docker/packager/deb": "yandex/clickhouse-deb-builder", "docker/packager/binary": "yandex/clickhouse-binary-builder", + "docker/test/coverage": "yandex/clickhouse-coverage", "docker/test/compatibility/centos": "yandex/clickhouse-test-old-centos", "docker/test/compatibility/ubuntu": "yandex/clickhouse-test-old-ubuntu", "docker/test/integration": "yandex/clickhouse-integration-test", - "docker/test/performance": "yandex/clickhouse-performance-test", "docker/test/performance-comparison": "yandex/clickhouse-performance-comparison", "docker/test/pvs": "yandex/clickhouse-pvs-test", "docker/test/stateful": "yandex/clickhouse-stateful-test", @@ -14,5 +14,6 @@ "docker/test/unit": "yandex/clickhouse-unit-test", "docker/test/stress": "yandex/clickhouse-stress-test", "docker/test/split_build_smoke_test": "yandex/clickhouse-split-build-smoke-test", - "dbms/tests/integration/image": "yandex/clickhouse-integration-tests-runner" + "docker/test/codebrowser": "yandex/clickhouse-codebrowser", + "tests/integration/image":
"yandex/clickhouse-integration-tests-runner" } diff --git a/docker/packager/README.md b/docker/packager/README.md index e02a45fdaea..5d9751a0fbd 100644 --- a/docker/packager/README.md +++ b/docker/packager/README.md @@ -3,10 +3,10 @@ compilers and build settings. Correctly configured Docker daemon is single depen Usage: -Build deb package with `gcc-8` in `debug` mode: +Build deb package with `gcc-9` in `debug` mode: ``` $ mkdir deb/test_output -$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=gcc-8 --build-type=debug +$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=gcc-9 --build-type=debug $ ls -l deb/test_output -rw-r--r-- 1 root root 3730 clickhouse-client_18.14.2+debug_all.deb -rw-r--r-- 1 root root 84221888 clickhouse-common-static_18.14.2+debug_amd64.deb @@ -18,11 +18,11 @@ $ ls -l deb/test_output ``` -Build ClickHouse binary with `clang-6.0` and `address` sanitizer in `relwithdebuginfo` +Build ClickHouse binary with `clang-9.0` and `address` sanitizer in `relwithdebuginfo` mode: ``` $ mkdir $HOME/some_clickhouse -$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-6.0 --sanitizer=address +$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-9.0 --sanitizer=address $ ls -l $HOME/some_clickhouse -rwxr-xr-x 1 root root 787061952 clickhouse lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index a8faf3ceb01..54755d7c2f5 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -1,3 +1,4 @@ +# Trigger new image build # docker build -t yandex/clickhouse-binary-builder . FROM ubuntu:19.10 @@ -64,5 +65,8 @@ RUN wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/M # It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling. RUN wget "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en" -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz +# Download toolchain for FreeBSD 12.1 +RUN wget https://clickhouse-datasets.s3.yandex.net/toolchains/toolchains/freebsd-12.1-toolchain.tar.xz + COPY build.sh / CMD ["/bin/bash", "/build.sh"] diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 94615a5a39d..a341bbd9840 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -8,6 +8,9 @@ tar xJf MacOSX10.14.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-co mkdir -p build/cmake/toolchain/linux-aarch64 tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1 +mkdir -p build/cmake/toolchain/freebsd-x86_64 +tar xJf freebsd-12.1-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 + mkdir -p build/build_docker cd build/build_docker ccache --show-stats ||: @@ -16,15 +19,15 @@ rm -f CMakeCache.txt cmake .. -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS ninja ccache --show-stats ||: -mv ./dbms/programs/clickhouse* /output -mv ./dbms/unit_tests_dbms /output +mv ./programs/clickhouse* /output +mv ./src/unit_tests_dbms /output find . -name '*.so' -print -exec mv '{}' /output \; find . -name '*.so.*' -print -exec mv '{}' /output \; # Different files for performance test. 
if [ "performance" == "$COMBINED_OUTPUT" ] then - cp -r ../dbms/tests/performance /output + cp -r ../tests/performance /output cp -r ../docker/test/performance-comparison/config /output ||: rm /output/unit_tests_dbms ||: rm /output/clickhouse-odbc-bridge ||: @@ -36,9 +39,9 @@ fi if [ "" != "$COMBINED_OUTPUT" ] then mkdir -p /output/config - cp ../dbms/programs/server/config.xml /output/config - cp ../dbms/programs/server/users.xml /output/config - cp -r ../dbms/programs/server/config.d /output/config + cp ../programs/server/config.xml /output/config + cp ../programs/server/users.xml /output/config + cp -r ../programs/server/config.d /output/config tar -czvf "$COMBINED_OUTPUT.tgz" /output rm -r /output/* mv "$COMBINED_OUTPUT.tgz" /output diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index bedde0a2013..6aa550aaf82 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -48,6 +48,7 @@ RUN apt-get --allow-unauthenticated update -y \ libltdl-dev \ libre2-dev \ libjemalloc-dev \ + libmsgpack-dev \ unixodbc-dev \ odbcinst \ tzdata \ diff --git a/docker/packager/freebsd/Vagrantfile b/docker/packager/freebsd/Vagrantfile deleted file mode 100644 index 765f46d5604..00000000000 --- a/docker/packager/freebsd/Vagrantfile +++ /dev/null @@ -1,4 +0,0 @@ -Vagrant.configure("2") do |config| - config.vm.box = "robot-clickhouse/clickhouse-freebsd" - config.vm.synced_folder ".", "/vagrant", disabled: true -end diff --git a/docker/packager/packager b/docker/packager/packager index 3c10788e662..71380b92fac 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -11,48 +11,8 @@ SCRIPT_PATH = os.path.realpath(__file__) IMAGE_MAP = { "deb": "yandex/clickhouse-deb-builder", "binary": "yandex/clickhouse-binary-builder", - "freebsd": os.path.join(os.path.dirname(SCRIPT_PATH), "freebsd"), } -class Vagrant(object): - def __init__(self, path_to_vagrant_file): - self.prefix = "VAGRANT_CWD=" + path_to_vagrant_file - - def __enter__(self): - subprocess.check_call("{} vagrant up".format(self.prefix), shell=True) - self.ssh_path = "/tmp/vagrant-ssh" - subprocess.check_call("{} vagrant ssh-config > {}".format(self.prefix, self.ssh_path), shell=True) - return self - - def copy_to_image(self, local_path, remote_path): - cmd = "scp -F {ssh} -r {lpath} default:{rpath}".format(ssh=self.ssh_path, lpath=local_path, rpath=remote_path) - logging.info("Copying to image %s", cmd) - subprocess.check_call( - cmd, - shell=True - ) - - def copy_from_image(self, remote_path, local_path): - cmd = "scp -F {ssh} -r default:{rpath} {lpath}".format(ssh=self.ssh_path, rpath=remote_path, lpath=local_path) - logging.info("Copying from image %s", cmd) - subprocess.check_call( - cmd, - shell=True - ) - - def execute_cmd(self, cmd): - cmd = '{} vagrant ssh -c "{}"'.format(self.prefix, cmd) - logging.info("Executin cmd %s", cmd) - subprocess.check_call( - cmd, - shell=True - ) - - def __exit__(self, exc_type, exc_val, exc_tb): - logging.info("Destroying image") - subprocess.check_call("{} vagrant destroy --force".format(self.prefix), shell=True) - - def check_image_exists_locally(image_name): try: output = subprocess.check_output("docker images -q {} 2> /dev/null".format(image_name), shell=True) @@ -94,19 +54,11 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache subprocess.check_call(cmd, shell=True) -def run_vagrant_box_with_env(image_path, output_dir, ch_root): - with Vagrant(image_path) as vagrant: - logging.info("Copying folder to vagrant 
machine") - vagrant.copy_to_image(ch_root, "~/ClickHouse") - logging.info("Running build") - vagrant.execute_cmd("cd ~/ClickHouse && cmake . && ninja") - logging.info("Copying binary back") - vagrant.copy_from_image("~/ClickHouse/dbms/programs/clickhouse", output_dir) - def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, unbundled, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage): CLANG_PREFIX = "clang" DARWIN_SUFFIX = "-darwin" ARM_SUFFIX = "-aarch64" + FREEBSD_SUFFIX = "-freebsd" result = [] cmake_flags = ['$CMAKE_FLAGS', '-DADD_GDB_INDEX_FOR_GOLD=1'] @@ -114,7 +66,8 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ is_clang = compiler.startswith(CLANG_PREFIX) is_cross_darwin = compiler.endswith(DARWIN_SUFFIX) is_cross_arm = compiler.endswith(ARM_SUFFIX) - is_cross_compile = is_cross_darwin or is_cross_arm + is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX) + is_cross_compile = is_cross_darwin or is_cross_arm or is_cross_freebsd # Explicitly use LLD with Clang by default. # Don't force linker for cross-compilation. @@ -131,6 +84,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ elif is_cross_arm: cc = compiler[:-len(ARM_SUFFIX)] cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake") + elif is_cross_freebsd: + cc = compiler[:-len(FREEBSD_SUFFIX)] + cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake") else: cc = compiler @@ -205,11 +161,11 @@ if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') parser = argparse.ArgumentParser(description="ClickHouse building script using prebuilt Docker image") # 'performance' creates a combined .tgz with server and configs to be used for performance test. 
- parser.add_argument("--package-type", choices=['deb', 'binary', 'performance', 'freebsd'], required=True) + parser.add_argument("--package-type", choices=['deb', 'binary', 'performance'], required=True) parser.add_argument("--clickhouse-repo-path", default="../../") parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") - parser.add_argument("--compiler", choices=("clang-8", "clang-8-darwin", "clang-8-aarch64", "gcc-8", "gcc-9", "clang-9"), default="gcc-8") + parser.add_argument("--compiler", choices=("clang-8", "clang-8-darwin", "clang-9-aarch64", "clang-9-freebsd", "gcc-8", "gcc-9", "clang-9"), default="gcc-8") parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") parser.add_argument("--unbundled", action="store_true") parser.add_argument("--split-binary", action="store_true") @@ -247,9 +203,5 @@ if __name__ == "__main__": args.build_type, args.compiler, args.sanitizer, args.package_type, image_type, args.cache, args.distcc_hosts, args.unbundled, args.split_binary, args.clang_tidy, args.version, args.author, args.official, args.alien_pkgs, args.with_coverage) - if image_type != "freebsd": - run_docker_image_with_env(image_name, args.output_dir, env_prepared, ch_root, args.ccache_dir) - else: - logging.info("Running freebsd build, arguments will be ignored") - run_vagrant_box_with_env(image_name, args.output_dir, ch_root) + run_docker_image_with_env(image_name, args.output_dir, env_prepared, ch_root, args.ccache_dir) logging.info("Output placed into {}".format(args.output_dir)) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 119d90665aa..bf48fe467ca 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -42,9 +42,11 @@ function configure rm db0/metadata/system/* -rf ||: # Make copies of the original db for both servers. Use hardlinks instead - # of copying. + # of copying. Be careful to remove preprocessed configs or it can lead to + # weird effects. rm -r left/db ||: rm -r right/db ||: + rm -r db0/preprocessed_configs ||: cp -al db0/ left/db/ cp -al db0/ right/db/ } @@ -97,10 +99,6 @@ function run_tests touch "$x" done - # FIXME remove some broken long tests - rm "$test_prefix"/{IPv4,IPv6,modulo,parse_engine_file,number_formatting_formats,select_format}.xml ||: - - test_files=$(ls "$test_prefix"/*.xml) # FIXME a quick crutch to bring the run time down for the unstable tests -- # if some performance tests xmls were changed in a PR, run only these ones. @@ -111,7 +109,7 @@ function run_tests # and not always correct (e.g. when the reference SHA is really old and # has some other differences to the tested SHA, besides the one introduced # by the PR). 
- test_files_override=$(sed "s/dbms\/tests\/performance/${test_prefix//\//\\/}/" changed-tests.txt) + test_files_override=$(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-tests.txt) if [ "$test_files_override" != "" ] then test_files=$test_files_override @@ -126,6 +124,17 @@ function run_tests test_files=$(ls "$test_prefix"/$CHPC_TEST_GLOB.xml) fi + if [ "$test_files" == "" ] + then + # FIXME remove some broken long tests + for test_name in {IPv4,IPv6,modulo,parse_engine_file,number_formatting_formats,select_format,arithmetic,cryptographic_hashes,logical_functions_{medium,small}} + do + printf "$test_name\tMarked as broken (see compare.sh)\n" >> skipped-tests.tsv + rm "$test_prefix/$test_name.xml" ||: + done + test_files=$(ls "$test_prefix"/*.xml) + fi + # Run the tests. test_name="" for test in $test_files @@ -141,7 +150,7 @@ function run_tests TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n") # the grep is to filter out set -x output and keep only time output - { time "$script_dir/perf.py" "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue + { time "$script_dir/perf.py" --host localhost localhost --port 9001 9002 -- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue # The test completed with zero status, so we treat stderr as warnings mv "$test_name-err.log" "$test_name-warn.log" @@ -275,9 +284,11 @@ create table test_times_tsv engine File(TSV, 'test-times.tsv') as from test_time join wall_clock using test order by avg_real_per_query desc; -create table all_queries_tsv engine File(TSV, 'all-queries.tsv') as - select left, right, diff, rd, test, query - from queries order by rd[3] desc; +create table all_tests_tsv engine File(TSV, 'all-queries.tsv') as + select left, right, diff, + floor(left > right ? left / right : right / left, 3), + rd, test, query + from queries order by test, query; " 2> >(head -2 >> report-errors.rep) ||: for version in {right,left} @@ -397,7 +408,7 @@ unset IFS # Remember that grep sets error code when nothing is found, hence the bayan # operator. -grep -H -m2 '\(Exception\|Error\):[^:]' ./*-err.log | sed 's/:/\t/' > run-errors.tsv ||: +grep -H -m2 -i '\(Exception\|Error\):[^:]' ./*-err.log | sed 's/:/\t/' > run-errors.tsv ||: } case "$stage" in @@ -429,6 +440,7 @@ case "$stage" in "report") time report ||: + time "$script_dir/report.py" --report=all-queries > all-queries.html 2> >(head -2 >> report-errors.rep) ||: time "$script_dir/report.py" > report.html ;& esac diff --git a/docker/test/performance-comparison/entrypoint.sh b/docker/test/performance-comparison/entrypoint.sh index 4176a1b1d7d..dc0480715d0 100755 --- a/docker/test/performance-comparison/entrypoint.sh +++ b/docker/test/performance-comparison/entrypoint.sh @@ -82,7 +82,7 @@ if [ "$REF_PR" == "" ]; then echo Reference PR is not specified ; exit 1 ; fi ) | tee right-commit.txt # Prepare the list of changed tests for use by compare.sh -git -C ch diff --name-only "$SHA_TO_TEST" "$(git -C ch merge-base "$SHA_TO_TEST"~ master)" -- dbms/tests/performance | tee changed-tests.txt +git -C ch diff --name-only "$SHA_TO_TEST" "$(git -C ch merge-base "$SHA_TO_TEST"~ master)" -- tests/performance | tee changed-tests.txt # Set python output encoding so that we can print queries with Russian letters. 
export PYTHONIOENCODING=utf-8 @@ -90,17 +90,23 @@ export PYTHONIOENCODING=utf-8 # Use a default number of runs if not told otherwise export CHPC_RUNS=${CHPC_RUNS:-7} +# By default, use the main comparison script from the tested package, so that we +# can change it in PRs. +script_path="right/scripts" +if [ -v CHPC_LOCAL_SCRIPT ] +then + script_path=".." +fi + # Even if we have some errors, try our best to save the logs. set +e -# Use main comparison script from the tested package, so that we can change it -# in PRs. # Older version use 'kill 0', so put the script into a separate process group # FIXME remove set +m in April 2020 set +m { \ time ../download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \ - time stage=configure right/scripts/compare.sh ; \ + time stage=configure "$script_path"/compare.sh ; \ } 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee compare.log set -m diff --git a/docker/test/performance-comparison/perf.py b/docker/test/performance-comparison/perf.py index dc516d7029e..55d93f89c6e 100755 --- a/docker/test/performance-comparison/perf.py +++ b/docker/test/performance-comparison/perf.py @@ -23,8 +23,8 @@ report_stage_end('start') parser = argparse.ArgumentParser(description='Run performance test.') # Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set. parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') -parser.add_argument('--host', nargs='*', default=['127.0.0.1', '127.0.0.1'], help="Server hostname. Parallel to '--port'.") -parser.add_argument('--port', nargs='*', default=[9001, 9002], help="Server port. Parallel to '--host'.") +parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.") +parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.") parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.') parser.add_argument('--no-long', type=bool, default=True, help='Skip the tests tagged as long.') args = parser.parse_args() diff --git a/docker/test/performance-comparison/performance_comparison.md b/docker/test/performance-comparison/performance_comparison.md index 7c5172bf110..7407702b475 100644 --- a/docker/test/performance-comparison/performance_comparison.md +++ b/docker/test/performance-comparison/performance_comparison.md @@ -50,7 +50,7 @@ More stages are available, e.g. restart servers or run the tests. See the code. 
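The `CHPC_LOCAL_SCRIPT` switch introduced above lets entrypoint.sh pick up compare.sh from the checkout (`..`) instead of from the tested package (`right/scripts`), which helps when iterating on the comparison scripts themselves. A sketch of launching a run this way — the PR numbers and SHAs are placeholders, and the variables other than `CHPC_LOCAL_SCRIPT` are the ones the surrounding entrypoint.sh already reads:

```bash
#!/usr/bin/env bash
set -euo pipefail

export CHPC_LOCAL_SCRIPT=1   # [ -v ... ] only checks that the variable is set
export CHPC_RUNS=3           # fewer runs for a quick local iteration (default 7)

# Placeholder values; entrypoint.sh exits early when REF_PR is empty
# and passes all four to download.sh.
export REF_PR=0
export REF_SHA=1111111111111111111111111111111111111111
export PR_TO_TEST=0
export SHA_TO_TEST=2222222222222222222222222222222222222222

./entrypoint.sh
```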
#### Run a single test on the already configured servers ``` -docker/test/performance-comparison/perf.py --host=localhost --port=9000 --runs=1 dbms/tests/performance/logical_functions_small.xml +docker/test/performance-comparison/perf.py --host=localhost --port=9000 --runs=1 tests/performance/logical_functions_small.xml ``` ### References diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index c8ce51e158a..84b0239ccda 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -1,5 +1,6 @@ #!/usr/bin/python3 +import argparse import ast import collections import csv @@ -8,6 +9,11 @@ import os import sys import traceback +parser = argparse.ArgumentParser(description='Create performance test report') +parser.add_argument('--report', default='main', choices=['main', 'all-queries'], + help='Which report to build') +args = parser.parse_args() + report_errors = [] error_tests = 0 slow_average_tests = 0 @@ -16,7 +22,7 @@ slower_queries = 0 unstable_queries = 0 very_unstable_queries = 0 -print(""" +header_template = """ - {% endif %} - - {% endblock %} - {% for path in extra_css %} - - {% endfor %} - {% block extrahead %}{% endblock %} - - {% set direction = config.theme.direction %} - {% if palette.primary or palette.accent %} - {% set primary = palette.primary | replace(" ", "-") | lower %} - {% set accent = palette.accent | replace(" ", "-") | lower %} - - {% else %} - - {% endif %} - - - {% set platform = config.extra.repo_icon or config.repo_url %} - {% if "github" in platform %} - {% include "assets/images/icons/github.a4034fb1.svg" %} - {% elif "gitlab" in platform %} - {% include "assets/images/icons/gitlab.d80e5efc.svg" %} - {% elif "bitbucket" in platform %} - {% include "assets/images/icons/bitbucket.4ebea66e.svg" %} - {% endif %} - - - - - - {% block header %} - {% include "partials/header.html" %} - {% endblock %} -
- {% block hero %} - {% if page and page.meta and page.meta.hero %} - {% include "partials/hero.html" with context %} - {% endif %} - {% endblock %} - {% if feature.tabs %} - {% include "partials/tabs.html" %} - {% endif %} -
-
- {% block site_nav %} - {% if nav %} -
-
-
- {% if not config.extra.single_page %} - {% include "partials/nav.html" %} - {% else %} - {% include "partials/toc.html" %} - {% endif %} -
-
-
- {% endif %} - {% if page.toc %} -
-
-
- {% if not config.extra.single_page %} - {% include "partials/toc.html" %} - {% else %} - {% include "partials/nav.html" %} - {% endif %} -
-
-
- {% endif %} - {% endblock %} -
-
- {% block content %} - {% if config.extra.single_page %} - - {% else %} - {% if page.edit_url %} - - {% endif %} - {% endif %} - {% if not "\x3ch1" in page.content %} -

-{{ page.title | default(config.site_name, true)}}

- {% endif %} -
- {% if not config.extra.single_page %} - {{ page.content }} - {% endif %} -
- {% block source %} - {% if page and page.meta and page.meta.source %} -

{{ lang.t("meta.source") }}

- {% set path = page.meta.path | default([""]) %} - {% set file = page.meta.source %} - - {{ file }} - - {% endif %} - {% endblock %} - {% endblock %} - {% block disqus %} - {% if config.extra.disqus and not page.is_homepage %} -

{{ lang.t("meta.comments") }}

- {% include "partials/integrations/disqus.html" %} - {% endif %} - {% endblock %} -
-
-
-
- {% block footer %} - {% include "partials/footer.html" %} - {% endblock %} -
- {% block scripts %} - {% block libs %} - - {% endblock %} - - {% if lang.t("search.language") != "en" %} - {% set languages = lang.t("search.language").split(",") %} - {% if languages | length and languages[0] != "" %} - {% set path = base_url + "/assets/javascripts/lunr" %} - - {% for language in languages | map("trim") %} - {% if language != "en" %} - {% if language == "jp" %} - - {% endif %} - - {% endif %} - {% endfor %} - {% if languages | length > 0 %} - - {% endif %} - {% endif %} - {% endif %} - {% for path in extra_javascript %} - - {% endfor %} - {% endblock %} - {% block analytics %} - {% if config.google_analytics %} - {% include "partials/integrations/analytics.html" %} - {% endif %} - {% endblock %} - - - - - - - - - - - diff --git a/docs/tools/mkdocs-material-theme/main.html b/docs/tools/mkdocs-material-theme/main.html deleted file mode 100644 index 94d9808cc76..00000000000 --- a/docs/tools/mkdocs-material-theme/main.html +++ /dev/null @@ -1 +0,0 @@ -{% extends "base.html" %} diff --git a/docs/tools/mkdocs-material-theme/mkdocs_theme.yml b/docs/tools/mkdocs-material-theme/mkdocs_theme.yml deleted file mode 100644 index c7e3e28c3f1..00000000000 --- a/docs/tools/mkdocs-material-theme/mkdocs_theme.yml +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2016-2017 Martin Donath - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
- -# Language for theme localization -language: en - -# Feature flags for functionality that alters behavior significantly, and thus -# may be a matter of taste -feature: - - # Another layer on top of the main navigation for larger screens in the form - # of tabs, especially useful for larger documentation projects - tabs: false - -# Sets the primary and accent color palettes as defined in the Material Design -# documentation - possible values can be looked up in the getting started guide -palette: - - # Primary color used for header, sidebar and links, default: indigo - primary: - - # Accent color for highlighting user interaction, default: indigo - accent: - -# Fonts used by Material, automatically loaded from Google Fonts - see the site -# for a list of available fonts -font: - - # Default font for text - text: Roboto - - # Fixed-width font for code listings - code: Roboto Mono - -# Favicon to be rendered -favicon: assets/images/favicon.png - -# The logo of the documentation shown in the header and navigation can either -# be a Material Icon ligature (see https://material.io/icons/) or an image URL -logo: - icon: "\uE80C" - -# Material includes the search in the header as a partial, not as a separate -# template, so it's correct that search.html is missing -include_search_page: false - -# Material doesn't use MkDocs search functionality but provides its own. For -# this reason, only the search index needs to be built -search_index_only: true - -# Static pages to build -static_templates: - - 404.html diff --git a/docs/tools/mkdocs-material-theme/partials/flags.html b/docs/tools/mkdocs-material-theme/partials/flags.html deleted file mode 100644 index c7b06fbc4d0..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/flags.html +++ /dev/null @@ -1,8 +0,0 @@ -{% set alt_langs = [['en', 'English'], ['ru', 'Russian'], ['zh', 'Chinese'], ['ja', 'Japanese'], ['fa', 'Farsi']] %} -{% for alt_lang, alt_title in alt_langs %} - - {% include "assets/flags/" + alt_lang + ".svg" %} - -{% endfor %} diff --git a/docs/tools/mkdocs-material-theme/partials/footer.html b/docs/tools/mkdocs-material-theme/partials/footer.html deleted file mode 100644 index b6cff19e6eb..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/footer.html +++ /dev/null @@ -1,63 +0,0 @@ -{% import "partials/language.html" as lang with context %} -
- {% if page.previous_page or page.next_page %} - - {% endif %} - -
diff --git a/docs/tools/mkdocs-material-theme/partials/header.html b/docs/tools/mkdocs-material-theme/partials/header.html deleted file mode 100644 index c122cf4ee59..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/header.html +++ /dev/null @@ -1,50 +0,0 @@ -
- - -
diff --git a/docs/tools/mkdocs-material-theme/partials/hero.html b/docs/tools/mkdocs-material-theme/partials/hero.html deleted file mode 100644 index d0c534fe229..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/hero.html +++ /dev/null @@ -1,9 +0,0 @@ -{% set class = "md-hero" %} -{% if not feature.tabs %} - {% set class = "md-hero md-hero--expand" %} -{% endif %} -
-
- {{ page.meta.hero }} -
-
diff --git a/docs/tools/mkdocs-material-theme/partials/integrations/analytics.html b/docs/tools/mkdocs-material-theme/partials/integrations/analytics.html deleted file mode 100644 index 2b0fcdfdc40..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/integrations/analytics.html +++ /dev/null @@ -1 +0,0 @@ - diff --git a/docs/tools/mkdocs-material-theme/partials/integrations/disqus.html b/docs/tools/mkdocs-material-theme/partials/integrations/disqus.html deleted file mode 100644 index 5f003ca41d9..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/integrations/disqus.html +++ /dev/null @@ -1,14 +0,0 @@ -
- diff --git a/docs/tools/mkdocs-material-theme/partials/language.html b/docs/tools/mkdocs-material-theme/partials/language.html deleted file mode 100644 index 278339b74a8..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language.html +++ /dev/null @@ -1,9 +0,0 @@ -{% import "partials/language/" + config.theme.language + ".html" as lang %} -{% macro t(key) %}{{ { - "search.language": ( - config.extra.search | default({}) - ).language | default(config.theme.language, true), - "search.tokenizer": ( - config.extra.search | default({}) - ).tokenizer | default("", true), -}[key] or lang.t(key) }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/da.html b/docs/tools/mkdocs-material-theme/partials/language/da.html deleted file mode 100644 index e123b499860..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/da.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "da", - "clipboard.copy": "Kopiér til udklipsholderen", - "clipboard.copied": "Kopieret til udklipsholderen", - "edit.link.title": "Redigér denne side", - "footer.previous": "Forrige", - "footer.next": "Næste", - "meta.comments": "Kommentarer", - "meta.source": "Kilde", - "search.placeholder": "Søg", - "search.result.placeholder": "Indtask søgeord", - "search.result.none": "Ingen resultater fundet", - "search.result.one": "1 resultat", - "search.result.other": "# resultater", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Åbn arkiv", - "toc.title": "Indholdsfortegnelse" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/de.html b/docs/tools/mkdocs-material-theme/partials/language/de.html deleted file mode 100644 index e5bbe53dfd0..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/de.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "de", - "clipboard.copy": "In Zwischenablage kopieren", - "clipboard.copied": "In Zwischenablage kopiert", - "edit.link.title": "Seite editieren", - "footer.previous": "Vorherige Seite", - "footer.next": "Nächste Seite", - "meta.comments": "Kommentare", - "meta.source": "Quellcode", - "search.placeholder": "Suche", - "search.result.placeholder": "Suchbegriff eingeben", - "search.result.none": "Keine Suchergebnisse", - "search.result.one": "1 Suchergebnis", - "search.result.other": "# Suchergebnisse", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Quellcode", - "toc.title": "Inhaltsverzeichnis" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/en.html b/docs/tools/mkdocs-material-theme/partials/language/en.html deleted file mode 100644 index 47e40c800bd..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/en.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "en", - "clipboard.copy": "Copy to clipboard", - "clipboard.copied": "Copied to clipboard", - "edit.link.title": "Edit this page", - "footer.previous": "Previous", - "footer.next": "Next", - "meta.comments": "Comments", - "meta.source": "Source", - "nav.latest": "master", - "nav.multi_page": "Multi page version", - "nav.pdf": "PDF version", - "nav.release": "Release", - "nav.single_page": "Single page version", - "nav.source": "ClickHouse source code", - "search.placeholder": "Search", - "search.result.placeholder": "Type to start searching", - "search.result.none": "No matching documents", - "search.result.one": "1 matching document", - "search.result.other": "# matching documents", - 
"search.tokenizer": "[\s\-]+", - "source.link.title": "Go to repository", - "toc.title": "Table of contents" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/es.html b/docs/tools/mkdocs-material-theme/partials/language/es.html deleted file mode 100644 index 1e2dbf68fa4..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/es.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "es", - "clipboard.copy": "Copiar al portapapeles", - "clipboard.copied": "Copiado al portapapeles", - "edit.link.title": "Editar esta página", - "footer.previous": "Anterior", - "footer.next": "Siguiente", - "meta.comments": "Comentarios", - "meta.source": "Fuente", - "search.placeholder": "Búsqueda", - "search.result.placeholder": "Teclee para comenzar búsqueda", - "search.result.none": "No se encontraron documentos", - "search.result.one": "1 documento encontrado", - "search.result.other": "# documentos encontrados", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Ir al repositorio", - "toc.title": "Tabla de contenidos" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/fa.html b/docs/tools/mkdocs-material-theme/partials/language/fa.html deleted file mode 100644 index b321e1319b8..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/fa.html +++ /dev/null @@ -1,28 +0,0 @@ -{% macro t(key) %}{{ { - "language": "fa", - "direction": "rtl", - "clipboard.copy": "کپی کردن", - "clipboard.copied": "کپی شد", - "edit.link.title": "این صفحه را ویرایش کنید", - "footer.previous": "قبلی", - "footer.next": "بعدی", - "meta.comments": "نظرات", - "meta.source": "منبع", - "nav.latest": "آخرین", - "nav.multi_page": "نسخه چند صفحه ای", - "nav.pdf": "نسخه PDF", - "nav.release": "رهایی", - "nav.single_page": "نسخه تک صفحه", - "nav.source": "کد منبع کلیک", - "search.language": "", - "search.pipeline.stopwords": false, - "search.pipeline.trimmer": false, - "search.placeholder": "جستجو", - "search.result.placeholder": "برای شروع جستجو تایپ کنید", - "search.result.none": "سندی یافت نشد", - "search.result.one": "1 سند یافت شد", - "search.result.other": "# سند یافت شد", - "skip.link.title": "پرش به محتویات", - "source.link.title": "رفتن به مخزن", - "toc.title": "فهرست موضوعات" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/fr.html b/docs/tools/mkdocs-material-theme/partials/language/fr.html deleted file mode 100644 index 87d7faa99fd..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/fr.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "fr", - "clipboard.copy": "Copier dans le presse-papier", - "clipboard.copied": "Copié dans le presse-papier", - "edit.link.title": "Editer cette page", - "footer.previous": "Précédent", - "footer.next": "Suivant", - "meta.comments": "Commentaires", - "meta.source": "Source", - "search.placeholder": "Rechercher", - "search.result.placeholder": "Taper pour démarrer la recherche", - "search.result.none": "Aucun document trouvé", - "search.result.one": "1 document trouvé", - "search.result.other": "# documents trouvés", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Aller au dépôt", - "toc.title": "Table des matières" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/it.html b/docs/tools/mkdocs-material-theme/partials/language/it.html deleted file mode 100644 index d9fe6fe745b..00000000000 --- 
a/docs/tools/mkdocs-material-theme/partials/language/it.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "it", - "clipboard.copy": "Copia", - "clipboard.copied": "Copiato", - "edit.link.title": "Modifica", - "footer.previous": "Precedente", - "footer.next": "Prossimo", - "meta.comments": "Commenti", - "meta.source": "Sorgente", - "search.placeholder": "Cerca", - "search.result.placeholder": "Scrivi per iniziare a cercare", - "search.result.none": "Nessun documento trovato", - "search.result.one": "1 documento trovato", - "search.result.other": "# documenti trovati", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Apri repository", - "toc.title": "Indice" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/ja.html b/docs/tools/mkdocs-material-theme/partials/language/ja.html deleted file mode 100644 index 47341ab06ee..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/ja.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "ja", - "clipboard.copy": "クリップボードへコピー", - "clipboard.copied": "コピーしました", - "edit.link.title": "編集", - "footer.previous": "前", - "footer.next": "次", - "meta.comments": "コメント", - "meta.source": "ソース", - "nav.latest": "master", - "nav.multi_page": "マルチページ版", - "nav.pdf": "PDF版", - "nav.release": "リリース", - "nav.single_page": "シングルページ版", - "nav.source": "ClickHouseソースコード", - "search.placeholder": "検索", - "search.result.placeholder": "検索キーワードを入力してください", - "search.result.none": "何も見つかりませんでした", - "search.result.one": "1件見つかりました", - "search.result.other": "#件見つかりました", - "search.tokenizer": "[\s\- 、。,.]+", - "source.link.title": "リポジトリへ", - "toc.title": "目次" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/kr.html b/docs/tools/mkdocs-material-theme/partials/language/kr.html deleted file mode 100644 index 27163eb0bd4..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/kr.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "kr", - "clipboard.copy": "클립보드로 복사", - "clipboard.copied": "클립보드에 복사됨", - "edit.link.title": "이 페이지를 편집", - "footer.previous": "이전", - "footer.next": "다음", - "meta.comments": "댓글", - "meta.source": "출처", - "search.placeholder": "검색", - "search.result.placeholder": "검색어를 입력하세요", - "search.result.none": "검색어와 일치하는 문서가 없습니다", - "search.result.one": "1개의 일치하는 문서", - "search.result.other": "#개의 일치하는 문서", - "search.tokenizer": "[\s\-]+", - "source.link.title": "저장소로 이동", - "toc.title": "목차" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/no.html b/docs/tools/mkdocs-material-theme/partials/language/no.html deleted file mode 100644 index 63484a9726b..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/no.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "no", - "clipboard.copy": "Kopier til utklippstavlen", - "clipboard.copied": "Kopiert til utklippstavlen", - "edit.link.title": "Rediger denne siden", - "footer.previous": "Forrige", - "footer.next": "Neste", - "meta.comments": "Kommentarer", - "meta.source": "Kilde", - "search.placeholder": "Søk", - "search.result.placeholder": "Skriv søkeord", - "search.result.none": "Ingen treff", - "search.result.one": "1 treff", - "search.result.other": "# treff", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Gå til kilde", - "toc.title": "Innholdsfortegnelse" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/pl.html 
b/docs/tools/mkdocs-material-theme/partials/language/pl.html deleted file mode 100644 index 54889e5c35e..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/pl.html +++ /dev/null @@ -1 +0,0 @@ -{% macro t(key) %}{{ { "language": "pl", "clipboard.copy": "Kopiuj do schowka", "clipboard.copied": "Skopiowane", "edit.link.title": "Edytuj tę stronę", "footer.previous": "Poprzednia strona", "footer.next": "Następna strona", "meta.comments": "Komentarze", "meta.source": "Kod źródłowy", "search.placeholder": "Szukaj", "search.result.placeholder": "Zacznij pisać, aby szukać", "search.result.none": "Brak wyników wyszukiwania", "search.result.one": "Wyniki wyszukiwania: 1", "search.result.other": "Wyniki wyszukiwania: #", "search.tokenizer": "[\s\-]+", "source.link.title": "Idź do repozytorium", "toc.title": "Spis treści" }[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/pt.html b/docs/tools/mkdocs-material-theme/partials/language/pt.html deleted file mode 100644 index 2e43fc9ed71..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/pt.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "pt", - "clipboard.copy": "Copiar para área de transferência", - "clipboard.copied": "Copiado para área de transferência", - "edit.link.title": "Editar esta página", - "footer.previous": "Anterior", - "footer.next": "Próximo", - "meta.comments": "Comentários", - "meta.source": "Fonte", - "search.placeholder": "Buscar", - "search.result.placeholder": "Digite para iniciar a busca", - "search.result.none": "Nenhum resultado encontrado", - "search.result.one": "1 resultado encontrado", - "search.result.other": "# resultados encontrados", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Ir ao repositório", - "toc.title": "Índice" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/ru.html b/docs/tools/mkdocs-material-theme/partials/language/ru.html deleted file mode 100644 index eb8a31e86a4..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/ru.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "ru", - "clipboard.copy": "Копировать в буфер", - "clipboard.copied": "Скопировано в буфер", - "edit.link.title": "Редактировать страницу", - "footer.previous": "Назад", - "footer.next": "Вперед", - "meta.comments": "Комментарии", - "meta.source": "Исходный код", - "nav.latest": "последний", - "nav.multi_page": "Многостраничная версия", - "nav.pdf": "PDF версия", - "nav.release": "Релиз", - "nav.single_page": "Одностраничная версия", - "nav.source": "Исходный код ClickHouse", - "search.placeholder": "Поиск", - "search.result.placeholder": "Начните печатать для поиска", - "search.result.none": "Совпадений не найдено", - "search.result.one": "Найдено 1 совпадение", - "search.result.other": "Найдено # совпадений", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Перейти к репозиторию", - "toc.title": "Содержание" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/sv.html b/docs/tools/mkdocs-material-theme/partials/language/sv.html deleted file mode 100644 index 1d164713eba..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/sv.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "sv", - "clipboard.copy": "Kopiera till urklipp", - "clipboard.copied": "Kopierat till urklipp", - "edit.link.title": "Redigera sidan", - "footer.previous": "Föregående", - "footer.next": "Nästa", - 
"meta.comments": "Kommentarer", - "meta.source": "Källa", - "search.placeholder": "Sök", - "search.result.placeholder": "Skriv sökord", - "search.result.none": "Inga sökresultat", - "search.result.one": "1 sökresultat", - "search.result.other": "# sökresultat", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Gå till datakatalog", - "toc.title": "Innehållsförteckning" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/zh-Hant.html b/docs/tools/mkdocs-material-theme/partials/language/zh-Hant.html deleted file mode 100644 index f13eca9be8e..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/zh-Hant.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "zh-Hant", - "clipboard.copy": "拷貝", - "clipboard.copied": "已拷貝", - "edit.link.title": "編輯此頁", - "footer.previous": "前進", - "footer.next": "後退", - "meta.comments": "評論", - "meta.source": "來源", - "search.placeholder": "搜尋", - "search.result.placeholder": "鍵入以開始檢索", - "search.result.none": "沒有找到符合條件的結果", - "search.result.one": "找到 1 个符合條件的結果", - "search.result.other": "# 個符合條件的結果", - "search.tokenizer": "[\,\。]+", - "source.link.title": "前往 Github 倉庫", - "toc.title": "目錄" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/zh.html b/docs/tools/mkdocs-material-theme/partials/language/zh.html deleted file mode 100644 index 36f681c8a0e..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/zh.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "zh", - "clipboard.copy": "复制", - "clipboard.copied": "已复制", - "edit.link.title": "编辑此页", - "footer.previous": "后退", - "footer.next": "前进", - "meta.comments": "评论", - "meta.source": "来源", - "nav.latest": "最新", - "nav.multi_page": "多页版本", - "nav.pdf": "PDF版本", - "nav.release": "发布", - "nav.single_page": "单页版本", - "nav.source": "ClickHouse源代码", - "search.placeholder": "搜索", - "search.result.placeholder": "键入以开始搜索", - "search.result.none": "没有找到符合条件的结果", - "search.result.one": "找到 1 个符合条件的结果", - "search.result.other": "# 个符合条件的结果", - "search.tokenizer": "[\,\。]+", - "source.link.title": "前往 Github 仓库", - "toc.title": "目录" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/nav-item.html b/docs/tools/mkdocs-material-theme/partials/nav-item.html deleted file mode 100644 index 4b31a079912..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/nav-item.html +++ /dev/null @@ -1,56 +0,0 @@ -{% if nav_item.title != "hidden" %} -{% set class = "md-nav__item" %} -{% if nav_item.active %} - {% set class = "md-nav__item md-nav__item--active" %} -{% endif %} -{% if nav_item.children %} -
- {% if nav_item.active %} - - {% else %} - - {% endif %} - - -
-{% elif nav_item == page %} -
- {% set toc_ = page.toc %} - - {% if toc_ | first is defined and "\x3ch1 id=" in page.content %} - {% set toc_ = (toc_ | first).children %} - {% endif %} - {% if toc_ | first is defined %} - - {% endif %} - - {{ nav_item.title }} - - {% if toc_ | first is defined %} - {% include "partials/toc.html" %} - {% endif %} -
-{% else %} -
- - {{ nav_item.title }} - -
  • -{% endif %} -{% endif %} diff --git a/docs/tools/mkdocs-material-theme/partials/nav.html b/docs/tools/mkdocs-material-theme/partials/nav.html deleted file mode 100644 index 9bf1076c349..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/nav.html +++ /dev/null @@ -1,55 +0,0 @@ - diff --git a/docs/tools/mkdocs-material-theme/partials/search.html b/docs/tools/mkdocs-material-theme/partials/search.html deleted file mode 100644 index 84428bdf69c..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/search.html +++ /dev/null @@ -1,21 +0,0 @@ -{% import "partials/language.html" as lang with context %} - diff --git a/docs/tools/mkdocs-material-theme/partials/social.html b/docs/tools/mkdocs-material-theme/partials/social.html deleted file mode 100644 index b990921bb8a..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/social.html +++ /dev/null @@ -1,3 +0,0 @@ - diff --git a/docs/tools/mkdocs-material-theme/partials/source.html b/docs/tools/mkdocs-material-theme/partials/source.html deleted file mode 100644 index 48d4eb1aaff..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/source.html +++ /dev/null @@ -1,25 +0,0 @@ -{% import "partials/language.html" as lang with context %} -{% set platform = config.extra.repo_icon or config.repo_url %} -{% if "github" in platform %} - {% set repo_type = "github" %} -{% elif "gitlab" in platform %} - {% set repo_type = "gitlab" %} -{% elif "bitbucket" in platform %} - {% set repo_type = "bitbucket" %} -{% else %} - {% set repo_type = "" %} -{% endif %} -{% block repo %} - - {% if repo_type %} -
    - - - -
    - {% endif %} -
    - {{ config.repo_name }} -
    -
    -{% endblock %} diff --git a/docs/tools/mkdocs-material-theme/partials/tabs-item.html b/docs/tools/mkdocs-material-theme/partials/tabs-item.html deleted file mode 100644 index 686b5a59b92..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/tabs-item.html +++ /dev/null @@ -1,31 +0,0 @@ -{% if nav_item.is_homepage %} -
- {% if not page.ancestors | length and nav | selectattr("url", page.url) %} - - {{ nav_item.title }} - - {% else %} - - {{ nav_item.title }} - - {% endif %} -
-{% elif nav_item.children and nav_item.children | length > 0 %} - {% set title = title | default(nav_item.title) %} - {% if (nav_item.children | first).children | length > 0 %} - {% set nav_item = nav_item.children | first %} - {% include "partials/tabs-item.html" %} - {% else %} -
- {% if nav_item.active %} - - {{ title }} - - {% else %} - - {{ title }} - - {% endif %} -
- {% endif %} -{% endif %} diff --git a/docs/tools/mkdocs-material-theme/partials/tabs.html b/docs/tools/mkdocs-material-theme/partials/tabs.html deleted file mode 100644 index e040436bf10..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/tabs.html +++ /dev/null @@ -1,13 +0,0 @@ -{% set class = "md-tabs" %} -{% if page.ancestors | length > 0 %} - {% set class = "md-tabs md-tabs--active" %} -{% endif %} - diff --git a/docs/tools/mkdocs-material-theme/partials/toc-item.html b/docs/tools/mkdocs-material-theme/partials/toc-item.html deleted file mode 100644 index 3b4f4d76cee..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/toc-item.html +++ /dev/null @@ -1,14 +0,0 @@ -
- - {{ toc_item.title }} - - {% if toc_item.children %} - - {% endif %} -
  • diff --git a/docs/tools/mkdocs-material-theme/partials/toc.html b/docs/tools/mkdocs-material-theme/partials/toc.html deleted file mode 100644 index f268ac0c6cf..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/toc.html +++ /dev/null @@ -1,29 +0,0 @@ -{% import "partials/language.html" as lang with context %} - diff --git a/docs/tools/nav.py b/docs/tools/nav.py new file mode 100644 index 00000000000..fe11b21d8e7 --- /dev/null +++ b/docs/tools/nav.py @@ -0,0 +1,62 @@ +import collections +import logging +import os + +import util + + +def find_first_header(content): + for line in content.split('\n'): + if line.startswith('#'): + no_hash = line.lstrip('#') + return no_hash.split('{', 1)[0].strip() + + +def build_nav_entry(root): + if root.endswith('images'): + return None, None, None + result_items = [] + index_meta, index_content = util.read_md_file(os.path.join(root, 'index.md')) + current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title', find_first_header(index_content))) + for filename in os.listdir(root): + path = os.path.join(root, filename) + if os.path.isdir(path): + prio, title, payload = build_nav_entry(path) + if title and payload: + result_items.append((prio, title, payload)) + elif filename.endswith('.md'): + path = os.path.join(root, filename) + meta, content = util.read_md_file(path) + path = path.split('/', 2)[-1] + title = meta.get('toc_title', find_first_header(content)) + if title: + title = title.strip().rstrip('.') + else: + title = meta.get('toc_folder_title', 'hidden') + prio = meta.get('toc_priority', 9999) + logging.debug(f'Nav entry: {prio}, {title}, {path}') + if not content.strip(): + title = 'hidden' + result_items.append((prio, title, path)) + result_items = sorted(result_items, key=lambda x: (x[0], x[1])) + result = collections.OrderedDict([(item[1], item[2]) for item in result_items]) + return index_meta.get('toc_priority', 10000), current_title, result + + +def build_nav(lang, args): + docs_dir = os.path.join(args.docs_dir, lang) + _, _, nav = build_nav_entry(docs_dir) + result = [] + index_key = None + for key, value in nav.items(): + if key and value: + if value == 'index.md': + index_key = key + continue + result.append({key: value}) + if index_key: + key = list(result[0].keys())[0] + result[0][key][index_key] = 'index.md' + result[0][key].move_to_end(index_key, last=False) + print('result', result) + return result diff --git a/docs/tools/release.sh b/docs/tools/release.sh index e0f580c383b..8c61d16966f 100755 --- a/docs/tools/release.sh +++ b/docs/tools/release.sh @@ -4,9 +4,9 @@ set -ex BASE_DIR=$(dirname $(readlink -f $0)) BUILD_DIR="${BASE_DIR}/../build" PUBLISH_DIR="${BASE_DIR}/../publish" -BASE_DOMAIN="${BASE_DOMAIN:-clickhouse.tech}" -GIT_TEST_URI="${GIT_TEST_URI:-git@github.com:ClickHouse/clickhouse.github.io.git}" -GIT_PROD_URI="git@github.com:ClickHouse/clickhouse.github.io.git" +BASE_DOMAIN="${BASE_DOMAIN:-content.clickhouse.tech}" +GIT_TEST_URI="${GIT_TEST_URI:-git@github.com:ClickHouse/clickhouse-website-content.git}" +GIT_PROD_URI="git@github.com:ClickHouse/clickhouse-website-content.git" EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS:---enable-stable-releases --minify}" HISTORY_SIZE="${HISTORY_SIZE:-5}" @@ -44,7 +44,7 @@ then if [[ ! 
-z "${CLOUDFLARE_TOKEN}" ]] then sleep 1m - git diff --stat="9999,9999" --diff-filter=M HEAD~1 | grep '|' | awk '$1 ~ /\.html$/ { if ($3>4) { url="https://'${BASE_DOMAIN}'/"$1; sub(/\/index.html/, "/", url); print "\""url"\""; }}' | split -l 25 /dev/stdin PURGE + git diff --stat="9999,9999" --diff-filter=M HEAD~1 | grep '|' | awk '$1 ~ /\.html$/ { if ($3>4) { url="https://clickhouse.tech/"$1; sub(/\/index.html/, "/", url); print "\""url"\""; }}' | split -l 25 /dev/stdin PURGE for FILENAME in $(ls PURGE*) do POST_DATA=$(cat "${FILENAME}" | sed -n -e 'H;${x;s/\n/,/g;s/^,//;p;}' | awk '{print "{\"files\":["$0"]}";}') diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 33cf57d41bb..8414ae2c533 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -1,8 +1,8 @@ Babel==2.8.0 backports-abc==0.5 backports.functools-lru-cache==1.6.1 -beautifulsoup4==4.8.2 -certifi==2019.11.28 +beautifulsoup4==4.9.0 +certifi==2020.4.5.1 chardet==3.0.4 click==7.1.1 closure==20191111 @@ -10,22 +10,21 @@ cssmin==0.2.0 future==0.18.2 htmlmin==0.1.12 idna==2.9 -Jinja2==2.11.1 +Jinja2==2.11.2 jinja2-highlight==0.6.1 jsmin==2.2.2 livereload==2.6.1 -lunr==0.5.6 Markdown==3.2.1 MarkupSafe==1.1.1 mkdocs==1.1 mkdocs-htmlproofer-plugin==0.0.3 -mkdocs-macros-plugin==0.4.4 -nltk==3.4.5 +mkdocs-macros-plugin==0.4.6 +nltk==3.5 nose==1.3.7 protobuf==3.11.3 numpy==1.18.2 Pygments==2.5.2 -pymdown-extensions==6.3 +pymdown-extensions==7.0 python-slugify==1.2.6 PyYAML==5.3.1 repackage==0.7.3 diff --git a/docs/tools/translate/add_meta_flag.py b/docs/tools/translate/add_meta_flag.py new file mode 100755 index 00000000000..d87aa044faf --- /dev/null +++ b/docs/tools/translate/add_meta_flag.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 + +import sys + +import util + +if __name__ == '__main__': + flag_name = sys.argv[1] + path = sys.argv[2] + meta, content = util.read_md_file(path) + meta[flag_name] = True + util.write_md_file(path, meta, content) diff --git a/docs/tools/translate/filter.py b/docs/tools/translate/filter.py index c56685226e8..b5424f20921 100755 --- a/docs/tools/translate/filter.py +++ b/docs/tools/translate/filter.py @@ -8,10 +8,13 @@ import pandocfilters import slugify import translate +import util is_debug = os.environ.get('DEBUG') is not None +filename = os.getenv('INPUT') + def debug(*args): if is_debug: @@ -33,7 +36,7 @@ def process_buffer(buffer, new_value, item=None, is_header=False): debug(f'Translate: "{text}" -> "{translated_text}"') if text and text[0].isupper() and not translated_text[0].isupper(): - translated_text = translated_text.capitalize() + translated_text = translated_text[0].upper() + translated_text[1:] if text.startswith(' ') and not translated_text.startswith(' '): translated_text = ' ' + translated_text @@ -41,12 +44,22 @@ def process_buffer(buffer, new_value, item=None, is_header=False): if text.endswith(' ') and not translated_text.endswith(' '): translated_text = translated_text + ' ' + if is_header and translated_text.endswith('.'): + translated_text = translated_text.rstrip('.') + title_case = is_header and translate.default_target_language == 'en' and text[0].isupper() - title_case_whitelist = {'a', 'an', 'the', 'and', 'or'} + title_case_whitelist = { + 'a', 'an', 'the', 'and', 'or', 'that', + 'of', 'on', 'for', 'from', 'with', 'to', 'in' + } + is_first_iteration = True for token in translated_text.split(' '): - if title_case and not token.isupper(): - if token not in title_case_whitelist: - token = token.capitalize() + if title_case and 
token.isascii() and not token.isupper(): + if len(token) > 1 and token.lower() not in title_case_whitelist: + token = token[0].upper() + token[1:] + elif not is_first_iteration: + token = token.lower() + is_first_iteration = False new_value.append(pandocfilters.Str(token)) new_value.append(pandocfilters.Space()) @@ -113,7 +126,7 @@ def translate_filter(key, value, _format, _): else: remaining_para_value.append(item) - break_value = [pandocfilters.LineBreak(),pandocfilters.Str(' ' * 4)] + break_value = [pandocfilters.LineBreak(), pandocfilters.Str(' ' * 4)] if admonition_value[-1].get('t') == 'Quoted': text = process_sentence(admonition_value[-1]['c'][-1]) text[0]['c'] = '"' + text[0]['c'] @@ -139,7 +152,24 @@ def translate_filter(key, value, _format, _): return pandocfilters.Str(value[2][0]) except IndexError: pass + value[1] = process_sentence(value[1]) + href = value[2][0] + if not (href.startswith('http') or href.startswith('#')): + anchor = None + attempts = 10 + if '#' in href: + href, anchor = href.split('#', 1) + + if filename: + while attempts and not os.path.exists(href): + href = f'../{href}' + attempts -= 1 + if anchor: + href = f'{href}#{anchor}' + + if attempts: + value[2][0] = href return cls(*value) elif key == 'Header': if value[1][0].islower() and '_' not in value[1][0]: # Preserve some manually specified anchors @@ -155,4 +185,9 @@ def translate_filter(key, value, _format, _): if __name__ == "__main__": - pandocfilters.toJSONFilter(translate_filter) + pwd = os.path.dirname(filename or '.') + if pwd: + with util.cd(pwd): + pandocfilters.toJSONFilter(translate_filter) + else: + pandocfilters.toJSONFilter(translate_filter) diff --git a/docs/tools/translate/normalize-markdown.sh b/docs/tools/translate/normalize-markdown.sh index d25c3ee65b2..7850fa34b1d 100755 --- a/docs/tools/translate/normalize-markdown.sh +++ b/docs/tools/translate/normalize-markdown.sh @@ -7,6 +7,7 @@ trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT INPUT="$1" if [[ ! -L "${INPUT}" ]] then + export INPUT cat "${INPUT}" > "${TEMP_FILE}" "${BASE_DIR}/translate.sh" "en" "${TEMP_FILE}" "${INPUT}" fi diff --git a/docs/tools/translate/remove_machine_translated_meta.py b/docs/tools/translate/remove_machine_translated_meta.py new file mode 100755 index 00000000000..26cfde97f1e --- /dev/null +++ b/docs/tools/translate/remove_machine_translated_meta.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 +import os +import sys +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) +import convert_toc +import util + + +if __name__ == '__main__': + path = sys.argv[1][2:] + convert_toc.init_redirects() + try: + path = convert_toc.redirects[path] + except KeyError: + pass + meta, content = util.read_md_file(path) + if 'machine_translated' in meta: + del meta['machine_translated'] + if 'machine_translated_rev' in meta: + del meta['machine_translated_rev'] + util.write_md_file(path, meta, content) diff --git a/docs/tools/translate/replace-with-translation.sh b/docs/tools/translate/replace-with-translation.sh index 6106b1e1e06..922ac65a921 100755 --- a/docs/tools/translate/replace-with-translation.sh +++ b/docs/tools/translate/replace-with-translation.sh @@ -5,13 +5,13 @@ BASE_DIR=$(dirname $(readlink -f $0)) TEMP_FILE=$(mktemp) trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT TARGET_LANGUAGE="$1" -INPUT="$2" +export INPUT="$2" cat "${INPUT}" > "${TEMP_FILE}" if [[ ! 
-z $SLEEP ]] then sleep $[ ( $RANDOM % 20 ) + 1 ]s fi -git rm -f "${INPUT}" +rm -f "${INPUT}" mkdir -p $(dirname "${INPUT}") || true YANDEX=1 "${BASE_DIR}/translate.sh" "${TARGET_LANGUAGE}" "${TEMP_FILE}" "${INPUT}" git add "${INPUT}" diff --git a/docs/tools/translate/requirements.txt b/docs/tools/translate/requirements.txt index 41b1db836d3..b0ea9603555 100644 --- a/docs/tools/translate/requirements.txt +++ b/docs/tools/translate/requirements.txt @@ -1,9 +1,9 @@ Babel==2.8.0 -certifi==2019.11.28 +certifi==2020.4.5.1 chardet==3.0.4 googletrans==2.4.0 idna==2.9 -Jinja2==2.11.1 +Jinja2==2.11.2 pandocfilters==1.4.2 python-slugify==4.0.0 PyYAML==5.3.1 diff --git a/docs/tools/translate/split_meta.py b/docs/tools/translate/split_meta.py new file mode 100755 index 00000000000..b38b93e10b4 --- /dev/null +++ b/docs/tools/translate/split_meta.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +import os +import subprocess +import sys + +import translate +import util + + +if __name__ == '__main__': + path = sys.argv[1] + content_path = f'{path}.content' + meta_path = f'{path}.meta' + meta, content = util.read_md_file(path) + + target_language = os.getenv('TARGET_LANGUAGE') + if target_language is not None and target_language != 'en': + rev = subprocess.check_output( + 'git rev-parse HEAD', shell=True + ).decode('utf-8').strip() + meta['machine_translated'] = True + meta['machine_translated_rev'] = rev + title = meta.get('toc_title') + if title: + meta['toc_title'] = translate.translate(title, target_language) + folder_title = meta.get('toc_folder_title') + if folder_title: + meta['toc_folder_title'] = translate.translate(folder_title, target_language) + if 'en_copy' in meta: + del meta['en_copy'] + + with open(content_path, 'w') as f: + print(content, file=f) + + util.write_md_file(meta_path, meta, '') diff --git a/docs/tools/translate/translate.py b/docs/tools/translate/translate.py index 5b4dc3f5486..759e5b849d2 100755 --- a/docs/tools/translate/translate.py +++ b/docs/tools/translate/translate.py @@ -63,8 +63,8 @@ def translate_toc(root, lang): def translate_po(): import babel.messages.pofile - base_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'website', 'locale') - for lang in ['en', 'zh', 'es', 'fr', 'ru', 'ja', 'fa']: + base_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'website', 'locale') + for lang in ['en', 'zh', 'es', 'fr', 'ru', 'ja', 'tr', 'fa']: po_path = os.path.join(base_dir, lang, 'LC_MESSAGES', 'messages.po') with open(po_path, 'r') as f: po_file = babel.messages.pofile.read_po(f, locale=lang, domain='messages') diff --git a/docs/tools/translate/translate.sh b/docs/tools/translate/translate.sh index 89225e0cfcd..d9f8501184f 100755 --- a/docs/tools/translate/translate.sh +++ b/docs/tools/translate/translate.sh @@ -6,9 +6,16 @@ OUTPUT=${3:-/dev/stdout} export TARGET_LANGUAGE="$1" export DEBUG TEMP_FILE=$(mktemp) -trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT +export INPUT_PATH="$2" +INPUT_META="${INPUT_PATH}.meta" +INPUT_CONTENT="${INPUT_PATH}.content" + +trap 'rm -f -- "${TEMP_FILE}" "${INPUT_META}" "${INPUT_CONTENT}"' INT TERM HUP EXIT source "${BASE_DIR}/venv/bin/activate" -pandoc "$2" --filter "${BASE_DIR}/filter.py" -o "${TEMP_FILE}" \ + +${BASE_DIR}/split_meta.py "${INPUT_PATH}" + +pandoc "${INPUT_CONTENT}" --filter "${BASE_DIR}/filter.py" -o "${TEMP_FILE}" \ -f "markdown-space_in_atx_header" -t 
"markdown_strict+pipe_tables+markdown_attribute+all_symbols_escapable+backtick_code_blocks+autolink_bare_uris-link_attributes+markdown_attribute+mmd_link_attributes-raw_attribute+header_attributes-grid_tables" \ --atx-headers --wrap=none --columns=99999 --tab-stop=4 perl -pi -e 's/{\\#\\#/{##/g' "${TEMP_FILE}" @@ -19,4 +26,4 @@ then perl -pi -e 's/“/«/gg' "${TEMP_FILE}" perl -pi -e 's/”/»/gg' "${TEMP_FILE}" fi -cat "${TEMP_FILE}" > "${OUTPUT}" +cat "${INPUT_META}" "${TEMP_FILE}" > "${OUTPUT}" diff --git a/docs/tools/translate/update-po.sh b/docs/tools/translate/update-po.sh index bf9d1d4b7fb..f2f4039bcb8 100755 --- a/docs/tools/translate/update-po.sh +++ b/docs/tools/translate/update-po.sh @@ -2,11 +2,11 @@ # Usage: update-po.sh set -ex BASE_DIR=$(dirname $(readlink -f $0)) -WEBSITE_DIR="${BASE_DIR}/../../website" +WEBSITE_DIR="${BASE_DIR}/../../../website" LOCALE_DIR="${WEBSITE_DIR}/locale" MESSAGES_POT="${LOCALE_DIR}/messages.pot" BABEL_INI="${BASE_DIR}/babel-mapping.ini" -LANGS="en zh es fr ru ja fa" +LANGS="en zh es fr ru ja tr fa" source "${BASE_DIR}/venv/bin/activate" cd "${WEBSITE_DIR}" pybabel extract "." -o "${MESSAGES_POT}" -F "${BABEL_INI}" diff --git a/docs/tools/translate/util.py b/docs/tools/translate/util.py new file mode 120000 index 00000000000..7f16d68497e --- /dev/null +++ b/docs/tools/translate/util.py @@ -0,0 +1 @@ +../util.py \ No newline at end of file diff --git a/docs/tools/util.py b/docs/tools/util.py index 3dc58807612..a5a751020f0 100644 --- a/docs/tools/util.py +++ b/docs/tools/util.py @@ -1,3 +1,4 @@ +import collections import contextlib import multiprocessing import os @@ -7,6 +8,8 @@ import socket import tempfile import threading +import yaml + @contextlib.contextmanager def temp_dir(): @@ -57,3 +60,54 @@ def run_function_in_parallel(func, args_list, threads=False): exit_code = process.exitcode if exit_code: sys.exit(exit_code) + + +def read_md_file(path): + in_meta = False + meta = {} + meta_text = [] + content = [] + if os.path.exists(path): + with open(path, 'r') as f: + for line in f: + if line.startswith('---'): + if in_meta: + in_meta = False + meta = yaml.full_load(''.join(meta_text)) + else: + in_meta = True + else: + if in_meta: + meta_text.append(line) + else: + content.append(line) + return meta, ''.join(content) + + +def write_md_file(path, meta, content): + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + with open(path, 'w') as f: + if meta: + print('---', file=f) + yaml.dump(meta, f) + print('---', file=f) + if not content.startswith('\n'): + print('', file=f) + f.write(content) + + +def represent_ordereddict(dumper, data): + value = [] + for item_key, item_value in data.items(): + node_key = dumper.represent_data(item_key) + node_value = dumper.represent_data(item_value) + + value.append((node_key, node_value)) + + return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value) + + +yaml.add_representer(collections.OrderedDict, represent_ordereddict) diff --git a/docs/tools/website.py b/docs/tools/website.py index 9704cf7d5a4..83eef270fc5 100644 --- a/docs/tools/website.py +++ b/docs/tools/website.py @@ -155,7 +155,8 @@ def minify_website(args): with open(path, 'rb') as f: content = f.read().decode('utf-8') if filename.endswith('.html'): - content = htmlmin.minify(content, remove_empty_space=False) + if not content.startswith(' diff --git a/docs/tr/development/build_cross_arm.md b/docs/tr/development/build_cross_arm.md new file mode 100644 index 00000000000..d0d18162d6f --- /dev/null +++ 
b/docs/tr/development/build_cross_arm.md @@ -0,0 +1,43 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 67 +toc_title: "AARCH64 (ARM64) i\xE7in Linux'ta ClickHouse nas\u0131l olu\u015Fturulur)" +--- + +# AARCH64 (ARM64) mimarisi için Linux'ta ClickHouse nasıl oluşturulur {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} + +Bu, Linux makineniz olduğunda ve onu oluşturmak için kullanmak istediğinizde geçerlidir `clickhouse` AARCH64 CPU mimarisi ile başka bir Linux makinede çalışacak ikili. Bu, Linux sunucularında çalışan sürekli entegrasyon kontrolleri için tasarlanmıştır. + +AARCH64 için çapraz yapı, [Inşa talimatları](build.md) önce onları takip et. + +# Clang-8'i Yükle {#install-clang-8} + +Yönergeleri izleyin https://apt.llvm.org / Ubuntu veya Debian kurulumunuz için. +Örneğin, Ubuntu Bionic'te aşağıdaki komutları kullanabilirsiniz: + +``` bash +echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list +sudo apt-get update +sudo apt-get install clang-8 +``` + +# Çapraz Derleme Araç Setini Yükle {#install-cross-compilation-toolset} + +``` bash +cd ClickHouse +mkdir -p build-aarch64/cmake/toolchain/linux-aarch64 +wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz +tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1 +``` + +# ClickHouse İnşa {#build-clickhouse} + +``` bash +cd ClickHouse +mkdir build-arm64 +CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake +ninja -C build-arm64 +``` + +Ortaya çıkan ikili, yalnızca AARCH64 CPU mimarisi ile Linux'ta çalışacaktır. diff --git a/docs/tr/development/build_cross_osx.md b/docs/tr/development/build_cross_osx.md new file mode 100644 index 00000000000..2001ad985e2 --- /dev/null +++ b/docs/tr/development/build_cross_osx.md @@ -0,0 +1,64 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 66 +toc_title: "Mac OS X i\xE7in Linux'ta ClickHouse nas\u0131l olu\u015Fturulur" +--- + +# Mac OS X için Linux'ta ClickHouse nasıl oluşturulur {#how-to-build-clickhouse-on-linux-for-mac-os-x} + +Bu, Linux makineniz olduğunda ve onu oluşturmak için kullanmak istediğinizde geçerlidir `clickhouse` OS X üzerinde çalışacak ikili. bu, Linux sunucularında çalışan sürekli entegrasyon kontrolleri için tasarlanmıştır. Clickhouse'u doğrudan Mac OS X'te oluşturmak istiyorsanız, devam edin [başka bir talimat](build_osx.md). + +Mac OS X için çapraz yapı, [Inşa talimatları](build.md) önce onları takip et. + +# Clang-8'i Yükle {#install-clang-8} + +Yönergeleri izleyin https://apt.llvm.org / Ubuntu veya Debian kurulumunuz için. +Örneğin biyonik için komutlar gibidir: + +``` bash +sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list +sudo apt-get install clang-8 +``` + +# Çapraz Derleme Araç Setini Yükle {#install-cross-compilation-toolset} + +Yüklediğimiz yolu hatırlayalım `cctools` olarak $ {CCTOOLS} + +``` bash +mkdir ${CCTOOLS} + +git clone https://github.com/tpoechtrager/apple-libtapi.git +cd apple-libtapi +INSTALLPREFIX=${CCTOOLS} ./build.sh +./install.sh +cd .. 
+ +git clone https://github.com/tpoechtrager/cctools-port.git +cd cctools-port/cctools +./configure --prefix=${CCTOOLS} --with-libtapi=${CCTOOLS} --target=x86_64-apple-darwin +make install +``` + +Ayrıca, MacOS X SDK'YI çalışma ağacına indirmemiz gerekiyor. + +``` bash +cd ClickHouse +wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz' +mkdir -p build-darwin/cmake/toolchain/darwin-x86_64 +tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1 +``` + +# ClickHouse İnşa {#build-clickhouse} + +``` bash +cd ClickHouse +mkdir build-osx +CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake \ + -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \ + -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \ + -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld +ninja -C build-osx +``` + +Ortaya çıkan ikili bir Mach-O yürütülebilir biçimine sahip olacak ve Linux üzerinde çalıştırılamaz. diff --git a/docs/tr/development/build_osx.md b/docs/tr/development/build_osx.md new file mode 100644 index 00000000000..af750725070 --- /dev/null +++ b/docs/tr/development/build_osx.md @@ -0,0 +1,93 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 65 +toc_title: "Mac OS X \xFCzerinde ClickHouse nas\u0131l olu\u015Fturulur" +--- + +# Mac OS X üzerinde ClickHouse nasıl oluşturulur {#how-to-build-clickhouse-on-mac-os-x} + +Build Mac OS X 10.15 (Catalina) üzerinde çalışmalıdır) + +## Homebrew Yüklemek {#install-homebrew} + +``` bash +$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +``` + +## Gerekli derleyicileri, araçları ve kitaplıkları yükleyin {#install-required-compilers-tools-and-libraries} + +``` bash +$ brew install cmake ninja libtool gettext +``` + +## Checkout ClickHouse Kaynakları {#checkout-clickhouse-sources} + +``` bash +$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git +``` + +veya + +``` bash +$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git + +$ cd ClickHouse +``` + +## ClickHouse İnşa {#build-clickhouse} + +``` bash +$ mkdir build +$ cd build +$ cmake .. -DCMAKE_CXX_COMPILER=`which clang++` -DCMAKE_C_COMPILER=`which clang` +$ ninja +$ cd .. +``` + +## Uyarılar {#caveats} + +Clickhouse-server çalıştırmak istiyorsanız, sistemin maxfiles değişken artırmak için emin olun. + +!!! info "Not" + Sudo kullanmanız gerekecek. + +Bunu yapmak için aşağıdaki dosyayı oluşturun: + +/ Kütüphane / LaunchDaemons / sınırı.maxfiles.plist: + +``` xml + + + + + Label + limit.maxfiles + ProgramArguments + + launchctl + limit + maxfiles + 524288 + 524288 + + RunAtLoad + + ServiceIPC + + + +``` + +Aşağıdaki komutu çalıştırın: + +``` bash +$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +``` + +Başlatmak. + +Çalışıp çalışmadığını kontrol etmek için şunları kullanabilirsiniz `ulimit -n` komut. 
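The caveat above raises the launchd `maxfiles` limit to 524288 and suggests verifying it with `ulimit -n`. As a minimal cross-check (an assumption on my part, not part of these docs: doing it from Python instead of the shell), the standard-library `resource` module reports the same limits:

``` python
# Minimal sketch: read the open-file limits that `ulimit -n` reports.
# `resource` is in the Python standard library on Unix-like systems.
import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print(f'open files: soft={soft}, hard={hard}')
if soft < 524288:
    print('soft limit is below the 524288 these docs configure for clickhouse-server')
```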
+ +[Orijinal makale](https://clickhouse.tech/docs/en/development/build_osx/) diff --git a/docs/tr/development/contrib.md b/docs/tr/development/contrib.md new file mode 100644 index 00000000000..7f09d9bc49b --- /dev/null +++ b/docs/tr/development/contrib.md @@ -0,0 +1,42 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 70 +toc_title: "Kullan\u0131lan \xDC\xE7\xFCnc\xFC Taraf K\xFCt\xFCphaneleri" +--- + +# Kullanılan Üçüncü Taraf Kütüphaneleri {#third-party-libraries-used} + +| Kitaplık | Lisans | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| base64 | [BSD 2-Clause Lisansı](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) | +| artırmak | [Bo Boostost Software Lic 1.0ense 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) | +| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) | +| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) | +| cctz | [Apache Lic 2.0ense 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) | +| çift dönüşüm | [BSD 3-Clause Lisansı](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) | +| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) | +| googletest | [BSD 3-Clause Lisansı](https://github.com/google/googletest/blob/master/LICENSE) | +| h33 | [Apache Lic 2.0ense 2.0](https://github.com/uber/h3/blob/master/LICENSE) | +| hyperscan | [BSD 3-Clause Lisansı](https://github.com/intel/hyperscan/blob/master/LICENSE) | +| libbtrie | [BSD 2-Clause Lisansı](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) | +| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) | +| libdivide | [Zlib Lisansı](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) | +| libgsasl | [LGPL v2. 1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) | +| libhdfs3 | [Apache Lic 2.0ense 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) | +| libmetrohash | [Apache Lic 2.0ense 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) | +| libpcg-rastgele | [Apache Lic 2.0ense 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) | +| libressl | [OpenSSL Lisansı](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) | +| librdkafka | [BSD 2-Clause Lisansı](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) | +| libwidechar\_width | [CC0 1.0 Evrensel](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) | +| llvm | [BSD 3-Clause Lisansı](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) | +| lz4 | [BSD 2-Clause Lisansı](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) | +| mariadb-bağlayıcı-c | [LGPL v2. 
1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) | +| murmurhash | [Kamu Malı](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) | +| pdqsort | [Zlib Lisansı](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) | +| az | [Boost Yazılım Lisansı-Sürüm 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) | +| protobuf | [BSD 3-Clause Lisansı](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) | +| re2 | [BSD 3-Clause Lisansı](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) | +| UnixODBC | [LGPL v2. 1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) | +| zlib-ng | [Zlib Lisansı](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) | +| zstd | [BSD 3-Clause Lisansı](https://github.com/facebook/zstd/blob/dev/LICENSE) | diff --git a/docs/tr/development/developer_instruction.md b/docs/tr/development/developer_instruction.md new file mode 100644 index 00000000000..f9984d9bf20 --- /dev/null +++ b/docs/tr/development/developer_instruction.md @@ -0,0 +1,285 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 61 +toc_title: "Acemi ClickHouse Geli\u015Ftirici Talimat" +--- + +ClickHouse binası Linux, FreeBSD ve Mac OS X üzerinde desteklenmektedir. + +# Windows Kullanıyorsanız {#if-you-use-windows} + +Windows kullanıyorsanız, Ubuntu ile bir sanal makine oluşturmanız gerekir. Bir sanal makine ile çalışmaya başlamak için VirtualBox yükleyin. UB :unt :u'yu web sitesinden indirebilirsiniz: https://www.ubuntu.com/\#download. lütfen indirilen görüntüden bir sanal makine oluşturun (bunun için en az 4GB RAM ayırmalısınız). Ubuntu'da bir komut satırı terminali çalıştırmak için lütfen kelimeyi içeren bir program bulun “terminal” adına (gnome-terminal, konsole vb.)) veya sadece Ctrl+Alt+T tuşlarına basın. + +# 32 bit sistem kullanıyorsanız {#if-you-use-a-32-bit-system} + +ClickHouse çalışamaz veya 32-bit bir sistem üzerinde oluşturun. 64-bit bir sisteme erişim kazanmanız gerekir ve okumaya devam edebilirsiniz. + +# Github'da bir depo oluşturma {#creating-a-repository-on-github} + +ClickHouse repository ile çalışmaya başlamak için bir GitHub hesabına ihtiyacınız olacaktır. + +Muhtemelen zaten bir tane var, ama yapmazsanız, lütfen kayıt olun https://github.com. SSH anahtarlarınız yoksa, bunları üretmeli ve daha sonra Github'a yüklemelisiniz. Bu yamalar üzerinden göndermek için gereklidir. Diğer SSH sunucularıyla kullandığınız aynı SSH anahtarlarını kullanmak da mümkündür - muhtemelen zaten bunlara sahipsiniz. + +ClickHouse deposunun bir çatalı oluşturun. Bunu yapmak için lütfen tıklayın “fork” sağ üst köşedeki düğme https://github.com/ClickHouse/ClickHouse. bu hesabınıza ClickHouse / ClickHouse kendi kopyasını çatal olacaktır. + +Geliştirme süreci ilk ClickHouse sizin çatal içine amaçlanan değişiklikleri işlemekle ve daha sonra bir oluşturma oluşur “pull request” bu değişikliklerin ana depoya kabul edilmesi için (ClickHouse/ClickHouse). + +Git depoları ile çalışmak için lütfen yükleyin `git`. + +Bunu Ubuntu'da yapmak için komut satırı terminalinde çalışırsınız: + + sudo apt update + sudo apt install git + +Git kullanımı ile ilgili kısa bir el kitabı burada bulunabilir: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf. 
+
+# Cloning a repository to your development machine {#cloning-a-repository-to-your-development-machine}
+
+Next, you need to download the source files onto your working machine. This is called “to clone a repository” because it creates a local copy of the repository on your working machine.
+
+In the command-line terminal run:
+
+    git clone --recursive git@github.com:your_github_username/ClickHouse.git
+    cd ClickHouse
+
+Note: please, substitute *your\_github\_username* with what is appropriate!
+
+This command will create a directory `ClickHouse` containing the working copy of the project.
+
+It is important that the path to the working directory contains no whitespaces, as it may lead to problems with running the build system.
+
+Please note that the ClickHouse repository uses `submodules`. That is what the references to additional repositories are called (i.e. external libraries on which the project depends). It means that when cloning the repository you need to specify the `--recursive` flag as in the example above. If the repository has been cloned without submodules, to download them you need to run the following:
+
+    git submodule init
+    git submodule update
+
+You can check the status with the command: `git submodule status`.
+
+If you get the following error message:
+
+    Permission denied (publickey).
+    fatal: Could not read from remote repository.
+
+    Please make sure you have the correct access rights
+    and the repository exists.
+
+It generally means that the SSH keys for connecting to GitHub are missing. These keys are normally located in `~/.ssh`. For SSH keys to be accepted you need to upload them in the settings section of the GitHub UI.
+
+You can also clone the repository via the https protocol:
+
+    git clone https://github.com/ClickHouse/ClickHouse.git
+
+This, however, will not let you send your changes to the server. You can still use it temporarily and add the SSH keys later, replacing the remote address of the repository with the `git remote` command.
+
+You can also add the original ClickHouse repo's address to your local repository to pull updates from there:
+
+    git remote add upstream git@github.com:ClickHouse/ClickHouse.git
+
+After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`.
+
+## Working with submodules {#working-with-submodules}
+
+Working with submodules in git could be painful. The following commands will help to manage it:
+
+    # ! each command accepts --recursive
+    # Update remote URLs for submodules. Barely rare case
+    git submodule sync
+    # Add new submodules
+    git submodule init
+    # Update existing submodules to the current state
+    git submodule update
+    # Two last commands could be merged together
+    git submodule update --init
+
+The next commands would help you to reset all submodules to the initial state (!WARNING! - any changes inside will be deleted):
+
+    # Synchronizes submodules' remote URL with .gitmodules
+    git submodule sync --recursive
+    # Update the registered submodules with initialize not yet initialized
+    git submodule update --init --recursive
+    # Reset all changes done after HEAD
+    git submodule foreach git reset --hard
+    # Clean files from .gitignore
+    git submodule foreach git clean -xfd
+    # Repeat last 4 commands for all submodule
+    git submodule foreach git submodule sync --recursive
+    git submodule foreach git submodule update --init --recursive
+    git submodule foreach git submodule foreach git reset --hard
+    git submodule foreach git submodule foreach git clean -xfd
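+
+For day-to-day work it is often enough to refresh a single submodule rather than all of them; `git submodule update` also accepts an explicit path. A small sketch (the `contrib/poco` path is purely an illustrative example):
+
+    # Update just one submodule instead of the whole set
+    git submodule update --init --recursive contrib/poco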
+
+# Build System {#build-system}
+
+ClickHouse uses CMake and Ninja for building.
+
+CMake - a meta-build system that can generate Ninja files (build tasks).
+Ninja - a smaller build system with a focus on speed, used to execute those cmake-generated tasks.
+
+To install on Ubuntu, Debian or Mint, run `sudo apt install cmake ninja-build`.
+
+On CentOS, RedHat run `sudo yum install cmake ninja-build`.
+
+If you use Arch or Gentoo, you probably know yourself how to install CMake.
+
+For installing CMake and Ninja on Mac OS X, first install Homebrew and then install everything else via brew:
+
+    /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+    brew install cmake ninja
+
+Next, check the version of CMake: `cmake --version`. If it is below 3.3, you should install a newer version from the website: https://cmake.org/download/.
+
+# Optional External Libraries {#optional-external-libraries}
+
+ClickHouse uses several external libraries for building. None of them need to be installed separately, as they are built together with ClickHouse from the sources located in the submodules. You can check the list in `contrib`.
+
+# C++ Compiler {#c-compiler}
+
+Compilers GCC starting from version 9 and Clang version 8 or above are supported for building ClickHouse.
+
+Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). And Clang is usually more convenient for development. Though, our continuous integration (CI) platform runs checks for about a dozen build combinations.
+
+To install GCC on Ubuntu, run: `sudo apt install gcc g++`
+
+Check the version of gcc: `gcc --version`. If it is below 9, then follow the instructions here: https://clickhouse.tech/docs/en/development/build/\#install-gcc-9.
+
+Mac OS X build is supported only for Clang. Just run `brew install llvm`
+
+If you decide to use Clang, you can also install `libc++` and `lld`, if you know what they are. Using `ccache` is also recommended.
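+
+If you want to try `ccache`, one hedged way to wire it in - once you have created the `build` directory described in the next section - is via CMake's compiler-launcher variables (this assumes `ccache` is installed and a CMake version that supports `CMAKE_*_COMPILER_LAUNCHER`; it is a convenience sketch, not a required step):
+
+    sudo apt install ccache
+    # From inside the build directory: route compiler invocations through ccache
+    cmake -D CMAKE_C_COMPILER_LAUNCHER=ccache -D CMAKE_CXX_COMPILER_LAUNCHER=ccache ..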
+
+# The Building Process {#the-building-process}
+
+Now that you are ready to build ClickHouse, we recommend you to create a separate directory `build` inside `ClickHouse` that will contain all of the build artefacts:
+
+    mkdir build
+    cd build
+
+You can have several different directories (build\_release, build\_debug, etc.) for different types of build.
+
+While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify the compiler (the version 9 gcc compiler in this example).
+
+Linux:
+
+    export CC=gcc-9 CXX=g++-9
+    cmake ..
+
+Mac OS X:
+
+    export CC=clang CXX=clang++
+    cmake ..
+
+The `CC` variable specifies the compiler for C (short for C Compiler), and the `CXX` variable instructs which C++ compiler is to be used for building.
+
+For a faster build, you can resort to the `debug` build type - a build with no optimizations. For that supply the following parameter `-D CMAKE_BUILD_TYPE=Debug`:
+
+    cmake -D CMAKE_BUILD_TYPE=Debug ..
+
+You can change the type of build by running this command in the `build` directory.
+
+Run ninja to build:
+
+    ninja clickhouse-server clickhouse-client
+
+Only the required binaries are going to be built in this example.
+
+If you require to build all the binaries (utilities and tests), you should run ninja with no parameters:
+
+    ninja
+
+A full build requires about 30GB of free disk space, or 15GB to build the main binaries.
+
+When only a limited amount of RAM is available on the build machine, you should limit the number of build tasks run in parallel with the `-j` param:
+
+    ninja -j 1 clickhouse-server clickhouse-client
+
+On machines with 4GB of RAM it is recommended to specify 1; for 8GB of RAM `-j 2` is recommended.
+
+If you get the message: `ninja: error: loading 'build.ninja': No such file or directory`, it means that generating a build configuration has failed and you need to inspect the message above.
+
+Upon the successful start of the building process, you'll see the build progress - the number of processed tasks and the total number of tasks.
+
+While building, messages about protobuf files in the libhdfs3 library like `libprotobuf WARNING` may show up. They affect nothing and are safe to ignore.
+
+Upon successful build you get an executable file `ClickHouse/<build_dir>/programs/clickhouse`:
+
+    ls -l programs/clickhouse
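+
+Before moving on, a quick, hedged smoke check of the freshly built binary can be run from the `build` directory (`clickhouse local` evaluates a query without a running server; both commands below are standard modes of the single `clickhouse` binary):
+
+    # Print the version of the just-built client
+    ./programs/clickhouse client --version
+    # Run a trivial query without starting a server
+    ./programs/clickhouse local --query "SELECT 1"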
+
+# Running the built executable of ClickHouse {#running-the-built-executable-of-clickhouse}
+
+To run the server under the current user, you need to navigate to `ClickHouse/programs/server/` (located outside of `build`) and run:
+
+    ../../../build/programs/clickhouse server
+
+In this case, ClickHouse will use config files located in the current directory. You can run `clickhouse server` from any directory, specifying the path to a config file as the command-line parameter `--config-file`.
+
+To connect to ClickHouse with clickhouse-client in another terminal, navigate to `ClickHouse/build/programs/` and run `clickhouse client`.
+
+If you get a `Connection refused` message on Mac OS X or FreeBSD, try specifying host address 127.0.0.1:
+
+    clickhouse client --host 127.0.0.1
+
+You can replace the production version of the ClickHouse binary installed on your system with your custom-built ClickHouse binary. To do that, install ClickHouse on your machine following the instructions from the official website. Next, run the following:
+
+    sudo service clickhouse-server stop
+    sudo cp ClickHouse/build/programs/clickhouse /usr/bin/
+    sudo service clickhouse-server start
+
+Note that `clickhouse-client`, `clickhouse-server` and others are symlinks to the commonly shared `clickhouse` binary.
+
+You can also run your custom-built ClickHouse binary with the config file from the ClickHouse package installed on your system:
+
+    sudo service clickhouse-server stop
+    sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml
+
+# IDE (Integrated Development Environment) {#ide-integrated-development-environment}
+
+If you do not know which IDE to use, we recommend CLion. CLion is commercial software, but it offers a 30-day free trial. It is also free of charge for students. CLion can be used both on Linux and on Mac OS X.
+
+KDevelop and QTCreator are other great alternative IDEs for developing ClickHouse. KDevelop comes in as a very handy IDE, although unstable. If KDevelop crashes after a while upon opening the project, you should click the “Stop All” button as soon as it has opened the list of the project's files. After doing so, KDevelop should be fine to work with.
+
+As simple code editors, you can use Sublime Text or Visual Studio Code, or Kate (all of which are available on Linux).
+
+Just in case, it is worth mentioning that CLion creates the `build` path on its own, it also on its own selects `debug` for the build type, for configuration it uses a version of CMake that is defined in CLion and not the one installed by you, and finally, CLion will use `make` to run build tasks instead of `ninja`. This is normal behaviour, just keep that in mind to avoid confusion.
+
+# Writing Code {#writing-code}
+
+The description of the ClickHouse architecture can be found here: https://clickhouse.tech/docs/en/development/architecture/
+
+The Code Style Guide: https://clickhouse.tech/docs/en/development/style/
+
+Writing tests: https://clickhouse.tech/docs/en/development/tests/
+
+List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md
+
+# Test Data {#test-data}
+
+Developing ClickHouse often requires loading realistic datasets. This is particularly important for performance testing. We have a specially prepared set of anonymized data from Yandex.Metrica. It additionally requires some 3GB of free disk space. Note that this data is not required to accomplish most of the development tasks.
+
+    sudo apt install wget xz-utils
+
+    wget https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz
+    wget https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz
+
+    xz -v -d hits_v1.tsv.xz
+    xz -v -d visits_v1.tsv.xz
+
+    clickhouse-client
+
+    CREATE TABLE test.hits ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID), EventTime);
+
+    CREATE TABLE test.visits ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), `Goals.ID` Array(UInt32), `Goals.Serial` Array(UInt32), `Goals.EventTime` Array(DateTime), `Goals.Price` Array(Int64), `Goals.OrderID` Array(String), `Goals.CurrencyID` Array(UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, `TraficSource.ID` Array(Int8), `TraficSource.SearchEngineID` Array(UInt16), `TraficSource.AdvEngineID` Array(UInt8), `TraficSource.PlaceID` Array(UInt16), `TraficSource.SocialSourceNetworkID` Array(UInt8), `TraficSource.Domain` Array(String), `TraficSource.SearchPhrase` Array(String), `TraficSource.SocialSourcePage` Array(String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `Market.Type` Array(UInt8), `Market.GoalID` Array(UInt32), `Market.OrderID` Array(String), `Market.OrderPrice` Array(Int64), `Market.PP` Array(UInt32), `Market.DirectPlaceID` Array(UInt32), `Market.DirectOrderID` Array(UInt32), `Market.DirectBannerID` Array(UInt32), `Market.GoodID` Array(String), `Market.GoodName` Array(String), `Market.GoodQuantity` Array(Int32), `Market.GoodPrice` Array(Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID);
+
+    clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv
+    clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv
+
+# Creating Pull Request {#creating-pull-request}
+
+Navigate to your fork repository in GitHub's UI. If you have been developing in a branch, you need to select that branch. There will be a “Pull request” button located on the screen. In essence, this means “create a request for accepting my changes into the main repository”.
+
+A pull request can be created even if the work is not completed yet. In this case please put the word “WIP” (work in progress) at the beginning of the title; it can be changed later. This is useful for cooperative reviewing and discussion of changes as well as for running all of the available tests. It is important that you provide a brief description of your changes; it will later be used for generating release changelogs.
+
+Testing will commence as soon as Yandex employees label your PR with a tag “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
+
+The system will prepare ClickHouse binary builds for your pull request individually. To retrieve these builds, click the “Details” link next to the “ClickHouse build check” entry in the list of checks. There you will find direct links to the built .deb packages of ClickHouse which you can deploy even on your production servers (if you have no fear).
+
+Most probably some of the builds will fail at first times. This is due to the fact that we check builds both with gcc as well as with clang, with almost all of the existing warnings (always with the `-Werror` flag) enabled for clang. On that same page, you can find all of the build logs so that you do not have to build ClickHouse in all of the possible ways.
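+
+Before (re)submitting a pull request, it can also help to bring your branch up to date with the main repository, using the `upstream` remote added earlier. This is a common workflow sketch, not a step required by the process above (the branch name is a placeholder):
+
+    git fetch upstream
+    git rebase upstream/master
+    # Update the branch already pushed to your fork
+    git push --force-with-lease origin my_branch_name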
diff --git a/docs/tr/development/index.md b/docs/tr/development/index.md
new file mode 100644
index 00000000000..fdd4c0c0805
--- /dev/null
+++ b/docs/tr/development/index.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "Geli\u015Fme"
+toc_hidden: true
+toc_priority: 58
+toc_title: "gizlenmi\u015F"
+---
+
+# ClickHouse Development {#clickhouse-development}
+
+[Original article](https://clickhouse.tech/docs/en/development/)
diff --git a/docs/tr/development/style.md b/docs/tr/development/style.md
new file mode 100644
index 00000000000..713f95cf053
--- /dev/null
+++ b/docs/tr/development/style.md
@@ -0,0 +1,841 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 68
+toc_title: "C++ kodu nas\u0131l yaz\u0131l\u0131r"
+---
+
+# How to Write C++ Code {#how-to-write-c-code}
+
+## General Recommendations {#general-recommendations}
+
+**1.** The following are recommendations, not requirements.
+
+**2.** If you are editing code, it makes sense to follow the formatting of the existing code.
+
+**3.** Code style is needed for consistency. Consistency makes it easier to read the code, and it also makes it easier to search the code.
+
+**4.** Many of the rules do not have logical reasons; they are dictated by established practices.
+
+## Formatting {#formatting}
+
+**1.** Most of the formatting will be done automatically by `clang-format`.
+
+**2.** Indents are 4 spaces. Configure your development environment so that a tab adds four spaces.
+
+**3.** Opening and closing curly brackets must be on a separate line.
+
+``` cpp
+inline void readBoolText(bool & x, ReadBuffer & buf)
+{
+    char tmp = '0';
+    readChar(tmp, buf);
+    x = tmp != '0';
+}
+```
+
+**4.** If the entire function body is a single `statement`, it can be placed on a single line. Place spaces around curly braces (besides the space at the end of the line).
+
+``` cpp
+inline size_t mask() const { return buf_size() - 1; }
+inline size_t place(HashValue x) const { return x & mask(); }
+```
+
+**5.** For functions. Don't put spaces around brackets.
+
+``` cpp
+void reinsert(const Value & x)
+```
+
+``` cpp
+memcpy(&buf[place_value], &x, sizeof(x));
+```
+
+**6.** In `if`, `for`, `while` and other expressions, a space is inserted in front of the opening bracket (as opposed to function calls).
+
+``` cpp
+for (size_t i = 0; i < rows; i += storage.index_granularity)
+```
+
+**7.** Add spaces around binary operators (`+`, `-`, `*`, `/`, `%`, …) and the ternary operator `?:`.
+
+``` cpp
+UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0');
+UInt8 month = (s[5] - '0') * 10 + (s[6] - '0');
+UInt8 day = (s[8] - '0') * 10 + (s[9] - '0');
+```
+
+**8.** If a line feed is entered, put the operator on a new line and increase the indent before it.
+
+``` cpp
+if (elapsed_ns)
+    message << " ("
+        << rows_read_on_server * 1000000000 / elapsed_ns << " rows/s., "
+        << bytes_read_on_server * 1000.0 / elapsed_ns << " MB/s.) ";
+```
+
+**9.** You can use spaces for alignment within a line, if desired.
+
+``` cpp
+dst.ClickLogID     = click.LogID;
+dst.ClickEventID   = click.EventID;
+dst.ClickGoodEvent = click.GoodEvent;
+```
+
+**10.** Don't use spaces around the operators `.`, `->`.
+
+If necessary, the operator can be wrapped to the next line. In this case, the offset in front of it is increased.
+
+**11.** Do not use a space to separate unary operators (`--`, `++`, `*`, `&`, …) from the argument.
+
+**12.** Put a space after a comma, but not before it. The same rule goes for a semicolon inside a `for` expression.
+
+**13.** Do not use spaces to separate the `[]` operator.
+
+**14.** In a `template <...>` expression, use a space between `template` and `<`; no spaces after `<` or before `>`.
+
+``` cpp
+template <typename TKey, typename TValue>
+struct AggregatedStatElement
+{}
+```
+
+**15.** In classes and structures, write `public`, `private`, and `protected` on the same level as `class/struct`, and indent the rest of the code.
+
+``` cpp
+template <typename T>
+class MultiVersion
+{
+public:
+    /// Version of object for usage. shared_ptr manage lifetime of version.
+    using Version = std::shared_ptr<const T>;
+    ...
+}
+```
+
+**16.** If the same `namespace` is used for the entire file, and there isn't anything else significant, an offset is not necessary inside `namespace`.
+
+**17.** If the block for an `if`, `for`, `while`, or other expression consists of a single `statement`, the curly brackets are optional. Place the `statement` on a separate line, instead. This rule is also valid for nested `if`, `for`, `while`, …
+
+But if the inner `statement` contains curly brackets or `else`, the external block should be written in curly brackets.
+
+``` cpp
+/// Finish write.
+for (auto & stream : streams)
+    stream.second->finalize();
+```
+
+**18.** There shouldn't be any spaces at the ends of lines.
+
+**19.** Source files are UTF-8 encoded.
+
+**20.** Non-ASCII characters can be used in string literals.
+
+``` cpp
+<< ", " << (timer.elapsed() / chunks_stats.hits) << " μsec/hit.";
+```
+
+**21.** Do not write multiple expressions in a single line.
+
+**22.** Group sections of code inside functions and separate them with no more than one empty line.
+
+**23.** Separate functions, classes, and so on with one or two empty lines.
+
+**24.** A `const` (related to a value) must be written before the type name.
+
+``` cpp
+//correct
+const char * pos
+const std::string & s
+//incorrect
+char const * pos
+```
+
+**25.** When declaring a pointer or reference, the `*` and `&` symbols should be separated by spaces on both sides.
+
+``` cpp
+//correct
+const char * pos
+//incorrect
+const char* pos
+const char *pos
+```
+
+**26.** When using template types, alias them with the `using` keyword (except in the simplest cases).
+
+In other words, the template parameters are specified only in `using` and aren't repeated in the code.
+
+`using` can be declared locally, such as inside a function.
+
+``` cpp
+//correct
+using FileStreams = std::map<std::string, std::shared_ptr<Stream>>;
+FileStreams streams;
+//incorrect
+std::map<std::string, std::shared_ptr<Stream>> streams;
+```
+
+**27.** Do not declare several variables of different types in one statement.
+
+``` cpp
+//incorrect
+int x, *y;
+```
+
+**28.** Do not use C-style casts.
+
+``` cpp
+//incorrect
+std::cerr << (int)c << std::endl;
+//correct
+std::cerr << static_cast<int>(c) << std::endl;
+```
+
+**29.** In classes and structs, group members and functions separately inside each visibility scope.
+
+**30.** For small classes and structs, it is not necessary to separate the method declaration from the implementation.
+
+The same is true for small methods in any classes or structs.
+
+For templated classes and structs, do not separate the method declarations from the implementation (because otherwise they must be defined in the same translation unit).
+
+**31.** You can wrap lines at 140 characters, instead of 80.
+
+**32.** Always use the prefix increment/decrement operators if postfix is not required.
+
+``` cpp
+for (Names::const_iterator it = column_names.begin(); it != column_names.end(); ++it)
+```
+
+## Comments {#comments}
+
+**1.** Be sure to add comments for all non-trivial parts of code.
+
+This is very important. Writing the comment might help you realize that the code isn't necessary, or that it is designed wrong.
+
+``` cpp
+/** Part of piece of memory, that can be used.
+  * For example, if internal_buffer is 1MB, and there was only 10 bytes loaded to buffer from file for reading,
+  * then working_buffer will have size of only 10 bytes
+  * (working_buffer.end() will point to position right after those 10 bytes available for read).
+  */
+```
+
+**2.** Comments can be as detailed as necessary.
+
+**3.** Place comments before the code they describe. In rare cases, comments can come after the code, on the same line.
+
+``` cpp
+/** Parses and executes the query.
+*/
+void executeQuery(
+    ReadBuffer & istr, /// Where to read the query from (and data for INSERT, if applicable)
+    WriteBuffer & ostr, /// Where to write the result
+    Context & context, /// DB, tables, data types, engines, functions, aggregate functions...
+    BlockInputStreamPtr & query_plan, /// Here could be written the description on how query was executed
+    QueryProcessingStage::Enum stage = QueryProcessingStage::Complete /// Up to which stage process the SELECT query
+    )
+```
+
+**4.** Comments should be written in English only.
+
+**5.** If you are writing a library, include detailed comments explaining it in the main header file.
+
+**6.** Do not add comments that do not provide additional information. In particular, do not leave empty comments like this:
+
+``` cpp
+/*
+* Procedure Name:
+* Original procedure name:
+* Author:
+* Date of creation:
+* Dates of modification:
+* Modification authors:
+* Original file name:
+* Purpose:
+* Intent:
+* Designation:
+* Classes used:
+* Constants:
+* Local variables:
+* Parameters:
+* Date of creation:
+* Purpose:
+*/
+```
+
+The example is borrowed from the resource http://home.tamk.fi/~jaalto/course/coding-style/doc/unmaintainable-code/.
+
+**7.** Do not write garbage comments (author, creation date ..) at the beginning of each file.
+
+**8.** Single-line comments begin with three slashes: `///` and multi-line comments begin with `/**`. These comments are considered “documentation”.
+
+Note: You can use Doxygen to generate documentation from these comments. But Doxygen is not generally used because it is more convenient to navigate the code in the IDE.
+
+**9.** Multi-line comments must not have empty lines at the beginning and end (except the line that closes a multi-line comment).
+
+**10.** For commenting out code, use basic comments, not “documenting” comments.
+
+**11.** Delete the commented out parts of the code before committing.
+
+**12.** Do not use profanity in comments or code.
+
+**13.** Do not use uppercase letters. Do not use excessive punctuation.
+
+``` cpp
+/// WHAT THE FAIL???
+```
+
+**14.** Do not use comments to make delimiters.
+
+``` cpp
+///******************************************************
+```
+
+**15.** Do not start discussions in comments.
+
+``` cpp
+/// Why did you do this stuff?
+```
+
+**16.** There's no need to write a comment at the end of a block describing what it was about.
+
+``` cpp
+/// for
+```
+
+## Names {#names}
+
+**1.** Use lowercase letters with underscores in the names of variables and class members.
+
+``` cpp
+size_t max_block_size;
+```
+
+**2.** For the names of functions (methods), use camelCase beginning with a lowercase letter.
+
+``` cpp
+std::string getName() const override { return "Memory"; }
+```
+
+**3.** For the names of classes (structs), use CamelCase beginning with an uppercase letter. Prefixes other than I are not used for interfaces.
+
+``` cpp
+class StorageMemory : public IStorage
+```
+
+**4.** `using` are named the same way as classes, or with `_t` on the end.
+
+**5.** Names of template type arguments: in simple cases, use `T`; `T`, `U`; `T1`, `T2`.
+
+For more complex cases, either follow the rules for class names, or add the prefix `T`.
+
+``` cpp
+template <typename TKey, typename TValue>
+struct AggregatedStatElement
+```
+
+**6.** Names of template constant arguments: either follow the rules for variable names, or use `N` in simple cases.
+
+``` cpp
+template <bool without_www>
+struct ExtractDomain
+```
+
+**7.** For abstract classes (interfaces) you can add the `I` prefix.
+
+``` cpp
+class IBlockInputStream
+```
+
+**8.** If you use a variable locally, you can use the short name.
+
+In all other cases, use a name that describes the meaning.
+
+``` cpp
+bool info_successfully_loaded = false;
+```
+
+**9.** Names of `define`s and global constants use ALL\_CAPS with underscores.
+
+``` cpp
+#define MAX_SRC_TABLE_NAMES_TO_STORE 1000
+```
+
+**10.** File names should use the same style as their contents.
+
+If a file contains a single class, name the file the same way as the class (CamelCase).
+
+If the file contains a single function, name the file the same way as the function (camelCase).
+
+**11.** If the name contains an abbreviation, then:
+
+- For variable names, the abbreviation should use lowercase letters `mysql_connection` (not `mySQL_connection`).
+- For names of classes and functions, keep the uppercase letters in the abbreviation `MySQLConnection` (not `MySqlConnection`).
+
+**12.** Constructor arguments that are used just to initialize the class members should be named the same way as the class members, but with an underscore at the end.
+
+``` cpp
+FileQueueProcessor(
+    const std::string & path_,
+    const std::string & prefix_,
+    std::shared_ptr<FileHandler> handler_)
+    : path(path_),
+    prefix(prefix_),
+    handler(handler_),
+    log(&Logger::get("FileQueueProcessor"))
+{
+}
+```
+
+The underscore suffix can be omitted if the argument is not used in the constructor body.
+
+**13.** There is no difference in the names of local variables and class members (no prefixes required).
+
+``` cpp
+timer (not m_timer)
+```
+
+**14.** For the constants in an `enum`, use CamelCase with a capital letter. ALL\_CAPS is also acceptable. If the `enum` is non-local, use an `enum class`.
+
+``` cpp
+enum class CompressionMethod
+{
+    QuickLZ = 0,
+    LZ4 = 1,
+};
+```
+
+**15.** All names must be in English. Transliteration of Russian words is not allowed.
+
+    not Stroka
+
+**16.** Abbreviations are acceptable if they are well known (when you can easily find the meaning of the abbreviation in Wikipedia or in a search engine).
+
+    `AST`, `SQL`.
+
+    Not `NVDH` (some random letters)
+
+Incomplete words are acceptable if the shortened version is in common use.
+
+You can also use an abbreviation if the full name is included next to it in comments.
+
+**17.** File names with C++ source code must have the `.cpp` extension. Header files must have the `.h` extension.
+
+## How to Write Code {#how-to-write-code}
+
+**1.** Memory management.
+
+Manual memory deallocation (`delete`) can only be used in library code.
+
+In library code, the `delete` operator can only be used in destructors.
+
+In application code, memory must be freed by the object that owns it.
+
+Examples:
+
+- The easiest way is to place an object on the stack, or make it a member of another class.
+- For a large number of small objects, use containers.
+- For automatic deallocation of a small number of objects that reside in the heap, use `shared_ptr/unique_ptr`.
+
+**2.** Resource management.
+
+Use `RAII` and see above.
+
+**3.** Error handling.
+
+Use exceptions. In most cases, you only need to throw an exception and don't need to catch it (because of `RAII`).
+
+In offline data processing applications, it's often acceptable to not catch exceptions.
+
+In servers that handle user requests, it's usually enough to catch exceptions at the top level of the connection handler.
+
+In thread functions, you should catch and keep all exceptions to rethrow them in the main thread after `join`.
+
+``` cpp
+/// If there weren't any calculations yet, calculate the first block synchronously
+if (!started)
+{
+    calculate();
+    started = true;
+}
+else /// If calculations are already in progress, wait for the result
+    pool.wait();
+
+if (exception)
+    exception->rethrow();
+```
+
+Never hide exceptions without handling. Never just blindly put all exceptions to log.
+
+``` cpp
+//Not correct
+catch (...) {}
+```
+
+If you need to ignore some exceptions, do so only for specific ones and rethrow the rest.
+
+``` cpp
+catch (const DB::Exception & e)
+{
+    if (e.code() == ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION)
+        return nullptr;
+    else
+        throw;
+}
+```
+
+When using functions with response codes or `errno`, always check the result and throw an exception in case of error.
+
+``` cpp
+if (0 != close(fd))
+    throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
+```
+
+`Do not use assert`.
+
+**4.** Exception types.
+
+There is no need to use a complex exception hierarchy in application code. The exception text should be understandable to a system administrator.
+
+**5.** Throwing exceptions from destructors.
+
+This is not recommended, but it is allowed.
+
+Use the following options:
+
+- Create a function (`done()` or `finalize()`) that will do all the work in advance that might lead to an exception. If that function was called, there should be no exceptions in the destructor later.
+- Tasks that are too complex (such as sending messages over the network) can be put in a separate method that the class user will have to call before destruction.
+- If there is an exception in the destructor, it's better to log it than to hide it (if the logger is available).
+- In simple applications, it is acceptable to rely on `std::terminate` (for cases of `noexcept` by default in C++11) to handle exceptions.
+
+**6.** Anonymous code blocks.
+
+You can create a separate code block inside a single function in order to make certain variables local, so that the destructors are called when exiting the block.
+
+``` cpp
+Block block = data.in->read();
+
+{
+    std::lock_guard<std::mutex> lock(mutex);
+    data.ready = true;
+    data.block = block;
+}
+
+ready_any.set();
+```
+
+**7.** Multithreading.
+
+In offline data processing programs:
+
+- Try to get the best possible performance on a single CPU core. You can then parallelize your code if necessary.
+
+In server applications:
+
+- Use the thread pool to process requests. At this point, we haven't had any tasks that required userspace context switching.
+
+Fork is not used for parallelization.
+
+**8.** Syncing threads.
+
+Often it is possible to make different threads use different memory cells (even better: different cache lines) and to not use any thread synchronization (except `joinAll`).
+
+If synchronization is required, in most cases it is sufficient to use a mutex under `lock_guard`.
+
+In other cases, use system synchronization primitives. Do not use busy wait.
+
+Atomic operations should be used only in the simplest cases.
+
+Do not try to implement lock-free data structures unless it is your primary area of expertise.
+
+**9.** Pointers vs references.
+
+In most cases, prefer references.
+
+**10.** const.
+
+Use constant references, pointers to constants, `const_iterator`, and const methods.
+
+Consider `const` to be the default and use non-`const` only when necessary.
+
+When passing variables by value, using `const` usually does not make sense.
+
+**11.** unsigned.
+
+Use `unsigned` if necessary.
+
+**12.** Numeric types.
+
+Use the types `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, and `Int64`, as well as `size_t`, `ssize_t`, and `ptrdiff_t`.
+
+Don't use these types for numbers: `signed/unsigned long`, `long long`, `short`, `signed/unsigned char`, `char`.
+
+**13.** Passing arguments.
+
+Pass complex values by reference (including `std::string`).
+
+If a function captures ownership of an object created in the heap, make the argument type `shared_ptr` or `unique_ptr`.
+
+**14.** Return values.
+
+In most cases, just use `return`. Do not write `return std::move(res)`.
+
+If the function allocates an object on the heap and returns it, use `shared_ptr` or `unique_ptr`.
+
+In rare cases you might need to return the value via an argument. In this case, the argument should be a reference.
+
+``` cpp
+using AggregateFunctionPtr = std::shared_ptr<IAggregateFunction>;
+
+/** Allows creating an aggregate function by its name.
+  */
+class AggregateFunctionFactory
+{
+public:
+    AggregateFunctionFactory();
+    AggregateFunctionPtr get(const String & name, const DataTypes & argument_types) const;
+```
+
+**15.** namespace.
+
+There is no need to use a separate `namespace` for application code.
+
+Small libraries don't need this, either.
+
+For medium to large libraries, put everything in a `namespace`.
+
+In the library's `.h` file, you can use `namespace detail` to hide implementation details not needed for the application code.
+
+In a `.cpp` file, you can use `static` or an anonymous namespace to hide symbols.
+
+Also, a `namespace` can be used for an `enum` to prevent the corresponding names from falling into an external `namespace` (but it's better to use an `enum class`).
+
+**16.** Deferred initialization.
+
+If arguments are required for initialization, then you normally shouldn't write a default constructor.
+
+If later you'll need to delay initialization, you can add a default constructor that will create an invalid object. Or, for a small number of objects, you can use `shared_ptr/unique_ptr`.
+
+``` cpp
+Loader(DB::Connection * connection_, const std::string & query, size_t max_block_size_);
+
+/// For deferred initialization
+Loader() {}
+```
+
+**17.** Virtual functions.
+
+If the class is not intended for polymorphic use, you do not need to make functions virtual. This also applies to the destructor.
+
+**18.** Encodings.
+
+Use UTF-8 everywhere. Use `std::string` and `char *`. Do not use `std::wstring` and `wchar_t`.
+
+**19.** Logging.
+
+See the examples everywhere in the code.
+
+Before committing, delete all meaningless and debug logging, and any other types of debug output.
+
+Logging in cycles should be avoided, even on the Trace level.
+
+Logs must be readable at any logging level.
+
+Logging should only be used in application code, for the most part.
+
+Log messages must be written in English.
+
+The log should preferably be understandable for the system administrator.
+
+Do not use profanity in the log.
+
+Use UTF-8 encoding in the log. In rare cases you can use non-ASCII characters in the log.
+
+**20.** Input-output.
+
+Don't use `iostreams` in internal cycles that are critical for application performance (and never use `stringstream`).
+
+Use the `DB/IO` library instead.
+
+**21.** Date and time.
+
+See the `DateLUT` library.
+
+**22.** include.
+
+Always use `#pragma once` instead of include guards.
+
+**23.** using.
+
+`using namespace` is not used. You can use `using` with something specific. But make it local inside a class or function.
+
+**24.** Do not use `trailing return type` for functions unless necessary.
+
+``` cpp
+auto f() -> void
+```
+
+**25.** Declaration and initialization of variables.
+
+``` cpp
+//right way
+std::string s = "Hello";
+std::string s{"Hello"};
+
+//wrong way
+auto s = std::string{"Hello"};
+```
+
+**26.** For virtual functions, write `virtual` in the base class, but write `override` instead of `virtual` in descendant classes.
+
+## Unused Features of C++ {#unused-features-of-c}
+
+**1.** Virtual inheritance is not used.
+
+**2.** Exception specifiers from C++03 are not used.
+
+## Platform {#platform}
+
+**1.** We write code for a specific platform.
+
+But other things being equal, cross-platform or portable code is preferred.
+
+**2.** Language: C++17.
+
+**3.** Compiler: `gcc`. At this time (December 2017), the code is compiled using version 7.2. (It can also be compiled using `clang 4`.)
+
+The standard library is used (`libstdc++` or `libc++`).
+
+**4.** OS: Linux Ubuntu, not older than Precise.
+
+**5.** Code is written for the x86\_64 CPU architecture.
+
+The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2.
+
+**6.** Use `-Wall -Wextra -Werror` compilation flags.
+
+**7.** Use static linking with all libraries except those that are difficult to link to statically (see the output of the `ldd` command).
+
+**8.** Code is developed and debugged with release settings.
+
+## Tools {#tools}
+
+**1.** KDevelop is a good IDE.
+
+**2.** For debugging, use `gdb`, `valgrind` (`memcheck`), `strace`, `-fsanitize=...`, or `tcmalloc_minimal_debug`.
+
+**3.** For profiling, use `Linux Perf`, `valgrind` (`callgrind`), or `strace -cf`.
+
+**4.** Sources are in Git.
+
+**5.** Assembly uses `CMake`.
+
+**6.** Programs are released using `deb` packages.
+
+**7.** Commits to master must not break the build.
+
+Though only selected revisions are considered workable.
+
+**8.** Make commits as often as possible, even if the code is only partially ready.
+
+Use branches for this purpose.
+
+If your code in the `master` branch is not buildable yet, exclude it from the build before the `push`. You'll need to finish it or remove it within a few days.
+
+**9.** For non-trivial changes, use branches and publish them on the server.
+
+**10.** Unused code is removed from the repository.
+
+## Libraries {#libraries}
+
+**1.** The C++14 standard library is used (experimental extensions are allowed), as well as the `boost` and `Poco` frameworks.
+
+**2.** If necessary, you can use any well-known libraries available in the OS package.
+
+If there is a good solution already available, then use it, even if it means you have to install another library.
+
+(But be prepared to remove bad libraries from code.)
+
+**3.** You can install a library that isn't in the packages, if the packages don't have what you need or have an outdated version or the wrong type of compilation.
+
+**4.** If the library is small and doesn't have its own complex build system, put the source files in the `contrib` folder.
+
+**5.** Preference is always given to libraries that are already in use.
+
+## General Recommendations {#general-recommendations-1}
+
+**1.** Write as little code as possible.
+
+**2.** Try the simplest solution.
+
+**3.** Don't write code until you know how it's going to work and how the inner loop will function.
+
+**4.** In the simplest cases, use `using` instead of classes or structs.
+
+**5.** If possible, do not write copy constructors, assignment operators, destructors (other than a virtual one, if the class contains at least one virtual function), move constructors or move assignment operators. In other words, the compiler-generated functions must work correctly. You can use `default`.
+
+**6.** Code simplification is encouraged. Reduce the size of your code where possible.
+
+## Additional Recommendations {#additional-recommendations}
+
+**1.** Explicitly specifying `std::` for types from `stddef.h`
+
+is not recommended. In other words, we recommend writing `size_t` instead of `std::size_t`, because it's shorter.
+
+It is acceptable to add `std::`.
+
+**2.** Explicitly specifying `std::` for functions from the standard C library
+
+is not recommended. In other words, write `memcpy` instead of `std::memcpy`.
+
+The reason is that there are similar non-standard functions, such as `memmem`. We do use these functions on occasion. These functions do not exist in `namespace std`.
+
+If you write `std::memcpy` instead of `memcpy` everywhere, then `memmem` without `std::` will look strange.
+
+Nevertheless, you can still use `std::` if you prefer it.
+
+**3.** Using functions from C when the same ones are available in the standard C++ library.
+
+This is acceptable if it is more efficient.
+
+For example, use `memcpy` instead of `std::copy` for copying large chunks of memory.
+
+**4.** Multiline function arguments.
+
+Any of the following wrapping styles are allowed:
+
+``` cpp
+function(
+  T1 x1,
+  T2 x2)
+```
+
+``` cpp
+function(
+  size_t left, size_t right,
+  const & RangesInDataParts ranges,
+  size_t limit)
+```
+
+``` cpp
+function(size_t left, size_t right,
+  const & RangesInDataParts ranges,
+  size_t limit)
+```
+
+``` cpp
+function(size_t left, size_t right,
+      const & RangesInDataParts ranges,
+      size_t limit)
+```
+
+``` cpp
+function(
+      size_t left,
+      size_t right,
+      const & RangesInDataParts ranges,
+      size_t limit)
+```
+
+[Original article](https://clickhouse.tech/docs/en/development/style/)
diff --git a/docs/tr/development/tests.md b/docs/tr/development/tests.md
new file mode 100644
index 00000000000..1d39c24da5f
--- /dev/null
+++ b/docs/tr/development/tests.md
@@ -0,0 +1,252 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 69
+toc_title: "ClickHouse testleri nas\u0131l \xE7al\u0131\u015Ft\u0131r\u0131l\u0131\
+  r"
+---
+
+# ClickHouse Testing {#clickhouse-testing}
+
+## Functional Tests {#functional-tests}
+
+Functional tests are the most simple and convenient to use. Most ClickHouse features can be tested with functional tests, and they are mandatory to use for every change in ClickHouse code that can be tested that way.
+
+Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with a reference.
+
+Tests are located in the `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica, and it is not available to the general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests.
+
+Each test can be one of two types: `.sql` and `.sh`. A `.sql` test is a simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. A `.sh` test is a script that is run by itself.
+
+To run all tests, use the `clickhouse-test` tool. Look at `--help` for the list of possible options. You can simply run all tests or run a subset of tests filtered by a substring in the test name: `./clickhouse-test substring`.
+
+The most simple way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory.
+
+To add a new test, create a `.sql` or `.sh` file in the `queries/0_stateless` directory, check it manually and then generate the `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`.
+
+Tests should use (create, drop, etc.) only tables in the `test` database that is assumed to be created beforehand; also, tests can use temporary tables.
+
+If you want to use distributed queries in functional tests, you can leverage the `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in the server configuration file like `test_shard_localhost`.
+
+Some tests are marked with `zookeeper`, `shard` or `long` in their names.
+`zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that
+require the server to listen on `127.0.0.*`; `distributed` or `global` have the same
+meaning. `long` is for tests that run slightly longer than one second. You can
+disable these groups of tests using the `--no-zookeeper`, `--no-shard` and
+`--no-long` options, respectively.
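+
+As a concrete sketch of the add-a-test workflow described above (the test number and name are hypothetical placeholders; pick a free number in `queries/0_stateless`):
+
+``` bash
+# Create a trivial stateless test (hypothetical name)
+echo "SELECT 1 + 1;" > queries/0_stateless/99999_example_addition.sql
+# Generate its reference output, then run it through the test harness
+clickhouse-client -n --testmode < queries/0_stateless/99999_example_addition.sql > queries/0_stateless/99999_example_addition.reference
+./clickhouse-test 99999_example_addition
+```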
+
+## Known Bugs {#known-bugs}
+
+If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in the `tests/queries/bugs` directory. These tests will be moved to `tests/queries/0_stateless` when the bugs are fixed.
+
+## Integration Tests {#integration-tests}
+
+Integration tests allow testing ClickHouse in a clustered configuration, as well as ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software.
+
+See `tests/integration/README.md` on how to run these tests.
+
+Note that integration of ClickHouse with third-party drivers is not tested. Also, we currently don't have integration tests with our JDBC and ODBC drivers.
+
+## Unit Tests {#unit-tests}
+
+Unit tests are useful when you want to test not ClickHouse as a whole, but a single isolated library or class. You can enable or disable the build of tests with the `ENABLE_TESTS` CMake option. Unit tests (and other test programs) are located in `tests` subdirectories across the code. To run unit tests, type `ninja test`. Some tests use `gtest`, but some are just programs that return a non-zero exit code on test failure.
+
+It's not necessary to have unit tests if the code is already covered by functional tests (and functional tests are usually much more simple to use).
+
+## Performance Tests {#performance-tests}
+
+Performance tests allow measuring and comparing the performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `tests/performance`. Each test is represented by an `.xml` file with a description of the test case. Tests are run with the `clickhouse performance-test` tool (that is embedded in the `clickhouse` binary). See `--help` for invocation.
+
+Each test runs one or multiple queries (possibly with combinations of parameters) in a loop with some conditions for stopping (like “maximum execution speed is not changing in three seconds”) and measures some metrics about query performance (like “maximum execution speed”). Some tests can contain preconditions on a preloaded test dataset.
+
+If you want to improve the performance of ClickHouse in some scenario, and if improvements can be observed on simple queries, it is highly recommended to write a performance test. It always makes sense to use `perf top` or other perf tools during your tests.
+
+## Test Tools and Scripts {#test-tools-and-scripts}
+
+Some programs in the `tests` directory are not prepared tests, but test tools. For example, for `Lexer` there is a tool `src/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.
+
+You can also place a pair of files `.sh` and `.reference` along with the tool to run it on some predefined input - then the script result can be compared to the `.reference` file. These kinds of tests are not automated.
+
+## Miscellaneous Tests {#miscellaneous-tests}
+
+There are tests for external dictionaries located at `tests/external_dictionaries` and for machine-learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests.
+
+There is a separate test for quorum inserts. This test runs a ClickHouse cluster on separate servers and emulates various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT`, like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts were written and all rejected inserts were not.
+
+The quorum test was written by a separate team before ClickHouse was open-sourced. This team no longer works with ClickHouse. The test was accidentally written in Java. For these reasons, the quorum test must be rewritten and moved to integration tests.
+
+## Manual Testing {#manual-testing}
+
+When you develop a new feature, it is reasonable to also test it manually. You can do it with the following steps:
+
+Build ClickHouse. Run ClickHouse from the terminal: change directory to `programs/clickhouse-server` and run it with `./clickhouse-server`. It will use configuration (`config.xml`, `users.xml` and files within the `config.d` and `users.d` directories) from the current directory by default. To connect to the ClickHouse server, run `programs/clickhouse-client/clickhouse-client`.
+
+Note that all clickhouse tools (server, client, etc.) are just symlinks to a single binary named `clickhouse`. You can find this binary at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.
+
+Alternatively, you can install the ClickHouse package: either a stable release from the Yandex repository, or you can build the package for yourself with `./release` in the ClickHouse sources root. Then start the server with `sudo service clickhouse-server start` (or stop to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`.
+
+When ClickHouse is already installed on your system, you can build a new `clickhouse` binary and replace the existing binary:
+
+``` bash
+$ sudo service clickhouse-server stop
+$ sudo cp ./clickhouse /usr/bin/
+$ sudo service clickhouse-server start
+```
+
+Also you can stop the system clickhouse-server and run your own with the same configuration but with logging to the terminal:
+
+``` bash
+$ sudo service clickhouse-server stop
+$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
+```
+
+Example with gdb:
+
+``` bash
+$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
+```
+
+If the system clickhouse-server is already running and you don't want to stop it, you can change the port numbers in your `config.xml` (or override them in a file in the `config.d` directory), provide an appropriate data path, and run it.
+
+The `clickhouse` binary has almost no dependencies and works across a wide range of Linux distributions. To quickly and dirtily test your changes on a server, you can simply `scp` your freshly built `clickhouse` binary to your server and then run it as in the examples above.
+
+## Testing Environment {#testing-environment}
+
+Before publishing a release as stable, we deploy it on a testing environment. The testing environment is a cluster that processes a 1/39 part of [Yandex.Metrica](https://metrica.yandex.com/) data. We share our testing environment with the Yandex.Metrica team. ClickHouse is upgraded without downtime on top of existing data. We look at first that the data is processed successfully without lagging from realtime, the replication continues to work, and there are no issues visible to the Yandex.Metrica team. The first check can be done in the following way:
+
+``` sql
+SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
+```
+
+In some cases we also deploy to the testing environment of our friend teams in Yandex: Market, Cloud, etc. Also, we have some hardware servers that are used for development purposes.
+
+## Load Testing {#load-testing}
+
+After deploying to the testing environment, we run load testing with queries from the production cluster. This is done manually.
+
+Make sure you have enabled `query_log` on your production cluster.
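+
+A quick, hedged way to confirm that the query log is actually being populated before collecting it (this simply counts today's entries in the built-in `system.query_log` table):
+
+``` bash
+# Should return a non-zero count on a busy cluster
+clickhouse-client --query "SELECT count() FROM system.query_log WHERE event_date = today()"
+```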
+
+Bir gün veya daha uzun süre boyunca sorgu günlüğü toplayın:
+
+``` bash
+$ clickhouse-client --query="SELECT DISTINCT query FROM system.query_log WHERE event_date = today() AND query LIKE '%ym:%' AND query NOT LIKE '%system.query_log%' AND type = 2 AND is_initial_query" > queries.tsv
+```
+
+Bu, biraz karmaşık bir örnektir. `type = 2`, başarıyla yürütülen sorguları süzer. `query LIKE '%ym:%'`, Yandex.Metrica'dan gelen ilgili sorguları seçer. `is_initial_query`, yalnızca istemci tarafından başlatılan sorguları seçer; Clickhouse'un kendisi tarafından (dağıtılmış sorgu işlemenin parçası olarak) başlatılanları değil.
+
+Günlüğü test kümenize `scp` ile kopyalayın ve aşağıdaki gibi çalıştırın:
+
+``` bash
+$ clickhouse benchmark --concurrency 16 < queries.tsv
+```
+
+(muhtemelen `--user` seçeneğini de belirtmek istersiniz)
+
+Sonra bir gece ya da hafta sonu boyunca çalışır halde bırakın ve dinlenmeye çekilin.
+
+`clickhouse-server`'ın çökmediğini, bellek ayak izinin sınırlı kaldığını ve performansın zamanla bozulmadığını kontrol etmelisiniz.
+
+Kesin sorgu yürütme zamanlamaları, sorguların ve ortamın yüksek değişkenliği nedeniyle kaydedilmez ve karşılaştırılmaz.
+
+## Yapı Testleri {#build-tests}
+
+Yapı testleri, derlemenin çeşitli alternatif yapılandırmalarda ve bazı yabancı sistemlerde bozulmadığını kontrol etmeyi sağlar. Testler `ci` dizininde bulunur; Docker, Vagrant ve bazen Docker içinde `qemu-user-static` kullanırlar. Bu testler geliştirme aşamasındadır ve test çalıştırmaları otomatik değildir.
+
+Motivasyon:
+
+Normalde tüm testleri ClickHouse derlemesinin tek bir varyantı üzerinde yayınlar ve çalıştırırız. Ancak, yeterince test edilmeyen alternatif derleme varyantları vardır. Örnekler:
+
+- FreeBSD üzerinde derleme;
+- sistem paketlerinden gelen kütüphanelerle Debian üzerinde derleme;
+- kütüphanelerin paylaşımlı bağlanmasıyla derleme;
+- AArch64 platformunda derleme;
+- PowerPC platformunda derleme.
+
+Örneğin, sistem paketleriyle derleme kötü bir uygulamadır, çünkü bir sistemin tam olarak hangi paket sürümlerine sahip olacağını garanti edemeyiz. Ancak Debian bakımcılarının buna gerçekten ihtiyacı vardır. Bu nedenle en azından bu derleme varyantını desteklemeliyiz. Başka bir örnek: paylaşımlı bağlama yaygın bir sorun kaynağıdır, ancak bazı meraklılar için gereklidir.
+
+Tüm derleme varyantlarında tüm testleri çalıştıramasak da, en azından çeşitli derleme varyantlarının bozulmadığını kontrol etmek istiyoruz. Bu amaçla yapı testlerini kullanıyoruz.
+
+## Protokol uyumluluğu testi {#testing-for-protocol-compatibility}
+
+ClickHouse ağ protokolünü genişlettiğimizde, eski clickhouse istemcisinin yeni clickhouse sunucusuyla ve yeni clickhouse istemcisinin eski clickhouse sunucusuyla çalıştığını (yalnızca ilgili paketlerdeki ikili dosyaları çalıştırarak) manuel olarak test ederiz.
+
+## Derleyiciden yardım {#help-from-the-compiler}
+
+Ana ClickHouse kodu (`dbms` dizinindeki) `-Wall -Wextra -Werror` ve bazı ek uyarılar etkinleştirilmiş olarak derlenir. Ancak bu seçenekler üçüncü taraf kitaplıklar için etkin değildir.
+
+Clang'ın daha da yararlı uyarıları vardır; bunları `-Weverything` ile arayıp varsayılan derleme için uygun olanları seçebilirsiniz.
+
+Üretim derlemeleri için gcc kullanılır (hâlâ clang'dan biraz daha verimli kod üretir). Geliştirme için clang'ın kullanımı genellikle daha rahattır. Kendi makinenizde hata ayıklama moduyla derleyebilirsiniz (dizüstü bilgisayarınızın pilinden tasarruf etmek için), ancak derleyicinin, daha iyi kontrol akışı ve prosedürler arası analiz sayesinde `-O3` ile daha fazla uyarı üretebildiğini lütfen unutmayın. 
Clang ile derlerken `libstdc++` yerine `libc++` kullanılır; hata ayıklama moduyla derlerken ise, çalışma zamanında daha fazla hata yakalamaya izin veren `libc++` hata ayıklama sürümü kullanılır.
+
+## Dezenfektanlar {#sanitizers}
+
+**Adres dezenfektanı**.
+Taahhüt başına ASan altında fonksiyonel ve entegrasyon testleri çalıştırırız.
+
+**Valgrind (Memcheck)**.
+Gece boyunca valgrind altında fonksiyonel testler çalıştırırız. Birden fazla saat sürer. Şu anda `re2` kütüphanesinde bilinen bir yanlış pozitif vardır; bkz. [bu makale](https://research.swtch.com/sparse).
+
+**Tanımsız davranış dezenfektanı.**
+Taahhüt başına UBSan altında fonksiyonel ve entegrasyon testleri çalıştırırız.
+
+**İplik dezenfektanı**.
+Taahhüt başına TSan altında fonksiyonel testler çalıştırırız. TSan altında taahhüt başına entegrasyon testlerini hâlâ çalıştırmıyoruz.
+
+**Bellek temizleyici**.
+Şu anda MSan'ı hâlâ kullanmıyoruz.
+
+**Hata ayıklama ayırıcısı.**
+Hata ayıklama derlemeleri için `jemalloc`'un hata ayıklama sürümü kullanılır.
+
+## Fuzzing {#fuzzing}
+
+Rastgele SQL sorguları oluşturmak ve sunucunun ölmediğini kontrol etmek için basit fuzz testi kullanırız. Fuzz testi adres dezenfektanı ile yapılır. Bunu `00746_sql_fuzzy.pl` içinde bulabilirsiniz. Bu test sürekli olarak (gece boyunca ve daha uzun) çalıştırılmalıdır.
+
+Aralık 2018 itibarıyla, kütüphane kodunun yalıtılmış fuzz testini hâlâ kullanmıyoruz.
+
+## Güvenlik Denetimi {#security-audit}
+
+Yandex Bulut departmanından kişiler, ClickHouse yeteneklerini güvenlik açısından temel düzeyde gözden geçirir.
+
+## Statik Analizörler {#static-analyzers}
+
+`PVS-Studio`'yu taahhüt bazında çalıştırıyoruz. `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio` ve `tscancode` araçlarını değerlendirdik. Kullanım talimatlarını `tests/instructions/` dizininde bulabilirsiniz. Ayrıca [Rusça makaleyi](https://habr.com/company/yandex/blog/342018/) okuyabilirsiniz.
+
+`CLion`'ı IDE olarak kullanıyorsanız, bazı `clang-tidy` kontrollerinden kutudan çıktığı haliyle yararlanabilirsiniz.
+
+## Sağlamlaştırma {#hardening}
+
+`FORTIFY_SOURCE` varsayılan olarak kullanılır. Neredeyse işe yaramaz, ancak nadir durumlarda yine de anlamlıdır ve bunu devre dışı bırakmıyoruz.
+
+## Kod Stili {#code-style}
+
+Kod stili kuralları [burada](https://clickhouse.tech/docs/en/development/style/) açıklanmıştır.
+
+Bazı yaygın stil ihlallerini kontrol etmek için `utils/check-style` komut dosyasını kullanabilirsiniz.
+
+Kodunuza uygun stili zorlamak için `clang-format` kullanabilirsiniz. `.clang-format` dosyası kaynakların kökünde yer alır ve çoğunlukla gerçek kod stilimize karşılık gelir. Ancak `clang-format`'ı var olan dosyalara uygulamak önerilmez, çünkü biçimlendirmeyi daha da kötüleştirir. Clang kaynak deposunda bulabileceğiniz `clang-format-diff` aracını kullanabilirsiniz.
+
+Alternatif olarak, kodunuzu yeniden biçimlendirmek için `uncrustify` aracını deneyebilirsiniz. Yapılandırması kaynakların kökündeki `uncrustify.cfg` dosyasındadır. `clang-format`'tan daha az test edilmiştir.
+
+`CLion`, kod stilimiz için ayarlanması gereken kendi kod biçimlendiricisine sahiptir.
+
+## Metrica B2B testleri {#metrica-b2b-tests}
+
+Her ClickHouse sürümü Yandex Metrica ve AppMetrica motorları ile test edilir. Clickhouse'un test ve kararlı sürümleri VM'lerde dağıtılır ve giriş verilerinin sabit bir örneğini işleyen küçük bir Metrica motoru kopyasıyla çalıştırılır. Daha sonra Metrica motorunun iki örneğinin sonuçları birlikte karşılaştırılır.
+
+Bu testler ayrı bir ekip tarafından otomatikleştirilmiştir. 
Yüksek sayıda hareketli parça nedeniyle testler çoğu zaman tamamen ilgisiz nedenlerle başarısız olur ve bunların anlaşılması çok zordur. Büyük olasılıkla bu testlerin bizim için net değeri negatiftir. Bununla birlikte, bu testlerin yüzlerce çalıştırmanın yaklaşık bir veya ikisinde yararlı olduğu kanıtlanmıştır.
+
+## Test Kapsamı {#test-coverage}
+
+Temmuz 2018 itibarıyla test kapsamını takip etmiyoruz.
+
+## Test Otomasyonu {#test-automation}
+
+Testleri, Yandex'in dahili CI ve iş otomasyon sistemi olan “Sandbox” ile çalıştırırız.
+
+Derleme işleri ve testler, taahhüt bazında Sandbox'ta çalıştırılır. Ortaya çıkan paketler ve test sonuçları Github'da yayınlanır ve doğrudan bağlantılarla indirilebilir. Yapıtlar (artifacts) süresiz olarak saklanır. Github'da bir çekme isteği gönderdiğinizde onu “can be tested” olarak etiketleriz ve CI sistemimiz sizin için ClickHouse paketlerini (yayın, hata ayıklama, adres dezenfektanı ile vb.) derler.
+
+Zaman ve hesaplama gücü sınırı nedeniyle Travis CI kullanmıyoruz.
+Jenkins kullanmıyoruz. Daha önce kullanılıyordu ve şimdi Jenkins kullanmadığımız için mutluyuz.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/development/tests/)
diff --git a/docs/tr/engines/database_engines/index.md b/docs/tr/engines/database_engines/index.md
new file mode 100644
index 00000000000..c0e0bea8ab5
--- /dev/null
+++ b/docs/tr/engines/database_engines/index.md
@@ -0,0 +1,21 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "Veritaban\u0131 Motorlar\u0131"
+toc_priority: 27
+toc_title: "Giri\u015F"
+---
+
+# Veritabanı Motorları {#database-engines}
+
+Veritabanı motorları, tablolarla çalışmanıza izin verir.
+
+Varsayılan olarak ClickHouse, yapılandırılabilir [masa motorları](../../engines/table_engines/index.md) ve bir [SQL lehçesi](../../sql_reference/syntax.md) sağlayan yerel veritabanı altyapısını kullanır.
+
+Aşağıdaki veritabanı altyapılarını da kullanabilirsiniz:
+
+- [MySQL](mysql.md)
+
+- [Tembel](lazy.md)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/database_engines/)
diff --git a/docs/tr/engines/database_engines/lazy.md b/docs/tr/engines/database_engines/lazy.md
new file mode 100644
index 00000000000..85436fdc164
--- /dev/null
+++ b/docs/tr/engines/database_engines/lazy.md
@@ -0,0 +1,18 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 31
+toc_title: Tembel
+---
+
+# Tembel {#lazy}
+
+Tabloları son erişimden sonra yalnızca `expiration_time_in_seconds` saniye boyunca RAM'de tutar. Yalnızca \*Log tabloları ile kullanılabilir.
+
+Erişimler arasında uzun zaman aralığı olan birçok küçük \*Log tablosunu saklamak için optimize edilmiştir.
+
+## Veritabanı oluşturma {#creating-a-database}
+
+    CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds);
+
+[Orijinal makale](https://clickhouse.tech/docs/en/database_engines/lazy/)
diff --git a/docs/tr/engines/database_engines/mysql.md b/docs/tr/engines/database_engines/mysql.md
new file mode 100644
index 00000000000..974515ab655
--- /dev/null
+++ b/docs/tr/engines/database_engines/mysql.md
@@ -0,0 +1,135 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 30
+toc_title: MySQL
+---
+
+# MySQL {#mysql}
+
+Uzak bir MySQL sunucusundaki veritabanlarına bağlanmaya ve ClickHouse ile MySQL arasında veri alışverişi için `INSERT` ve `SELECT` sorguları gerçekleştirmeye izin verir. 
+
+Bu `MySQL` veritabanı motoru sorguları MySQL sunucusuna çevirir, böylece `SHOW TABLES` veya `SHOW CREATE TABLE` gibi işlemleri gerçekleştirebilirsiniz.
+
+Aşağıdaki sorgular gerçekleştirilemez:
+
+- `RENAME`
+- `CREATE TABLE`
+- `ALTER`
+
+## Veritabanı oluşturma {#creating-a-database}
+
+``` sql
+CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
+ENGINE = MySQL('host:port', ['database' | database], 'user', 'password')
+```
+
+**Motor Parametreleri**
+
+- `host:port` — MySQL server address.
+- `database` — Remote database name.
+- `user` — MySQL user.
+- `password` — User password.
+
+## Veri Türleri Desteği {#data_types-support}
+
+| MySQL                            | ClickHouse                                                    |
+|----------------------------------|--------------------------------------------------------------|
+| UNSIGNED TINYINT                 | [UInt8](../../sql_reference/data_types/int_uint.md)          |
+| TINYINT                          | [Int8](../../sql_reference/data_types/int_uint.md)           |
+| UNSIGNED SMALLINT                | [UInt16](../../sql_reference/data_types/int_uint.md)         |
+| SMALLINT                         | [Int16](../../sql_reference/data_types/int_uint.md)          |
+| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql_reference/data_types/int_uint.md)         |
+| INT, MEDIUMINT                   | [Int32](../../sql_reference/data_types/int_uint.md)          |
+| UNSIGNED BIGINT                  | [UInt64](../../sql_reference/data_types/int_uint.md)         |
+| BIGINT                           | [Int64](../../sql_reference/data_types/int_uint.md)          |
+| FLOAT                            | [Float32](../../sql_reference/data_types/float.md)           |
+| DOUBLE                           | [Float64](../../sql_reference/data_types/float.md)           |
+| DATE                             | [Tarih](../../sql_reference/data_types/date.md)              |
+| DATETIME, TIMESTAMP              | [DateTime](../../sql_reference/data_types/datetime.md)       |
+| BINARY                           | [FixedString](../../sql_reference/data_types/fixedstring.md) |
+
+Diğer tüm MySQL veri türleri [Dize](../../sql_reference/data_types/string.md) türüne dönüştürülür.
+
+[Nullable](../../sql_reference/data_types/nullable.md) desteklenir. 
+
+## Kullanım Örnekleri {#examples-of-use}
+
+MySQL'deki tablo:
+
+``` text
+mysql> USE test;
+Database changed
+
+mysql> CREATE TABLE `mysql_table` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `value` FLOAT NOT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into mysql_table (`int_id`, `value`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from mysql_table;
++--------+-------+
+| int_id | value |
++--------+-------+
+|      1 |     2 |
++--------+-------+
+1 row in set (0,00 sec)
+```
+
+MySQL sunucusu ile veri alışverişi yapan Clickhouse'daki veritabanı:
+
+``` sql
+CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
+```
+
+``` sql
+SHOW DATABASES
+```
+
+``` text
+┌─name─────┐
+│ default  │
+│ mysql_db │
+│ system   │
+└──────────┘
+```
+
+``` sql
+SHOW TABLES FROM mysql_db
+```
+
+``` text
+┌─name────────┐
+│ mysql_table │
+└─────────────┘
+```
+
+``` sql
+SELECT * FROM mysql_db.mysql_table
+```
+
+``` text
+┌─int_id─┬─value─┐
+│      1 │     2 │
+└────────┴───────┘
+```
+
+``` sql
+INSERT INTO mysql_db.mysql_table VALUES (3,4)
+```
+
+``` sql
+SELECT * FROM mysql_db.mysql_table
+```
+
+``` text
+┌─int_id─┬─value─┐
+│      1 │     2 │
+│      3 │     4 │
+└────────┴───────┘
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/database_engines/mysql/)
diff --git a/docs/tr/engines/index.md b/docs/tr/engines/index.md
new file mode 100644
index 00000000000..48004afa1c8
--- /dev/null
+++ b/docs/tr/engines/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: Motorlar
+toc_priority: 25
+---
+
+
diff --git a/docs/tr/engines/table_engines/index.md b/docs/tr/engines/table_engines/index.md
new file mode 100644
index 00000000000..fad3823453c
--- /dev/null
+++ b/docs/tr/engines/table_engines/index.md
@@ -0,0 +1,85 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "Masa Motorlar\u0131"
+toc_priority: 26
+toc_title: "Giri\u015F"
+---
+
+# Masa Motorları {#table_engines}
+
+Tablo motoru (tablo türü) şunları belirler:
+
+- Verilerin nasıl ve nerede depolandığını, nereye yazılacağını ve nereden okunacağını.
+- Hangi sorguların desteklendiğini ve nasıl desteklendiğini.
+- Eşzamanlı veri erişimini.
+- Varsa, indekslerin kullanımını.
+- Çok iş parçacıklı istek yürütmenin mümkün olup olmadığını.
+- Veri çoğaltma parametrelerini.
+
+## Motor Aileleri {#engine-families}
+
+### MergeTree {#mergetree}
+
+Yüksek yüklü görevler için en evrensel ve işlevsel tablo motorlarıdır. Bu motorların ortak özelliği, hızlı veri ekleme ve ardından arka planda veri işlemedir. `MergeTree` ailesi motorları veri çoğaltmayı (motorların [Replicated\*](mergetree_family/replication.md) sürümleriyle), bölümlemeyi ve diğer motorlarda desteklenmeyen başka özellikleri destekler.
+
+Ailedeki motorlar:
+
+- [MergeTree](mergetree_family/mergetree.md)
+- [ReplacingMergeTree](mergetree_family/replacingmergetree.md)
+- [SummingMergeTree](mergetree_family/summingmergetree.md)
+- [AggregatingMergeTree](mergetree_family/aggregatingmergetree.md)
+- [CollapsingMergeTree](mergetree_family/collapsingmergetree.md)
+- [VersionedCollapsingMergeTree](mergetree_family/versionedcollapsingmergetree.md)
+- [GraphiteMergeTree](mergetree_family/graphitemergetree.md)
+
+### Günlük {#log}
+
+Minimum işlevselliğe sahip hafif [motorlar](log_family/index.md). 
Birçok küçük tabloyu (yaklaşık 1 milyon satıra kadar) hızlı bir şekilde yazmanız ve daha sonra bir bütün olarak okumanız gerektiğinde en etkili olanlardır.
+
+Ailedeki motorlar:
+
+- [TinyLog](log_family/tinylog.md)
+- [StripeLog](log_family/stripelog.md)
+- [Günlük](log_family/log.md)
+
+### Entegrasyon Motorları {#integration-engines}
+
+Diğer veri depolama ve işleme sistemleriyle iletişim kurmak için motorlar.
+
+Ailedeki motorlar:
+
+- [Kafka](integrations/kafka.md)
+- [MySQL](integrations/mysql.md)
+- [ODBC](integrations/odbc.md)
+- [JDBC](integrations/jdbc.md)
+- [HDFS](integrations/hdfs.md)
+
+### Özel Motorlar {#special-engines}
+
+Ailedeki motorlar:
+
+- [Dağılı](special/distributed.md)
+- [MaterializedView](special/materializedview.md)
+- [Sözlük](special/dictionary.md)
+- [Birleştirmek](special/merge.md)
+- [Dosya](special/file.md)
+- [Boş](special/null.md)
+- [Koymak](special/set.md)
+- [Katmak](special/join.md)
+- [URL](special/url.md)
+- [Görünüm](special/view.md)
+- [Bellek](special/memory.md)
+- [Arabellek](special/buffer.md)
+
+## Sanal Sütunlar {#table_engines-virtual-columns}
+
+Sanal sütun, motorun kaynak kodunda tanımlanan ayrılmaz bir tablo altyapısı özniteliğidir.
+
+Sanal sütunları `CREATE TABLE` sorgusunda belirtmemelisiniz ve onları `SHOW CREATE TABLE` ile `DESCRIBE TABLE` sorgu sonuçlarında göremezsiniz. Sanal sütunlar ayrıca salt okunurdur, bu nedenle sanal sütunlara veri ekleyemezsiniz.
+
+Sanal bir sütundan veri seçmek için adını `SELECT` sorgusunda belirtmeniz gerekir. `SELECT *` sanal sütunlardan değer döndürmez.
+
+Tablonun sanal sütunlarından biriyle aynı ada sahip bir sütun içeren bir tablo oluşturursanız, sanal sütuna erişilemez hale gelir. Bunu yapmanızı önermiyoruz. Çakışmaları önlemek için sanal sütun adları genellikle bir alt çizgi ile öneklenir.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/)
diff --git a/docs/tr/engines/table_engines/integrations/hdfs.md b/docs/tr/engines/table_engines/integrations/hdfs.md
new file mode 100644
index 00000000000..42e1e31b80f
--- /dev/null
+++ b/docs/tr/engines/table_engines/integrations/hdfs.md
@@ -0,0 +1,123 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 36
+toc_title: HDFS
+---
+
+# HDFS {#table_engines-hdfs}
+
+Bu motor, [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ekosistemiyle entegrasyon sağlar ve [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) üzerindeki verilerin ClickHouse aracılığıyla yönetilmesine izin verir. Bu motor, [Dosya](../special/file.md) ve [URL](../special/url.md) motorlarına benzer, ancak Hadoop'a özgü özellikler sağlar.
+
+## Kullanma {#usage}
+
+``` sql
+ENGINE = HDFS(URI, format)
+```
+
+`URI` parametresi, HDFS'deki dosyanın tam URI'sidir.
+`format` parametresi kullanılabilir dosya biçimlerinden birini belirtir. `SELECT` sorguları gerçekleştirebilmek için biçimin giriş için, `INSERT` sorguları gerçekleştirebilmek için ise çıkış için desteklenmesi gerekir. Kullanılabilir biçimler [Biçimliler](../../../interfaces/formats.md#formats) bölümünde listelenmiştir.
+`URI`'nin yol kısmı glob'lar içerebilir. Bu durumda tablo salt okunur olur. 
+
+**Örnek:**
+
+**1.** `hdfs_engine_table` tablosunu oluşturun:
+
+``` sql
+CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')
+```
+
+**2.** Dosyayı doldurun:
+
+``` sql
+INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
+```
+
+**3.** Verileri sorgulayın:
+
+``` sql
+SELECT * FROM hdfs_engine_table LIMIT 2
+```
+
+``` text
+┌─name─┬─value─┐
+│ one  │     1 │
+│ two  │     2 │
+└──────┴───────┘
+```
+
+## Uygulama Detayları {#implementation-details}
+
+- Okuma ve yazma paralel olabilir
+- Desteklenmeyenler:
+    - `ALTER` ve `SELECT...SAMPLE` işlemleri.
+    - Dizinler.
+    - Çoğaltma.
+
+**Yolda Glob'lar**
+
+Birden çok yol bileşeni glob içerebilir. İşlenebilmesi için dosyanın var olması ve tüm yol deseniyle eşleşmesi gerekir. Dosyaların listelenmesi `SELECT` sırasında belirlenir (`CREATE` anında değil).
+
+- `*` — Substitutes any number of any characters except `/` boş dize dahil.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in range from N to M including both borders.
+
+`{}` içeren yapılar, [uzak](../../../sql_reference/table_functions/remote.md) tablo işlevindekilere benzer.
+
+**Örnek**
+
+1.  HDFS'de aşağıdaki URI'lere sahip, TSV formatında birkaç dosyamız olduğunu varsayalım:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+2.  Altı dosyanın tamamından oluşan bir tablo oluşturmanın birkaç yolu vardır:
+
+
+
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV')
+```
+
+Başka bir yol:
+
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV')
+```
+
+Tablo, her iki dizindeki tüm dosyalardan oluşur (tüm dosyalar, sorguda açıklanan biçimi ve şemayı karşılamalıdır):
+
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
+```
+
+!!! warning "Uyarı"
+    Dosyaların listesi, başında sıfır bulunan sayı aralıkları içeriyorsa, her basamak için süslü parantezli yapıyı ayrı ayrı kullanın veya `?` kullanın.
+
+**Örnek**
+
+`file000`, `file001`, … , `file999` adlı dosyaları içeren tablo oluşturma:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
+```
+
+## Sanal Sütunlar {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file. 
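+
+Sanal sütunların kullanımına dair küçük bir taslak (yukarıdaki `hdfs_engine_table` örneği üzerinden; sütun adları yukarıdaki örnekteki gibi varsayılmıştır):
+
+``` sql
+-- Her satır için kaynağın dosya yolunu ve dosya adını da döndürür
+SELECT name, value, _path, _file FROM hdfs_engine_table LIMIT 2
+```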
+ +**Ayrıca Bakınız** + +- [Sanal sütunlar](../index.md#table_engines-virtual_columns) + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) diff --git a/docs/tr/engines/table_engines/integrations/index.md b/docs/tr/engines/table_engines/integrations/index.md new file mode 100644 index 00000000000..608fc900e62 --- /dev/null +++ b/docs/tr/engines/table_engines/integrations/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: Entegrasyonlar +toc_priority: 30 +--- + + diff --git a/docs/tr/engines/table_engines/integrations/jdbc.md b/docs/tr/engines/table_engines/integrations/jdbc.md new file mode 100644 index 00000000000..08196116cf8 --- /dev/null +++ b/docs/tr/engines/table_engines/integrations/jdbc.md @@ -0,0 +1,90 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 34 +toc_title: JDBC +--- + +# JDBC {#table-engine-jdbc} + +ClickHouse üzerinden harici veritabanlarına bağlanmak için izin verir [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity). + +JDBC bağlantısını uygulamak için ClickHouse ayrı programı kullanır [clickhouse-JDBC-köprü](https://github.com/alex-krash/clickhouse-jdbc-bridge) bu bir daemon olarak çalışmalıdır. + +Bu motor destekler [Nullable](../../../sql_reference/data_types/nullable.md) veri türü. + +## Tablo oluşturma {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name +( + columns list... +) +ENGINE = JDBC(dbms_uri, external_database, external_table) +``` + +**Motor Parametreleri** + +- `dbms_uri` — URI of an external DBMS. + + Biçimli: `jdbc:://:/?user=&password=`. + MySQL örneği: `jdbc:mysql://localhost:3306/?user=root&password=root`. + +- `external_database` — Database in an external DBMS. + +- `external_table` — Name of the table in `external_database`. + +## Kullanım Örneği {#usage-example} + +Doğrudan konsol istemcisine bağlanarak MySQL sunucusunda bir tablo oluşturma: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +ClickHouse Server'da bir tablo oluşturma ve ondan veri seçme: + +``` sql +CREATE TABLE jdbc_table +( + `int_id` Int32, + `int_nullable` Nullable(Int32), + `float` Float32, + `float_nullable` Nullable(Float32) +) +ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test') +``` + +``` sql +SELECT * +FROM jdbc_table +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## Ayrıca Bakınız {#see-also} + +- [JDBC tablo işlevi](../../../sql_reference/table_functions/jdbc.md). 
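+
+Bir tablo motoru tanımlamadan tek seferlik sorgular için `jdbc` tablo işlevi de kullanılabilir; aynı `clickhouse-jdbc-bridge` sürecinin çalıştığı varsayımıyla küçük bir taslak:
+
+``` sql
+-- Parametreler yukarıdaki motor örneğiyle aynıdır (URI, veritabanı, tablo)
+SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test')
+```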
+ +[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) diff --git a/docs/tr/engines/table_engines/integrations/kafka.md b/docs/tr/engines/table_engines/integrations/kafka.md new file mode 100644 index 00000000000..baf0adcd080 --- /dev/null +++ b/docs/tr/engines/table_engines/integrations/kafka.md @@ -0,0 +1,176 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 32 +toc_title: Kafka +--- + +# Kafka {#kafka} + +Bu motor ile çalışır [Apache Kafka](http://kafka.apache.org/). + +Kafka sağlar: + +- Veri akışlarını yayınlayın veya abone olun. +- Hataya dayanıklı depolama düzenlemek. +- Kullanılabilir hale geldikçe akışları işleyin. + +## Tablo oluşturma {#table_engine-kafka-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = Kafka() +SETTINGS + kafka_broker_list = 'host:port', + kafka_topic_list = 'topic1,topic2,...', + kafka_group_name = 'group_name', + kafka_format = 'data_format'[,] + [kafka_row_delimiter = 'delimiter_symbol',] + [kafka_schema = '',] + [kafka_num_consumers = N,] + [kafka_skip_broken_messages = N] +``` + +Gerekli parametreler: + +- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`). +- `kafka_topic_list` – A list of Kafka topics. +- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere. +- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` fonksiyon gibi `JSONEachRow`. Daha fazla bilgi için, bkz: [Biçimliler](../../../interfaces/formats.md) bölme. + +İsteğe bağlı parametreler: + +- `kafka_row_delimiter` – Delimiter character, which ends the message. +- `kafka_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap'n Proto](https://capnproto.org/) şema dosyasının yolunu ve kök adını gerektirir `schema.capnp:Message` nesne. +- `kafka_num_consumers` – The number of consumers per table. Default: `1`. Bir tüketicinin verimi yetersizse daha fazla tüketici belirtin. Bölüm başına yalnızca bir tüketici atanabileceğinden, toplam tüketici sayısı konudaki bölüm sayısını geçmemelidir. +- `kafka_skip_broken_messages` – Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. Eğer `kafka_skip_broken_messages = N` sonra motor atlar *N* Ayrıştırılamayan Kafka iletileri (bir ileti bir veri satırına eşittir). + +Örnekler: + +``` sql + CREATE TABLE queue ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); + + SELECT * FROM queue LIMIT 5; + + CREATE TABLE queue2 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092', + kafka_topic_list = 'topic', + kafka_group_name = 'group1', + kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; + + CREATE TABLE queue2 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1') + SETTINGS kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; +``` + +
    + +Bir tablo oluşturmak için kullanımdan kaldırılan yöntem + +!!! attention "Dikkat" + Bu yöntemi yeni projelerde kullanmayın. Mümkünse, eski projeleri yukarıda açıklanan yönteme geçin. + +``` sql +Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format + [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages]) +``` + +
+
+## Açıklama {#description}
+
+Teslim edilen mesajlar otomatik olarak izlenir, bu nedenle bir gruptaki her mesaj yalnızca bir kez sayılır. Verileri iki kez almak istiyorsanız, tablonun başka bir grup adıyla bir kopyasını oluşturun.
+
+Gruplar esnektir ve kümede senkronize edilir. Örneğin, bir kümede 10 konu ve bir tablonun 5 kopyası varsa, her kopya 2 konu alır. Kopya sayısı değişirse, konular kopyalar arasında otomatik olarak yeniden dağıtılır. Bu konuda daha fazla bilgiyi http://kafka.apache.org/intro adresinde bulabilirsiniz.
+
+`SELECT`, mesajları okumak için özellikle yararlı değildir (hata ayıklama hariç), çünkü her mesaj yalnızca bir kez okunabilir. Somutlaştırılmış görünümler kullanarak gerçek zamanlı iş parçacıkları oluşturmak daha pratiktir. Bunu yapmak için:
+
+1.  Bir Kafka tüketicisi oluşturmak için motoru kullanın ve bunu bir veri akışı olarak düşünün.
+2.  İstenen yapıya sahip bir tablo oluşturun.
+3.  Verileri motordan dönüştüren ve daha önce oluşturulmuş tabloya koyan somutlaştırılmış bir görünüm oluşturun.
+
+Bir `MATERIALIZED VIEW` motora bağlandığında, arka planda veri toplamaya başlar. Bu, Kafka'dan sürekli olarak mesaj almanızı ve `SELECT` kullanarak bunları gerekli biçime dönüştürmenizi sağlar.
+Bir Kafka tablosu istediğiniz kadar somutlaştırılmış görünüme sahip olabilir; bunlar Kafka tablosundan doğrudan veri okumaz, yeni kayıtları (bloklar halinde) alır. Bu şekilde farklı ayrıntı düzeyine sahip birkaç tabloya yazabilirsiniz (gruplama-toplama ile ve gruplamasız).
+
+Örnek:
+
+``` sql
+  CREATE TABLE queue (
+    timestamp UInt64,
+    level String,
+    message String
+  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');
+
+  CREATE TABLE daily (
+    day Date,
+    level String,
+    total UInt64
+  ) ENGINE = SummingMergeTree(day, (day, level), 8192);
+
+  CREATE MATERIALIZED VIEW consumer TO daily
+    AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total
+    FROM queue GROUP BY day, level;
+
+  SELECT level, sum(total) FROM daily GROUP BY level;
+```
+
+Performansı artırmak için, alınan iletiler [max\_insert\_block\_size](../../../operations/server_configuration_parameters/settings.md#settings-max_insert_block_size) boyutundaki bloklar halinde gruplandırılır. Blok, [stream\_flush\_interval\_ms](../../../operations/server_configuration_parameters/settings.md) milisaniye içinde oluşmadıysa, bloğun bütünlüğü ne olursa olsun veriler tabloya boşaltılır.
+
+Konu verilerini almayı durdurmak veya dönüşüm mantığını değiştirmek için, somutlaştırılmış görünümü ayırın:
+
+``` sql
+  DETACH TABLE consumer;
+  ATTACH MATERIALIZED VIEW consumer;
+```
+
+Hedef tabloyu `ALTER` kullanarak değiştirmek istiyorsanız, hedef tablo ile görünümdeki veriler arasındaki tutarsızlıkları önlemek için somutlaştırılmış görünümü devre dışı bırakmanızı öneririz.
+
+## Yapılandırma {#configuration}
+
+GraphiteMergeTree'ye benzer şekilde, Kafka motoru ClickHouse yapılandırma dosyası üzerinden genişletilmiş yapılandırmayı destekler. Kullanabileceğiniz iki yapılandırma anahtarı vardır: global (`kafka`) ve konu düzeyinde (`kafka_*`). Önce genel yapılandırma, ardından (varsa) konu düzeyindeki yapılandırma uygulanır.
+
+``` xml
+  <kafka>
+    <debug>cgrp</debug>
+    <auto_offset_reset>smallest</auto_offset_reset>
+  </kafka>
+
+  <kafka_logs>
+    <retry_backoff_ms>250</retry_backoff_ms>
+    <fetch_min_bytes>100000</fetch_min_bytes>
+  </kafka_logs>
+```
+
+Olası yapılandırma seçeneklerinin listesi için bkz. [librdkafka yapılandırma referansı](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). ClickHouse yapılandırmasında nokta yerine alt çizgi (`_`) kullanın. Örneğin, `check.crcs=true` şu hale gelir: `<check_crcs>true</check_crcs>`.
+
+## Sanal Sütunlar {#virtual-columns}
+
+- `_topic` — Kafka topic.
+- `_key` — Key of the message. 
+- `_offset` — Offset of the message. +- `_timestamp` — Timestamp of the message. +- `_partition` — Partition of Kafka topic. + +**Ayrıca Bakınız** + +- [Sanal sütunlar](../index.md#table_engines-virtual_columns) + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) diff --git a/docs/tr/engines/table_engines/integrations/mysql.md b/docs/tr/engines/table_engines/integrations/mysql.md new file mode 100644 index 00000000000..f6c811465e6 --- /dev/null +++ b/docs/tr/engines/table_engines/integrations/mysql.md @@ -0,0 +1,105 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 33 +toc_title: MySQL +--- + +# Mysql {#mysql} + +MySQL motoru gerçekleştirmek için izin verir `SELECT` uzak bir MySQL sunucusunda depolanan veriler üzerinde sorgular. + +## Tablo oluşturma {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + ... +) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); +``` + +Ayrıntılı bir açıklamasını görmek [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) sorgu. + +Tablo yapısı orijinal MySQL tablo yapısından farklı olabilir: + +- Sütun adları orijinal MySQL tablosundaki ile aynı olmalıdır, ancak bu sütunların sadece bazılarını ve herhangi bir sırada kullanabilirsiniz. +- Sütun türleri orijinal MySQL tablosundakilerden farklı olabilir. ClickHouse çalışır [döküm](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) ClickHouse veri türleri için değerler. + +**Motor Parametreleri** + +- `host:port` — MySQL server address. + +- `database` — Remote database name. + +- `table` — Remote table name. + +- `user` — MySQL user. + +- `password` — User password. + +- `replace_query` — Flag that converts `INSERT INTO` için sorgular `REPLACE INTO`. Eğer `replace_query=1`, sorgu değiştirilir. + +- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` eklenen ifade `INSERT` sorgu. + + Örnek: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, nere `on_duplicate_clause` oluyor `UPDATE c2 = c2 + 1`. Görmek [MySQL dökü documentationmanları](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) bulmak için hangi `on_duplicate_clause` ile kullanabilirsiniz `ON DUPLICATE KEY` yan. + + Belirtmek `on_duplicate_clause` sen geçmek gerekir `0` to the `replace_query` parametre. Aynı anda geçerseniz `replace_query = 1` ve `on_duplicate_clause`, ClickHouse bir özel durum oluşturur. + +Basit `WHERE` gibi maddeler `=, !=, >, >=, <, <=` MySQL sunucusunda yürütülür. + +Geri kalan şartlar ve `LIMIT` örnekleme kısıtlaması, yalnızca MySQL sorgusu bittikten sonra Clickhouse'da yürütülür. 
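+
+Bu davranışı gösteren küçük bir taslak (aşağıdaki kullanım örneğinde oluşturulan `mysql_table` tablosu varsayılmıştır):
+
+``` sql
+-- '=' karşılaştırması MySQL tarafında yürütülür; LIMIT ise sonuçlar
+-- ClickHouse'a ulaştıktan sonra uygulanır
+SELECT * FROM mysql_table WHERE int_id = 1 LIMIT 10
+```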
+ +## Kullanım Örneği {#usage-example} + +MySQL tablo: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +Clickhouse'daki tablo, yukarıda oluşturulan MySQL tablosundan veri alma: + +``` sql +CREATE TABLE mysql_table +( + `float_nullable` Nullable(Float32), + `int_id` Int32 +) +ENGINE = MySQL('localhost:3306', 'test', 'test', 'bayonet', '123') +``` + +``` sql +SELECT * FROM mysql_table +``` + +``` text +┌─float_nullable─┬─int_id─┐ +│ ᴺᵁᴸᴸ │ 1 │ +└────────────────┴────────┘ +``` + +## Ayrıca Bakınız {#see-also} + +- [Bu ‘mysql’ tablo fonksiyonu](../../../sql_reference/table_functions/mysql.md) +- [Harici sözlük kaynağı olarak MySQL kullanma](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/mysql/) diff --git a/docs/tr/engines/table_engines/integrations/odbc.md b/docs/tr/engines/table_engines/integrations/odbc.md new file mode 100644 index 00000000000..86ded26587a --- /dev/null +++ b/docs/tr/engines/table_engines/integrations/odbc.md @@ -0,0 +1,132 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 35 +toc_title: ODBC +--- + +# ODBC {#table-engine-odbc} + +ClickHouse üzerinden harici veritabanlarına bağlanmak için izin verir [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). + +ODBC bağlantılarını güvenli bir şekilde uygulamak için ClickHouse ayrı bir program kullanır `clickhouse-odbc-bridge`. ODBC sürücüsü doğrudan yüklenmişse `clickhouse-server`, sürücü sorunları ClickHouse sunucu çökmesine neden olabilir. ClickHouse otomatik olarak başlar `clickhouse-odbc-bridge` gerekli olduğunda. ODBC Köprüsü programı aynı paketten yüklenir `clickhouse-server`. + +Bu motor destekler [Nullable](../../../sql_reference/data_types/nullable.md) veri türü. + +## Tablo oluşturma {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1], + name2 [type2], + ... +) +ENGINE = ODBC(connection_settings, external_database, external_table) +``` + +Ayrıntılı bir açıklamasını görmek [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) sorgu. + +Tablo yapısı kaynak tablo yapısından farklı olabilir: + +- Sütun adları kaynak tablodaki ile aynı olmalıdır, ancak yalnızca bu sütunlardan bazılarını ve herhangi bir sırada kullanabilirsiniz. +- Sütun türleri kaynak tablodakilerden farklı olabilir. ClickHouse çalışır [döküm](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) ClickHouse veri türleri için değerler. + +**Motor Parametreleri** + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` Dosya. +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. 
+
+## Kullanım Örneği {#usage-example}
+
+**ODBC üzerinden yerel MySQL kurulumundan veri alma**
+
+Bu örnek Ubuntu Linux 18.04 ve MySQL server 5.7 üzerinde doğrulanmıştır.
+
+UnixODBC ve MySQL Connector'ın yüklü olduğundan emin olun.
+
+Varsayılan olarak (paketlerden yüklendiyse) ClickHouse, `clickhouse` kullanıcısı olarak başlar. Bu nedenle, bu kullanıcıyı MySQL sunucusunda oluşturmanız ve yapılandırmanız gerekir.
+
+``` bash
+$ sudo mysql
+```
+
+``` sql
+mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
+```
+
+Sonra bağlantıyı `/etc/odbc.ini` içinde yapılandırın.
+
+``` bash
+$ cat /etc/odbc.ini
+[mysqlconn]
+DRIVER = /usr/local/lib/libmyodbc5w.so
+SERVER = 127.0.0.1
+PORT = 3306
+DATABASE = test
+USERNAME = clickhouse
+PASSWORD = clickhouse
+```
+
+Bağlantıyı, unixODBC kurulumundaki `isql` yardımcı programını kullanarak kontrol edebilirsiniz.
+
+``` bash
+$ isql -v mysqlconn
++-------------------------+
+| Connected!                            |
+|                                       |
+...
+```
+
+MySQL'deki tablo:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++--------+--------------+-------+----------------+
+| int_id | int_nullable | float | float_nullable |
++--------+--------------+-------+----------------+
+|      1 |         NULL |     2 |           NULL |
++--------+--------------+-------+----------------+
+1 row in set (0,00 sec)
+```
+
+MySQL tablosundan veri alan Clickhouse'daki tablo:
+
+``` sql
+CREATE TABLE odbc_t
+(
+    `int_id` Int32,
+    `float_nullable` Nullable(Float32)
+)
+ENGINE = ODBC('DSN=mysqlconn', 'test', 'test')
+```
+
+``` sql
+SELECT * FROM odbc_t
+```
+
+``` text
+┌─int_id─┬─float_nullable─┐
+│      1 │           ᴺᵁᴸᴸ │
+└────────┴────────────────┘
+```
+
+## Ayrıca Bakınız {#see-also}
+
+- [ODBC harici sözlükler](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
+- [ODBC tablo işlevi](../../../sql_reference/table_functions/odbc.md)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/odbc/)
diff --git a/docs/tr/engines/table_engines/log_family/index.md b/docs/tr/engines/table_engines/log_family/index.md
new file mode 100644
index 00000000000..062087a5874
--- /dev/null
+++ b/docs/tr/engines/table_engines/log_family/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "G\xFCnl\xFCk Aile"
+toc_priority: 29
+---
+
+
diff --git a/docs/tr/engines/table_engines/log_family/log.md b/docs/tr/engines/table_engines/log_family/log.md
new file mode 100644
index 00000000000..ca1f8c4c1f4
--- /dev/null
+++ b/docs/tr/engines/table_engines/log_family/log.md
@@ -0,0 +1,16 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 33
+toc_title: "G\xFCnl\xFCk"
+---
+
+# Günlük {#log}
+
+Motor, günlük motorları ailesine aittir. Günlük motorlarının ortak özellikleri ve farklılıkları için [Log Engine Ailesi](log_family.md) makalesine bakın.
+
+Günlük (Log), [TinyLog](tinylog.md) motorundan, “işaretleri” (marks) içeren küçük bir dosyanın sütun dosyalarıyla birlikte bulunmasıyla ayrılır. 
Bu işaretler her veri bloğuna yazılır ve belirtilen satır sayısını atlamak için dosyayı okumaya nereden başlayacağınızı gösteren uzaklıklar (offset) içerir. Bu, tablo verilerini birden çok iş parçacığında okumayı mümkün kılar.
+Eşzamanlı veri erişiminde okuma işlemleri aynı anda gerçekleştirilebilirken, yazma işlemleri okumaları ve birbirlerini engeller.
+Günlük altyapısı dizinleri desteklemez. Benzer şekilde, bir tabloya yazma başarısız olursa tablo bozulur ve okuma bir hata döndürür. Günlük altyapısı geçici veriler, bir kez yazılan tablolar ve sınama veya gösterim amaçları için uygundur.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/log/)
diff --git a/docs/tr/engines/table_engines/log_family/log_family.md b/docs/tr/engines/table_engines/log_family/log_family.md
new file mode 100644
index 00000000000..2a954c60def
--- /dev/null
+++ b/docs/tr/engines/table_engines/log_family/log_family.md
@@ -0,0 +1,46 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 31
+toc_title: "Giri\u015F"
---
+
+# Log Engine Ailesi {#log-engine-family}
+
+Bu motorlar, birçok küçük tabloyu (yaklaşık 1 milyon satıra kadar) hızlı bir şekilde yazmanız ve daha sonra bir bütün olarak okumanız gereken senaryolar için geliştirilmiştir.
+
+Ailenin motorları:
+
+- [StripeLog](stripelog.md)
+- [Günlük](log.md)
+- [TinyLog](tinylog.md)
+
+## Ortak Özellikler {#common-properties}
+
+Motorlar:
+
+- Verileri bir diskte saklar.
+
+- Yazarken veriyi dosyanın sonuna ekler.
+
+- Eşzamanlı veri erişimi için kilitleri destekler.
+
+    `INSERT` sorguları sırasında tablo kilitlenir; veri okuyan ve yazan diğer sorgular tablonun kilidinin açılmasını bekler. Veri yazan sorgu yoksa, herhangi bir sayıda veri okuma sorgusu aynı anda gerçekleştirilebilir.
+
+- [Mutasyon](../../../sql_reference/statements/alter.md#alter-mutations) işlemlerini desteklemez.
+
+- Dizinleri desteklemez.
+
+    Bu, veri aralıkları için `SELECT` sorgularının verimli olmadığı anlamına gelir.
+
+- Veriyi atomik olarak yazmaz.
+
+    Bir şey yazma işlemini bozarsa (örneğin anormal sunucu kapanması), bozuk verilere sahip bir tablo elde edebilirsiniz.
+
+## Farklılıklar {#differences}
+
+`TinyLog` motoru ailenin en basitidir; en az işlevselliği ve en düşük verimliliği sağlar. `TinyLog` motoru, birkaç iş parçacığıyla paralel veri okumayı desteklemez. Verileri, paralel okumayı destekleyen ailedeki diğer motorlardan daha yavaş okur ve her sütunu ayrı bir dosyada sakladığı için neredeyse `Log` motoru kadar çok dosya tanımlayıcısı kullanır. Yalnızca basit, düşük yüklü senaryolarda kullanın.
+
+`Log` ve `StripeLog` motorları paralel veri okumayı destekler. Veri okurken ClickHouse birden çok iş parçacığı kullanır. Her iş parçacığı ayrı bir veri bloğunu işler. `Log` motoru, tablonun her sütunu için ayrı bir dosya kullanır. `StripeLog` tüm verileri tek bir dosyada saklar. Sonuç olarak `StripeLog` motoru işletim sisteminde daha az tanımlayıcı kullanır, ancak `Log` motoru veri okurken daha yüksek verimlilik sağlar. 
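+
+Aile motorlarının tipik kullanımını gösteren küçük bir taslak (varsayımsal bir tablo adıyla):
+
+``` sql
+-- Her sütunu ayrı bir dosyada saklayan Log motoruyla küçük bir tablo
+CREATE TABLE log_example (id UInt64, msg String) ENGINE = Log;
+
+INSERT INTO log_example VALUES (1, 'first'), (2, 'second');
+
+-- Log ve StripeLog okumayı paralelleştirebilir; TinyLog tek akışta okur
+SELECT * FROM log_example ORDER BY id;
+```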
+ +[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) diff --git a/docs/tr/engines/table_engines/log_family/stripelog.md b/docs/tr/engines/table_engines/log_family/stripelog.md new file mode 100644 index 00000000000..1ff251c3ee0 --- /dev/null +++ b/docs/tr/engines/table_engines/log_family/stripelog.md @@ -0,0 +1,95 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 32 +toc_title: StripeLog +--- + +# Stripelog {#stripelog} + +Bu motor günlük motor ailesine aittir. Günlük motorlarının ortak özelliklerini ve farklılıklarını görün [Log Engine Ailesi](log_family.md) makale. + +Az miktarda veri içeren (1 milyondan az satır) birçok tablo yazmanız gerektiğinde, bu altyapıyı senaryolarda kullanın. + +## Tablo oluşturma {#table_engines-stripelog-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = StripeLog +``` + +Ayrıntılı açıklamasına bakın [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) sorgu. + +## Veri yazma {#table_engines-stripelog-writing-the-data} + +Bu `StripeLog` motor tüm sütunları tek bir dosyada saklar. Her biri için `INSERT` sorgu, ClickHouse veri bloğunu bir tablo dosyasının sonuna ekler, sütunları tek tek yazar. + +Her tablo için ClickHouse dosyaları yazar: + +- `data.bin` — Data file. +- `index.mrk` — File with marks. Marks contain offsets for each column of each data block inserted. + +Bu `StripeLog` motor desteklemiyor `ALTER UPDATE` ve `ALTER DELETE` harekat. + +## Verileri okuma {#table_engines-stripelog-reading-the-data} + +İşaretli dosya, Clickhouse'un verilerin okunmasını paralelleştirmesine izin verir. Bu demektir `SELECT` sorgu satırları öngörülemeyen bir sırayla döndürür. Kullan... `ORDER BY` satırları sıralamak için yan tümce. + +## Kullanım Örneği {#table_engines-stripelog-example-of-use} + +Tablo oluşturma: + +``` sql +CREATE TABLE stripe_log_table +( + timestamp DateTime, + message_type String, + message String +) +ENGINE = StripeLog +``` + +Veri ekleme: + +``` sql +INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message') +INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message') +``` + +İki kullandık `INSERT` içinde iki veri bloğu oluşturmak için sorgular `data.bin` Dosya. + +ClickHouse veri seçerken birden çok iş parçacığı kullanır. Her iş parçacığı ayrı bir veri bloğu okur ve sonuç olarak satırları bağımsız olarak döndürür. Sonuç olarak, çıktıdaki satır bloklarının sırası, çoğu durumda girişteki aynı blokların sırasına uymuyor. 
Mesela:
+
+``` sql
+SELECT * FROM stripe_log_table
+```
+
+``` text
+┌───────────timestamp─┬─message_type─┬─message────────────────────┐
+│ 2019-01-18 14:27:32 │ REGULAR      │ The second regular message │
+│ 2019-01-18 14:34:53 │ WARNING      │ The first warning message  │
+└─────────────────────┴──────────────┴────────────────────────────┘
+┌───────────timestamp─┬─message_type─┬─message───────────────────┐
+│ 2019-01-18 14:23:43 │ REGULAR      │ The first regular message │
+└─────────────────────┴──────────────┴───────────────────────────┘
+```
+
+Sonuçları sıralama (varsayılan olarak artan düzende):
+
+``` sql
+SELECT * FROM stripe_log_table ORDER BY timestamp
+```
+
+``` text
+┌───────────timestamp─┬─message_type─┬─message────────────────────┐
+│ 2019-01-18 14:23:43 │ REGULAR      │ The first regular message  │
+│ 2019-01-18 14:27:32 │ REGULAR      │ The second regular message │
+│ 2019-01-18 14:34:53 │ WARNING      │ The first warning message  │
+└─────────────────────┴──────────────┴────────────────────────────┘
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/)
diff --git a/docs/tr/engines/table_engines/log_family/tinylog.md b/docs/tr/engines/table_engines/log_family/tinylog.md
new file mode 100644
index 00000000000..e4eccf220b1
--- /dev/null
+++ b/docs/tr/engines/table_engines/log_family/tinylog.md
@@ -0,0 +1,16 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 34
+toc_title: TinyLog
+---
+
+# TinyLog {#tinylog}
+
+Motor, log engine ailesine aittir. Günlük motorlarının ortak özellikleri ve farklılıkları için [Log Engine Ailesi](log_family.md) sayfasına bakın.
+
+Bu tablo motoru genellikle write-once yöntemiyle kullanılır: verileri bir kez yazın, ardından gerektiği kadar okuyun. Örneğin, küçük gruplar halinde işlenen ara veriler için `TinyLog` tablolarını kullanabilirsiniz. Çok sayıda küçük tabloda veri depolamanın verimsiz olduğunu unutmayın.
+
+Sorgular tek bir akışta yürütülür. Başka bir deyişle, bu motor nispeten küçük tablolar için tasarlanmıştır (yaklaşık 1.000.000 satıra kadar). Çok sayıda küçük tablonuz varsa bu tablo motorunu kullanmak mantıklıdır, çünkü [Günlük](log.md) motorundan daha basittir (daha az dosyanın açılması gerekir).
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/tinylog/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/aggregatingmergetree.md b/docs/tr/engines/table_engines/mergetree_family/aggregatingmergetree.md
new file mode 100644
index 00000000000..8034be02969
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/aggregatingmergetree.md
@@ -0,0 +1,102 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 35
+toc_title: AggregatingMergeTree
+---
+
+# AggregatingMergeTree {#aggregatingmergetree}
+
+Motor, [MergeTree](mergetree.md#table_engines-mergetree)'den devralır ve veri parçalarını birleştirme mantığını değiştirir. ClickHouse, aynı birincil anahtara (daha doğrusu aynı [sıralama anahtarına](mergetree.md)) sahip tüm satırları, toplama işlevlerinin durumlarının bir kombinasyonunu saklayan tek bir satırla (bir veri parçası içinde) değiştirir.
+
+`AggregatingMergeTree` tablolarını, toplanmış somutlaştırılmış görünümler de dahil olmak üzere artımlı veri toplama için kullanabilirsiniz.
+
+Motor, [AggregateFunction](../../../sql_reference/data_types/aggregatefunction.md) türündeki tüm sütunları işler.
+
+`AggregatingMergeTree` kullanmak, satır sayısını mertebeler düzeyinde azaltıyorsa uygundur. 
+
+## Tablo oluşturma {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = AggregatingMergeTree()
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[TTL expr]
+[SETTINGS name=value, ...]
+```
+
+İstek parametrelerinin açıklaması için [istek açıklamasına](../../../sql_reference/statements/create.md) bakın.
+
+**Sorgu yan tümceleri**
+
+Bir `AggregatingMergeTree` tablosu oluştururken, bir `MergeTree` tablosu oluştururken gereken [yan tümcelerin](mergetree.md) aynıları gereklidir.
    + +Bir tablo oluşturmak için kullanımdan kaldırılan yöntem + +!!! attention "Dikkat" + Bu yöntemi yeni projelerde kullanmayın ve mümkünse eski projeleri yukarıda açıklanan yönteme geçin. + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity) +``` + +Tüm parametreler, aşağıdaki gibi aynı anlama sahiptir `MergeTree`. +
+
+## Seç ve Ekle {#select-and-insert}
+
+Veri eklemek için toplama -State- işlevleri içeren bir [INSERT SELECT](../../../sql_reference/statements/insert_into.md) sorgusu kullanın.
+`AggregatingMergeTree` tablosundan veri seçerken `GROUP BY` yan tümcesini ve veri eklerken kullandığınız toplama işlevlerinin aynılarını, ancak `-Merge` sonekiyle kullanın.
+
+`SELECT` sorgusunun sonuçlarında, `AggregateFunction` türündeki değerler tüm ClickHouse çıktı biçimleri için uygulamaya özgü ikili gösterime sahiptir. Örneğin, veriler `SELECT` sorgusu ile `TabSeparated` biçiminde dökülürse, bu döküm daha sonra `INSERT` sorgusu kullanılarak geri yüklenebilir.
+
+## Toplu bir Somutlaştırılmış Görünüm örneği {#example-of-an-aggregated-materialized-view}
+
+`test.visits` tablosunu izleyen bir `AggregatingMergeTree` somutlaştırılmış görünümü:
+
+``` sql
+CREATE MATERIALIZED VIEW test.basic
+ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
+AS SELECT
+    CounterID,
+    StartDate,
+    sumState(Sign)    AS Visits,
+    uniqState(UserID) AS Users
+FROM test.visits
+GROUP BY CounterID, StartDate;
+```
+
+`test.visits` tablosuna veri ekleme.
+
+``` sql
+INSERT INTO test.visits ...
+```
+
+Veriler hem tabloya hem de görünüme eklenir; `test.basic` görünümü toplama işlemini gerçekleştirir.
+
+Toplanan verileri almak için, `test.basic` görünümünden `SELECT ... GROUP BY ...` gibi bir sorgu yürütmemiz gerekir:
+
+``` sql
+SELECT
+    StartDate,
+    sumMerge(Visits) AS Visits,
+    uniqMerge(Users) AS Users
+FROM test.basic
+GROUP BY StartDate
+ORDER BY StartDate;
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/collapsingmergetree.md b/docs/tr/engines/table_engines/mergetree_family/collapsingmergetree.md
new file mode 100644
index 00000000000..3ef53846f32
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/collapsingmergetree.md
@@ -0,0 +1,309 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 36
+toc_title: CollapsingMergeTree
+---
+
+# CollapsingMergeTree {#table_engine-collapsingmergetree}
+
+Motor, [MergeTree](mergetree.md)'den devralır ve veri parçalarını birleştirme algoritmasına satır çökertme mantığını ekler.
+
+`CollapsingMergeTree`, sıralama anahtarındaki (`ORDER BY`) tüm alanları eşdeğer olan satır çiftlerini, `1` ve `-1` değerlerini alabilen özel `Sign` alanı farklıysa zaman uyumsuz olarak siler (çökertir). Çifti olmayan satırlar tutulur. Daha fazla bilgi için belgenin [Çökme](#table_engine-collapsingmergetree-collapsing) bölümüne bakın.
+
+Motor, depolama hacmini önemli ölçüde azaltabilir ve bunun sonucunda `SELECT` sorgusunun verimliliğini artırabilir.
+
+## Tablo oluşturma {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = CollapsingMergeTree(sign)
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+Sorgu parametrelerinin açıklaması için [sorgu açıklamasına](../../../sql_reference/statements/create.md) bakın.
+
+**CollapsingMergeTree Parametreleri**
+
+- `sign` — Name of the column with the type of row: `1` is a “state” satır, `-1` is a “cancel” satır.
+
+    Column data type — `Int8`. 
+
+**Sorgu yan tümceleri**
+
+Bir `CollapsingMergeTree` tablosu oluştururken, bir `MergeTree` tablosu oluştururken gereken [sorgu yan tümcelerinin](mergetree.md#table_engine-mergetree-creating-a-table) aynıları gereklidir.
+
    + +Bir tablo oluşturmak için kullanımdan kaldırılan yöntem + +!!! attention "Dikkat" + Bu yöntemi yeni projelerde kullanmayın ve mümkünse eski projeleri yukarıda açıklanan yönteme geçin. + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign) +``` + +Hariç tüm parametreler `sign` içinde olduğu gibi aynı anlama sahip `MergeTree`. + +- `sign` — Name of the column with the type of row: `1` — “state” satır, `-1` — “cancel” satır. + + Column Data Type — `Int8`. + +
+
+## Collapsing {#table_engine-collapsingmergetree-collapsing}
+
+### Data {#data}
+
+Consider the situation where you need to save continually changing data for some object. It sounds logical to have one row per object and update it at any change, but the update operation is expensive and slow for a DBMS because it requires rewriting the data in the storage. If you need to write data quickly, updating is not acceptable, but you can write the changes of an object sequentially as follows.
+
+Use the particular column `Sign`. If `Sign = 1`, it means that the row is a state of an object; let's call it the “state” row. If `Sign = -1`, it means the cancellation of the state of an object with the same attributes; let's call it the “cancel” row.
+
+For example, we want to calculate how many pages users checked on some site and how long they were there. At some moment we write the following row with the state of user activity:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+At some moment later we register the change of user activity and write it with the following two rows.
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+The first row cancels the previous state of the object (user). It should copy the sorting key fields of the cancelled state except `Sign`.
+
+The second row contains the current state.
+
+As we need only the last state of user activity, the rows
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │    1 │
+│ 4324182021466249494 │         5 │      146 │   -1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+can be deleted, collapsing the invalid (old) state of an object. `CollapsingMergeTree` does this while merging the data parts.
+
+Why we need two rows for each change is explained in the [Algorithm](#table_engine-collapsingmergetree-collapsing-algorithm) paragraph.
+
+**Peculiar properties of such an approach**
+
+1. The program that writes the data should remember the state of an object to be able to cancel it. The “cancel” row should contain copies of the sorting key fields of the “state” row and the opposite `Sign`. This increases the initial size of storage but allows writing the data quickly.
+2. Long growing arrays in columns reduce the efficiency of the engine due to the load for writing. The more straightforward the data, the higher the efficiency.
+3. The `SELECT` results depend strongly on the consistency of the object changes history. Be accurate when preparing data for inserting. You can get unpredictable results from inconsistent data, for example, negative values for non-negative metrics such as session depth.
+
+### Algorithm {#table_engine-collapsingmergetree-collapsing-algorithm}
+
+When ClickHouse merges data parts, each group of consecutive rows with the same sorting key (`ORDER BY`) is reduced to not more than two rows, one with `Sign = 1` (the “state” row) and another with `Sign = -1` (the “cancel” row). In other words, entries collapse.
+
+For each resulting data part ClickHouse saves:
+
+1. The first “cancel” and the last “state” rows, if the number of “state” and “cancel” rows matches and the last row is a “state” row.
+
+2. The last “state” row, if there are more “state” rows than “cancel” rows.
+
+3. The first “cancel” row, if there are more “cancel” rows than “state” rows.
+
+4. None of the rows, in all other cases.
+
+Also, when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows than “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log. This error can occur if the same data is inserted more than once.
+
+Thus, collapsing should not change the results of calculating statistics.
+Changes are gradually collapsed so that in the end only the last state of almost every object is left.
+
+The `Sign` is required because the merging algorithm does not guarantee that all of the rows with the same sorting key will be in the same resulting data part, or even on the same physical server. ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. Aggregation is required if there is a need to get completely “collapsed” data from a `CollapsingMergeTree` table.
+
+To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and so on, and also add `HAVING sum(Sign) > 0`.
+
+The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` cannot be calculated because `CollapsingMergeTree` does not save the values history of the collapsed states.
+
+If you need to extract data without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is significantly less efficient.
+
+## Example of Use {#example-of-use}
+
+Example data:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │    1 │
+│ 4324182021466249494 │         5 │      146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+Creation of the table:
+
+``` sql
+CREATE TABLE UAct
+(
+    UserID UInt64,
+    PageViews UInt8,
+    Duration UInt8,
+    Sign Int8
+)
+ENGINE = CollapsingMergeTree(Sign)
+ORDER BY UserID
+```
+
+Insertion of the data:
+
+``` sql
+INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1)
+```
+
+``` sql
+INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1)
+```
+
+We use two `INSERT` queries to create two different data parts. If we insert the data with one query, ClickHouse creates one data part and will never perform any merge.
+
+Getting the data:
+
+``` sql
+SELECT * FROM UAct
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+What do we see, and where is the collapsing?
+
+With two `INSERT` queries, we created two data parts. The `SELECT` query was performed in two threads, and we got a random order of rows. Collapsing did not occur because there was no merge of the data parts yet. ClickHouse merges data parts at an unknown moment which we cannot predict.
+
+Thus we need aggregation:
+
+``` sql
+SELECT
+    UserID,
+    sum(PageViews * Sign) AS PageViews,
+    sum(Duration * Sign) AS Duration
+FROM UAct
+GROUP BY UserID
+HAVING sum(Sign) > 0
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┐
+│ 4324182021466249494 │         6 │      185 │
+└─────────────────────┴───────────┴──────────┘
+```
+
+If we do not need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause.
+
+``` sql
+SELECT * FROM UAct FINAL
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+This way of selecting the data is very inefficient. Don't use it for big tables.
+
+## Example of Another Approach {#example-of-another-approach}
+
+Example data:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │    1 │
+│ 4324182021466249494 │        -5 │     -146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+The idea is that merges take into account only the key fields. And in the “cancel” row we can specify negative values that equalize the previous version of the row when summing without using the `Sign` column. For this approach, it is necessary to change the data type of `PageViews` and `Duration` from UInt8 to Int16 to store negative values.
+
+``` sql
+CREATE TABLE UAct
+(
+    UserID UInt64,
+    PageViews Int16,
+    Duration Int16,
+    Sign Int8
+)
+ENGINE = CollapsingMergeTree(Sign)
+ORDER BY UserID
+```
+
+Let's test the approach:
+
+``` sql
+insert into UAct values(4324182021466249494, 5, 146, 1);
+insert into UAct values(4324182021466249494, -5, -146, -1);
+insert into UAct values(4324182021466249494, 6, 185, 1);
+
+select * from UAct final; -- avoid using final in production (just for a test or small tables)
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+``` sql
+SELECT
+    UserID,
+    sum(PageViews) AS PageViews,
+    sum(Duration) AS Duration
+FROM UAct
+GROUP BY UserID
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┐
+│ 4324182021466249494 │         6 │      185 │
+└─────────────────────┴───────────┴──────────┘
+```
+
+``` sql
+select count() FROM UAct
+```
+
+``` text
+┌─count()─┐
+│       3 │
+└─────────┘
+```
+
+``` sql
+optimize table UAct final;
+
+select * FROM UAct
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/custom_partitioning_key.md b/docs/tr/engines/table_engines/mergetree_family/custom_partitioning_key.md
new file mode 100644
index 00000000000..ba09aa08cbb
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/custom_partitioning_key.md
@@ -0,0 +1,127 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 32
+toc_title: "\xD6zel B\xF6l\xFCmleme Anahtar\u0131"
+---
+
+# Custom Partitioning Key {#custom-partitioning-key}
+
+Partitioning is available for the [MergeTree](mergetree.md) family tables (including [replicated](replication.md) tables). [Materialized views](../special/materializedview.md) based on MergeTree tables support partitioning as well.
+
+A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible.
+
+The partition is specified in the `PARTITION BY expr` clause when [creating a table](mergetree.md#table_engine-mergetree-creating-a-table). The partition key can be any expression from the table columns. For example, to specify partitioning by month, use the expression `toYYYYMM(date_column)`:
+
+``` sql
+CREATE TABLE visits
+(
+    VisitDate Date,
+    Hour UInt8,
+    ClientID UUID
+)
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(VisitDate)
+ORDER BY Hour;
+```
+
+The partition key can also be a tuple of expressions (similar to the [primary key](mergetree.md#primary-keys-and-indexes-in-queries)). For example:
+
+``` sql
+ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign)
+PARTITION BY (toMonday(StartDate), EventType)
+ORDER BY (CounterID, StartDate, intHash32(UserID));
+```
+
+In this example, we set partitioning by the event types that occurred during the current week.
+
+When inserting new data to a table, this data is stored as a separate part (chunk) sorted by the primary key. In 10-15 minutes after inserting, the parts of the same partition are merged into the entire part.
+
+!!! info "Info"
+    A merge only works for data parts that have the same value for the partitioning expression. This means **you should not make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors.
+
+Use the [system.parts](../../../operations/system_tables.md#system_tables-parts) table to view the table parts and partitions. For example, let's assume that we have a `visits` table with partitioning by month. Let's perform the `SELECT` query for the `system.parts` table:
+
+``` sql
+SELECT
+    partition,
+    name,
+    active
+FROM system.parts
+WHERE table = 'visits'
+```
+
+``` text
+┌─partition─┬─name───────────┬─active─┐
+│ 201901    │ 201901_1_3_1   │      0 │
+│ 201901    │ 201901_1_9_2   │      1 │
+│ 201901    │ 201901_8_8_0   │      0 │
+│ 201901    │ 201901_9_9_0   │      0 │
+│ 201902    │ 201902_4_6_1   │      1 │
+│ 201902    │ 201902_10_10_0 │      1 │
+│ 201902    │ 201902_11_11_0 │      1 │
+└───────────┴────────────────┴────────┘
+```
+
+The `partition` column contains the names of the partitions. There are two partitions in this example: `201901` and `201902`. You can use this column value to specify the partition name in [ALTER … PARTITION](#alter_manipulations-with-partitions) queries.
+
+The `name` column contains the names of the partition data parts. You can use this column to specify the name of the part in the [ALTER ATTACH PART](#alter_attach-partition) query.
+
+Let's break down the name of the first part: `201901_1_3_1`:
+
+- `201901` is the partition name.
+- `1` is the minimum number of the data block.
+- `3` is the maximum number of the data block.
+- `1` is the chunk level (the depth of the merge tree it is formed from).
+
+!!! info "Info"
+    The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).
+
+The `active` column shows the status of the part. `1` is active; `0` is inactive. Inactive parts are, for example, source parts remaining after merging to a larger part. Corrupted data parts are also indicated as inactive.
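+
+For instance, to list only the parts that currently serve data, you can filter on this column; a minimal sketch against the `visits` table from the example above:
+
+``` sql
+SELECT partition, name, rows
+FROM system.parts
+WHERE table = 'visits' AND active   -- keep only the active parts
+ORDER BY partition, name;
+```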
+
+As you can see in the example, there are several separated parts of the same partition (for example, `201901_1_3_1` and `201901_1_9_2`). This means that these parts are not merged yet. ClickHouse merges the inserted parts of data periodically, approximately 15 minutes after inserting. In addition, you can perform a non-scheduled merge using the [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) query. Example:
+
+``` sql
+OPTIMIZE TABLE visits PARTITION 201902;
+```
+
+``` text
+┌─partition─┬─name───────────┬─active─┐
+│ 201901    │ 201901_1_3_1   │      0 │
+│ 201901    │ 201901_1_9_2   │      1 │
+│ 201901    │ 201901_8_8_0   │      0 │
+│ 201901    │ 201901_9_9_0   │      0 │
+│ 201902    │ 201902_4_6_1   │      0 │
+│ 201902    │ 201902_4_11_2  │      1 │
+│ 201902    │ 201902_10_10_0 │      0 │
+│ 201902    │ 201902_11_11_0 │      0 │
+└───────────┴────────────────┴────────┘
+```
+
+Inactive parts will be removed approximately 10 minutes after merging.
+
+Another way to view a set of parts and partitions is to go into the directory of the table: `/var/lib/clickhouse/data/<database>/<table>/`. For example:
+
+``` bash
+/var/lib/clickhouse/data/default/visits$ ls -l
+total 40
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 201901_1_3_1
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201901_1_9_2
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_8_8_0
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_9_9_0
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_10_10_0
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_11_11_0
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:19 201902_4_11_2
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 12:09 201902_4_6_1
+drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 detached
+```
+
+The folders ‘201901\_1\_1\_0’, ‘201901\_1\_7\_1’ and so on are the directories of the parts. Each part relates to a corresponding partition and contains data just for a certain month (the table in this example has partitioning by month).
+
+The `detached` directory contains parts that were detached from the table using the [DETACH](#alter_detach-partition) query. The corrupted parts are also moved to this directory, instead of being deleted. The server does not use the parts from the `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../../sql_reference/statements/alter.md#alter_attach-partition) query.
+
+Note that on the operating server, you cannot manually change the set of parts or their data on the file system, since the server will not know about it. For non-replicated tables, you can do this when the server is stopped, but it is not recommended. For replicated tables, the set of parts cannot be changed in any case.
+
+ClickHouse allows you to perform operations with the partitions: delete them, copy from one table to another, or create a backup. See the list of all operations in the section [Manipulations With Partitions and Parts](../../../sql_reference/statements/alter.md#alter_manipulations-with-partitions).
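+
+As a sketch of the `DETACH` and `ATTACH` queries mentioned above (using the hypothetical partition `201901` of the `visits` table):
+
+``` sql
+ALTER TABLE visits DETACH PARTITION 201901;
+-- the partition is moved to the detached directory and ignored by the server
+
+ALTER TABLE visits ATTACH PARTITION 201901;
+-- the partition is brought back and becomes visible to queries again
+```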
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/graphitemergetree.md b/docs/tr/engines/table_engines/mergetree_family/graphitemergetree.md
new file mode 100644
index 00000000000..5d4a349631b
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/graphitemergetree.md
@@ -0,0 +1,174 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 38
+toc_title: "Graph\u0131temergetree"
+---
+
+# GraphiteMergeTree {#graphitemergetree}
+
+This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.
+
+You can use any ClickHouse table engine to store the Graphite data if you do not need rollup, but if you need a rollup, use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.
+
+The engine inherits properties from [MergeTree](mergetree.md).
+
+## Creating a Table {#creating-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    Path String,
+    Time DateTime,
+    Value <Numeric_type>,
+    Version <Numeric_type>
+    ...
+) ENGINE = GraphiteMergeTree(config_section)
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+See a detailed description of the [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) query.
+
+A table for the Graphite data should have the following columns for the following data:
+
+- Metric name (Graphite sensor). Data type: `String`.
+
+- Time of measuring the metric. Data type: `DateTime`.
+
+- Value of the metric. Data type: any numeric.
+
+- Version of the metric. Data type: any numeric.
+
+    ClickHouse saves the rows with the highest version or the last written if versions are the same. Other rows are deleted during the merge of data parts.
+
+The names of these columns should be set in the rollup configuration.
+
+**GraphiteMergeTree Parameters**
+
+- `config_section` — Name of the section in the configuration file, where are the rules of rollup set.
+
+**Query Clauses**
+
+When creating a `GraphiteMergeTree` table, the same [clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required as when creating a `MergeTree` table.
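+
+As an illustration, a concrete definition might look like the following sketch (the table name, column types, and the `'graphite_rollup'` section name are assumptions; use the section name you actually declare in the server configuration):
+
+``` sql
+CREATE TABLE graphite_data
+(
+    Path String,        -- metric name (Graphite sensor)
+    Time DateTime,      -- time of measuring the metric
+    Value Float64,      -- value of the metric
+    Version UInt32      -- version of the metric
+) ENGINE = GraphiteMergeTree('graphite_rollup')
+PARTITION BY toYYYYMM(Time)
+ORDER BY (Path, Time);
+```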
+
+**Deprecated Method for Creating a Table**
+
+!!! attention "Attention"
+    Do not use this method in new projects and, if possible, switch old projects to the method described above.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    EventDate Date,
+    Path String,
+    Time DateTime,
+    Value <Numeric_type>,
+    Version <Numeric_type>
+    ...
+) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section)
+```
+
+All of the parameters except `config_section` have the same meaning as in `MergeTree`.
+
+- `config_section` — Name of the section in the configuration file, where are the rules of rollup set.
+
    + +## Toplaması Yapılandırması {#rollup-configuration} + +Toplaması için ayarları tarafından tanımlanan [graphite\_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) sunucu yapılandırmasında parametre. Parametrenin adı herhangi biri olabilir. Birkaç yapılandırma oluşturabilir ve bunları farklı tablolar için kullanabilirsiniz. + +Toplaması yapılandırma yapısı: + + required-columns + patterns + +### Gerekli Sütunlar {#required-columns} + +- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`. +- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`. +- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. Varsayılan değer: `Value`. +- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`. + +### Desenler {#patterns} + +Bu yapı `patterns` bölme: + +``` text +pattern + regexp + function +pattern + regexp + age + precision + ... +pattern + regexp + function + age + precision + ... +pattern + ... +default + function + age + precision + ... +``` + +!!! warning "Dikkat" + Desenler kesinlikle sipariş edilmelidir: + + 1. Patterns without `function` or `retention`. + 1. Patterns with both `function` and `retention`. + 1. Pattern `default`. + +Bir satır işlerken, ClickHouse kuralları denetler `pattern` bölmeler. Tüm `pattern` (içeren `default`) bölümler içerebilir `function` toplama için parametre, `retention` parametreler veya her ikisi. Metrik adı eşleşirse `regexp` gelen kuralları `pattern` bölüm (veya bölümler) uygulanır; aksi takdirde, kurallar `default` bölüm kullanılır. + +Alanlar için `pattern` ve `default` bölmeler: + +- `regexp`– A pattern for the metric name. +- `age` – The minimum age of the data in seconds. +- `precision`– How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day). +- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. + +### Yapılandırma Örneği {#configuration-example} + +``` xml + + Version + + click_cost + any + + 0 + 5 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/) diff --git a/docs/tr/engines/table_engines/mergetree_family/index.md b/docs/tr/engines/table_engines/mergetree_family/index.md new file mode 100644 index 00000000000..e722564f4dd --- /dev/null +++ b/docs/tr/engines/table_engines/mergetree_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: MergeTree Ailesi +toc_priority: 28 +--- + + diff --git a/docs/tr/engines/table_engines/mergetree_family/mergetree.md b/docs/tr/engines/table_engines/mergetree_family/mergetree.md new file mode 100644 index 00000000000..a06aae83e1f --- /dev/null +++ b/docs/tr/engines/table_engines/mergetree_family/mergetree.md @@ -0,0 +1,654 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 30 +toc_title: MergeTree +--- + +# MergeTree {#table_engines-mergetree} + +Bu `MergeTree` bu ailenin motoru ve diğer motorları (`*MergeTree`) en sağlam ClickHouse masa motorlarıdır. 
+ +Motor inlarda `MergeTree` aile, bir tabloya çok büyük miktarda veri eklemek için tasarlanmıştır. Veriler hızlı bir şekilde tabloya kısmen yazılır, daha sonra parçaları arka planda birleştirmek için kurallar uygulanır. Bu yöntem, ınsert sırasında depolama alanındaki verileri sürekli olarak yeniden yazmaktan çok daha etkilidir. + +Ana özellikler: + +- Birincil anahtara göre sıralanmış verileri saklar. + + Bu, verileri daha hızlı bulmanıza yardımcı olan küçük bir seyrek dizin oluşturmanıza olanak sağlar. + +- Bölümler eğer kullanılabilir [bölümleme anahtarı](custom_partitioning_key.md) belirt .ilmektedir. + + ClickHouse, aynı sonuçla aynı veriler üzerindeki genel işlemlerden daha etkili olan bölümlerle belirli işlemleri destekler. ClickHouse, bölümleme anahtarının sorguda belirtildiği bölüm verilerini de otomatik olarak keser. Bu da sorgu performansını artırır. + +- Veri çoğaltma desteği. + + The family of `ReplicatedMergeTree` tablolar veri çoğaltma sağlar. Daha fazla bilgi için, bkz. [Veri çoğaltma](replication.md). + +- Veri örnekleme desteği. + + Gerekirse, tabloda veri örnekleme yöntemini ayarlayabilirsiniz. + +!!! info "Bilgin" + Bu [Birleştirmek](../special/merge.md) motor ait değil `*MergeTree` aile. + +## Tablo oluşturma {#table_engine-mergetree-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + ... + INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1, + INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2 +) ENGINE = MergeTree() +[PARTITION BY expr] +[ORDER BY expr] +[PRIMARY KEY expr] +[SAMPLE BY expr] +[TTL expr [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'], ...] +[SETTINGS name=value, ...] +``` + +Parametrelerin açıklaması için bkz. [Sorgu açıklaması oluştur](../../../sql_reference/statements/create.md). + +!!! note "Not" + `INDEX` deneysel bir özelliktir, bkz [Veri Atlama Dizinleri](#table_engine-mergetree-data_skipping-indexes). + +### Sorgu Yan Tümceleri {#mergetree-query-clauses} + +- `ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. Bu `MergeTree` motor parametreleri yok. + +- `PARTITION BY` — The [bölümleme anahtarı](custom_partitioning_key.md). + + Aylara göre bölümleme için `toYYYYMM(date_column)` ifade, nerede `date_column` türün tarihi olan bir sütun mu [Tarihli](../../../sql_reference/data_types/date.md). Burada bölüm isimleri var `"YYYYMM"` biçimli. + +- `ORDER BY` — The sorting key. + + Sütun veya keyfi ifadeler bir tuple. Örnek: `ORDER BY (CounterID, EventDate)`. + +- `PRIMARY KEY` — The primary key if it [sıralama anahtarından farklıdır](mergetree.md). + + Varsayılan olarak, birincil anahtar sıralama anahtarıyla aynıdır (bu anahtar tarafından belirtilir). `ORDER BY` yan). Bu nedenle çoğu durumda ayrı bir belirtmek gereksizdir `PRIMARY KEY` yan. + +- `SAMPLE BY` — An expression for sampling. + + Bir örnekleme ifadesi kullanılırsa, birincil anahtar onu içermelidir. Örnek: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`. + +- `TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [diskler ve birimler arasında](#table_engine-mergetree-multiple-volumes). + + İfade bir olmalıdır `Date` veya `DateTime` sonuç olarak sütun. 
Örnek: + `TTL date + INTERVAL 1 DAY` + + Kuralın türü `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'` ifade tatmin edildiyse (geçerli zamana ulaşırsa) parça ile yapılacak bir eylemi belirtir: süresi dolmuş satırların kaldırılması, bir parçanın (bir parçadaki tüm satırlar için ifade tatmin edildiyse) belirtilen diske taşınması (`TO DISK 'xxx'`) veya hacim (`TO VOLUME 'xxx'`). Kuralın varsayılan türü kaldırma (`DELETE`). Birden fazla kural listesi belirtilebilir, ancak birden fazla olmamalıdır `DELETE` kural. + + Daha fazla ayrıntı için bkz. [Sütunlar ve tablolar için TTL](#table_engine-mergetree-ttl) + +- `SETTINGS` — Additional parameters that control the behavior of the `MergeTree`: + + - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Veri Depolama](#mergetree-data-storage). + - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [Veri Depolama](#mergetree-data-storage). + - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` ayar. 19.11 sürümünden önce, sadece `index_granularity` granül boyutunu kısıtlamak için ayar. Bu `index_granularity_bytes` büyük satırlar (onlarca ve megabayt yüzlerce) ile tablolardan veri seçerken ayarı ClickHouse performansını artırır. Büyük satırlara sahip tablolarınız varsa, tabloların verimliliğini artırmak için bu ayarı etkinleştirebilirsiniz. `SELECT` sorgular. + - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, daha sonra ZooKeeper daha az veri depolar. Daha fazla bilgi için, bkz: [ayar açıklaması](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) içinde “Server configuration parameters”. + - `min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bayt, ClickHouse okur ve doğrudan I/O arabirimi kullanarak depolama diskine veri yazar (`O_DIRECT` seçenek). Eğer `min_merge_bytes_to_use_direct_io = 0`, sonra doğrudan g / Ç devre dışı bırakılır. Varsayılan değer: `10 * 1024 * 1024 * 1024` baytlar. + + - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day). + - `write_final_mark` — Enables or disables writing the final index mark at the end of data part (after the last byte). Default value: 1. Don't turn it off. + - `merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192. + - `storage_policy` — Storage policy. See [Veri depolama için birden fazla blok cihazı kullanma](#table_engine-mergetree-multiple-volumes). + +**Bölüm ayarı örneği** + +``` sql +ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192 +``` + +Örnekte, aylara göre bölümleme ayarladık. + +Biz de kullanıcı kimliği ile karma olarak örnekleme için bir ifade ayarlayın. Bu, her biri için tablodaki verileri pseudorandomize etmenizi sağlar `CounterID` ve `EventDate`. 
If you define a [SAMPLE](../../../sql_reference/statements/select.md#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users.
+
+The `index_granularity` setting can be omitted because 8192 is the default value.
+
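+
+For illustration, a query with the `SAMPLE` clause against a table configured this way might look like the following sketch (the `hits` table name is an assumption):
+
+``` sql
+SELECT CounterID, count() AS visits
+FROM hits
+SAMPLE 0.1          -- read roughly a 10% pseudorandom subset of users
+GROUP BY CounterID
+ORDER BY visits DESC
+LIMIT 10;
+```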
+
+**Deprecated Method for Creating a Table**
+
+!!! attention "Attention"
+    Do not use this method in new projects. If possible, switch old projects to the method described above.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
+```
+
+**MergeTree() Parameters**
+
+- `date-column` — The name of a column of the [Date](../../../sql_reference/data_types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format.
+- `sampling_expression` — An expression for sampling.
+- `(primary, key)` — Primary key. Type: [Tuple()](../../../sql_reference/data_types/tuple.md)
+- `index_granularity` — The granularity of an index. The number of data rows between the “marks” of an index. The value 8192 is appropriate for most tasks.
+
+**Example**
+
+``` sql
+MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192)
+```
+
+The `MergeTree` engine is configured in the same way as in the example above for the main engine configuration method.
    + +## Veri Depolama {#mergetree-data-storage} + +Bir tabloda birincil anahtar tarafından sıralanmış verileri bölümden oluşmaktadır. + +Veri bir tabloya eklendiğinde, ayrı veri parçaları oluşturulur ve bunların her biri birincil anahtara göre lexicographically sıralanır. Örneğin, birincil anahtar `(CounterID, Date)`, parçadaki veriler şu şekilde sıralanır `CounterID` ve içinde her `CounterID` tarafından sipariş edilir `Date`. + +Farklı bölümlere ait veriler farklı parçalara ayrılır. Arka planda, ClickHouse daha verimli depolama için veri parçalarını birleştirir. Farklı bölümlere ait parçalar birleştirilmez. Birleştirme mekanizması, aynı birincil anahtara sahip tüm satırların aynı veri bölümünde olacağını garanti etmez. + +Her veri parçası mantıksal olarak granüllere ayrılmıştır. Bir granül, Clickhouse'un veri seçerken okuduğu en küçük bölünmez veri kümesidir. ClickHouse satırları veya değerleri bölmez, bu nedenle her granül her zaman bir tamsayı satır içerir. Bir granülün ilk satırı, satır için birincil anahtarın değeri ile işaretlenir. Her veri bölümü için ClickHouse işaretleri depolayan bir dizin dosyası oluşturur. Her sütun için, birincil anahtarda olsun ya da olmasın, ClickHouse aynı işaretleri de saklar. Bu işaretler, verileri doğrudan sütun dosyalarında bulmanızı sağlar. + +Granül boyutu ile sınırlıdır `index_granularity` ve `index_granularity_bytes` tablo motorunun ayarları. Bir granüldeki satır sayısı `[1, index_granularity]` Aralık, satırların boyutuna bağlı olarak. Bir granülün boyutu aşabilir `index_granularity_bytes` tek bir satırın boyutu ayarın değerinden büyükse. Bu durumda, granülün boyutu satırın boyutuna eşittir. + +## Sorgularda birincil anahtarlar ve dizinler {#primary-keys-and-indexes-in-queries} + +Tak thee the `(CounterID, Date)` örnek olarak birincil anahtar. Bu durumda, sıralama ve dizin aşağıdaki gibi gösterilebilir: + + Whole data: [---------------------------------------------] + CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] + Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] + Marks: | | | | | | | | | | | + a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 + Marks numbers: 0 1 2 3 4 5 6 7 8 9 10 + +Veri sorgusu belirtirse: + +- `CounterID in ('a', 'h')`, sunucu işaretleri aralıklarında verileri okur `[0, 3)` ve `[6, 8)`. +- `CounterID IN ('a', 'h') AND Date = 3`, sunucu işaretleri aralıklarında verileri okur `[1, 3)` ve `[7, 8)`. +- `Date = 3`, sunucu işaretleri aralığında veri okur `[1, 10]`. + +Yukarıdaki örnekler, her zaman bir dizin tam taramadan daha etkili olduğunu göstermektedir. + +Seyrek bir dizin, ekstra verilerin okunmasına izin verir. Birincil anahtarın tek bir aralığını okurken, `index_granularity * 2` her veri bloğundaki ekstra satırlar okunabilir. + +Seyrek dizinler, çok sayıda tablo satırı ile çalışmanıza izin verir, çünkü çoğu durumda, bu tür dizinler bilgisayarın RAM'İNE sığar. + +ClickHouse benzersiz bir birincil anahtar gerektirmez. Aynı birincil anahtar ile birden çok satır ekleyebilirsiniz. + +### Birincil anahtar seçme {#selecting-the-primary-key} + +Birincil anahtardaki sütun sayısı açıkça sınırlı değildir. Veri yapısına bağlı olarak, birincil anahtara daha fazla veya daha az sütun ekleyebilirsiniz. Bu Mayıs: + +- Bir dizin performansını artırın. + + Birincil anahtar ise `(a, b)`, sonra başka bir sütun ekleyerek `c` aşağıdaki koşullar yerine getirilirse performansı artıracaktır: + + - Sütun üzerinde bir koşulu olan sorgular var `c`. 
+ - Uzun veri aralıkları (birkaç kat daha uzun `index_granularity`) için aynı değer withlerle `(a, b)` yaygındır. Başka bir deyişle, başka bir sütun eklerken oldukça uzun veri aralıklarını atlamanıza izin verir. + +- Veri sıkıştırmasını geliştirin. + + ClickHouse verileri birincil anahtarla sıralar, bu nedenle tutarlılık ne kadar yüksek olursa sıkıştırma o kadar iyi olur. + +- Veri parçalarını birleştirirken ek mantık sağlayın [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) ve [SummingMergeTree](summingmergetree.md) motorlar. + + Bu durumda belirtmek mantıklı *sıralama anahtarı* bu birincil anahtardan farklıdır. + +Uzun bir birincil anahtar, ekleme performansını ve bellek tüketimini olumsuz yönde etkiler, ancak birincil anahtardaki ek sütunlar, ClickHouse performansını etkilemez `SELECT` sorgular. + +### Sıralama anahtarından farklı bir birincil anahtar seçme {#choosing-a-primary-key-that-differs-from-the-sorting-key} + +Sıralama anahtarından (veri bölümlerindeki satırları sıralamak için bir ifade) farklı bir birincil anahtar (her işaret için dizin dosyasında yazılan değerlere sahip bir ifade) belirtmek mümkündür. Bu durumda, birincil anahtar ifadesi tuple, sıralama anahtarı ifadesi tuple'ın bir öneki olmalıdır. + +Bu özellik kullanırken yararlıdır [SummingMergeTree](summingmergetree.md) ve +[AggregatingMergeTree](aggregatingmergetree.md) masa motorları. Bu motorları kullanırken yaygın bir durumda, tablonun iki tür sütunu vardır: *boyutlular* ve *ölçümler*. Tipik sorgular, rasgele ölçü sütunlarının değerlerini toplar `GROUP BY` ve boyutlara göre filtreleme. Çünkü SummingMergeTree ve AggregatingMergeTree sıralama anahtarının aynı değere sahip satırları toplamak, tüm boyutları eklemek doğaldır. Sonuç olarak, anahtar ifadesi uzun bir sütun listesinden oluşur ve bu liste yeni eklenen boyutlarla sık sık güncelleştirilmelidir. + +Bu durumda, birincil anahtarda verimli Aralık taramaları sağlayacak ve kalan boyut sütunlarını sıralama anahtarı kümesine ekleyecek yalnızca birkaç sütun bırakmak mantıklıdır. + +[ALTER](../../../sql_reference/statements/alter.md) yeni bir sütun aynı anda tabloya ve sıralama anahtarı eklendiğinde, varolan veri parçaları değiştirilmesi gerekmez, çünkü sıralama anahtarının hafif bir işlemdir. Eski sıralama anahtarı yeni sıralama anahtarının bir öneki olduğundan ve yeni eklenen sütunda veri olmadığından, veriler tablo değişikliği anında hem eski hem de yeni sıralama anahtarlarına göre sıralanır. + +### Sorgularda dizin ve bölümlerin kullanımı {#use-of-indexes-and-partitions-in-queries} + +İçin `SELECT` sorgular, ClickHouse bir dizin kullanılabilir olup olmadığını analiz eder. Eğer bir dizin kullanılabilir `WHERE/PREWHERE` yan tümce, bir eşitlik veya eşitsizlik karşılaştırma işlemini temsil eden bir ifadeye (bağlantı öğelerinden biri olarak veya tamamen) sahiptir veya varsa `IN` veya `LIKE` sütun veya birincil anahtar veya bölümleme anahtar veya bu sütunların belirli kısmen tekrarlayan işlevleri veya bu ifadelerin mantıksal ilişkileri olan ifadeler üzerinde sabit bir önek ile. + +Bu nedenle, birincil anahtarın bir veya daha fazla aralığındaki sorguları hızlı bir şekilde çalıştırmak mümkündür. Bu örnekte, belirli bir izleme etiketi, belirli bir etiket ve tarih aralığı, belirli bir etiket ve tarih için, tarih aralığına sahip birden çok etiket için vb. çalıştırıldığında sorgular hızlı olacaktır. 
+ +Aşağıdaki gibi yapılandırılmış motora bakalım: + + ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 + +Bu durumda, sorgularda: + +``` sql +SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34 +SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42) +SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01')) +``` + +ClickHouse, uygun olmayan verileri kırpmak için birincil anahtar dizinini ve uygun olmayan tarih aralıklarındaki bölümleri kırpmak için aylık bölümleme anahtarını kullanır. + +Yukarıdaki sorgular, dizinin karmaşık ifadeler için bile kullanıldığını göstermektedir. Tablodan okuma, dizini kullanarak tam taramadan daha yavaş olamayacak şekilde düzenlenmiştir. + +Aşağıdaki örnekte, dizin kullanılamaz. + +``` sql +SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' +``` + +Clickhouse'un bir sorgu çalıştırırken dizini kullanıp kullanamayacağını kontrol etmek için ayarları kullanın [force\_index\_by\_date](../../../operations/settings/settings.md#settings-force_index_by_date) ve [force\_primary\_key](../../../operations/settings/settings.md). + +Aylara göre bölümleme anahtarı, yalnızca uygun aralıktaki tarihleri içeren veri bloklarını okumanıza izin verir. Bu durumda, veri bloğu birçok tarih için veri içerebilir (bir aya kadar). Bir blok içinde veriler, ilk sütun olarak tarihi içermeyen birincil anahtara göre sıralanır. Bu nedenle, birincil anahtar önekini belirtmeyen yalnızca bir tarih koşulu ile bir sorgu kullanarak tek bir tarih için okunacak daha fazla veri neden olur. + +### Kısmen monotonik birincil anahtarlar için Endeks kullanımı {#use-of-index-for-partially-monotonic-primary-keys} + +Örneğin, Ayın günlerini düşünün. Onlar formu bir [monotonik dizisi](https://en.wikipedia.org/wiki/Monotonic_function) bir ay boyunca, ancak daha uzun süreler için monotonik değil. Bu kısmen monotonik bir dizidir. Bir kullanıcı kısmen monoton birincil anahtar ile tablo oluşturursa, ClickHouse her zamanki gibi seyrek bir dizin oluşturur. Bir kullanıcı bu tür bir tablodan veri seçtiğinde, ClickHouse sorgu koşullarını analiz eder. Kullanıcı, dizinin iki işareti arasında veri almak isterse ve bu işaretlerin her ikisi de bir ay içinde düşerse, ClickHouse bu özel durumda dizini kullanabilir, çünkü sorgu parametreleri ile dizin işaretleri arasındaki mesafeyi hesaplayabilir. + +Sorgu parametresi aralığındaki birincil anahtarın değerleri monotonik bir sırayı temsil etmiyorsa, ClickHouse bir dizin kullanamaz. Bu durumda, ClickHouse Tam Tarama yöntemini kullanır. + +ClickHouse bu mantığı yalnızca ay dizilerinin günleri için değil, kısmen monotonik bir diziyi temsil eden herhangi bir birincil anahtar için kullanır. + +### Veri atlama indeksleri (deneysel) {#table_engine-mergetree-data_skipping-indexes} + +Dizin bildirimi sütunlar bölümünde `CREATE` sorgu. + +``` sql +INDEX index_name expr TYPE type(...) GRANULARITY granularity_value +``` + +Tablolar için `*MergeTree` aile, veri atlama endeksleri belirtilebilir. + +Bu endeksler, bloklarda belirtilen ifade hakkında bazı bilgileri toplar ve bunlardan oluşur `granularity_value` granüller (granül boyutu kullanılarak belirtilir `index_granularity` tablo motoru ayarı). 
Daha sonra bu agregalar `SELECT` büyük veri bloklarını atlayarak diskten okunacak veri miktarını azaltmak için sorgular `where` sorgu tatmin edilemez. + +**Örnek** + +``` sql +CREATE TABLE table_name +( + u64 UInt64, + i32 Int32, + s String, + ... + INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3, + INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4 +) ENGINE = MergeTree() +... +``` + +Örneğin endeksleri aşağıdaki sorgularda diskten okunacak veri miktarını azaltmak için ClickHouse tarafından kullanılabilir: + +``` sql +SELECT count() FROM table WHERE s < 'z' +SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 +``` + +#### Mevcut Endeks Türleri {#available-types-of-indices} + +- `minmax` + + Belirtilen ifad (eyi saklar (ifad (enin `tuple`, sonra her eleman için aşırı depolar `tuple`), birincil anahtar gibi veri bloklarını atlamak için saklanan bilgileri kullanır. + +- `set(max_rows)` + + Belirtilen ifadenin benzersiz değerlerini depolar (en fazla `max_rows` satırlar, `max_rows=0` anlama “no limits”). Kontrol etmek için değerleri kullanır `WHERE` ifade, bir veri bloğu üzerinde tatmin edilemez değildir. + +- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + + Mağazalar a [Blo filterom filtre](https://en.wikipedia.org/wiki/Bloom_filter) bu, bir veri bloğundaki tüm ngramları içerir. Sadece dizeleri ile çalışır. Optimizasyonu için kullanılabilir `equals`, `like` ve `in` ifadeler. + + - `n` — ngram size, + - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well). + - `number_of_hash_functions` — The number of hash functions used in the Bloom filter. + - `random_seed` — The seed for Bloom filter hash functions. + +- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + + Olarak aynı `ngrambf_v1`, ancak ngrams yerine simgeleri saklar. Belirteçler alfasayısal olmayan karakterlerle ayrılmış dizilerdir. + +- `bloom_filter([false_positive])` — Stores a [Blo filterom filtre](https://en.wikipedia.org/wiki/Bloom_filter) belirtilen sütunlar için. + + Opsiyonel `false_positive` parametre, filtreden yanlış pozitif yanıt alma olasılığıdır. Olası değerler: (0, 1). Varsayılan değer: 0.025. + + Desteklenen veri türleri: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`. + + Aşağıdaki işlevleri kullanabilirsiniz: [eşitlikler](../../../sql_reference/functions/comparison_functions.md), [notEquals](../../../sql_reference/functions/comparison_functions.md), [içinde](../../../sql_reference/functions/in_functions.md), [notİn](../../../sql_reference/functions/in_functions.md), [var](../../../sql_reference/functions/array_functions.md). + + + +``` sql +INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 +INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4 +INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 +``` + +#### Fonksiyonları Destek {#functions-support} + +Koşulları `WHERE` yan tümcesi, sütunlarla çalışan işlevlerin çağrılarını içerir. Sütun bir dizinin bir parçasıysa, ClickHouse işlevleri gerçekleştirirken bu dizini kullanmaya çalışır. ClickHouse, dizinleri kullanmak için farklı işlev alt kümelerini destekler. + +Bu `set` dizin tüm fonksiyonları ile kullanılabilir. Diğer dizinler için işlev alt kümeleri aşağıdaki tabloda gösterilmiştir. 
+ +| Fonksiyon (operatör) / dizin | birincil anahtar | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter | +|------------------------------------------------------------------------------------------------------------|------------------|--------|-------------|-------------|---------------| +| [eşitlikler (=, ==)](../../../sql_reference/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, \<\>)](../../../sql_reference/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [hoşlanmak](../../../sql_reference/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [notLike](../../../sql_reference/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [startsWith](../../../sql_reference/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [endsWith](../../../sql_reference/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | +| [multiSearchAny](../../../sql_reference/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | +| [içinde](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notİn](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [daha az (\<)](../../../sql_reference/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [büyük (\>)](../../../sql_reference/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql_reference/functions/comparison_functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (\>=)](../../../sql_reference/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [boş](../../../sql_reference/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql_reference/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | + +Ngram boyutundan daha az olan sabit bir argümana sahip işlevler tarafından kullanılamaz `ngrambf_v1` sorgu optimizasyonu için. + +Bloom filtreleri yanlış pozitif eşleşmelere sahip olabilir, bu yüzden `ngrambf_v1`, `tokenbf_v1`, ve `bloom_filter` dizinler, örneğin bir işlevin sonucunun false olması beklenen sorguları en iyi duruma getirmek için kullanılamaz: + +- Optimize edilebilir: + - `s LIKE '%test%'` + - `NOT s NOT LIKE '%test%'` + - `s = 1` + - `NOT s != 1` + - `startsWith(s, 'test')` +- Optimize edilemez: + - `NOT s LIKE '%test%'` + - `s NOT LIKE '%test%'` + - `NOT s = 1` + - `s != 1` + - `NOT startsWith(s, 'test')` + +## Eşzamanlı Veri Erişimi {#concurrent-data-access} + +Eşzamanlı tablo erişimi için çoklu sürüm kullanıyoruz. Başka bir deyişle, bir tablo aynı anda okunup güncelleştirildiğinde, sorgu sırasında geçerli olan bir parça kümesinden veri okunur. Uzun kilitler yok. Ekler okuma işlemlerinin yoluna girmez. + +Bir tablodan okuma otomatik olarak paralelleştirilir. + +## Sütunlar ve tablolar için TTL {#table_engine-mergetree-ttl} + +Değerlerin ömrünü belirler. + +Bu `TTL` yan tümcesi tüm tablo ve her sütun için ayarlanabilir. Tablo düzeyinde TTL ayrıca diskler ve birimler arasında otomatik veri taşıma mantığını belirtebilirsiniz. + +İfadeleri değerlendirmek gerekir [Tarihli](../../../sql_reference/data_types/date.md) veya [DateTime](../../../sql_reference/data_types/datetime.md) veri türü. 
+ +Örnek: + +``` sql +TTL time_column +TTL time_column + interval +``` + +Tanımlamak `interval`, kullanma [zaman aralığı](../../../sql_reference/operators.md#operators-datetime) operatörler. + +``` sql +TTL date_time + INTERVAL 1 MONTH +TTL date_time + INTERVAL 15 HOUR +``` + +### Sütun TTL {#mergetree-column-ttl} + +Sütundaki değerler sona erdiğinde, ClickHouse bunları sütun veri türü için varsayılan değerlerle değiştirir. Veri bölümündeki tüm sütun değerleri sona ererse, ClickHouse bu sütunu bir dosya sistemindeki veri bölümünden siler. + +Bu `TTL` yan tümcesi anahtar sütunlar için kullanılamaz. + +Örnekler: + +TTL ile tablo oluşturma + +``` sql +CREATE TABLE example_table +( + d DateTime, + a Int TTL d + INTERVAL 1 MONTH, + b Int TTL d + INTERVAL 1 MONTH, + c String +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d; +``` + +Varolan bir tablonun sütununa TTL ekleme + +``` sql +ALTER TABLE example_table + MODIFY COLUMN + c String TTL d + INTERVAL 1 DAY; +``` + +Sütun TTL değiştirme + +``` sql +ALTER TABLE example_table + MODIFY COLUMN + c String TTL d + INTERVAL 1 MONTH; +``` + +### Tablo TTL {#mergetree-table-ttl} + +Tablo, süresi dolmuş satırların kaldırılması için bir ifadeye ve parçaların arasında otomatik olarak taşınması için birden fazla ifadeye sahip olabilir [diskler veya birimler](#table_engine-mergetree-multiple-volumes). Tablodaki satırların süresi dolduğunda, ClickHouse ilgili tüm satırları siler. Parça taşıma özelliği için, bir parçanın tüm satırları hareket ifadesi ölçütlerini karşılaması gerekir. + +``` sql +TTL expr [DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'], ... +``` + +TTL kuralı türü her TTL ifadesini takip edebilir. İfade tatmin edildikten sonra yapılacak bir eylemi etkiler (şimdiki zamana ulaşır): + +- `DELETE` - süresi dolmuş satırları sil (varsayılan eylem); +- `TO DISK 'aaa'` - parçayı diske taşı `aaa`; +- `TO VOLUME 'bbb'` - parçayı diske taşı `bbb`. + +Örnekler: + +TTL ile tablo oluşturma + +``` sql +CREATE TABLE example_table +( + d DateTime, + a Int +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d +TTL d + INTERVAL 1 MONTH [DELETE], + d + INTERVAL 1 WEEK TO VOLUME 'aaa', + d + INTERVAL 2 WEEK TO DISK 'bbb'; +``` + +Tablonun TTL değiştirme + +``` sql +ALTER TABLE example_table + MODIFY TTL d + INTERVAL 1 DAY; +``` + +**Verileri Kaldırma** + +ClickHouse veri parçalarını birleştirdiğinde süresi dolmuş bir TTL ile veri kaldırılır. + +ClickHouse, verilerin süresi dolduğunu gördüğünde, zamanlama dışı bir birleştirme gerçekleştirir. Bu tür birleştirmelerin sıklığını kontrol etmek için şunları ayarlayabilirsiniz [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout). Değer çok düşükse, çok fazla kaynak tüketebilecek birçok zamanlama dışı birleştirme gerçekleştirir. + +Gerçekleştir theirseniz `SELECT` birleştirme arasında sorgu, süresi dolmuş veri alabilirsiniz. Bunu önlemek için, [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) önce sorgu `SELECT`. + +## Veri Depolama İçin Birden Fazla Blok Cihazı Kullanma {#table_engine-mergetree-multiple-volumes} + +### Giriş {#introduction} + +`MergeTree` aile tablo motorları birden fazla blok cihazlarda veri saklayabilirsiniz. Örneğin, belirli bir tablonun verileri örtük olarak bölündüğünde yararlı olabilir “hot” ve “cold”. En son veriler düzenli olarak talep edilir, ancak yalnızca az miktarda alan gerektirir. Aksine, yağ kuyruklu tarihsel veriler nadiren talep edilir. 
Birkaç disk varsa, “hot” veriler hızlı disklerde (örneğin, NVMe SSD'ler veya bellekte) bulunabilir; “cold” veri-nispeten yavaş olanlar (örneğin, HDD). + +Veri kısmı için minimum hareketli birimdir `MergeTree`- motor masaları. Bir parçaya ait veriler bir diskte saklanır. Veri parçaları arka planda diskler arasında (kullanıcı ayarlarına göre) ve aynı zamanda [ALTER](../../../sql_reference/statements/alter.md#alter_move-partition) sorgular. + +### Şartlar {#terms} + +- Disk — Block device mounted to the filesystem. +- Default disk — Disk that stores the path specified in the [yol](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) sunucu ayarı. +- Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)). +- Storage policy — Set of volumes and the rules for moving data between them. + +Açıklanan varlıklara verilen isimler sistem tablolarında bulunabilir, [sistem.storage\_policies](../../../operations/system_tables.md#system_tables-storage_policies) ve [sistem.diskler](../../../operations/system_tables.md#system_tables-disks). Bir tablo için yapılandırılmış depolama ilkelerinden birini uygulamak için `storage_policy` ayarı `MergeTree`- motor aile tabloları. + +### Yapılandırma {#table_engine-mergetree-multiple-volumes-configure} + +Diskler, birimler ve depolama politikaları içinde bildirilmelidir `` ana dosyada ya etiket `config.xml` veya farklı bir dosyada `config.d` dizin. + +Yapılandırma yapısı: + +``` xml + + + + /mnt/fast_ssd/clickhouse + + + /mnt/hdd1/clickhouse + 10485760 + + + /mnt/hdd2/clickhouse + 10485760 + + + ... + + + ... + +``` + +Etiketler: + +- `` — Disk name. Names must be different for all disks. +- `path` — path under which a server will store data (`data` ve `shadow` klasörler) ile Sonlandır shouldılmalıdır ‘/’. +- `keep_free_space_bytes` — the amount of free disk space to be reserved. + +Disk tanımının sırası önemli değildir. + +Depolama ilkeleri yapılandırma biçimlendirme: + +``` xml + + ... + + + + + disk_name_from_disks_configuration + 1073741824 + + + + + + + 0.2 + + + + + + + + ... + +``` + +Etiketler: + +- `policy_name_N` — Policy name. Policy names must be unique. +- `volume_name_N` — Volume name. Volume names must be unique. +- `disk` — a disk within a volume. +- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks. +- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move on the next volume if any (by default, 0.1). + +Cofiguration örnekleri: + +``` xml + + ... + + + + + disk1 + disk2 + + + + + + + + fast_ssd + 1073741824 + + + disk1 + + + 0.2 + + + ... + +``` + +Verilen örnekte, `hdd_in_order` politika uygular [Ro -und-robin](https://en.wikipedia.org/wiki/Round-robin_scheduling) yaklaşma. Böylece bu politika yalnızca bir birim tanımlar (`single`), veri parçaları tüm disklerinde dairesel sırayla saklanır. Bu tür bir politika, sisteme birkaç benzer disk takılıysa, ancak RAID yapılandırılmamışsa oldukça yararlı olabilir. Her bir disk sürücüsünün güvenilir olmadığını ve bunu 3 veya daha fazla çoğaltma faktörü ile telafi etmek isteyebileceğinizi unutmayın. + +Sistemde farklı türde diskler varsa, `moving_from_ssd_to_hdd` politika yerine kullanılabilir. Birim `hot` bir SSD disk oluşur (`fast_ssd`) ve bu birimde saklanabilecek bir parçanın maksimum boyutu 1GB. 
Tüm parçaları ile boyutu daha büyük 1 GB üzerinde doğrudan saklanır `cold` bir HDD diski içeren birim `disk1`. +Ayrıca, bir kez disk `fast_ssd` 80'den fazla % tarafından doldurulur, veri transfer edilecektir `disk1` bir arka plan işlemi ile. + +Depolama ilkesi içindeki birim numaralandırma sırası önemlidir. Bir birim aşırı doldurulduktan sonra, veriler bir sonrakine taşınır. Disk numaralandırma sırası da önemlidir, çünkü veriler sırayla depolanır. + +Bir tablo oluştururken, yapılandırılmış depolama ilkelerinden birini ona uygulayabilirsiniz: + +``` sql +CREATE TABLE table_with_non_default_policy ( + EventDate Date, + OrderID UInt64, + BannerID UInt64, + SearchPhrase String +) ENGINE = MergeTree +ORDER BY (OrderID, BannerID) +PARTITION BY toYYYYMM(EventDate) +SETTINGS storage_policy = 'moving_from_ssd_to_hdd' +``` + +Bu `default` depolama ilkesi, Yalnızca verilen bir diskten oluşan yalnızca bir birim kullanmayı ima eder ``. Bir tablo oluşturulduktan sonra, depolama ilkesi değiştirilemez. + +### Ayrıntı {#details} + +Bu durumda `MergeTree` tablolar, veriler diske farklı şekillerde giriyor: + +- Bir ekleme sonucunda (`INSERT` sorgu). +- Arka plan birleştirmeleri sırasında ve [mutasyonlar](../../../sql_reference/statements/alter.md#alter-mutations). +- Başka bir kopyadan indirirken. +- Bölüm Don ofması sonucu [ALTER TABLE … FREEZE PARTITION](../../../sql_reference/statements/alter.md#alter_freeze-partition). + +Mutasyonlar ve bölüm dondurma hariç tüm bu durumlarda, bir parça verilen depolama politikasına göre bir birim ve bir diskte saklanır: + +1. Bir parçayı depolamak için yeterli disk alanına sahip olan ilk birim (tanım sırasına göre) (`unreserved_space > current_part_size`) ve belirli bir boyuttaki parçaların saklanmasına izin verir (`max_data_part_size_bytes > current_part_size`) seçilir. +2. Bu birimde, önceki veri yığınını depolamak için kullanılan ve parça boyutundan daha fazla boş alana sahip olan diski izleyen disk seçilir (`unreserved_space - keep_free_space_bytes > current_part_size`). + +Kap hoodut underun altında, [sabit linkler](https://en.wikipedia.org/wiki/Hard_link). Farklı diskler arasındaki sabit bağlantılar desteklenmez, bu nedenle bu gibi durumlarda ortaya çıkan parçalar ilk disklerle aynı disklerde saklanır. + +Arka planda, parçalar boş alan miktarına göre hacimler arasında taşınır (`move_factor` parametre) sırasına göre birimler yapılandırma dosyasında beyan edilir. +Veriler asla sonuncudan ve birincisine aktarılmaz. Bir sistem tabloları kullanabilirsiniz [sistem.part\_log](../../../operations/system_tables.md#system_tables-part-log) (alan `type = MOVE_PART`) ve [sistem.parçalar](../../../operations/system_tables.md#system_tables-parts) (alanlar `path` ve `disk`) arka plan hareketlerini izlemek için. Ayrıca, ayrıntılı bilgi sunucu günlüklerinde bulunabilir. + +Kullanıcı, sorguyu kullanarak bir bölümü veya bölümü bir birimden diğerine taşımaya zorlayabilir [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql_reference/statements/alter.md#alter_move-partition), arka plan işlemleri için tüm kısıtlamalar dikkate alınır. Sorgu, kendi başına bir hareket başlatır ve tamamlanması için arka plan işlemleri beklemez. Yeterli boş alan yoksa veya gerekli koşullardan herhangi biri karşılanmazsa kullanıcı bir hata mesajı alır. + +Veri taşıma veri çoğaltma ile müdahale etmez. Bu nedenle, farklı depolama ilkeleri aynı tablo için farklı yinelemeler üzerinde belirtilebilir. 
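+
+As a sketch of the manually forced moves mentioned above, applied to the `table_with_non_default_policy` example (the partition and part names here are hypothetical):
+
+``` sql
+-- move a whole partition to the volume named 'cold' in the policy above
+ALTER TABLE table_with_non_default_policy MOVE PARTITION 201902 TO VOLUME 'cold';
+
+-- move a single data part to a specific disk
+ALTER TABLE table_with_non_default_policy MOVE PART '201902_4_6_1' TO DISK 'disk1';
+```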
+
+After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`).
+During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for the evaluation of occupied disk space.
+
+[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/replacingmergetree.md b/docs/tr/engines/table_engines/mergetree_family/replacingmergetree.md
new file mode 100644
index 00000000000..baf2fc98b07
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/replacingmergetree.md
@@ -0,0 +1,69 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 33
+toc_title: ReplacingMergeTree
+---
+
+# ReplacingMergeTree {#replacingmergetree}
+
+The engine differs from [MergeTree](mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same primary key value (or, more accurately, with the same [sorting key](mergetree.md) value).
+
+Data deduplication occurs only during a merge. Merging happens in the background at an unknown time, so you can't plan for it. Some of the data may remain unprocessed. Although you can run an unscheduled merge using the `OPTIMIZE` query, don't count on using it, because the `OPTIMIZE` query will read and write a large amount of data.
+
+Thus, `ReplacingMergeTree` is suitable for clearing out duplicate data in the background in order to save space, but it doesn't guarantee the absence of duplicates.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = ReplacingMergeTree([ver])
+[PARTITION BY expr]
+[ORDER BY expr]
+[PRIMARY KEY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+For a description of request parameters, see the [request description](../../../sql_reference/statements/create.md).
+
+**ReplacingMergeTree Parameters**
+
+- `ver` — column with version. Type `UInt*`, `Date` or `DateTime`. Optional parameter.
+
+    When merging, `ReplacingMergeTree` leaves only one row out of all the rows with the same primary key:
+
+    - The last in the selection, if `ver` is not set.
+    - The one with the maximum version, if `ver` is specified.
+
+**Query clauses**
+
+When creating a `ReplacingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
+
+<details markdown="1">
+
+<summary>Deprecated Method for Creating a Table</summary>
+
+!!! attention "Attention"
+    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver])
+```
+
+All of the parameters except `ver` have the same meaning as in `MergeTree`.
+
+- `ver` - column with the version. Optional parameter. For a description, see the text above.
+
+</details>
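+
+To make the deduplication behavior concrete, here is a small hedged sketch (the table and column names are made up for this example); `OPTIMIZE … FINAL` is used only to force the merge for the demonstration and is expensive on real data:
+
+``` sql
+CREATE TABLE visits_dedup
+(
+    UserID UInt64,
+    PageViews UInt8,
+    Version UInt32
+) ENGINE = ReplacingMergeTree(Version)
+ORDER BY UserID;
+
+INSERT INTO visits_dedup VALUES (1, 5, 1);
+INSERT INTO visits_dedup VALUES (1, 6, 2);
+
+-- Force an unscheduled merge just for the demonstration.
+OPTIMIZE TABLE visits_dedup FINAL;
+
+-- Only the row with the maximum Version per sorting key remains: (1, 6, 2).
+SELECT * FROM visits_dedup;
+```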
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/replication.md b/docs/tr/engines/table_engines/mergetree_family/replication.md
new file mode 100644
index 00000000000..e9890c2652f
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/replication.md
@@ -0,0 +1,218 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 31
+toc_title: "Veri \xC7o\u011Faltma"
+---
+
+# Data Replication {#table_engines-replication}
+
+Replication is only supported for tables in the MergeTree family:
+
+- ReplicatedMergeTree
+- ReplicatedSummingMergeTree
+- ReplicatedReplacingMergeTree
+- ReplicatedAggregatingMergeTree
+- ReplicatedCollapsingMergeTree
+- ReplicatedVersionedCollapsingMergeTree
+- ReplicatedGraphiteMergeTree
+
+Replication works at the level of an individual table, not the entire server. A server can store both replicated and non-replicated tables at the same time.
+
+Replication does not depend on sharding. Each shard has its own independent replication.
+
+Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](../../../sql_reference/statements/alter.md#query_language_queries_alter)).
+
+`CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated:
+
+- The `CREATE TABLE` query creates a new replicatable table on the server where the query is run. If this table already exists on other servers, it adds a new replica.
+- The `DROP TABLE` query deletes the replica located on the server where the query is run.
+- The `RENAME` query renames the table on one of the replicas. In other words, replicated tables can have different names on different replicas.
+
+ClickHouse uses [Apache ZooKeeper](https://zookeeper.apache.org) for storing replicas meta information. Use ZooKeeper version 3.4.5 or newer.
+
+To use replication, set parameters in the [zookeeper](../../../operations/server_configuration_parameters/settings.md#server-settings_zookeeper) server configuration section.
+
+!!! attention "Attention"
+    Don't neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.
+
+Example of setting the addresses of the ZooKeeper cluster:
+
+``` xml
+<zookeeper>
+    <node index="1">
+        <host>example1</host>
+        <port>2181</port>
+    </node>
+    <node index="2">
+        <host>example2</host>
+        <port>2181</port>
+    </node>
+    <node index="3">
+        <host>example3</host>
+        <port>2181</port>
+    </node>
+</zookeeper>
+```
+
+You can specify any existing ZooKeeper cluster, and the system will use a directory on it for its own data (the directory is specified when creating a replicatable table).
+
+If ZooKeeper isn't set in the config file, you can't create replicated tables, and any existing replicated tables will be read-only.
+
+ZooKeeper is not used in `SELECT` queries, because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries).
+
+For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions.
+(To be more precise, this is for each inserted block of data; an INSERT query contains one block, or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendation to insert data in batches of no more than one `INSERT` per second, it doesn't create any problems. The entire ClickHouse cluster used for coordinating one ZooKeeper cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data.
+
+For very large clusters, you can use different ZooKeeper clusters for different shards. However, this hasn't proven necessary on the Yandex.Metrica cluster (approximately 300 servers).
+
+Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network.
+
+By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.
+
+Each block of data is written atomically. The INSERT query is divided into blocks of up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is performed atomically.
+
+Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is network failures, when the client application doesn't know whether the data was written to the DB, so the `INSERT` query can simply be repeated. It doesn't matter to which replica the INSERTs with identical data were sent. `INSERTs` are idempotent. Deduplication parameters are controlled by the [merge_tree](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-merge_tree) server settings.
+
+During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.)
+
+You can have any number of replicas of the same data. Yandex.Metrica uses double replication in production. Each server uses RAID-5 or RAID-6, and RAID-10 in some cases. This is a relatively reliable and convenient solution.
+
+The system monitors data synchronicity on replicas and is able to recover after a failure. Failover is automatic (for small differences in data) or semi-automatic (when data differs too much, which may indicate a configuration error).
+
+## Creating Replicated Tables {#creating-replicated-tables}
+
+The `Replicated` prefix is added to the table engine name. For example: `ReplicatedMergeTree`.
+
+**Replicated\*MergeTree parameters**
+
+- `zoo_path` — The path to the table in ZooKeeper.
+- `replica_name` — The replica name in ZooKeeper.
+
+Example:
+
+``` sql
+CREATE TABLE table_name
+(
+    EventDate DateTime,
+    CounterID UInt32,
+    UserID UInt32
+) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}')
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate, intHash32(UserID))
+SAMPLE BY intHash32(UserID)
+```
+
+<details markdown="1">
+
+<summary>Example in deprecated syntax</summary>
+
+``` sql
+CREATE TABLE table_name
+(
+    EventDate DateTime,
+    CounterID UInt32,
+    UserID UInt32
+) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192)
+```
+
+</details>
+
+As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the ‘macros’ section of the configuration file. Example:
+
+``` xml
+<macros>
+    <layer>05</layer>
+    <shard>02</shard>
+    <replica>example05-02-1.yandex.ru</replica>
+</macros>
+```
+
+The path to the table in ZooKeeper should be unique for each replicated table. Tables on different shards should have different paths.
+In this case, the path consists of the following parts:
+
+`/clickhouse/tables/` is the common prefix. We recommend using exactly this one.
+
+`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the Yandex.Metrica cluster uses two-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier.
+
+`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it doesn't change after a RENAME query.
+*HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name`
+
+The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard.
+
+You can define the parameters explicitly instead of using substitutions. This might be convenient for testing and for configuring small clusters. However, you can't use distributed DDL queries (`ON CLUSTER`) in this case.
+
+When working with large clusters, we recommend using substitutions because they reduce the probability of error.
+
+Run the `CREATE TABLE` query on each replica. This query creates a new replicated table, or adds a new replica to an existing one.
+
+If you add a new replica after the table already contains some data on other replicas, the data will be copied from the other replicas to the new one after running the query. In other words, the new replica syncs itself with the others.
+
+To delete a replica, run `DROP TABLE`. However, only one replica is deleted – the one that resides on the server where you run the query.
+
+## Recovery After Failures {#recovery-after-failures}
+
+If ZooKeeper is unavailable when a server starts, replicated tables switch to read-only mode. The system periodically attempts to connect to ZooKeeper.
+
+If ZooKeeper is unavailable during an `INSERT`, or an error occurs when interacting with ZooKeeper, an exception is thrown.
+
+After connecting to ZooKeeper, the system checks whether the set of data in the local file system matches the expected set of data (ZooKeeper stores this information). If there are minor inconsistencies, the system resolves them by syncing data with the replicas.
+
+If the system detects broken data parts (with the wrong size of files) or unrecognized parts (parts written to the file system but not recorded in ZooKeeper), it moves them to the `detached` subdirectory (they are not deleted). Any missing parts are copied from the replicas.
+
+Note that ClickHouse does not perform any destructive actions such as automatically deleting a large amount of data.
+
+When the server starts (or establishes a new session with ZooKeeper), it only checks the quantity and sizes of all files. If the file sizes match but bytes have been changed somewhere in the middle, this is not detected immediately, but only when attempting to read the data for a `SELECT` query. The query throws an exception about a non-matching checksum or size of a compressed block.
+In this case, data parts are added to the verification queue and copied from the replicas if necessary.
+
+If the local set of data differs too much from the expected one, a safety mechanism is triggered. The server enters this in the log and refuses to launch. The reason is that this case may indicate a configuration error, such as a replica on one shard having been accidentally configured like a replica on a different shard. However, the thresholds for this mechanism are set fairly low, and this situation might occur during normal failure recovery. In this case, data is restored semi-automatically, by “pushing a button”.
+
+To start recovery, create the node `/path_to_table/replica_name/flags/force_restore_data` in ZooKeeper with any content, or run the command to restore all replicated tables:
+
+``` bash
+sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data
+```
+
+Then restart the server. On start, the server deletes these flags and begins recovery.
+
+## Recovery After Complete Data Loss {#recovery-after-complete-data-loss}
+
+If all data and metadata disappeared from one of the servers, follow these steps for recovery:
+
+1.  Install ClickHouse on the server. Define substitutions correctly in the config file that contains the shard identifier and replicas, if you use them.
+2.  If you had unreplicated tables that must be manually duplicated on the servers, copy their data from a replica (in the directory `/var/lib/clickhouse/data/db_name/table_name/`).
+3.  Copy the table definitions located in `/var/lib/clickhouse/metadata/` from a replica. If a shard or replica identifier is defined explicitly in the table definitions, correct it so that it corresponds to this replica. (Alternatively, start the server and make all the `ATTACH TABLE` queries that should have been in the .sql files in `/var/lib/clickhouse/metadata/`.)
+4.  To start recovery, create the ZooKeeper node `/path_to_table/replica_name/flags/force_restore_data` with any content, or run the command to restore all replicated tables: `sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data`
+
+Then start the server (restart, if it is already running). Data will be downloaded from replicas.
+
+An alternative recovery option is to delete the information about the lost replica from ZooKeeper (`/path_to_table/replica_name`), then create the replica again as described in “[Creating replicated tables](#creating-replicated-tables)”.
+
+There is no restriction on network bandwidth during recovery. Keep this in mind if you are restoring many replicas at once.
+
+## Converting from MergeTree to ReplicatedMergeTree {#converting-from-mergetree-to-replicatedmergetree}
+
+We use the term `MergeTree` to refer to all table engines in the `MergeTree family`, the same as for `ReplicatedMergeTree`.
+
+If you had a `MergeTree` table that was manually replicated, you can convert it to a replicated table. You might need to do this if you have already collected a large amount of data in a `MergeTree` table and now you want to enable replication.
+
+If the data differs on various replicas, first sync it, or delete this data on all the replicas except one.
+
+Rename the existing MergeTree table, then create a `ReplicatedMergeTree` table with the old name.
+Move the data from the old table to the `detached` subdirectory inside the directory with the new table data (`/var/lib/clickhouse/data/db_name/table_name/`).
+Then run `ALTER TABLE ATTACH PARTITION` on one of the replicas to add these data parts to the working set.
+
+## Converting from ReplicatedMergeTree to MergeTree {#converting-from-replicatedmergetree-to-mergetree}
+
+Create a MergeTree table with a different name. Move all the data from the directory with the `ReplicatedMergeTree` table data to the new table's data directory. Then delete the `ReplicatedMergeTree` table and restart the server.
+
+If you want to get rid of a `ReplicatedMergeTree` table without launching the server:
+
+- Delete the corresponding `.sql` file in the metadata directory (`/var/lib/clickhouse/metadata/`).
+- Delete the corresponding path in ZooKeeper (`/path_to_table/replica_name`).
+
+After this, you can launch the server, create a `MergeTree` table, move the data to its directory, and then restart the server.
+
+## Recovery When Metadata in the ZooKeeper Cluster Is Lost or Damaged {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged}
+
+If the data in ZooKeeper was lost or damaged, you can save the data by moving it to an unreplicated table as described above.
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replication/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/summingmergetree.md b/docs/tr/engines/table_engines/mergetree_family/summingmergetree.md
new file mode 100644
index 00000000000..15a58064ab4
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/summingmergetree.md
@@ -0,0 +1,141 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 34
+toc_title: SummingMergeTree
+---
+
+# SummingMergeTree {#summingmergetree}
+
+The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree). The difference is that when merging data parts for `SummingMergeTree` tables, ClickHouse replaces all rows with the same primary key (or more accurately, with the same [sorting key](mergetree.md)) with one row which contains summarized values for the columns with a numeric data type. If the sorting key is composed in a way that a single key value corresponds to a large number of rows, this significantly reduces storage volume and speeds up data selection.
+
+We recommend using the engine together with `MergeTree`. Store complete data in a `MergeTree` table, and use `SummingMergeTree` for storing aggregated data, for example, when preparing reports. Such an approach will prevent you from losing valuable data due to an incorrectly composed primary key.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = SummingMergeTree([columns])
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+For a description of request parameters, see the [request description](../../../sql_reference/statements/create.md).
+
+**Parameters of SummingMergeTree**
+
+- `columns` - a tuple with the names of columns whose values will be summarized. Optional parameter.
+    The columns must be of a numeric type and must not be in the primary key.
+
+    If `columns` is not specified, ClickHouse summarizes the values in all columns with a numeric data type that are not in the primary key.
+
+**Query clauses**
+
+When creating a `SummingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
+
+<details markdown="1">
+
+<summary>Deprecated Method for Creating a Table</summary>
+
+!!! attention "Attention"
+    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns])
+```
+
+All of the parameters except `columns` have the same meaning as in `MergeTree`.
+
+- `columns` — tuple with names of columns values of which will be summarized. Optional parameter. For a description, see the text above.
+
+</details>
+
+## Usage Example {#usage-example}
+
+Consider the following table:
+
+``` sql
+CREATE TABLE summtt
+(
+    key UInt32,
+    value UInt32
+)
+ENGINE = SummingMergeTree()
+ORDER BY key
+```
+
+Insert data into it:
+
+``` sql
+INSERT INTO summtt Values(1,1),(1,2),(2,1)
+```
+
+ClickHouse may not sum all the rows completely ([see below](#data-processing)), so we use an aggregate function `sum` and a `GROUP BY` clause in the query.
+
+``` sql
+SELECT key, sum(value) FROM summtt GROUP BY key
+```
+
+``` text
+┌─key─┬─sum(value)─┐
+│   2 │          1 │
+│   1 │          3 │
+└─────┴────────────┘
+```
+
+## Data Processing {#data-processing}
+
+When data is inserted into a table, it is saved as-is. ClickHouse merges the inserted parts of data periodically, and this is when rows with the same primary key are summed and replaced with one row for each resulting part of data.
+
+ClickHouse can merge the data parts so that different resulting parts of data can consist of rows with the same primary key, i.e. the summation will be incomplete. Therefore, on `SELECT`, an aggregate function [sum()](../../../sql_reference/aggregate_functions/reference.md#agg_function-sum) and a `GROUP BY` clause should be used in a query, as described in the example above.
+
+### Common Rules for Summation {#common-rules-for-summation}
+
+The values in the columns with a numeric data type are summarized. The set of columns is defined by the parameter `columns`.
+
+If the values were 0 in all of the columns for summation, the row is deleted.
+
+If a column is not in the primary key and is not summarized, an arbitrary value is selected from the existing ones.
+
+The values are not summarized for columns in the primary key.
+
+### The Summation in the AggregateFunction Columns {#the-summation-in-the-aggregatefunction-columns}
+
+For columns of [AggregateFunction type](../../../sql_reference/data_types/aggregatefunction.md) ClickHouse behaves as the [AggregatingMergeTree](aggregatingmergetree.md) engine, aggregating according to the function.
+
+### Nested Structures {#nested-structures}
+
+A table can have nested data structures that are processed in a special way.
+
+If the name of a nested table ends with `Map` and it contains at least two columns that meet the following criteria:
+
+- the first column is numeric `(*Int*, Date, DateTime)` or a string `(String, FixedString)`, let's call it `key`,
+- the other columns are arithmetic `(*Int*, Float32/64)`, let's call it `(values...)`,
+
+then this nested table is interpreted as a mapping of `key => (values...)`, and when merging its rows, the elements of two data sets are merged by `key` with a summation of the corresponding `(values...)`.
+
+Examples:
+
+``` text
+[(1, 100)] + [(2, 150)] -> [(1, 100), (2, 150)]
+[(1, 100)] + [(1, 150)] -> [(1, 250)]
+[(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)]
+[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
+```
+
+When requesting data, use the [sumMap(key, value)](../../../sql_reference/aggregate_functions/reference.md) function for aggregation of `Map`.
+
+For a nested data structure, you do not need to specify its columns in the tuple of columns for summation.
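+
+For illustration, a hedged sketch of such a nested structure (the table and column names are made up for this example; note that the nested table name ends with `Map`):
+
+``` sql
+CREATE TABLE goals_summed
+(
+    Day Date,
+    UserID UInt64,
+    GoalsMap Nested(
+        GoalID UInt32,
+        Visits UInt64
+    )
+) ENGINE = SummingMergeTree()
+ORDER BY (Day, UserID);
+
+-- Nested columns are inserted as parallel arrays.
+INSERT INTO goals_summed VALUES ('2020-01-01', 1, [42], [1]);
+INSERT INTO goals_summed VALUES ('2020-01-01', 1, [42, 43], [2, 5]);
+
+-- sumMap merges the maps by key whether or not the background merge has happened yet.
+SELECT UserID, sumMap(GoalsMap.GoalID, GoalsMap.Visits) AS goals
+FROM goals_summed
+GROUP BY UserID;
+```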
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/)
diff --git a/docs/tr/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md b/docs/tr/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
new file mode 100644
index 00000000000..b9adb381783
--- /dev/null
+++ b/docs/tr/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
@@ -0,0 +1,238 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 37
+toc_title: VersionedCollapsingMergeTree
+---
+
+# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}
+
+This engine:
+
+- Allows quick writing of object states that are continually changing.
+- Deletes old object states in the background. This significantly reduces the volume of storage.
+
+See the section [Collapsing](#table_engines-versionedcollapsingmergetree) for details.
+
+The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree) and adds the logic for collapsing rows to the algorithm for merging data parts. `VersionedCollapsingMergeTree` serves the same purpose as [CollapsingMergeTree](collapsingmergetree.md) but uses a different collapsing algorithm that allows inserting the data in any order with multiple threads. In particular, the `Version` column helps to collapse the rows properly even if they are inserted in the wrong order. In contrast, `CollapsingMergeTree` allows only strictly consecutive insertion.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = VersionedCollapsingMergeTree(sign, version)
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+For a description of query parameters, see the [query description](../../../sql_reference/statements/create.md).
+
+**Engine Parameters**
+
+``` sql
+VersionedCollapsingMergeTree(sign, version)
+```
+
+- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row.
+
+    The column data type should be `Int8`.
+
+- `version` — Name of the column with the version of the object state.
+
+    The column data type should be `UInt*`.
+
+**Query Clauses**
+
+When creating a `VersionedCollapsingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
+
+<details markdown="1">
+
+<summary>Deprecated Method for Creating a Table</summary>
+
+!!! attention "Attention"
+    Do not use this method in new projects. If possible, switch the old projects to the method described above.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version)
+```
+
+All of the parameters except `sign` and `version` have the same meaning as in `MergeTree`.
+
+- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row.
+
+    Column Data Type — `Int8`.
+
+- `version` — Name of the column with the version of the object state.
+
+    The column data type should be `UInt*`.
+
+</details>
+
+## Collapsing {#table_engines-versionedcollapsingmergetree}
+
+### Data {#data}
+
+Consider a situation where you need to save continually changing data for some object. It is reasonable to have one row for an object and update the row whenever there are changes. However, the update operation is expensive and slow for a DBMS because it requires rewriting the data in the storage. Update is not acceptable if you need to write data quickly, but you can write the changes to an object sequentially as follows.
+
+Use the `Sign` column when writing the row. If `Sign = 1` it means that the row is a state of an object (let's call it the “state” row). If `Sign = -1` it indicates the cancellation of the state of an object with the same attributes (let's call it the “cancel” row). Also use the `Version` column, which should identify each state of an object with a separate number.
+
+For example, we want to calculate how many pages users visited on some site and how long they were there. At some point in time we write the following row with the state of user activity:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+At some moment later we register the change of user activity and write it with the following two rows.
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+│ 4324182021466249494 │         6 │      185 │    1 │       2 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+The first row cancels the previous state of the object (user). It should copy all of the fields of the canceled state except `Sign`.
+
+The second row contains the current state.
+
+Because we need only the last state of user activity, the rows
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+can be deleted, collapsing the invalid (old) state of the object. `VersionedCollapsingMergeTree` does this while merging the data parts.
+
+To find out why we need two rows for each change, see [Algorithm](#table_engines-versionedcollapsingmergetree-algorithm).
+
+**Notes on Usage**
+
+1.  The program that writes the data should remember the state of an object in order to cancel it. The “cancel” row should be a copy of the “state” row with the opposite `Sign`. This increases the initial size of storage but allows writing the data quickly.
+2.  Long growing arrays in columns reduce the efficiency of the engine due to the load for writing. The more straightforward the data, the better the efficiency.
+3.  `SELECT` results depend strongly on the consistency of the history of object changes. Be accurate when preparing data for inserting. You can get unpredictable results with inconsistent data, such as negative values for non-negative metrics like session depth.
+
+### Algorithm {#table_engines-versionedcollapsingmergetree-algorithm}
+
+When ClickHouse merges data parts, it deletes each pair of rows that have the same primary key and version and different `Sign`. The order of rows does not matter.
+
+When ClickHouse inserts data, it orders rows by the primary key. If the `Version` column is not in the primary key, ClickHouse adds it to the primary key implicitly as the last field and uses it for ordering.
+
+## Selecting Data {#selecting-data}
+
+ClickHouse does not guarantee that all of the rows with the same primary key will be in the same resulting data part, or even on the same physical server. This is true both for writing the data and for the subsequent merging of the data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely “collapsed” data from a `VersionedCollapsingMergeTree` table.
+
+To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and add `HAVING sum(Sign) > 0`.
+
+The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` can't be calculated, because `VersionedCollapsingMergeTree` does not save the history of values of collapsed states.
+
+If you need to extract the data with “collapsing” but without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is inefficient and should not be used with large tables.
+
+## Example of Use {#example-of-use}
+
+Example data:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+│ 4324182021466249494 │         6 │      185 │    1 │       2 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+Creating the table:
+
+``` sql
+CREATE TABLE UAct
+(
+    UserID UInt64,
+    PageViews UInt8,
+    Duration UInt8,
+    Sign Int8,
+    Version UInt8
+)
+ENGINE = VersionedCollapsingMergeTree(Sign, Version)
+ORDER BY UserID
+```
+
+Inserting the data:
+
+``` sql
+INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1)
+```
+
+``` sql
+INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2)
+```
+
+We use two `INSERT` queries to create two different data parts. If we insert the data with a single query, ClickHouse creates one data part and will never perform any merge.
+
+Getting the data:
+
+``` sql
+SELECT * FROM UAct
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 │
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
+│ 4324182021466249494 │         6 │      185 │    1 │       2 │
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+What do we see here, and where are the collapsed parts?
+We created two data parts using two `INSERT` queries. The `SELECT` query was performed in two threads, and the result is a random order of rows.
+Collapsing did not occur because the data parts have not been merged yet. ClickHouse merges data parts at an unknown point in time which we cannot predict.
+
+This is why we need aggregation:
+
+``` sql
+SELECT
+    UserID,
+    sum(PageViews * Sign) AS PageViews,
+    sum(Duration * Sign) AS Duration,
+    Version
+FROM UAct
+GROUP BY UserID, Version
+HAVING sum(Sign) > 0
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐
+│ 4324182021466249494 │         6 │      185 │       2 │
+└─────────────────────┴───────────┴──────────┴─────────┘
+```
+
+If we don't need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause.
+
+``` sql
+SELECT * FROM UAct FINAL
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         6 │      185 │    1 │       2 │
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+This is a very inefficient way to select data. Don't use it for large tables.
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/)
diff --git a/docs/tr/engines/table_engines/special/buffer.md b/docs/tr/engines/table_engines/special/buffer.md
new file mode 100644
index 00000000000..fa53822ab2b
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/buffer.md
@@ -0,0 +1,71 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 45
+toc_title: Arabellek
+---
+
+# Buffer {#buffer}
+
+Buffers the data to write in RAM, periodically flushing it to another table. During the read operation, data is read from the buffer and the other table simultaneously.
+
+``` sql
+Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes)
+```
+
+Engine parameters:
+
+- `database` – Database name. Instead of the database name, you can use a constant expression that returns a string.
+- `table` – Table to flush data to.
+- `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` independent buffers. Recommended value: 16.
+- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, and `max_bytes` – Conditions for flushing data from the buffer.
+
+Data is flushed from the buffer and written to the destination table if all the `min*` conditions or at least one `max*` condition are met.
+
+- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer.
+- `min_rows`, `max_rows` – Condition for the number of rows in the buffer.
+- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer.
+
+During the write operation, data is inserted into one of `num_layers` random buffers. Or, if the data part to insert is large enough (greater than `max_rows` or `max_bytes`), it is written directly to the destination table, omitting the buffer.
+
+The conditions for flushing the data are calculated separately for each of the `num_layers` buffers. For example, if `num_layers = 16` and `max_bytes = 100000000`, the maximum RAM consumption is 1.6 GB.
+
+Example:
+
+``` sql
+CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000)
+```
+
+This creates a ‘merge.hits_buffer’ table with the same structure as ‘merge.hits’, using the Buffer engine. When writing to this table, data is buffered in RAM and later written to the ‘merge.hits’ table. 16 buffers are created. The data in each of them is flushed if either 100 seconds have passed, or one million rows have been written, or 100 MB of data have been written; or if simultaneously 10 seconds have passed and 10,000 rows and 10 MB of data have been written. For example, if just one row has been written, after 100 seconds it will be flushed, no matter what.
+But if many rows have been written, the data will be flushed sooner.
+
+When the server is stopped, or with DROP TABLE or DETACH TABLE, buffer data is also flushed to the destination table.
+
+You can set empty strings in single quotation marks for the database and table name. This indicates the absence of a destination table. In this case, when the data flush conditions are reached, the buffer is simply cleared. This may be useful for keeping a window of data in memory.
+
+When reading from a Buffer table, data is processed both from the buffer and from the destination table (if there is one).
+Note that Buffer tables do not support an index. In other words, data in the buffer is fully scanned, which might be slow for large buffers. (For data in a subordinate table, the index that it supports will be used.)
+
+If the set of columns in the Buffer table doesn't match the set of columns in a subordinate table, a subset of columns that exist in both tables is inserted.
+
+If the types don't match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log and the buffer is cleared.
+The same thing happens if the subordinate table doesn't exist when the buffer is flushed.
+
+If you need to run ALTER for a subordinate table and the Buffer table, we recommend first deleting the Buffer table, running ALTER for the subordinate table, then creating the Buffer table again.
+
+If the server is restarted abnormally, the data in the buffer is lost.
+
+FINAL and SAMPLE do not work correctly for Buffer tables. These conditions are passed to the destination table but are not used for processing data in the buffer. If these features are required, we recommend only using the Buffer table for writing, while reading from the destination table.
+
+When adding data to a Buffer, one of the buffers is locked. This causes delays if a read operation is simultaneously being performed from the table.
+
+Data that is inserted into a Buffer table may end up in the subordinate table in a different order and in different blocks. Because of this, a Buffer table is difficult to use for writing to a CollapsingMergeTree correctly. To avoid problems, you can set ‘num_layers’ to 1.
+
+If the destination table is replicated, some expected characteristics of replicated tables are lost when writing to a Buffer table. The random changes to the order of rows and sizes of data parts cause data deduplication to quit working, which means it is not possible to have a reliable ‘exactly once’ write to replicated tables.
+
+Due to these disadvantages, we can only recommend using a Buffer table in rare cases.
+
+A Buffer table is used when too many INSERTs are received from a large number of servers over a unit of time and data can't be buffered before insertion, which means the INSERTs can't run fast enough.
+
+Note that it doesn't make sense to insert data one row at a time, even for Buffer tables. This will only produce a speed of a few thousand rows per second, while inserting larger blocks of data can produce over a million rows per second (see the section “Performance”).
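+
+A small hedged sketch of the typical write-through pattern (the names are illustrative); as noted above, a constant expression such as `currentDatabase()` may be used instead of the database name:
+
+``` sql
+CREATE TABLE hits_dest (x UInt64) ENGINE = MergeTree ORDER BY x;
+
+-- One buffer layer; flush after at most 10 seconds, 10000 rows, or 1 MB.
+CREATE TABLE hits_in AS hits_dest
+ENGINE = Buffer(currentDatabase(), hits_dest, 1, 1, 10, 100, 10000, 10240, 1048576);
+
+INSERT INTO hits_in VALUES (1), (2), (3);
+
+-- Reads see both the buffered rows and the rows already flushed to hits_dest.
+SELECT count() FROM hits_in;
+```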
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/buffer/)
diff --git a/docs/tr/engines/table_engines/special/dictionary.md b/docs/tr/engines/table_engines/special/dictionary.md
new file mode 100644
index 00000000000..ce02429a31f
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/dictionary.md
@@ -0,0 +1,97 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 35
+toc_title: "S\xF6zl\xFCk"
+---
+
+# Dictionary {#dictionary}
+
+The `Dictionary` engine displays the [dictionary](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) data as a ClickHouse table.
+
+As an example, consider a dictionary of `products` with the following configuration:
+
+``` xml
+<dictionary>
+    <name>products</name>
+    <source>
+        <odbc>
+            <table>products</table>
+            <connection_string>DSN=some-db-server</connection_string>
+        </odbc>
+    </source>
+
+    <lifetime>
+        <min>300</min>
+        <max>360</max>
+    </lifetime>
+
+    <layout>
+        <flat/>
+    </layout>
+
+    <structure>
+        <id>
+            <name>product_id</name>
+        </id>
+
+        <attribute>
+            <name>title</name>
+            <type>String</type>
+            <null_value></null_value>
+        </attribute>
+    </structure>
+</dictionary>
+```
+
+Query the dictionary data:
+
+``` sql
+SELECT
+    name,
+    type,
+    key,
+    attribute.names,
+    attribute.types,
+    bytes_allocated,
+    element_count,
+    source
+FROM system.dictionaries
+WHERE name = 'products'
+```
+
+``` text
+┌─name─────┬─type─┬─key────┬─attribute.names─┬─attribute.types─┬─bytes_allocated─┬─element_count─┬─source──────────┐
+│ products │ Flat │ UInt64 │ ['title']       │ ['String']      │        23065376 │        175032 │ ODBC: .products │
+└──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘
+```
+
+You can use the [dictGet\*](../../../sql_reference/functions/ext_dict_functions.md#ext_dict_functions) function to get the dictionary data in this format.
+
+This view isn't helpful when you need to get raw data, or when performing a `JOIN` operation. For these cases, you can use the `Dictionary` engine, which displays the dictionary data in a table.
+
+Syntax:
+
+``` sql
+CREATE TABLE %table_name% (%fields%) engine = Dictionary(%dictionary_name%)
+```
+
+Usage example:
+
+``` sql
+create table products (product_id UInt64, title String) Engine = Dictionary(products);
+```
+
+    Ok
+
+Take a look at what's in the table.
+
+``` sql
+select * from products limit 1;
+```
+
+``` text
+┌────product_id─┬─title───────────┐
+│        152689 │ Some item       │
+└───────────────┴─────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/)
diff --git a/docs/tr/engines/table_engines/special/distributed.md b/docs/tr/engines/table_engines/special/distributed.md
new file mode 100644
index 00000000000..28cb0e0855d
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/distributed.md
@@ -0,0 +1,152 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 33
+toc_title: "Da\u011F\u0131l\u0131"
+---
+
+# Distributed {#distributed}
+
+**Tables with the Distributed engine do not store any data by themselves**, but allow distributed query processing on multiple servers.
+Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any.
+
+The Distributed engine accepts parameters:
+
+- the cluster name in the server's config file
+
+- the name of a remote database
+
+- the name of a remote table
+
+- (optionally) sharding key
+
+- (optionally) policy name, it will be used to store temporary files for async send
+
+    See also:
+
+    - `insert_distributed_sync` setting
+    - [MergeTree](../mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
+
+Example:
+
+``` sql
+Distributed(logs, default, hits[, sharding_key[, policy_name]])
+```
+
+Data will be read from all servers in the ‘logs’ cluster, from the default.hits table located on every server in the cluster.
+Data is not only read but is partially processed on the remote servers (to the extent that this is possible).
+For example, for a query with GROUP BY, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.
+
+Instead of the database name, you can use a constant expression that returns a string. For example: currentDatabase().
+
+logs – The cluster name in the server's config file.
+
+Clusters are set like this:
+
+``` xml
+<remote_servers>
+    <logs>
+        <shard>
+            <weight>1</weight>
+            <internal_replication>false</internal_replication>
+            <replica>
+                <host>example01-01-1</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example01-01-2</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <weight>2</weight>
+            <internal_replication>false</internal_replication>
+            <replica>
+                <host>example01-02-1</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example01-02-2</host>
+                <secure>1</secure>
+                <port>9440</port>
+            </replica>
+        </shard>
+    </logs>
+</remote_servers>
+```
+
+Here a cluster is defined with the name ‘logs’ that consists of two shards, each of which contains two replicas.
+Shards refer to the servers that contain different parts of the data (in order to read all the data, you must access all the shards).
+Replicas are duplicating servers (in order to read all the data, you can access the data on any one of the replicas).
+
+Cluster names must not contain dots.
+
+The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `compression` are specified for each server:
+- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn't start. If you change the DNS record, restart the server.
+- `port` – The TCP port for messenger activity (‘tcp_port’ in the config, usually set to 9000). Do not confuse it with http_port.
+- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Access rights](../../../operations/access_rights.md).
+- `password` – The password for connecting to a remote server (not masked). Default value: empty string.
+- `secure` - Use ssl for the connection; usually you should also define `port` = 9440. The server should listen on 9440 and have correct certificates.
+- `compression` - Use data compression. Default value: true.
+
+When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting.
+If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times.
+This works in favour of resiliency but does not provide complete fault tolerance: a remote server might accept the connection but might not work, or might work poorly.
+
+You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard.
+
+You can specify as many clusters as you wish in the configuration.
+
+To view your clusters, use the ‘system.clusters’ table.
+
+The Distributed engine allows working with a cluster like a local server. However, the cluster is inextensible: you must write its configuration in the server config file (even better, for all the cluster's servers).
+
+The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don't need to create a Distributed table – use the ‘remote’ table function instead.
+See the section [Table functions](../../../sql_reference/table_functions/index.md).
+
+There are two methods for writing data to a cluster:
+
+First, you can define which servers to write which data to, and perform the write directly on each shard. In other words, perform INSERT into the tables that the distributed table “looks at”. This is the most flexible solution, as you can use any sharding scheme, which could be non-trivial due to the requirements of the subject area. This is also the most optimal solution, since data can be written to different shards completely independently.
+
+Second, you can perform INSERT into a Distributed table (a short sketch follows at the end of this section). In this case, the table will distribute the inserted data across the servers itself. In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it doesn't mean anything in this case.
+
+Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in an amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19.
+
+Each shard can have the ‘internal_replication’ parameter defined in the config file.
+
+If this parameter is set to ‘true’, the write operation selects the first healthy replica and writes data to it. Use this alternative if the Distributed table “looks at” replicated tables. In other words, if the table where data will be written is going to replicate it itself.
+
+If it is set to ‘false’ (the default), data is written to all replicas. In essence, this means that the Distributed table replicates data itself. This is worse than using replicated tables, because the consistency of replicas is not checked, and over time they will contain slightly different data.
+
+To select the shard that a row of data is sent to, the sharding expression is analyzed, and the remainder is taken from dividing it by the total weight of the shards. The row is sent to the shard that corresponds to the half-interval of the remainders from ‘prev_weight’ to ‘prev_weights + weight’, where ‘prev_weights’ is the total weight of the shards with the smallest number, and ‘weight’ is the weight of this shard. For example, if there are two shards, and the first has a weight of 9 while the second has a weight of 10, the row will be sent to the first shard for the remainders from the range \[0, 9), and to the second for the remainders from the range \[9, 19).
+
+The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression ‘rand()’ for random distribution of data, or ‘UserID’ for distribution by the remainder from dividing the user's ID (then the data of a single user will reside on a single shard, which simplifies running IN and JOIN by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function: intHash64(UserID).
+
+A simple remainder from division is a limited solution for sharding and isn't always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables.
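+
+For illustration, a hedged sketch of the second method, reusing the ‘logs’ cluster and the default.hits table from the examples above (`hits_all` is a made-up name):
+
+``` sql
+-- Route each inserted row to a shard by intHash64(UserID), so that one user's
+-- rows always land on the same shard and IN/JOIN by UserID can stay local.
+CREATE TABLE hits_all AS default.hits
+ENGINE = Distributed(logs, default, hits, intHash64(UserID));
+
+-- Reads fan out to all shards; writes are routed by the sharding key.
+SELECT count() FROM hits_all;
+```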
+
+SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you don't have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
+
+You should be concerned about the sharding scheme in the following cases:
+
+- Queries are used that require joining data (IN or JOIN) by a specific key. If data is sharded by this key, you can use local IN or JOIN instead of GLOBAL IN or GLOBAL JOIN, which is much more efficient.
+- A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries not to affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we've done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries.
+
+Data is written asynchronously. When inserted into the table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. The period for sending data is managed by the [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) setting. This setting improves cluster performance by better utilizing local server and network resources. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`.
+
+If the server ceased to exist or had a rough restart (for example, after a device failure) after an INSERT to a Distributed table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the ‘broken’ subdirectory and no longer used.
+
+When the max_parallel_replicas option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
+
+## Virtual Columns {#virtual-columns}
+
+- `_shard_num` — Contains the `shard_num` (from `system.clusters`). Type: [UInt32](../../../sql_reference/data_types/int_uint.md).
+
+!!! note "Note"
+    Since the [`remote`](../../../sql_reference/table_functions/remote.md)/`cluster` table functions internally create a temporary instance of the same Distributed engine, `_shard_num` is available there too.
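+
+For example, a hedged sketch of inspecting how rows are spread across shards through this virtual column (using the hypothetical `hits_all` table from the sketch above):
+
+``` sql
+SELECT _shard_num, count() AS rows
+FROM hits_all
+GROUP BY _shard_num
+ORDER BY _shard_num;
+```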
+
+**See Also**
+
+- [Virtual columns](index.md#table_engines-virtual_columns)
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/distributed/)
diff --git a/docs/tr/engines/table_engines/special/external_data.md b/docs/tr/engines/table_engines/special/external_data.md
new file mode 100644
index 00000000000..123b2dbdf0e
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/external_data.md
@@ -0,0 +1,68 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 34
+toc_title: "D\u0131\u015F veri"
+---
+
+# External Data for Query Processing {#external-data-for-query-processing}
+
+ClickHouse allows sending a server the data that is needed for processing a query, together with a SELECT query. This data is put in a temporary table (see the section “Temporary tables”) and can be used in the query (for example, in IN operators).
+
+For example, if you have a text file with important user identifiers, you can upload it to the server along with a query that uses filtration by this list.
+
+If you need to run more than one query with a large volume of external data, don't use this feature. It is better to upload the data to the DB ahead of time.
+
+External data can be uploaded using the command-line client (in non-interactive mode), or using the HTTP interface.
+
+In the command-line client, you can specify a parameters section in the format
+
+``` bash
+--external --file=... [--name=...] [--format=...] [--types=...|--structure=...]
+```
+
+You may have multiple sections like this, one per table being transmitted.
+
+**–external** – Marks the beginning of a clause.
+**–file** – Path to the file with the table dump, or -, which refers to stdin.
+Only a single table can be retrieved from stdin.
+
+The following parameters are optional: **–name**– Name of the table. If omitted, _data is used.
+**–format** – Data format in the file. If omitted, TabSeparated is used.
+
+One of the following parameters is required: **–types** – A list of comma-separated column types. For example: `UInt64,String`. The columns will be named _1, _2, …
+**–structure**– The table structure in the format `UserID UInt64`, `URL String`. Defines the column names and types.
+
+The files specified in ‘file’ will be parsed by the format specified in ‘format’, using the data types specified in ‘types’ or ‘structure’. The table will be uploaded to the server and accessible there as a temporary table with the name in ‘name’.
+
+Examples:
+
+``` bash
+$ echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8
+849897
+$ cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String'
+/bin/sh 20
+/bin/false 5
+/bin/bash 4
+/usr/sbin/nologin 1
+/bin/sync 1
+```
+
+When using the HTTP interface, external data is passed in the multipart/form-data format. Each table is transmitted as a separate file. The table name is taken from the file name. The ‘query_string’ is passed the parameters ‘name_format’, ‘name_types’, and ‘name_structure’, where ‘name’ is the name of the table that these parameters correspond to. The meaning of the parameters is the same as when using the command-line client.
+
+Örnek:
+
+``` bash
+$ cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv
+
+$ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String'
+/bin/sh 20
+/bin/false 5
+/bin/bash 4
+/usr/sbin/nologin 1
+/bin/sync 1
+```
+
+Dağıtılmış sorgu işleme için geçici tablolar tüm uzak sunuculara gönderilir.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/external_data/)
diff --git a/docs/tr/engines/table_engines/special/file.md b/docs/tr/engines/table_engines/special/file.md
new file mode 100644
index 00000000000..c132d1dd5a4
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/file.md
@@ -0,0 +1,90 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 37
+toc_title: Dosya
+---
+
+# Dosya {#table_engines-file}
+
+Dosya tablo motoru, verileri desteklenen [dosya
+biçimlerinden](../../../interfaces/formats.md#formats) birinde (TabSeparated, Native vb.) tutar.
+
+Kullanım örnekleri:
+
+- Clickhouse'dan dosyaya veri aktarımı.
+- Verileri bir biçimden diğerine dönüştürme.
+- Diskteki bir dosyayı düzenleyerek ClickHouse'daki verileri güncelleme.
+
+## ClickHouse Sunucusunda Kullanım {#usage-in-clickhouse-server}
+
+``` sql
+File(Format)
+```
+
+`Format` parametresi, kullanılabilir dosya biçimlerinden birini belirtir. `SELECT` sorguları çalıştırmak için biçimin giriş için, `INSERT` sorguları için ise çıkış için desteklenmesi gerekir. Kullanılabilir biçimler [Biçimler](../../../interfaces/formats.md#formats) bölümünde listelenmiştir.
+
+ClickHouse, `File` için dosya sistemi yolu belirtilmesine izin vermez. Sunucu yapılandırmasındaki [yol](../../../operations/server_configuration_parameters/settings.md) ayarıyla tanımlanan klasörü kullanır.
+
+`File(Format)` kullanarak tablo oluşturmak, bu klasörde boş bir alt dizin oluşturur. O tabloya veri yazıldığında, veriler bu alt dizindeki `data.Format` dosyasına konur.
+
+Bu alt klasörü ve dosyayı sunucu dosya sisteminde el ile oluşturup ardından eşleşen ada sahip tablo bilgisiyle [ATTACH](../../../sql_reference/statements/misc.md) edebilirsiniz; böylece bu dosyadaki verileri sorgulayabilirsiniz.
+
+!!! warning "Uyarı"
+    Bu işlevselliğe dikkat edin, çünkü ClickHouse bu tür dosyalardaki harici değişiklikleri izlemez. ClickHouse üzerinden ve ClickHouse dışından yapılan eşzamanlı yazmaların sonucu tanımsızdır.
+
+**Örnek:**
+
+**1.** `file_engine_table` tablosunu oluşturun:
+
+``` sql
+CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated)
+```
+
+Varsayılan olarak ClickHouse, `/var/lib/clickhouse/data/default/file_engine_table` klasörünü oluşturur.
+
+**2.** `/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated` dosyasını el ile, şu içerikle oluşturun:
+
+``` bash
+$ cat data.TabSeparated
+one 1
+two 2
+```
+
+**3.** Verileri sorgulayın:
+
+``` sql
+SELECT * FROM file_engine_table
+```
+
+``` text
+┌─name─┬─value─┐
+│ one │ 1 │
+│ two │ 2 │
+└──────┴───────┘
+```
+
+## clickhouse-local'de Kullanım {#usage-in-clickhouse-local}
+
+[clickhouse-local](../../../operations/utilities/clickhouse-local.md) içinde Dosya motoru, `Format`'a ek olarak dosya yolunu da kabul eder. Varsayılan giriş/çıkış akışları, `0` veya `stdin`, `1` veya `stdout` gibi sayısal veya okunabilir adlar kullanılarak belirtilebilir.
+**Örnek:**
+
+``` bash
+$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
+```
+
+## Uygulama Detayları {#details-of-implementation}
+
+- Birden çok `SELECT` sorgusu aynı anda yapılabilir, ancak `INSERT` sorguları birbirini bekler.
+- `INSERT` sorgusuyla yeni dosya oluşturma desteklenir.
+- Dosya varsa, `INSERT` ona yeni değerler ekler.
+- Desteklenmeyenler:
+    - `ALTER`
+    - `SELECT ... SAMPLE`
+    - Dizinler
+    - Çoğaltma
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/file/)
diff --git a/docs/tr/engines/table_engines/special/generate.md b/docs/tr/engines/table_engines/special/generate.md
new file mode 100644
index 00000000000..01d2534441e
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/generate.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 46
+toc_title: GenerateRandom
+---
+
+# GenerateRandom {#table_engines-generate}
+
+GenerateRandom tablo motoru, verilen tablo şeması için rastgele veri üretir.
+
+Kullanım örnekleri:
+
+- Tekrarlanabilir biçimde büyük bir tabloyu doldurmak için testlerde kullanın.
+- Fuzzing testleri için rastgele girdi oluşturun.
+
+## ClickHouse Sunucusunda Kullanım {#usage-in-clickhouse-server}
+
+``` sql
+ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
+```
+
+`max_array_length` ve `max_string_length` parametreleri, oluşturulan verilerdeki tüm dizi sütunlarının ve dizelerin maksimum uzunluğunu belirtir.
+
+Bu tablo motoru yalnızca `SELECT` sorgularını destekler.
+
+`LowCardinality` ve `AggregateFunction` dışında, bir tabloda saklanabilen tüm [veri türlerini](../../../sql_reference/data_types/index.md) destekler.
+
+**Örnek:**
+
+**1.** `generate_engine_table` tablosunu oluşturun:
+
+``` sql
+CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
+```
+
+**2.** Verileri sorgulayın:
+
+``` sql
+SELECT * FROM generate_engine_table LIMIT 3
+```
+
+``` text
+┌─name─┬──────value─┐
+│ c4xJ │ 1412771199 │
+│ r │ 1791099446 │
+│ 7#$ │ 124312908 │
+└──────┴────────────┘
+```
+
+## Uygulama Detayları {#details-of-implementation}
+
+- Desteklenmeyenler:
+    - `ALTER`
+    - `SELECT ... SAMPLE`
+    - `INSERT`
+    - Dizinler
+    - Çoğaltma
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/tr/engines/table_engines/special/index.md b/docs/tr/engines/table_engines/special/index.md
new file mode 100644
index 00000000000..2e754a86bc8
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "\xD6zellikli"
+toc_priority: 31
+---
+
+
diff --git a/docs/tr/engines/table_engines/special/join.md b/docs/tr/engines/table_engines/special/join.md
new file mode 100644
index 00000000000..2fa786148e0
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/join.md
@@ -0,0 +1,111 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 40
+toc_title: Join
+---
+
+# Join {#join}
+
+[JOIN](../../../sql_reference/statements/select.md#select-join) işleminde kullanılmak üzere hazırlanmış bir veri yapısıdır.
+
+## Tablo oluşturma {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
+) ENGINE = Join(join_strictness, join_type, k1[, k2, ...])
+```
+
+Ayrıntılı açıklama için [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) sorgusuna bakın.
+
+**Motor Parametreleri**
+
+- `join_strictness` – [JOIN katılığı](../../../sql_reference/statements/select.md#select-join-strictness).
+- `join_type` – [JOIN türü](../../../sql_reference/statements/select.md#select-join-types).
+- `k1[, k2, ...]` – `JOIN` işleminin yapıldığı `USING` yan tümcesindeki anahtar sütunlar.
+
+`join_strictness` ve `join_type` parametrelerini tırnak işaretleri olmadan girin; örneğin, `Join(ANY, LEFT, col1)`. Bunlar, tablonun kullanılacağı `JOIN` işlemiyle eşleşmelidir. Parametreler eşleşmezse, ClickHouse bir istisna atmaz ve yanlış veri döndürebilir.
+
+## Tablo Kullanımı {#table-usage}
+
+### Örnek {#example}
+
+Sol taraftaki tabloyu oluşturma:
+
+``` sql
+CREATE TABLE id_val(`id` UInt32, `val` UInt32) ENGINE = TinyLog
+```
+
+``` sql
+INSERT INTO id_val VALUES (1,11)(2,12)(3,13)
+```
+
+Sağ taraftaki `Join` tablosunu oluşturma:
+
+``` sql
+CREATE TABLE id_val_join(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id)
+```
+
+``` sql
+INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23)
+```
+
+Tabloları birleştirme:
+
+``` sql
+SELECT * FROM id_val ANY LEFT JOIN id_val_join USING (id) SETTINGS join_use_nulls = 1
+```
+
+``` text
+┌─id─┬─val─┬─id_val_join.val─┐
+│ 1 │ 11 │ 21 │
+│ 2 │ 12 │ ᴺᵁᴸᴸ │
+│ 3 │ 13 │ 23 │
+└────┴─────┴─────────────────┘
+```
+
+Alternatif olarak, birleştirme anahtarı değerini belirterek `Join` tablosundan veri alabilirsiniz:
+
+``` sql
+SELECT joinGet('id_val_join', 'val', toUInt32(1))
+```
+
+``` text
+┌─joinGet('id_val_join', 'val', toUInt32(1))─┐
+│ 21 │
+└────────────────────────────────────────────┘
+```
+
+### Veri seçme ve ekleme {#selecting-and-inserting-data}
+
+`Join` motorlu tablolara veri eklemek için `INSERT` sorgularını kullanabilirsiniz. Tablo `ANY` katılığıyla oluşturulmuşsa, yinelenen anahtarlara ait veriler yok sayılır. `ALL` katılığında tüm satırlar eklenir.
+
+Tablodan doğrudan `SELECT` sorgusu gerçekleştiremezsiniz. Bunun yerine, aşağıdaki yöntemlerden birini kullanın:
+
+- Tabloyu bir `JOIN` işleminin sağ tarafına yerleştirin.
+- Tablodan, bir sözlükten alır gibi veri çekmenizi sağlayan [joinGet](../../../sql_reference/functions/other_functions.md#joinget) işlevini çağırın.
+
+### Sınırlamalar ve Ayarlar {#join-limitations-and-settings}
+
+Bir tablo oluşturulurken aşağıdaki ayarlar uygulanır:
+
+- [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls)
+- [max\_rows\_in\_join](../../../operations/settings/query_complexity.md#settings-max_rows_in_join)
+- [max\_bytes\_in\_join](../../../operations/settings/query_complexity.md#settings-max_bytes_in_join)
+- [join\_overflow\_mode](../../../operations/settings/query_complexity.md#settings-join_overflow_mode)
+- [join\_any\_take\_last\_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
+
+`Join` motorlu tablolar `GLOBAL JOIN` işlemlerinde kullanılamaz.
+
+`Join` motoru, `CREATE TABLE` deyiminde [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls) ayarının kullanılmasına izin verir. [SELECT](../../../sql_reference/statements/select.md) sorgusu da `join_use_nulls` kullanımına izin verir.
Farklı `join_use_nulls` ayarlarınız varsa, tabloyu birleştirirken hata alabilirsiniz; bu, JOIN türüne bağlıdır. [joinGet](../../../sql_reference/functions/other_functions.md#joinget) işlevini kullandığınızda, `CREATE TABLE` ve `SELECT` deyimlerinde aynı `join_use_nulls` ayarını kullanmanız gerekir.
+
+## Veri Depolama {#data-storage}
+
+`Join` tablosu verileri her zaman RAM'de bulunur. Bir tabloya satır eklerken ClickHouse, veri bloklarını diskteki dizine de yazar; böylece sunucu yeniden başlatıldığında veriler geri yüklenebilir.
+
+Sunucu düzgün olmayan biçimde yeniden başlatılırsa, diskteki veri bloğu kaybolabilir veya zarar görebilir. Bu durumda, hasarlı verileri içeren dosyayı el ile silmeniz gerekebilir.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/join/)
diff --git a/docs/tr/engines/table_engines/special/materializedview.md b/docs/tr/engines/table_engines/special/materializedview.md
new file mode 100644
index 00000000000..23e7d3122eb
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/materializedview.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 43
+toc_title: MaterializedView
+---
+
+# MaterializedView {#materializedview}
+
+Somutlaştırılmış görünümleri uygulamak için kullanılır (daha fazla bilgi için bkz. [CREATE TABLE](../../../sql_reference/statements/create.md)). Verileri depolamak için, görünüm oluşturulurken belirtilen farklı bir motor kullanır. Bir tablodan okurken yalnızca bu motoru kullanır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/)
diff --git a/docs/tr/engines/table_engines/special/memory.md b/docs/tr/engines/table_engines/special/memory.md
new file mode 100644
index 00000000000..a5f985ff5e2
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/memory.md
@@ -0,0 +1,19 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 44
+toc_title: Bellek
+---
+
+# Bellek {#memory}
+
+Bellek motoru, verileri RAM'de sıkıştırılmamış biçimde depolar. Veriler, okunduğunda alınan biçimin tam olarak aynısında saklanır. Başka bir deyişle, bu tablodan okumanın maliyeti yok denecek kadar azdır.
+Eşzamanlı veri erişimi senkronize edilir. Kilitler kısa sürelidir: okuma ve yazma işlemleri birbirini engellemez.
+Dizinler desteklenmez. Okuma paralelleştirilmiştir.
+Diskten okuma, sıkıştırma açma veya veri serileştirmesini çözme olmadığından, basit sorgularda maksimum üretkenliğe (10 GB/sn'den fazla) ulaşılır. (Birçok durumda MergeTree motorunun verimliliğinin buna neredeyse eşit derecede yüksek olduğunu da belirtmek gerekir.)
+Sunucu yeniden başlatıldığında veriler tablodan kaybolur ve tablo boş kalır.
+Normalde bu tablo motorunu kullanmak pek gerekçeli değildir. Bununla birlikte, testler ve nispeten az sayıda satırda (yaklaşık 100.000.000'a kadar) maksimum hızın gerekli olduğu görevler için kullanılabilir.
+
+Bellek motoru, harici sorgu verilerini tutan geçici tablolar için (bkz. “External data for processing a query”) ve GLOBAL IN'in uygulanması için (bkz. “IN operators”) sistem tarafından kullanılır.
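+
+Kısa bir kullanım taslağı (buradaki `mem_table` adı yalnızca örnek amaçlıdır):
+
+``` sql
+-- Veriler yalnızca RAM'de tutulur; sunucu yeniden başlatıldığında kaybolur.
+CREATE TABLE mem_table (id UInt32, value String) ENGINE = Memory;
+INSERT INTO mem_table VALUES (1, 'a'), (2, 'b');
+SELECT * FROM mem_table;
+```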
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/memory/)
diff --git a/docs/tr/engines/table_engines/special/merge.md b/docs/tr/engines/table_engines/special/merge.md
new file mode 100644
index 00000000000..bd4150e1afb
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/merge.md
@@ -0,0 +1,70 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 36
+toc_title: "Birle\u015Ftirmek"
+---
+
+# Birleştirmek {#merge}
+
+`Merge` motoru (`MergeTree` ile karıştırılmamalıdır) verileri kendisi saklamaz, ancak aynı anda herhangi bir sayıda başka tablodan okumaya izin verir.
+Okuma otomatik olarak paralelleştirilir. Tabloya yazma desteklenmez. Okurken, gerçekten okunmakta olan tabloların dizinleri varsa kullanılır.
+`Merge` motoru şu parametreleri kabul eder: veritabanı adı ve tablolar için bir düzenli ifade.
+
+Örnek:
+
+``` sql
+Merge(hits, '^WatchLog')
+```
+
+Veriler, `hits` veritabanındaki adları ‘`^WatchLog`’ düzenli ifadesiyle eşleşen tablolardan okunur.
+
+Veritabanı adı yerine, bir dize döndüren sabit bir ifade kullanabilirsiniz. Örneğin, `currentDatabase()`.
+
+Düzenli ifadeler — [re2](https://github.com/google/re2) (PCRE'nin bir alt kümesini destekler), büyük/küçük harfe duyarlı.
+Düzenli ifadelerdeki kaçış sembolleriyle ilgili notlar için “match” bölümüne bakın.
+
+Okunacak tablolar seçilirken, regex ile eşleşse bile `Merge` tablosunun kendisi seçilmez. Bunun amacı döngülerden kaçınmaktır.
+Sonsuza kadar birbirlerinin verilerini okumaya çalışacak iki `Merge` tablosu oluşturmak mümkündür, ancak bu iyi bir fikir değildir.
+
+`Merge` motorunun tipik kullanım şekli, çok sayıda `TinyLog` tablosuyla tek bir tabloymuş gibi çalışmaktır.
+
+Örnek 2:
+
+Diyelim ki eski bir tablonuz (WatchLog_old) var ve verileri yeni bir tabloya (WatchLog_new) taşımadan bölümlemeyi değiştirmeye karar verdiniz ve her iki tablodaki verileri görmeniz gerekiyor.
+
+``` sql
+CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64)
+ENGINE=MergeTree(date, (UserId, EventType), 8192);
+INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3);
+
+CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64)
+ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192;
+INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3);
+
+CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog');
+
+SELECT *
+FROM WatchLog
+```
+
+``` text
+┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐
+│ 2018-01-01 │ 1 │ hit │ 3 │
+└────────────┴────────┴───────────┴─────┘
+┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐
+│ 2018-01-02 │ 2 │ hit │ 3 │
+└────────────┴────────┴───────────┴─────┘
+```
+
+## Sanal Sütunlar {#virtual-columns}
+
+- `_table` — Verinin okunduğu tablonun adını içerir. Tür: [String](../../../sql_reference/data_types/string.md).
+
+    `WHERE/PREWHERE` yan tümcesinde `_table` üzerinde sabit koşullar ayarlayabilirsiniz (örneğin, `WHERE _table='xyz'`). Bu durumda okuma işlemi yalnızca `_table` koşulunun sağlandığı tablolar için gerçekleştirilir; dolayısıyla `_table` sütunu bir dizin görevi görür.
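+
+Örneğin, yukarıdaki `WatchLog` tablosunda her satırın hangi kaynak tablodan geldiğini saymak için `_table` kullanılabilir (taslak bir örnek):
+
+``` sql
+-- _table sanal sütunu, satırın okunduğu tabloyu gösterir.
+SELECT _table, count() AS cnt
+FROM WatchLog
+GROUP BY _table
+```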
+
+**Ayrıca Bakınız**
+
+- [Sanal sütunlar](index.md#table_engines-virtual_columns)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/merge/)
diff --git a/docs/tr/engines/table_engines/special/null.md b/docs/tr/engines/table_engines/special/null.md
new file mode 100644
index 00000000000..19d518f415f
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/null.md
@@ -0,0 +1,14 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 38
+toc_title: "Bo\u015F"
+---
+
+# Boş {#null}
+
+Boş (Null) bir tabloya yazarken veriler yok sayılır. Boş bir tablodan okurken yanıt boştur.
+
+Ancak, Boş bir tablo üzerinde somutlaştırılmış (materialized) bir görünüm oluşturabilirsiniz. Böylece tabloya yazılan veriler görünümde yer alır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/null/)
diff --git a/docs/tr/engines/table_engines/special/set.md b/docs/tr/engines/table_engines/special/set.md
new file mode 100644
index 00000000000..af94ada6e12
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/set.md
@@ -0,0 +1,19 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 39
+toc_title: Set
+---
+
+# Set {#set}
+
+Her zaman RAM'de bulunan bir veri kümesidir. IN operatörünün sağ tarafında kullanılmak üzere tasarlanmıştır (“IN operators” bölümüne bakın).
+
+Tabloya veri eklemek için INSERT kullanabilirsiniz. Veri kümesine yeni öğeler eklenir; yinelenenler yok sayılır.
+Ancak tablodan SELECT yapamazsınız. Verileri almanın tek yolu, onları IN operatörünün sağ yarısında kullanmaktır.
+
+Veriler her zaman RAM'de yer alır. INSERT sırasında, eklenen veri blokları diskteki tablo dizinine de yazılır. Sunucu başlatılırken bu veriler RAM'e yüklenir. Başka bir deyişle, yeniden başlatmadan sonra veriler yerinde kalır.
+
+Sunucunun kaba biçimde yeniden başlatılması durumunda diskteki veri bloğu kaybolabilir veya zarar görebilir. İkinci durumda, hasarlı verileri içeren dosyayı el ile silmeniz gerekebilir.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/set/)
diff --git a/docs/tr/engines/table_engines/special/url.md b/docs/tr/engines/table_engines/special/url.md
new file mode 100644
index 00000000000..51657248e75
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/url.md
@@ -0,0 +1,82 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 41
+toc_title: URL
+---
+
+# URL (URL, Biçim) {#table_engines-url}
+
+Uzak bir HTTP/HTTPS sunucusundaki verileri yönetir. Bu motor [Dosya](file.md) motoruna benzer.
+
+## ClickHouse sunucusunda motoru kullanma {#using-the-engine-in-the-clickhouse-server}
+
+`format`, ClickHouse'un `SELECT` sorgularında ve gerekirse `INSERT` işlemlerinde kullanabileceği bir biçim olmalıdır. Desteklenen biçimlerin tam listesi için bkz. [Biçimler](../../../interfaces/formats.md#formats).
+
+`URL`, Tekdüzen Kaynak Konumlandırıcı (Uniform Resource Locator) yapısına uygun olmalıdır. Belirtilen URL, HTTP veya HTTPS kullanan bir sunucuya işaret etmelidir. Sunucudan yanıt almak için herhangi bir ek başlık gerekmez.
+
+`INSERT` ve `SELECT` sorguları sırasıyla `POST` ve `GET` isteklerine dönüştürülür. `POST` isteklerinin işlenmesi için uzak sunucunun [yığınlı aktarım kodlamasını (chunked transfer encoding)](https://en.wikipedia.org/wiki/Chunked_transfer_encoding) desteklemesi gerekir.
+
+HTTP GET yönlendirme atlamalarının (hop) sayısını [max\_http\_get\_redirects](../../../operations/settings/settings.md#setting-max_http_get_redirects) ayarıyla sınırlayabilirsiniz.
+
+**Örnek:**
+
+**1.** Sunucuda bir `url_engine_table` tablosu oluşturun:
+
+``` sql
+CREATE TABLE url_engine_table (word String, value UInt64)
+ENGINE=URL('http://127.0.0.1:12345/', CSV)
+```
+
+**2.** Standart Python 3 araçlarını kullanarak temel bir HTTP sunucusu oluşturun ve başlatın:
+
+``` python3
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class CSVHTTPServer(BaseHTTPRequestHandler):
+    def do_GET(self):
+        self.send_response(200)
+        self.send_header('Content-type', 'text/csv')
+        self.end_headers()
+
+        self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8"))
+
+if __name__ == "__main__":
+    server_address = ('127.0.0.1', 12345)
+    HTTPServer(server_address, CSVHTTPServer).serve_forever()
+```
+
+``` bash
+$ python3 server.py
+```
+
+**3.** Verileri isteyin:
+
+``` sql
+SELECT * FROM url_engine_table
+```
+
+``` text
+┌─word──┬─value─┐
+│ Hello │ 1 │
+│ World │ 2 │
+└───────┴───────┘
+```
+
+## Uygulama Detayları {#details-of-implementation}
+
+- Okuma ve yazma paralel olabilir
+- Desteklenmeyenler:
+    - `ALTER` ve `SELECT...SAMPLE` işlemleri.
+    - Dizinler.
+    - Çoğaltma.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/url/)
diff --git a/docs/tr/engines/table_engines/special/view.md b/docs/tr/engines/table_engines/special/view.md
new file mode 100644
index 00000000000..e3b46a7b926
--- /dev/null
+++ b/docs/tr/engines/table_engines/special/view.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 42
+toc_title: "G\xF6r\xFCn\xFCm"
+---
+
+# Görünüm {#table_engines-view}
+
+Görünümleri uygulamak için kullanılır (daha fazla bilgi için `CREATE VIEW` sorgusuna bakın). Verinin kendisini saklamaz, yalnızca belirtilen `SELECT` sorgusunu saklar. Bir tablodan okurken bu sorguyu çalıştırır (ve gereksiz tüm sütunları sorgudan çıkarır).
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/view/)
diff --git a/docs/tr/faq/general.md b/docs/tr/faq/general.md
new file mode 100644
index 00000000000..05779e132a9
--- /dev/null
+++ b/docs/tr/faq/general.md
@@ -0,0 +1,60 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 78
+toc_title: Genel Sorular
+---
+
+# Genel Sorular {#general-questions}
+
+## Neden MapReduce Gibi Bir Şey Kullanmıyorsunuz? {#why-not-use-something-like-mapreduce}
+
+MapReduce benzeri sistemleri, reduce işleminin dağıtılmış sıralamaya dayandığı dağıtılmış bilgi işlem sistemleri olarak görebiliriz. Bu sınıftaki en yaygın açık kaynak çözüm [Apache Hadoop](http://hadoop.apache.org)'dur. Yandex, şirket içi çözümü olan YT'yi kullanır.
+
+Bu sistemler, yüksek gecikme süreleri nedeniyle çevrimiçi sorgular için uygun değildir. Başka bir deyişle, bir web arayüzünün arka ucu olarak kullanılamazlar. Bu tür sistemler gerçek zamanlı veri güncellemeleri için de kullanışlı değildir. İşlemin sonucu ve tüm ara sonuçlar (varsa) tek bir sunucunun RAM'inde bulunuyorsa (çevrimiçi sorgularda genellikle durum budur), dağıtılmış sıralama reduce işlemlerini gerçekleştirmenin en iyi yolu değildir. Böyle bir durumda reduce işlemlerini gerçekleştirmenin en uygun yolu bir karma tablodur. MapReduce görevlerini optimize etmeye yönelik yaygın bir yaklaşım, RAM'de bir karma tablo kullanarak ön toplama (kısmi reduce) yapmaktır. Kullanıcı bu optimizasyonu el ile gerçekleştirir.
Dağıtılmış sıralama, basit MapReduce görevleri çalıştırılırken düşük performansın ana nedenlerinden biridir.
+
+Çoğu MapReduce uygulaması, bir kümede rastgele kod çalıştırmanıza izin verir. Ancak bildirimsel bir sorgu dili, deneyleri hızlıca çalıştırabilmek için OLAP'a daha uygundur. Örneğin, Hadoop için Hive ve Pig vardır. Ayrıca Spark için Cloudera Impala veya Shark'ı (artık kullanımdan kalkmıştır) ve Spark SQL, Presto ve Apache Drill'i de düşünebilirsiniz. Bu tür görevleri çalıştırırken performans, özel sistemlere kıyasla oldukça düşüktür; ayrıca nispeten yüksek gecikme, bu sistemleri bir web arayüzünün arka ucu olarak kullanmayı gerçekçi olmaktan çıkarır.
+
+## Oracle aracılığıyla ODBC kullanırken kodlamalarla ilgili bir sorunum varsa ne olur? {#oracle-odbc-encodings}
+
+Oracle'ı ODBC sürücüsü aracılığıyla dış sözlüklerin kaynağı olarak kullanıyorsanız, `/etc/default/clickhouse` içinde `NLS_LANG` ortam değişkeni için doğru değeri ayarlamanız gerekir. Daha fazla bilgi için bkz. [Oracle NLS\_LANG SSS](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
+
+**Örnek**
+
+``` bash
+NLS_LANG=RUSSIAN_RUSSIA.UTF8
+```
+
+## Clickhouse'dan bir dosyaya verileri nasıl dışa aktarırım? {#how-to-export-to-file}
+
+### INTO OUTFILE yan tümcesini kullanma {#using-into-outfile-clause}
+
+Sorgunuza bir [INTO OUTFILE](../sql_reference/statements/select.md#into-outfile-clause) yan tümcesi ekleyin.
+
+Örneğin:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file'
+```
+
+Varsayılan olarak ClickHouse, çıktı verileri için [TabSeparated](../interfaces/formats.md#tabseparated) biçimini kullanır. [Veri biçimini](../interfaces/formats.md) seçmek için [FORMAT yan tümcesini](../sql_reference/statements/select.md#format-clause) kullanın.
+
+Örneğin:
+
+``` sql
+SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
+```
+
+### Dosya altyapısı tablosu kullanma {#using-a-file-engine-table}
+
+Bkz. [Dosya](../engines/table_engines/special/file.md).
+
+### Komut Satırı Yeniden Yönlendirmesini Kullanma {#using-command-line-redirection}
+
+``` bash
+$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
+```
+
+Bkz. [clickhouse-client](../interfaces/cli.md).
+
+{## [Orijinal makale](https://clickhouse.tech/docs/en/faq/general/) ##}
diff --git a/docs/tr/faq/index.md b/docs/tr/faq/index.md
new file mode 100644
index 00000000000..591011fb66d
--- /dev/null
+++ b/docs/tr/faq/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: F.A.Q.
+toc_priority: 76
+---
+
+
diff --git a/docs/tr/getting_started/example_datasets/amplab_benchmark.md b/docs/tr/getting_started/example_datasets/amplab_benchmark.md
new file mode 100644
index 00000000000..e6f95df68b8
--- /dev/null
+++ b/docs/tr/getting_started/example_datasets/amplab_benchmark.md
@@ -0,0 +1,129 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 17
+toc_title: "AMPLab B\xFCy\xFCk Veri Benchmark"
+---
+
+# AMPLab Büyük Veri Benchmark {#amplab-big-data-benchmark}
+
+Bkz. https://amplab.cs.berkeley.edu/benchmark/
+
+https://aws.amazon.com adresinden ücretsiz bir hesap için kaydolun; bir kredi kartı, e-posta adresi ve telefon numarası gerektirir. https://console.aws.amazon.com/iam/home?nc2=h\_m\_sc\#security\_credential adresinden yeni bir erişim anahtarı alın.
+
+Konsolda aşağıdakileri çalıştırın:
+
+``` bash
+$ sudo apt-get install s3cmd
+$ mkdir tiny; cd tiny;
+$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/tiny/ .
+$ cd ..
+$ mkdir 1node; cd 1node; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/1node/ . +$ cd .. +$ mkdir 5nodes; cd 5nodes; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/5nodes/ . +$ cd .. +``` + +Aşağıdaki ClickHouse sorguları çalıştırın: + +``` sql +CREATE TABLE rankings_tiny +( + pageURL String, + pageRank UInt32, + avgDuration UInt32 +) ENGINE = Log; + +CREATE TABLE uservisits_tiny +( + sourceIP String, + destinationURL String, + visitDate Date, + adRevenue Float32, + UserAgent String, + cCode FixedString(3), + lCode FixedString(6), + searchWord String, + duration UInt32 +) ENGINE = MergeTree(visitDate, visitDate, 8192); + +CREATE TABLE rankings_1node +( + pageURL String, + pageRank UInt32, + avgDuration UInt32 +) ENGINE = Log; + +CREATE TABLE uservisits_1node +( + sourceIP String, + destinationURL String, + visitDate Date, + adRevenue Float32, + UserAgent String, + cCode FixedString(3), + lCode FixedString(6), + searchWord String, + duration UInt32 +) ENGINE = MergeTree(visitDate, visitDate, 8192); + +CREATE TABLE rankings_5nodes_on_single +( + pageURL String, + pageRank UInt32, + avgDuration UInt32 +) ENGINE = Log; + +CREATE TABLE uservisits_5nodes_on_single +( + sourceIP String, + destinationURL String, + visitDate Date, + adRevenue Float32, + UserAgent String, + cCode FixedString(3), + lCode FixedString(6), + searchWord String, + duration UInt32 +) ENGINE = MergeTree(visitDate, visitDate, 8192); +``` + +Konsola geri dön: + +``` bash +$ for i in tiny/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_tiny FORMAT CSV"; done +$ for i in tiny/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_tiny FORMAT CSV"; done +$ for i in 1node/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_1node FORMAT CSV"; done +$ for i in 1node/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_1node FORMAT CSV"; done +$ for i in 5nodes/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_5nodes_on_single FORMAT CSV"; done +$ for i in 5nodes/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_5nodes_on_single FORMAT CSV"; done +``` + +Veri örnekleri almak için sorgular: + +``` sql +SELECT pageURL, pageRank FROM rankings_1node WHERE pageRank > 1000 + +SELECT substring(sourceIP, 1, 8), sum(adRevenue) FROM uservisits_1node GROUP BY substring(sourceIP, 1, 8) + +SELECT + sourceIP, + sum(adRevenue) AS totalRevenue, + avg(pageRank) AS pageRank +FROM rankings_1node ALL INNER JOIN +( + SELECT + sourceIP, + destinationURL AS pageURL, + adRevenue + FROM uservisits_1node + WHERE (visitDate > '1980-01-01') AND (visitDate < '1980-04-01') +) USING pageURL +GROUP BY sourceIP +ORDER BY totalRevenue DESC +LIMIT 1 +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/) diff --git a/docs/tr/getting_started/example_datasets/criteo.md b/docs/tr/getting_started/example_datasets/criteo.md new file mode 100644 index 00000000000..37cb2d2b0e6 --- /dev/null +++ b/docs/tr/getting_started/example_datasets/criteo.md @@ -0,0 +1,81 @@ +--- 
+machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 19 +toc_title: "Criteo'dan Terabyte t\u0131klama g\xFCnl\xFCkleri" +--- + +# Criteo'dan tıklama günlüklerinin terabayt {#terabyte-of-click-logs-from-criteo} + +Verileri indirin http://labs.criteo.com/downloads/download-terabyte-click-logs/ + +Günlük almak için bir tablo oluşturun: + +``` sql +CREATE TABLE criteo_log (date Date, clicked UInt8, int1 Int32, int2 Int32, int3 Int32, int4 Int32, int5 Int32, int6 Int32, int7 Int32, int8 Int32, int9 Int32, int10 Int32, int11 Int32, int12 Int32, int13 Int32, cat1 String, cat2 String, cat3 String, cat4 String, cat5 String, cat6 String, cat7 String, cat8 String, cat9 String, cat10 String, cat11 String, cat12 String, cat13 String, cat14 String, cat15 String, cat16 String, cat17 String, cat18 String, cat19 String, cat20 String, cat21 String, cat22 String, cat23 String, cat24 String, cat25 String, cat26 String) ENGINE = Log +``` + +Verileri indirin: + +``` bash +$ for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done +``` + +Dönüştürülen veriler için bir tablo oluşturma: + +``` sql +CREATE TABLE criteo +( + date Date, + clicked UInt8, + int1 Int32, + int2 Int32, + int3 Int32, + int4 Int32, + int5 Int32, + int6 Int32, + int7 Int32, + int8 Int32, + int9 Int32, + int10 Int32, + int11 Int32, + int12 Int32, + int13 Int32, + icat1 UInt32, + icat2 UInt32, + icat3 UInt32, + icat4 UInt32, + icat5 UInt32, + icat6 UInt32, + icat7 UInt32, + icat8 UInt32, + icat9 UInt32, + icat10 UInt32, + icat11 UInt32, + icat12 UInt32, + icat13 UInt32, + icat14 UInt32, + icat15 UInt32, + icat16 UInt32, + icat17 UInt32, + icat18 UInt32, + icat19 UInt32, + icat20 UInt32, + icat21 UInt32, + icat22 UInt32, + icat23 UInt32, + icat24 UInt32, + icat25 UInt32, + icat26 UInt32 +) ENGINE = MergeTree(date, intHash32(icat1), (date, intHash32(icat1)), 8192) +``` + +Verileri ham günlüğünden dönüştürün ve ikinci tabloya koyun: + +``` sql +INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, reinterpretAsUInt32(unhex(cat1)) AS icat1, reinterpretAsUInt32(unhex(cat2)) AS icat2, reinterpretAsUInt32(unhex(cat3)) AS icat3, reinterpretAsUInt32(unhex(cat4)) AS icat4, reinterpretAsUInt32(unhex(cat5)) AS icat5, reinterpretAsUInt32(unhex(cat6)) AS icat6, reinterpretAsUInt32(unhex(cat7)) AS icat7, reinterpretAsUInt32(unhex(cat8)) AS icat8, reinterpretAsUInt32(unhex(cat9)) AS icat9, reinterpretAsUInt32(unhex(cat10)) AS icat10, reinterpretAsUInt32(unhex(cat11)) AS icat11, reinterpretAsUInt32(unhex(cat12)) AS icat12, reinterpretAsUInt32(unhex(cat13)) AS icat13, reinterpretAsUInt32(unhex(cat14)) AS icat14, reinterpretAsUInt32(unhex(cat15)) AS icat15, reinterpretAsUInt32(unhex(cat16)) AS icat16, reinterpretAsUInt32(unhex(cat17)) AS icat17, reinterpretAsUInt32(unhex(cat18)) AS icat18, reinterpretAsUInt32(unhex(cat19)) AS icat19, reinterpretAsUInt32(unhex(cat20)) AS icat20, reinterpretAsUInt32(unhex(cat21)) AS icat21, reinterpretAsUInt32(unhex(cat22)) AS icat22, reinterpretAsUInt32(unhex(cat23)) AS icat23, reinterpretAsUInt32(unhex(cat24)) AS icat24, reinterpretAsUInt32(unhex(cat25)) AS icat25, reinterpretAsUInt32(unhex(cat26)) AS icat26 FROM criteo_log; + +DROP TABLE criteo_log; +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/) diff 
--git a/docs/tr/getting_started/example_datasets/index.md b/docs/tr/getting_started/example_datasets/index.md
new file mode 100644
index 00000000000..fecb39f219f
--- /dev/null
+++ b/docs/tr/getting_started/example_datasets/index.md
@@ -0,0 +1,22 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "\xD6rnek Veri K\xFCmeleri"
+toc_priority: 12
+toc_title: "Giri\u015F"
+---
+
+# Örnek Veri Kümeleri {#example-datasets}
+
+Bu bölümde, örnek veri kümelerinin nasıl elde edileceği ve bunların Clickhouse'a nasıl içe aktarılacağı açıklanmaktadır.
+Bazı veri kümeleri için örnek sorgular da mevcuttur.
+
+- [Anonim Yandex.Metrica Veri Kümesi](metrica.md)
+- [Star Schema Benchmark](star_schema.md)
+- [WikiStat](wikistat.md)
+- [Criteo'dan terabaytlık tıklama günlükleri](criteo.md)
+- [AMPLab Büyük Veri Benchmark](amplab_benchmark.md)
+- [New York Taksi Verileri](nyc_taxi.md)
+- [OnTime](ontime.md)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/getting_started/example_datasets)
diff --git a/docs/tr/getting_started/example_datasets/metrica.md b/docs/tr/getting_started/example_datasets/metrica.md
new file mode 100644
index 00000000000..a9d3d6743ee
--- /dev/null
+++ b/docs/tr/getting_started/example_datasets/metrica.md
@@ -0,0 +1,70 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 21
+toc_title: "Yandex.Metrica Verileri"
+---
+
+# Anonim Yandex.Metrica Verileri {#anonymized-yandex-metrica-data}
+
+Veri kümesi, Yandex.Metrica'nın isabetleri (`hits_v1`) ve ziyaretleri (`visits_v1`) hakkında anonimleştirilmiş veriler içeren iki tablodan oluşur. Yandex.Metrica hakkında daha fazla bilgiyi [ClickHouse geçmişi](../../introduction/history.md) bölümünde bulabilirsiniz.
+
+Veri kümesindeki iki tablonun her biri, sıkıştırılmış bir `tsv.xz` dosyası olarak veya hazırlanmış bölümler olarak indirilebilir. Buna ek olarak, `hits` tablosunun 100 milyon satır içeren genişletilmiş bir sürümü TSV olarak https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz adresinde ve hazırlanmış bölümler olarak https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz adresinde mevcuttur.
+ +## Hazırlanan bölümlerden tablolar elde etme {#obtaining-tables-from-prepared-partitions} + +İndirme ve ithalat tablo hits: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar +tar xvf hits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +``` + +İndirme ve ithalat ziyaretleri: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/visits/partitions/visits_v1.tar +tar xvf visits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" +``` + +## Sıkıştırılmış TSV dosyasından Tablo alma {#obtaining-tables-from-compressed-tsv-file} + +Sıkıştırılmış TSV dosyasından indir ve İçe Aktar: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, 
OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" +# import data +cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +``` + +Sıkıştırılmış tsv dosyasından ziyaretleri indirin ve içe aktarın: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, 
ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
+# import data
+cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000
+# optionally you can optimize table
+clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL"
+clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
+```
+
+## Örnek Sorgular {#example-queries}
+
+[ClickHouse eğitimi](../../getting_started/tutorial.md) Yandex.Metrica veri kümesine dayanmaktadır ve bu veri kümesine başlamanın önerilen yolu öğreticiyi baştan sona izlemektir.
+
+Bu tablolara yönelik ek sorgu örnekleri, ClickHouse'un [durum bilgili testleri](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) arasında bulunabilir (tablolar orada `test.hits` ve `test.visits` olarak adlandırılır).
diff --git a/docs/tr/getting_started/example_datasets/nyc_taxi.md b/docs/tr/getting_started/example_datasets/nyc_taxi.md
new file mode 100644
index 00000000000..218a7f06f7a
--- /dev/null
+++ b/docs/tr/getting_started/example_datasets/nyc_taxi.md
@@ -0,0 +1,390 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 16
+toc_title: New York Taksi Verileri
+---
+
+# New York Taksi Verileri {#new-york-taxi-data}
+
+Bu veri kümesi iki şekilde elde edilebilir:
+
+- ham verilerden içe aktarma
+- hazırlanmış bölümlerin indirilmesi
+
+## Ham veri nasıl alınır {#how-to-import-the-raw-data}
+
+Veri kümesinin açıklaması ve indirme talimatları için bkz. https://github.com/toddwschneider/nyc-taxi-data ve http://tech.marksblogg.com/billion-nyc-taxi-rides-redshift.html.
+
+İndirme, CSV dosyalarında yaklaşık 227 GB sıkıştırılmamış veriyle sonuçlanacaktır.
İndirme, 1 Gbit bağlantısı üzerinden yaklaşık bir saat sürer (paralel indirme s3.amazonaws.com 1 Gbit kanalın en az yarısını kurtarır). +Bazı dosyalar tam olarak indirilmeyebilir. Dosya boyutlarını kontrol edin ve şüpheli görünen herhangi birini yeniden indirin. + +Bazı dosyalar geçersiz satırlar içerebilir. Bunları aşağıdaki gibi düzeltebilirsiniz: + +``` bash +sed -E '/(.*,){18,}/d' data/yellow_tripdata_2010-02.csv > data/yellow_tripdata_2010-02.csv_ +sed -E '/(.*,){18,}/d' data/yellow_tripdata_2010-03.csv > data/yellow_tripdata_2010-03.csv_ +mv data/yellow_tripdata_2010-02.csv_ data/yellow_tripdata_2010-02.csv +mv data/yellow_tripdata_2010-03.csv_ data/yellow_tripdata_2010-03.csv +``` + +Daha sonra veriler Postgresql'de önceden işlenmelidir. Bu, çokgenlerdeki noktaların seçimlerini oluşturacaktır (Haritadaki noktaları New York şehrinin ilçeleriyle eşleştirmek için) ve tüm verileri bir birleştirme kullanarak tek bir denormalize düz tabloda birleştirecektir. Bunu yapmak için Postgresql'i Postgıs desteği ile yüklemeniz gerekir. + +Çalışırken dikkatli olun `initialize_database.sh` ve tüm tabloların doğru şekilde oluşturulduğunu manuel olarak yeniden kontrol edin. + +Postgresql'deki her ayın verilerini işlemek yaklaşık 20-30 dakika sürer, toplam yaklaşık 48 saat sürer. + +İndirilen satır sayısını aşağıdaki gibi kontrol edebilirsiniz: + +``` bash +$ time psql nyc-taxi-data -c "SELECT count(*) FROM trips;" +## Count + 1298979494 +(1 row) + +real 7m9.164s +``` + +(Bu, Mark Litwintschik tarafından bir dizi blog gönderisinde bildirilen 1.1 milyar satırdan biraz daha fazladır .) + +Postgresql'deki veriler 370 GB alan kullanıyor. + +PostgreSQL veri verme: + +``` sql +COPY +( + SELECT trips.id, + trips.vendor_id, + trips.pickup_datetime, + trips.dropoff_datetime, + trips.store_and_fwd_flag, + trips.rate_code_id, + trips.pickup_longitude, + trips.pickup_latitude, + trips.dropoff_longitude, + trips.dropoff_latitude, + trips.passenger_count, + trips.trip_distance, + trips.fare_amount, + trips.extra, + trips.mta_tax, + trips.tip_amount, + trips.tolls_amount, + trips.ehail_fee, + trips.improvement_surcharge, + trips.total_amount, + trips.payment_type, + trips.trip_type, + trips.pickup, + trips.dropoff, + + cab_types.type cab_type, + + weather.precipitation_tenths_of_mm rain, + weather.snow_depth_mm, + weather.snowfall_mm, + weather.max_temperature_tenths_degrees_celsius max_temp, + weather.min_temperature_tenths_degrees_celsius min_temp, + weather.average_wind_speed_tenths_of_meters_per_second wind, + + pick_up.gid pickup_nyct2010_gid, + pick_up.ctlabel pickup_ctlabel, + pick_up.borocode pickup_borocode, + pick_up.boroname pickup_boroname, + pick_up.ct2010 pickup_ct2010, + pick_up.boroct2010 pickup_boroct2010, + pick_up.cdeligibil pickup_cdeligibil, + pick_up.ntacode pickup_ntacode, + pick_up.ntaname pickup_ntaname, + pick_up.puma pickup_puma, + + drop_off.gid dropoff_nyct2010_gid, + drop_off.ctlabel dropoff_ctlabel, + drop_off.borocode dropoff_borocode, + drop_off.boroname dropoff_boroname, + drop_off.ct2010 dropoff_ct2010, + drop_off.boroct2010 dropoff_boroct2010, + drop_off.cdeligibil dropoff_cdeligibil, + drop_off.ntacode dropoff_ntacode, + drop_off.ntaname dropoff_ntaname, + drop_off.puma dropoff_puma + FROM trips + LEFT JOIN cab_types + ON trips.cab_type_id = cab_types.id + LEFT JOIN central_park_weather_observations_raw weather + ON weather.date = trips.pickup_datetime::date + LEFT JOIN nyct2010 pick_up + ON pick_up.gid = trips.pickup_nyct2010_gid + LEFT JOIN nyct2010 drop_off + ON drop_off.gid = 
trips.dropoff_nyct2010_gid +) TO '/opt/milovidov/nyc-taxi-data/trips.tsv'; +``` + +Veri anlık görüntüsü saniyede yaklaşık 50 MB hızında oluşturulur. Anlık görüntü oluştururken, PostgreSQL diskten saniyede yaklaşık 28 MB hızında okur. +Bu yaklaşık 5 saat sürer. Elde edilen TSV dosyası 590612904969 bayttır. + +Clickhouse'da geçici bir tablo oluşturma: + +``` sql +CREATE TABLE trips +( +trip_id UInt32, +vendor_id String, +pickup_datetime DateTime, +dropoff_datetime Nullable(DateTime), +store_and_fwd_flag Nullable(FixedString(1)), +rate_code_id Nullable(UInt8), +pickup_longitude Nullable(Float64), +pickup_latitude Nullable(Float64), +dropoff_longitude Nullable(Float64), +dropoff_latitude Nullable(Float64), +passenger_count Nullable(UInt8), +trip_distance Nullable(Float64), +fare_amount Nullable(Float32), +extra Nullable(Float32), +mta_tax Nullable(Float32), +tip_amount Nullable(Float32), +tolls_amount Nullable(Float32), +ehail_fee Nullable(Float32), +improvement_surcharge Nullable(Float32), +total_amount Nullable(Float32), +payment_type Nullable(String), +trip_type Nullable(UInt8), +pickup Nullable(String), +dropoff Nullable(String), +cab_type Nullable(String), +precipitation Nullable(UInt8), +snow_depth Nullable(UInt8), +snowfall Nullable(UInt8), +max_temperature Nullable(UInt8), +min_temperature Nullable(UInt8), +average_wind_speed Nullable(UInt8), +pickup_nyct2010_gid Nullable(UInt8), +pickup_ctlabel Nullable(String), +pickup_borocode Nullable(UInt8), +pickup_boroname Nullable(String), +pickup_ct2010 Nullable(String), +pickup_boroct2010 Nullable(String), +pickup_cdeligibil Nullable(FixedString(1)), +pickup_ntacode Nullable(String), +pickup_ntaname Nullable(String), +pickup_puma Nullable(String), +dropoff_nyct2010_gid Nullable(UInt8), +dropoff_ctlabel Nullable(String), +dropoff_borocode Nullable(UInt8), +dropoff_boroname Nullable(String), +dropoff_ct2010 Nullable(String), +dropoff_boroct2010 Nullable(String), +dropoff_cdeligibil Nullable(String), +dropoff_ntacode Nullable(String), +dropoff_ntaname Nullable(String), +dropoff_puma Nullable(String) +) ENGINE = Log; +``` + +Alanları daha doğru veri türlerine dönüştürmek ve mümkünse Boşları ortadan kaldırmak için gereklidir. + +``` bash +$ time clickhouse-client --query="INSERT INTO trips FORMAT TabSeparated" < trips.tsv + +real 75m56.214s +``` + +Veri 112-140 Mb/saniye hızında okunur. +Bir akışta bir günlük türü tablosuna veri yükleme 76 dakika sürdü. +Bu tablodaki veriler 142 GB kullanır. + +(Verileri doğrudan Postgres'ten içe aktarmak da mümkündür `COPY ... TO PROGRAM`.) + +Unfortunately, all the fields associated with the weather (precipitation…average\_wind\_speed) were filled with NULL. Because of this, we will remove them from the final data set. + +Başlamak için, tek bir sunucuda bir tablo oluşturacağız. Daha sonra tabloyu dağıtacağız. 
+ +Özet Tablo oluşturma ve doldurma: + +``` sql +CREATE TABLE trips_mergetree +ENGINE = MergeTree(pickup_date, pickup_datetime, 8192) +AS SELECT + +trip_id, +CAST(vendor_id AS Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14)) AS vendor_id, +toDate(pickup_datetime) AS pickup_date, +ifNull(pickup_datetime, toDateTime(0)) AS pickup_datetime, +toDate(dropoff_datetime) AS dropoff_date, +ifNull(dropoff_datetime, toDateTime(0)) AS dropoff_datetime, +assumeNotNull(store_and_fwd_flag) IN ('Y', '1', '2') AS store_and_fwd_flag, +assumeNotNull(rate_code_id) AS rate_code_id, +assumeNotNull(pickup_longitude) AS pickup_longitude, +assumeNotNull(pickup_latitude) AS pickup_latitude, +assumeNotNull(dropoff_longitude) AS dropoff_longitude, +assumeNotNull(dropoff_latitude) AS dropoff_latitude, +assumeNotNull(passenger_count) AS passenger_count, +assumeNotNull(trip_distance) AS trip_distance, +assumeNotNull(fare_amount) AS fare_amount, +assumeNotNull(extra) AS extra, +assumeNotNull(mta_tax) AS mta_tax, +assumeNotNull(tip_amount) AS tip_amount, +assumeNotNull(tolls_amount) AS tolls_amount, +assumeNotNull(ehail_fee) AS ehail_fee, +assumeNotNull(improvement_surcharge) AS improvement_surcharge, +assumeNotNull(total_amount) AS total_amount, +CAST((assumeNotNull(payment_type) AS pt) IN ('CSH', 'CASH', 'Cash', 'CAS', 'Cas', '1') ? 'CSH' : (pt IN ('CRD', 'Credit', 'Cre', 'CRE', 'CREDIT', '2') ? 'CRE' : (pt IN ('NOC', 'No Charge', 'No', '3') ? 'NOC' : (pt IN ('DIS', 'Dispute', 'Dis', '4') ? 'DIS' : 'UNK'))) AS Enum8('CSH' = 1, 'CRE' = 2, 'UNK' = 0, 'NOC' = 3, 'DIS' = 4)) AS payment_type_, +assumeNotNull(trip_type) AS trip_type, +ifNull(toFixedString(unhex(pickup), 25), toFixedString('', 25)) AS pickup, +ifNull(toFixedString(unhex(dropoff), 25), toFixedString('', 25)) AS dropoff, +CAST(assumeNotNull(cab_type) AS Enum8('yellow' = 1, 'green' = 2, 'uber' = 3)) AS cab_type, + +assumeNotNull(pickup_nyct2010_gid) AS pickup_nyct2010_gid, +toFloat32(ifNull(pickup_ctlabel, '0')) AS pickup_ctlabel, +assumeNotNull(pickup_borocode) AS pickup_borocode, +CAST(assumeNotNull(pickup_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS pickup_boroname, +toFixedString(ifNull(pickup_ct2010, '000000'), 6) AS pickup_ct2010, +toFixedString(ifNull(pickup_boroct2010, '0000000'), 7) AS pickup_boroct2010, +CAST(assumeNotNull(ifNull(pickup_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS pickup_cdeligibil, +toFixedString(ifNull(pickup_ntacode, '0000'), 4) AS pickup_ntacode, + +CAST(assumeNotNull(pickup_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 
'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 
157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS pickup_ntaname, + +toUInt16(ifNull(pickup_puma, '0')) AS pickup_puma, + +assumeNotNull(dropoff_nyct2010_gid) AS dropoff_nyct2010_gid, +toFloat32(ifNull(dropoff_ctlabel, '0')) AS dropoff_ctlabel, +assumeNotNull(dropoff_borocode) AS dropoff_borocode, +CAST(assumeNotNull(dropoff_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS dropoff_boroname, +toFixedString(ifNull(dropoff_ct2010, '000000'), 6) AS dropoff_ct2010, +toFixedString(ifNull(dropoff_boroct2010, '0000000'), 7) AS dropoff_boroct2010, +CAST(assumeNotNull(ifNull(dropoff_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS dropoff_cdeligibil, +toFixedString(ifNull(dropoff_ntacode, '0000'), 4) AS dropoff_ntacode, + +CAST(assumeNotNull(dropoff_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 
'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. 
Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS dropoff_ntaname,
+
+toUInt16(ifNull(dropoff_puma, '0')) AS dropoff_puma
+
+FROM trips
+```
+
+This takes 3030 seconds at a speed of about 428,000 rows per second.
+To load it faster, you can create the table with the `Log` engine instead of `MergeTree`. In this case, the load completes in under 200 seconds.
+
+The table uses 126 GB of disk space.
+
+``` sql
+SELECT formatReadableSize(sum(bytes)) FROM system.parts WHERE table = 'trips_mergetree' AND active
+```
+
+``` text
+┌─formatReadableSize(sum(bytes))─┐
+│ 126.18 GiB                     │
+└────────────────────────────────┘
+```
+
+Among other things, you can run the OPTIMIZE query on MergeTree (a sample invocation is sketched after the single-server results below). But it is not required, since everything will be fine without it.
+
+## Download of Prepared Partitions {#download-of-prepared-partitions}
+
+``` bash
+$ curl -O https://clickhouse-datasets.s3.yandex.net/trips_mergetree/partitions/trips_mergetree.tar
+$ tar xvf trips_mergetree.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+$ # check permissions of unpacked data, fix if required
+$ sudo service clickhouse-server restart
+$ clickhouse-client --query "select count(*) from datasets.trips_mergetree"
+```
+
+!!! info "Info"
+    If you are going to run the queries described below, you have to use the full table name, `datasets.trips_mergetree`.
+
+## Results on a Single Server {#results-on-single-server}
+
+Q1:
+
+``` sql
+SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type
+```
+
+0.490 seconds.
+
+Q2:
+
+``` sql
+SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count
+```
+
+1.224 seconds.
+
+Q3:
+
+``` sql
+SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year
+```
+
+2.104 seconds.
+
+Q4:
+
+``` sql
+SELECT passenger_count, toYear(pickup_date) AS year, round(trip_distance) AS distance, count(*)
+FROM trips_mergetree
+GROUP BY passenger_count, year, distance
+ORDER BY year, count(*) DESC
+```
+
+3.593 seconds.
+
+The following server was used:
+
+Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60 GHz, 16 physical cores in total, 128 GiB RAM, 8x6 TB HD on hardware RAID-5
+
+Execution time is the best of three runs. Starting from the second run, however, queries read data from the file system cache. No further caching occurs: the data is read and processed on each run.
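+
+For reference, the OPTIMIZE query mentioned above could look like the following (a hypothetical invocation shown only for illustration; the walkthrough itself does not rely on it):
+
+``` bash
+$ clickhouse-client --query "OPTIMIZE TABLE trips_mergetree FINAL"
+```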
+
+Creating a table on three servers:
+
+On each server:
+
+``` sql
+CREATE TABLE default.trips_mergetree_third ( trip_id UInt32, vendor_id Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14), pickup_date Date, pickup_datetime DateTime, dropoff_date Date, dropoff_datetime DateTime, store_and_fwd_flag UInt8, rate_code_id UInt8, pickup_longitude Float64, pickup_latitude Float64, dropoff_longitude Float64, dropoff_latitude Float64, passenger_count UInt8, trip_distance Float64, fare_amount Float32, extra Float32, mta_tax Float32, tip_amount Float32, tolls_amount Float32, ehail_fee Float32, improvement_surcharge Float32, total_amount Float32, payment_type_ Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), trip_type UInt8, pickup FixedString(25), dropoff FixedString(25), cab_type Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), pickup_nyct2010_gid UInt8, pickup_ctlabel Float32, pickup_borocode UInt8, pickup_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), pickup_ct2010 FixedString(6), pickup_boroct2010 FixedString(7), pickup_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), pickup_ntacode FixedString(4), pickup_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft.
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), pickup_puma UInt16, dropoff_nyct2010_gid UInt8, dropoff_ctlabel Float32, dropoff_borocode UInt8, dropoff_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), dropoff_ct2010 FixedString(6), dropoff_boroct2010 FixedString(7), dropoff_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), dropoff_ntacode FixedString(4), dropoff_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), dropoff_puma UInt16) ENGINE = MergeTree(pickup_date, pickup_datetime, 8192)
+```
+
+On the source server:
+
+``` sql
+CREATE TABLE trips_mergetree_x3 AS trips_mergetree_third ENGINE = Distributed(perftest, default, trips_mergetree_third, rand())
+```
+
+The following query redistributes the data:
+
+``` sql
+INSERT INTO trips_mergetree_x3 SELECT * FROM trips_mergetree
+```
+
+This takes 2454 seconds.
+
+On three servers:
+
+Q1: 0.212 seconds.
+Q2: 0.438 seconds.
+Q3: 0.733 seconds.
+Q4: 1.241 seconds.
+
+No surprises here, since the queries scale linearly.
+
+We also have results from a cluster of 140 servers:
+
+Q1: 0.028 sec.
+Q2: 0.043 sec.
+Q3: 0.051 sec.
+Q4: 0.072 sec.
+
+In this case, the query processing time is determined above all by network latency.
+We ran the queries using a client located in a Yandex datacenter in Finland against a cluster in Russia, which added about 20 ms of latency.
+
+## Summary {#summary}
+
+| servers | Q1    | Q2    | Q3    | Q4    |
+|---------|-------|-------|-------|-------|
+| 1       | 0.490 | 1.224 | 2.104 | 3.593 |
+| 3       | 0.212 | 0.438 | 0.733 | 1.241 |
+| 140     | 0.028 | 0.043 | 0.051 | 0.072 |
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/)
diff --git a/docs/tr/getting_started/example_datasets/ontime.md b/docs/tr/getting_started/example_datasets/ontime.md
new file mode 100644
index 00000000000..dc06b86189f
--- /dev/null
+++ b/docs/tr/getting_started/example_datasets/ontime.md
@@ -0,0 +1,412 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 15
+toc_title: OnTime
+---
+
+# OnTime {#ontime}
+
+This dataset can be obtained in two ways:
+
+- import from raw data
+- download of prepared partitions
+
+## Import from Raw Data {#import-from-raw-data}
+
+Downloading data:
+
+``` bash
+for s in `seq 1987 2018`
+do
+for m in `seq 1 12`
+do
+wget https://transtats.bts.gov/PREZIP/On_Time_Reporting_Carrier_On_Time_Performance_1987_present_${s}_${m}.zip
+done
+done
+```
+
+(from https://github.com/Percona-Lab/ontime-airline-performance/blob/master/download.sh )
+
+Creating a table:
+
+``` sql
+CREATE TABLE `ontime` (
+    `Year` UInt16,
+    `Quarter` UInt8,
+    `Month` UInt8,
+    `DayofMonth` UInt8,
+    `DayOfWeek` UInt8,
+    `FlightDate` Date,
+    `UniqueCarrier` FixedString(7),
+    `AirlineID` Int32,
+    `Carrier` FixedString(2),
+    `TailNum` String,
+    `FlightNum` String,
+    `OriginAirportID` Int32,
+    `OriginAirportSeqID` Int32,
+    `OriginCityMarketID` Int32,
+    `Origin` FixedString(5),
+    `OriginCityName` String,
+    `OriginState` FixedString(2),
+    `OriginStateFips` String,
+    `OriginStateName` String,
+    `OriginWac` Int32,
+    `DestAirportID` Int32,
+    `DestAirportSeqID` Int32,
+    `DestCityMarketID` Int32,
+    `Dest` FixedString(5),
+    `DestCityName` String,
+    `DestState` FixedString(2),
+    `DestStateFips` String,
+    `DestStateName` String,
+    `DestWac` Int32,
+    `CRSDepTime` Int32,
+    `DepTime` Int32,
+    `DepDelay` Int32,
+    `DepDelayMinutes` Int32,
+    `DepDel15` Int32,
+    `DepartureDelayGroups` String,
+
`DepTimeBlk` String,
+    `TaxiOut` Int32,
+    `WheelsOff` Int32,
+    `WheelsOn` Int32,
+    `TaxiIn` Int32,
+    `CRSArrTime` Int32,
+    `ArrTime` Int32,
+    `ArrDelay` Int32,
+    `ArrDelayMinutes` Int32,
+    `ArrDel15` Int32,
+    `ArrivalDelayGroups` Int32,
+    `ArrTimeBlk` String,
+    `Cancelled` UInt8,
+    `CancellationCode` FixedString(1),
+    `Diverted` UInt8,
+    `CRSElapsedTime` Int32,
+    `ActualElapsedTime` Int32,
+    `AirTime` Int32,
+    `Flights` Int32,
+    `Distance` Int32,
+    `DistanceGroup` UInt8,
+    `CarrierDelay` Int32,
+    `WeatherDelay` Int32,
+    `NASDelay` Int32,
+    `SecurityDelay` Int32,
+    `LateAircraftDelay` Int32,
+    `FirstDepTime` String,
+    `TotalAddGTime` String,
+    `LongestAddGTime` String,
+    `DivAirportLandings` String,
+    `DivReachedDest` String,
+    `DivActualElapsedTime` String,
+    `DivArrDelay` String,
+    `DivDistance` String,
+    `Div1Airport` String,
+    `Div1AirportID` Int32,
+    `Div1AirportSeqID` Int32,
+    `Div1WheelsOn` String,
+    `Div1TotalGTime` String,
+    `Div1LongestGTime` String,
+    `Div1WheelsOff` String,
+    `Div1TailNum` String,
+    `Div2Airport` String,
+    `Div2AirportID` Int32,
+    `Div2AirportSeqID` Int32,
+    `Div2WheelsOn` String,
+    `Div2TotalGTime` String,
+    `Div2LongestGTime` String,
+    `Div2WheelsOff` String,
+    `Div2TailNum` String,
+    `Div3Airport` String,
+    `Div3AirportID` Int32,
+    `Div3AirportSeqID` Int32,
+    `Div3WheelsOn` String,
+    `Div3TotalGTime` String,
+    `Div3LongestGTime` String,
+    `Div3WheelsOff` String,
+    `Div3TailNum` String,
+    `Div4Airport` String,
+    `Div4AirportID` Int32,
+    `Div4AirportSeqID` Int32,
+    `Div4WheelsOn` String,
+    `Div4TotalGTime` String,
+    `Div4LongestGTime` String,
+    `Div4WheelsOff` String,
+    `Div4TailNum` String,
+    `Div5Airport` String,
+    `Div5AirportID` Int32,
+    `Div5AirportSeqID` Int32,
+    `Div5WheelsOn` String,
+    `Div5TotalGTime` String,
+    `Div5LongestGTime` String,
+    `Div5WheelsOff` String,
+    `Div5TailNum` String
+) ENGINE = MergeTree
+PARTITION BY Year
+ORDER BY (Carrier, FlightDate)
+SETTINGS index_granularity = 8192;
+```
+
+Loading data:
+
+``` bash
+$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done
+```
+
+## Download of Prepared Partitions {#download-of-prepared-partitions}
+
+``` bash
+$ curl -O https://clickhouse-datasets.s3.yandex.net/ontime/partitions/ontime.tar
+$ tar xvf ontime.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+$ # check permissions of unpacked data, fix if required
+$ sudo service clickhouse-server restart
+$ clickhouse-client --query "select count(*) from datasets.ontime"
+```
+
+!!! info "Info"
+    If you are going to run the queries described below, you have to use the full table name, `datasets.ontime`.
+
+## Queries {#queries}
+
+Q0.
+
+``` sql
+SELECT avg(c1)
+FROM
+(
+    SELECT Year, Month, count(*) AS c1
+    FROM ontime
+    GROUP BY Year, Month
+);
+```
+
+Q1. The number of flights per day from the year 2000 to 2008
+
+``` sql
+SELECT DayOfWeek, count(*) AS c
+FROM ontime
+WHERE Year>=2000 AND Year<=2008
+GROUP BY DayOfWeek
+ORDER BY c DESC;
+```
+
+Q2. The number of flights delayed by more than 10 minutes, grouped by the day of the week, for 2000-2008
+
+``` sql
+SELECT DayOfWeek, count(*) AS c
+FROM ontime
+WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
+GROUP BY DayOfWeek
+ORDER BY c DESC;
+```
+
+Q3. The number of delays by airport for 2000-2008
+
+``` sql
+SELECT Origin, count(*) AS c
+FROM ontime
+WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
+GROUP BY Origin
+ORDER BY c DESC
+LIMIT 10;
+```
+
+Q4.
The number of delays by carrier for 2007
+
+``` sql
+SELECT Carrier, count(*)
+FROM ontime
+WHERE DepDelay>10 AND Year=2007
+GROUP BY Carrier
+ORDER BY count(*) DESC;
+```
+
+Q5. The percentage of delays by carrier for 2007
+
+``` sql
+SELECT Carrier, c, c2, c*100/c2 as c3
+FROM
+(
+    SELECT
+        Carrier,
+        count(*) AS c
+    FROM ontime
+    WHERE DepDelay>10
+        AND Year=2007
+    GROUP BY Carrier
+)
+JOIN
+(
+    SELECT
+        Carrier,
+        count(*) AS c2
+    FROM ontime
+    WHERE Year=2007
+    GROUP BY Carrier
+) USING Carrier
+ORDER BY c3 DESC;
+```
+
+Better version of the same query:
+
+``` sql
+SELECT Carrier, avg(DepDelay>10)*100 AS c3
+FROM ontime
+WHERE Year=2007
+GROUP BY Carrier
+ORDER BY c3 DESC
+```
+
+Q6. The previous request for a broader range of years, 2000-2008
+
+``` sql
+SELECT Carrier, c, c2, c*100/c2 as c3
+FROM
+(
+    SELECT
+        Carrier,
+        count(*) AS c
+    FROM ontime
+    WHERE DepDelay>10
+        AND Year>=2000 AND Year<=2008
+    GROUP BY Carrier
+)
+JOIN
+(
+    SELECT
+        Carrier,
+        count(*) AS c2
+    FROM ontime
+    WHERE Year>=2000 AND Year<=2008
+    GROUP BY Carrier
+) USING Carrier
+ORDER BY c3 DESC;
+```
+
+Better version of the same query:
+
+``` sql
+SELECT Carrier, avg(DepDelay>10)*100 AS c3
+FROM ontime
+WHERE Year>=2000 AND Year<=2008
+GROUP BY Carrier
+ORDER BY c3 DESC;
+```
+
+Q7. Percentage of flights delayed for more than 10 minutes, by year
+
+``` sql
+SELECT Year, c1/c2
+FROM
+(
+    select
+        Year,
+        count(*)*100 as c1
+    from ontime
+    WHERE DepDelay>10
+    GROUP BY Year
+)
+JOIN
+(
+    select
+        Year,
+        count(*) as c2
+    from ontime
+    GROUP BY Year
+) USING (Year)
+ORDER BY Year;
+```
+
+Better version of the same query:
+
+``` sql
+SELECT Year, avg(DepDelay>10)*100
+FROM ontime
+GROUP BY Year
+ORDER BY Year;
+```
+
+Q8. The most popular destinations by the number of directly connected cities for various year ranges
+
+``` sql
+SELECT DestCityName, uniqExact(OriginCityName) AS u
+FROM ontime
+WHERE Year >= 2000 and Year <= 2010
+GROUP BY DestCityName
+ORDER BY u DESC LIMIT 10;
+```
+
+Q9.
+
+``` sql
+SELECT Year, count(*) AS c1
+FROM ontime
+GROUP BY Year;
+```
+
+Q10.
+
+``` sql
+SELECT
+    min(Year), max(Year), Carrier, count(*) AS cnt,
+    sum(ArrDelayMinutes>30) AS flights_delayed,
+    round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
+FROM ontime
+WHERE
+    DayOfWeek NOT IN (6,7) AND OriginState NOT IN ('AK', 'HI', 'PR', 'VI')
+    AND DestState NOT IN ('AK', 'HI', 'PR', 'VI')
+    AND FlightDate < '2010-01-01'
+GROUP by Carrier
+HAVING cnt>100000 and max(Year)>1990
+ORDER by rate DESC
+LIMIT 1000;
+```
+
+Bonus:
+
+``` sql
+SELECT avg(cnt)
+FROM
+(
+    SELECT Year,Month,count(*) AS cnt
+    FROM ontime
+    WHERE DepDel15=1
+    GROUP BY Year,Month
+);
+
+SELECT avg(c1) FROM
+(
+    SELECT Year,Month,count(*) AS c1
+    FROM ontime
+    GROUP BY Year,Month
+);
+
+SELECT DestCityName, uniqExact(OriginCityName) AS u
+FROM ontime
+GROUP BY DestCityName
+ORDER BY u DESC
+LIMIT 10;
+
+SELECT OriginCityName, DestCityName, count() AS c
+FROM ontime
+GROUP BY OriginCityName, DestCityName
+ORDER BY c DESC
+LIMIT 10;
+
+SELECT OriginCityName, count() AS c
+FROM ontime
+GROUP BY OriginCityName
+ORDER BY c DESC
+LIMIT 10;
+```
+
+This performance test was created by Vadim Tkachenko.
See:
+
+- https://www.percona.com/blog/2009/10/02/analyzing-air-traffic-performance-with-infobright-and-monetdb/
+- https://www.percona.com/blog/2009/10/26/air-traffic-queries-in-luciddb/
+- https://www.percona.com/blog/2009/11/02/air-traffic-queries-in-infinidb-early-alpha/
+- https://www.percona.com/blog/2014/04/21/using-apache-hadoop-and-impala-together-with-mysql-for-data-analysis/
+- https://www.percona.com/blog/2016/01/07/apache-spark-with-air-ontime-performance-data/
+- http://nickmakos.blogspot.ru/2012/08/analyzing-air-traffic-performance-with.html
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/ontime/)
diff --git a/docs/tr/getting_started/example_datasets/star_schema.md b/docs/tr/getting_started/example_datasets/star_schema.md
new file mode 100644
index 00000000000..1326aab7ab2
--- /dev/null
+++ b/docs/tr/getting_started/example_datasets/star_schema.md
@@ -0,0 +1,370 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 20
+toc_title: "Star Schema Benchmark"
+---
+
+# Star Schema Benchmark {#star-schema-benchmark}
+
+Compiling dbgen:
+
+``` bash
+$ git clone git@github.com:vadimtk/ssb-dbgen.git
+$ cd ssb-dbgen
+$ make
+```
+
+Generating data:
+
+!!! warning "Attention"
+    With `-s 100` dbgen generates 600 million rows (67 GB), while `-s 1000` generates 6 billion rows (which takes a lot of time)
+
+``` bash
+$ ./dbgen -s 1000 -T c
+$ ./dbgen -s 1000 -T l
+$ ./dbgen -s 1000 -T p
+$ ./dbgen -s 1000 -T s
+$ ./dbgen -s 1000 -T d
+```
+
+Creating tables in ClickHouse:
+
+``` sql
+CREATE TABLE customer
+(
+    C_CUSTKEY UInt32,
+    C_NAME String,
+    C_ADDRESS String,
+    C_CITY LowCardinality(String),
+    C_NATION LowCardinality(String),
+    C_REGION LowCardinality(String),
+    C_PHONE String,
+    C_MKTSEGMENT LowCardinality(String)
+)
+ENGINE = MergeTree ORDER BY (C_CUSTKEY);
+
+CREATE TABLE lineorder
+(
+    LO_ORDERKEY UInt32,
+    LO_LINENUMBER UInt8,
+    LO_CUSTKEY UInt32,
+    LO_PARTKEY UInt32,
+    LO_SUPPKEY UInt32,
+    LO_ORDERDATE Date,
+    LO_ORDERPRIORITY LowCardinality(String),
+    LO_SHIPPRIORITY UInt8,
+    LO_QUANTITY UInt8,
+    LO_EXTENDEDPRICE UInt32,
+    LO_ORDTOTALPRICE UInt32,
+    LO_DISCOUNT UInt8,
+    LO_REVENUE UInt32,
+    LO_SUPPLYCOST UInt32,
+    LO_TAX UInt8,
+    LO_COMMITDATE Date,
+    LO_SHIPMODE LowCardinality(String)
+)
+ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY);
+
+CREATE TABLE part
+(
+    P_PARTKEY UInt32,
+    P_NAME String,
+    P_MFGR LowCardinality(String),
+    P_CATEGORY LowCardinality(String),
+    P_BRAND LowCardinality(String),
+    P_COLOR LowCardinality(String),
+    P_TYPE LowCardinality(String),
+    P_SIZE UInt8,
+    P_CONTAINER LowCardinality(String)
+)
+ENGINE = MergeTree ORDER BY P_PARTKEY;
+
+CREATE TABLE supplier
+(
+    S_SUPPKEY UInt32,
+    S_NAME String,
+    S_ADDRESS String,
+    S_CITY LowCardinality(String),
+    S_NATION LowCardinality(String),
+    S_REGION LowCardinality(String),
+    S_PHONE String
+)
+ENGINE = MergeTree ORDER BY S_SUPPKEY;
+```
+
+Inserting data:
+
+``` bash
+$ clickhouse-client --query "INSERT INTO customer FORMAT CSV" < customer.tbl
+$ clickhouse-client --query "INSERT INTO part FORMAT CSV" < part.tbl
+$ clickhouse-client --query "INSERT INTO supplier FORMAT CSV" < supplier.tbl
+$ clickhouse-client --query "INSERT INTO lineorder FORMAT CSV" < lineorder.tbl
+```
+
+Converting “star schema” to denormalized “flat schema”:
+
+``` sql
+SET max_memory_usage = 20000000000;
+
+CREATE TABLE lineorder_flat
+ENGINE = MergeTree
+PARTITION BY
toYear(LO_ORDERDATE)
+ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS
+SELECT
+    l.LO_ORDERKEY AS LO_ORDERKEY,
+    l.LO_LINENUMBER AS LO_LINENUMBER,
+    l.LO_CUSTKEY AS LO_CUSTKEY,
+    l.LO_PARTKEY AS LO_PARTKEY,
+    l.LO_SUPPKEY AS LO_SUPPKEY,
+    l.LO_ORDERDATE AS LO_ORDERDATE,
+    l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY,
+    l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY,
+    l.LO_QUANTITY AS LO_QUANTITY,
+    l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE,
+    l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE,
+    l.LO_DISCOUNT AS LO_DISCOUNT,
+    l.LO_REVENUE AS LO_REVENUE,
+    l.LO_SUPPLYCOST AS LO_SUPPLYCOST,
+    l.LO_TAX AS LO_TAX,
+    l.LO_COMMITDATE AS LO_COMMITDATE,
+    l.LO_SHIPMODE AS LO_SHIPMODE,
+    c.C_NAME AS C_NAME,
+    c.C_ADDRESS AS C_ADDRESS,
+    c.C_CITY AS C_CITY,
+    c.C_NATION AS C_NATION,
+    c.C_REGION AS C_REGION,
+    c.C_PHONE AS C_PHONE,
+    c.C_MKTSEGMENT AS C_MKTSEGMENT,
+    s.S_NAME AS S_NAME,
+    s.S_ADDRESS AS S_ADDRESS,
+    s.S_CITY AS S_CITY,
+    s.S_NATION AS S_NATION,
+    s.S_REGION AS S_REGION,
+    s.S_PHONE AS S_PHONE,
+    p.P_NAME AS P_NAME,
+    p.P_MFGR AS P_MFGR,
+    p.P_CATEGORY AS P_CATEGORY,
+    p.P_BRAND AS P_BRAND,
+    p.P_COLOR AS P_COLOR,
+    p.P_TYPE AS P_TYPE,
+    p.P_SIZE AS P_SIZE,
+    p.P_CONTAINER AS P_CONTAINER
+FROM lineorder AS l
+INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY
+INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY
+INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY;
+```
+
+Running the queries:
+
+Q1.1
+
+``` sql
+SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
+FROM lineorder_flat
+WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25;
+```
+
+Q1.2
+
+``` sql
+SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
+FROM lineorder_flat
+WHERE toYYYYMM(LO_ORDERDATE) = 199401 AND LO_DISCOUNT BETWEEN 4 AND 6 AND LO_QUANTITY BETWEEN 26 AND 35;
+```
+
+Q1.3
+
+``` sql
+SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
+FROM lineorder_flat
+WHERE toISOWeek(LO_ORDERDATE) = 6 AND toYear(LO_ORDERDATE) = 1994
+    AND LO_DISCOUNT BETWEEN 5 AND 7 AND LO_QUANTITY BETWEEN 26 AND 35;
+```
+
+Q2.1
+
+``` sql
+SELECT
+    sum(LO_REVENUE),
+    toYear(LO_ORDERDATE) AS year,
+    P_BRAND
+FROM lineorder_flat
+WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA'
+GROUP BY
+    year,
+    P_BRAND
+ORDER BY
+    year,
+    P_BRAND;
+```
+
+Q2.2
+
+``` sql
+SELECT
+    sum(LO_REVENUE),
+    toYear(LO_ORDERDATE) AS year,
+    P_BRAND
+FROM lineorder_flat
+WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA'
+GROUP BY
+    year,
+    P_BRAND
+ORDER BY
+    year,
+    P_BRAND;
+```
+
+Q2.3
+
+``` sql
+SELECT
+    sum(LO_REVENUE),
+    toYear(LO_ORDERDATE) AS year,
+    P_BRAND
+FROM lineorder_flat
+WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE'
+GROUP BY
+    year,
+    P_BRAND
+ORDER BY
+    year,
+    P_BRAND;
+```
+
+Q3.1
+
+``` sql
+SELECT
+    C_NATION,
+    S_NATION,
+    toYear(LO_ORDERDATE) AS year,
+    sum(LO_REVENUE) AS revenue
+FROM lineorder_flat
+WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997
+GROUP BY
+    C_NATION,
+    S_NATION,
+    year
+ORDER BY
+    year ASC,
+    revenue DESC;
+```
+
+Q3.2
+
+``` sql
+SELECT
+    C_CITY,
+    S_CITY,
+    toYear(LO_ORDERDATE) AS year,
+    sum(LO_REVENUE) AS revenue
+FROM lineorder_flat
+WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997
+GROUP BY
+    C_CITY,
+    S_CITY,
+    year
+ORDER BY
+    year ASC,
+    revenue DESC;
+```
+
+Q3.3
+
+``` sql
+SELECT
+    C_CITY,
+    S_CITY,
+    toYear(LO_ORDERDATE) AS year,
+    sum(LO_REVENUE) AS revenue
+FROM lineorder_flat
+WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY =
'UNITED KI5') AND year >= 1992 AND year <= 1997
+GROUP BY
+    C_CITY,
+    S_CITY,
+    year
+ORDER BY
+    year ASC,
+    revenue DESC;
+```
+
+Q3.4
+
+``` sql
+SELECT
+    C_CITY,
+    S_CITY,
+    toYear(LO_ORDERDATE) AS year,
+    sum(LO_REVENUE) AS revenue
+FROM lineorder_flat
+WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND toYYYYMM(LO_ORDERDATE) = 199712
+GROUP BY
+    C_CITY,
+    S_CITY,
+    year
+ORDER BY
+    year ASC,
+    revenue DESC;
+```
+
+Q4.1
+
+``` sql
+SELECT
+    toYear(LO_ORDERDATE) AS year,
+    C_NATION,
+    sum(LO_REVENUE - LO_SUPPLYCOST) AS profit
+FROM lineorder_flat
+WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2')
+GROUP BY
+    year,
+    C_NATION
+ORDER BY
+    year ASC,
+    C_NATION ASC;
+```
+
+Q4.2
+
+``` sql
+SELECT
+    toYear(LO_ORDERDATE) AS year,
+    S_NATION,
+    P_CATEGORY,
+    sum(LO_REVENUE - LO_SUPPLYCOST) AS profit
+FROM lineorder_flat
+WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2')
+GROUP BY
+    year,
+    S_NATION,
+    P_CATEGORY
+ORDER BY
+    year ASC,
+    S_NATION ASC,
+    P_CATEGORY ASC;
+```
+
+Q4.3
+
+``` sql
+SELECT
+    toYear(LO_ORDERDATE) AS year,
+    S_CITY,
+    P_BRAND,
+    sum(LO_REVENUE - LO_SUPPLYCOST) AS profit
+FROM lineorder_flat
+WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14'
+GROUP BY
+    year,
+    S_CITY,
+    P_BRAND
+ORDER BY
+    year ASC,
+    S_CITY ASC,
+    P_BRAND ASC;
+```
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/)
diff --git a/docs/tr/getting_started/example_datasets/wikistat.md b/docs/tr/getting_started/example_datasets/wikistat.md
new file mode 100644
index 00000000000..0fc24dd5bb1
--- /dev/null
+++ b/docs/tr/getting_started/example_datasets/wikistat.md
@@ -0,0 +1,35 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 18
+toc_title: WikiStat
+---
+
+# WikiStat {#wikistat}
+
+See: http://dumps.wikimedia.org/other/pagecounts-raw/
+
+Creating a table:
+
+``` sql
+CREATE TABLE wikistat
+(
+    date Date,
+    time DateTime,
+    project String,
+    subproject String,
+    path String,
+    hits UInt64,
+    size UInt64
+) ENGINE = MergeTree(date, (path, time), 8192);
+```
+
+Loading data:
+
+``` bash
+$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt
+$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done
+$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done
+```
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/)
diff --git a/docs/tr/getting_started/index.md b/docs/tr/getting_started/index.md
new file mode 100644
index 00000000000..e97f9add1f0
--- /dev/null
+++ b/docs/tr/getting_started/index.md
@@ -0,0 +1,17 @@
+---
+machine_translated: true
+machine_translated_rev:
e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "Getting Started"
+toc_hidden: true
+toc_priority: 8
+toc_title: "hidden"
+---
+
+# Getting Started {#getting-started}
+
+If you are new to ClickHouse and want to get a hands-on feel for its performance, first of all you need to go through the [installation process](install.md). After that you can:
+
+- [Go through the detailed tutorial](tutorial.md)
+- [Experiment with example datasets](example_datasets/ontime.md)
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/)
diff --git a/docs/tr/getting_started/install.md b/docs/tr/getting_started/install.md
new file mode 100644
index 00000000000..8597a378fb4
--- /dev/null
+++ b/docs/tr/getting_started/install.md
@@ -0,0 +1,191 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 11
+toc_title: Installation
+---
+
+# Installation {#installation}
+
+## System Requirements {#system-requirements}
+
+ClickHouse can run on any Linux, FreeBSD, or Mac OS X with the x86\_64, AArch64, or PowerPC64LE CPU architecture.
+
+Official pre-built binaries are typically compiled for x86\_64 and leverage the SSE 4.2 instruction set, so unless stated otherwise, a CPU that supports it becomes an additional system requirement. Here's the command to check whether the current CPU supports SSE 4.2:
+
+``` bash
+$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
+```
+
+To run ClickHouse on processors that do not support SSE 4.2 or that have the AArch64 or PowerPC64LE architecture, you should [build ClickHouse from sources](#from-sources) with the proper configuration adjustments.
+
+## Available Installation Options {#available-installation-options}
+
+### From DEB Packages {#install-from-deb-packages}
+
+It is recommended to use the official pre-compiled `deb` packages for Debian or Ubuntu.
+
+To install the official packages, add the Yandex repository in `/etc/apt/sources.list` or in a separate `/etc/apt/sources.list.d/clickhouse.list` file:
+
+    deb http://repo.clickhouse.tech/deb/stable/ main/
+
+If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments).
+
+Then run these commands to install the packages:
+
+``` bash
+sudo apt-get install dirmngr # optional
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional
+sudo apt-get update
+sudo apt-get install clickhouse-client clickhouse-server
+```
+
+You can also download and install the packages manually from here: https://repo.yandex.ru/clickhouse/deb/stable/main/.
+
+#### Packages {#packages}
+
+- `clickhouse-common-static` — Installs ClickHouse compiled binary files.
+- `clickhouse-server` — Creates a symbolic link for `clickhouse-server` and installs the default server configuration.
+- `clickhouse-client` — Creates a symbolic link for `clickhouse-client` and other client-related tools, and installs the client configuration files.
+- `clickhouse-common-static-dbg` — Installs ClickHouse compiled binary files with debug info.
+
+### From RPM Packages {#from-rpm-packages}
+
+It is recommended to use the official pre-compiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.
+
+First, you need to add the official repository:
+
+``` bash
+sudo yum install yum-utils
+sudo rpm --import https://repo.clickhouse.tech/CLICKHOUSE-KEY.GPG
+sudo yum-config-manager --add-repo https://repo.clickhouse.tech/rpm/stable/x86_64
+```
+
+If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments). The `prestable` tag is sometimes available too.
+
+Then run these commands to install the packages:
+
+``` bash
+sudo yum install clickhouse-server clickhouse-client
+```
+
+You can also download and install the packages manually from here: https://repo.clickhouse.tech/rpm/stable/x86_64.
+
+### From Tgz Archives {#from-tgz-archives}
+
+It is recommended to use the official pre-compiled `tgz` archives for all Linux distributions where installation of the `deb` or `rpm` packages is not possible.
+
+The required version can be downloaded with `curl` or `wget` from the repository https://repo.yandex.ru/clickhouse/tgz/.
+After that, the downloaded archives should be unpacked and installed with the installation scripts. Example for the latest version:
+
+``` bash
+export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
+curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.tech/tgz/clickhouse-server-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.tech/tgz/clickhouse-client-$LATEST_VERSION.tgz
+
+tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
+sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh
+
+tar -xzvf clickhouse-common-static-dbg-$LATEST_VERSION.tgz
+sudo clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh
+
+tar -xzvf clickhouse-server-$LATEST_VERSION.tgz
+sudo clickhouse-server-$LATEST_VERSION/install/doinst.sh
+sudo /etc/init.d/clickhouse-server start
+
+tar -xzvf clickhouse-client-$LATEST_VERSION.tgz
+sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh
+```
+
+For production environments, it's recommended to use the latest `stable` version. You can find its number on the GitHub page https://github.com/ClickHouse/ClickHouse/tags with the postfix `-stable`.
+
+### From Docker Image {#from-docker-image}
+
+To run ClickHouse inside Docker, follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Those images use the official `deb` packages inside.
+
+### From Sources {#from-sources}
+
+To manually compile ClickHouse, follow the instructions for [Linux](../development/build.md) or [Mac OS X](../development/build_osx.md).
+
+You can compile packages and install them, or use the programs without installing packages. Building manually also lets you disable the SSE 4.2 requirement or build for AArch64 CPUs.
+
+    Client: programs/clickhouse-client
+    Server: programs/clickhouse-server
+
+You'll need to create data and metadata folders and `chown` them for the desired user (a minimal sketch follows at the end of this section). Their paths can be changed in the server config (src/programs/server/config.xml); by default they are:
+
+    /opt/clickhouse/data/default/
+    /opt/clickhouse/metadata/default/
+
+On Gentoo, you can just use `emerge clickhouse` to install ClickHouse from sources.
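+
+A minimal sketch of the folder preparation step mentioned above (hypothetical: it assumes the default paths shown and a `clickhouse` system user; adjust both to your build and configuration):
+
+``` bash
+$ sudo mkdir -p /opt/clickhouse/data/default /opt/clickhouse/metadata/default
+$ sudo chown -R clickhouse:clickhouse /opt/clickhouse
+```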
+
+## Launch {#launch}
+
+To start the server as a daemon, run:
+
+``` bash
+$ sudo service clickhouse-server start
+```
+
+If you don't have the `service` command, run as
+
+``` bash
+$ sudo /etc/init.d/clickhouse-server start
+```
+
+See the logs in the `/var/log/clickhouse-server/` directory.
+
+If the server doesn't start, check the configurations in the file `/etc/clickhouse-server/config.xml`.
+
+You can also manually launch the server from the console:
+
+``` bash
+$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
+```
+
+In this case, the log will be printed to the console, which is convenient during development.
+If the configuration file is in the current directory, you don't need to specify the `--config-file` parameter. By default, it uses `./config.xml`.
+
+ClickHouse supports access restriction settings. They are located in the `users.xml` file (next to `config.xml`).
+By default, access is allowed from anywhere for the `default` user, without a password. See `user/default/networks`.
+For more information, see the section [“Configuration Files”](../operations/configuration_files.md).
+
+After launching the server, you can use the command-line client to connect to it:
+
+``` bash
+$ clickhouse-client
+```
+
+By default, it connects to `localhost:9000` on behalf of the user `default` without a password. It can also be used to connect to a remote server using the `--host` argument.
+
+The terminal must use UTF-8 encoding.
+For more information, see the section [“Command-line client”](../interfaces/cli.md).
+
+Example:
+
+``` bash
+$ ./clickhouse-client
+ClickHouse client version 0.0.18749.
+Connecting to localhost:9000.
+Connected to ClickHouse server version 0.0.18749.
+
+:) SELECT 1
+
+SELECT 1
+
+┌─1─┐
+│ 1 │
+└───┘
+
+1 rows in set. Elapsed: 0.003 sec.
+
+:)
+```
+
+**Congratulations, the system works!**
+
+To continue experimenting, you can download one of the test data sets or go through the [tutorial](https://clickhouse.tech/tutorial.html).
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/install/)
diff --git a/docs/tr/getting_started/playground.md b/docs/tr/getting_started/playground.md
new file mode 100644
index 00000000000..7c0f3641544
--- /dev/null
+++ b/docs/tr/getting_started/playground.md
@@ -0,0 +1,48 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 14
+toc_title: "Playground"
+---
+
+# ClickHouse Playground {#clickhouse-playground}
+
+[ClickHouse Playground](https://play.clickhouse.tech?file=welcome) allows people to experiment with ClickHouse by running queries instantly, without setting up their own server or cluster.
+Several example datasets are available in the Playground, as well as sample queries that show off ClickHouse features.
+
+The queries are executed as a read-only user. This implies some limitations:
+
+- DDL queries are not allowed
+- INSERT queries are not allowed
+
+The following settings are also enforced:
+- [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes)
+- [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows)
+- [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode)
+- [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time)
+
+ClickHouse Playground gives the experience of an m2.small
+[Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse)
+instance hosted in [Yandex.Cloud](https://cloud.yandex.com/).
+More information about [cloud providers](../commercial/cloud.md).
+
+The ClickHouse Playground web interface makes requests via the ClickHouse [HTTP API](../interfaces/http.md).
+The Playground backend is just a ClickHouse cluster without any additional server-side application.
+The ClickHouse HTTPS endpoint is also available as a part of the Playground.
+
+You can make queries to the Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using the [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers.
+More information about software products that support ClickHouse is available [here](../interfaces/index.md).
+
+| Parameter | Value                                  |
+|:----------|:---------------------------------------|
+| Endpoint  | https://play-api.clickhouse.tech:8443  |
+| User      | `playground`                           |
+| Password  | `clickhouse`                           |
+
+Note that this endpoint requires a secure connection.
+
+Example:
+
+``` bash
+curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets"
+```
diff --git a/docs/tr/getting_started/tutorial.md b/docs/tr/getting_started/tutorial.md
new file mode 100644
index 00000000000..9d3d31ba898
--- /dev/null
+++ b/docs/tr/getting_started/tutorial.md
@@ -0,0 +1,671 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 12
+toc_title: Tutorial
+---
+
+# ClickHouse Tutorial {#clickhouse-tutorial}
+
+## What to Expect from This Tutorial? {#what-to-expect-from-this-tutorial}
+
+By going through this tutorial, you'll learn how to set up a simple ClickHouse cluster. It'll be small, but fault-tolerant and scalable. Then we will use one of the example datasets to fill it with data and execute some demo queries.
+
+## Single Node Setup {#single-node-setup}
+
+To postpone the complexities of a distributed environment, we'll start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](index.md#install-from-deb-packages) or [rpm](index.md#from-rpm-packages) packages, but there are [alternatives](index.md#from-docker-image) for the operating systems that do not support them.
+
+For example, you have chosen the `deb` packages and executed:
+
+``` bash
+sudo apt-get install dirmngr
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
+
+echo "deb http://repo.clickhouse.tech/deb/stable/ main/" | sudo tee /etc/apt/sources.list.d/clickhouse.list
+sudo apt-get update
+
+sudo apt-get install -y clickhouse-server clickhouse-client
+```
+
+What do we have in the packages that got installed:
+
+- the `clickhouse-client` package contains the [clickhouse-client](../interfaces/cli.md) application, an interactive ClickHouse console client.
+- the `clickhouse-common` package contains the ClickHouse executable file.
+- the `clickhouse-server` package contains configuration files to run ClickHouse as a server.
+
+Server configuration files are located in `/etc/clickhouse-server/`. Before going further, please notice the `<path>` element in `config.xml`. It determines the location for data storage, so it should be on a volume with large disk capacity; the default value is `/var/lib/clickhouse/`. If you want to adjust the configuration, it's not handy to edit the `config.xml` file directly, considering it might get rewritten on future package updates. The recommended way to override the config elements is to create [files in the config.d directory](../operations/configuration_files.md) which serve as “patches” to config.xml, as in the sketch below.
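+
+For example, a hypothetical override dropped into config.d (the file name and the new path here are made up for illustration; only the `<path>` element itself comes from the default config):
+
+``` bash
+# Sketch: override the <path> element without touching config.xml.
+sudo mkdir -p /etc/clickhouse-server/config.d
+cat <<'EOF' | sudo tee /etc/clickhouse-server/config.d/data_path.xml
+<yandex>
+    <path>/mnt/bigdisk/clickhouse/</path>
+</yandex>
+EOF
+```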
+
+As you might have noticed, `clickhouse-server` is not launched automatically after package installation. It won't be automatically restarted after updates, either. The way you start the server depends on your init system; usually, it is:
+
+``` bash
+sudo service clickhouse-server start
+```
+
+or
+
+``` bash
+sudo /etc/init.d/clickhouse-server start
+```
+
+The default location for server logs is `/var/log/clickhouse-server/`. The server is ready to handle client connections once it logs the `Ready for connections` message.
+
+Once `clickhouse-server` is up and running, we can use `clickhouse-client` to connect to the server and run some test queries like `SELECT "Hello, world!";`.
+
+<details markdown="1">
+
+<summary>Quick tips for clickhouse-client</summary>
+
+Interactive mode:
+
+``` bash
+clickhouse-client
+clickhouse-client --host=... --port=... --user=... --password=...
+```
+
+Enable multiline queries:
+
+``` bash
+clickhouse-client -m
+clickhouse-client --multiline
+```
+
+Run queries in batch mode:
+
+``` bash
+clickhouse-client --query='SELECT 1'
+echo 'SELECT 1' | clickhouse-client
+clickhouse-client <<< 'SELECT 1'
+```
+
+Insert data from a file in the specified format:
+
+``` bash
+clickhouse-client --query='INSERT INTO table VALUES' < data.txt
+clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
+```
+
+</details>
+
+## Import Sample Dataset {#import-sample-dataset}
+
+Now it's time to fill our ClickHouse server with some sample data. In this tutorial, we'll use the anonymized data of Yandex.Metrica, the first service that ran ClickHouse in production before it became open-source (more on that in the [history section](../introduction/history.md)). There are [multiple ways to import the Yandex.Metrica dataset](example_datasets/metrica.md), and for the sake of the tutorial, we'll go with the most realistic one.
+
+### Download and Extract Table Data {#download-and-extract-table-data}
+
+``` bash
+curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
+curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
+```
+
+The extracted files are about 10GB in size.
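+
+If you want a quick sanity check before importing, a sketch like the following (assuming the files were saved under the names above; counting lines scans the whole files, so it takes a moment) shows the row counts and a glimpse of the tab-separated layout:
+
+``` bash
+# Optional: count the rows and peek at the first few fields of one record.
+wc -l hits_v1.tsv visits_v1.tsv
+head -n 1 hits_v1.tsv | cut -f 1-5
+```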
+
+### Create Tables {#create-tables}
+
+As in most database management systems, ClickHouse logically groups tables into “databases”. There's a `default` database, but we'll create a new one named `tutorial`:
+
+``` bash
+clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
+```
+
+The syntax for creating tables is way more complicated compared to databases (see the [reference](../sql_reference/statements/create.md)). In general, a `CREATE TABLE` statement has to specify three key things:
+
+1. The name of the table to create.
+2. The table schema, i.e. the list of columns and their [data types](../sql_reference/data_types/index.md).
+3. The [table engine](../engines/table_engines/index.md) and its settings, which determine all the details on how queries to this table will be physically executed.
+
+Yandex.Metrica is a web analytics service, and the sample dataset doesn't cover its full functionality, so there are only two tables to create:
+
+- `hits` is a table with every action done by all users on all websites covered by the service.
+- `visits` is a table that contains pre-built sessions instead of individual actions.
+
+Let's see and execute the real CREATE TABLE queries for these tables:
+
+``` sql
+CREATE TABLE tutorial.hits_v1
+(
+    `WatchID` UInt64,
+    `JavaEnable` UInt8,
+    `Title` String,
+    `GoodEvent` Int16,
+    `EventTime` DateTime,
+    `EventDate` Date,
+    `CounterID` UInt32,
+    `ClientIP` UInt32,
+    `ClientIP6` FixedString(16),
+    `RegionID` UInt32,
+    `UserID` UInt64,
+    `CounterClass` Int8,
+    `OS` UInt8,
+    `UserAgent` UInt8,
+    `URL` String,
+    `Referer` String,
+    `URLDomain` String,
+    `RefererDomain` String,
+    `Refresh` UInt8,
+    `IsRobot` UInt8,
+    `RefererCategories` Array(UInt16),
+    `URLCategories` Array(UInt16),
+    `URLRegions` Array(UInt32),
+    `RefererRegions` Array(UInt32),
+    `ResolutionWidth` UInt16,
+    `ResolutionHeight` UInt16,
+    `ResolutionDepth` UInt8,
+    `FlashMajor` UInt8,
+    `FlashMinor` UInt8,
+    `FlashMinor2` String,
+    `NetMajor` UInt8,
+    `NetMinor` UInt8,
+    `UserAgentMajor` UInt16,
+    `UserAgentMinor` FixedString(2),
+    `CookieEnable` UInt8,
+    `JavascriptEnable` UInt8,
+    `IsMobile` UInt8,
+    `MobilePhone` UInt8,
+    `MobilePhoneModel` String,
+    `Params` String,
+    `IPNetworkID` UInt32,
+    `TraficSourceID` Int8,
+    `SearchEngineID` UInt16,
+    `SearchPhrase` String,
+    `AdvEngineID` UInt8,
+    `IsArtifical` UInt8,
+    `WindowClientWidth` UInt16,
+    `WindowClientHeight` UInt16,
+    `ClientTimeZone` Int16,
+    `ClientEventTime` DateTime,
+    `SilverlightVersion1` UInt8,
+    `SilverlightVersion2` UInt8,
+    `SilverlightVersion3` UInt32,
+    `SilverlightVersion4` UInt16,
+    `PageCharset` String,
+    `CodeVersion` UInt32,
+    `IsLink` UInt8,
+    `IsDownload` UInt8,
+    `IsNotBounce` UInt8,
+    `FUniqID` UInt64,
+    `HID` UInt32,
+    `IsOldCounter` UInt8,
+    `IsEvent` UInt8,
+    `IsParameter` UInt8,
+    `DontCountHits` UInt8,
+    `WithHash` UInt8,
+    `HitColor` FixedString(1),
+    `UTCEventTime` DateTime,
+    `Age` UInt8,
+    `Sex` UInt8,
+    `Income` UInt8,
+    `Interests` UInt16,
+    `Robotness` UInt8,
+    `GeneralInterests` Array(UInt16),
+    `RemoteIP` UInt32,
+    `RemoteIP6` FixedString(16),
+    `WindowName` Int32,
+    `OpenerName` Int32,
+    `HistoryLength` Int16,
+    `BrowserLanguage` FixedString(2),
+    `BrowserCountry` FixedString(2),
+    `SocialNetwork` String,
+    `SocialAction` String,
+    `HTTPError` UInt16,
+    `SendTiming` Int32,
+    `DNSTiming` Int32,
+    `ConnectTiming` Int32,
+    `ResponseStartTiming` Int32,
+    `ResponseEndTiming` Int32,
+    `FetchTiming` Int32,
+    `RedirectTiming` Int32,
+    `DOMInteractiveTiming` Int32,
+    `DOMContentLoadedTiming` Int32,
+    `DOMCompleteTiming` Int32,
+    `LoadEventStartTiming` Int32,
+    `LoadEventEndTiming` Int32,
+    `NSToDOMContentLoadedTiming` Int32,
+    `FirstPaintTiming` Int32,
+    `RedirectCount` Int8,
+    `SocialSourceNetworkID` UInt8,
+    `SocialSourcePage` String,
+    `ParamPrice` Int64,
+    `ParamOrderID` String,
+    `ParamCurrency` FixedString(3),
+    `ParamCurrencyID` UInt16,
+    `GoalsReached` Array(UInt32),
+    `OpenstatServiceName` String,
+    `OpenstatCampaignID` String,
+    `OpenstatAdID` String,
+    `OpenstatSourceID` String,
+    `UTMSource` String,
+    `UTMMedium` String,
+    `UTMCampaign` String,
+    `UTMContent` String,
+    `UTMTerm` String,
+    `FromTag` String,
+    `HasGCLID` UInt8,
+    `RefererHash` UInt64,
+    `URLHash` UInt64,
+    `CLID` UInt32,
+    `YCLID` UInt64,
+    `ShareService` String,
+    `ShareURL` String,
+    `ShareTitle` String,
+    `ParsedParams` Nested(
+        Key1 String,
+        Key2 String,
+        Key3 String,
+        Key4 String,
+        Key5 String,
+        ValueDouble Float64),
+    `IslandID` FixedString(16),
+    `RequestNum` UInt32,
+    `RequestTry` UInt8
+)
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate, intHash32(UserID))
+SAMPLE BY intHash32(UserID)
+SETTINGS index_granularity = 8192 +``` + +``` sql +CREATE TABLE tutorial.visits_v1 +( + `CounterID` UInt32, + `StartDate` Date, + `Sign` Int8, + `IsNew` UInt8, + `VisitID` UInt64, + `UserID` UInt64, + `StartTime` DateTime, + `Duration` UInt32, + `UTCStartTime` DateTime, + `PageViews` Int32, + `Hits` Int32, + `IsBounce` UInt8, + `Referer` String, + `StartURL` String, + `RefererDomain` String, + `StartURLDomain` String, + `EndURL` String, + `LinkURL` String, + `IsDownload` UInt8, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `PlaceID` Int32, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `IsYandex` UInt8, + `GoalReachesDepth` Int32, + `GoalReachesURL` Int32, + `GoalReachesAny` Int32, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `MobilePhoneModel` String, + `ClientEventTime` DateTime, + `RegionID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `IPNetworkID` UInt32, + `SilverlightVersion3` UInt32, + `CodeVersion` UInt32, + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `UserAgentMajor` UInt16, + `UserAgentMinor` UInt16, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `SilverlightVersion2` UInt8, + `SilverlightVersion4` UInt16, + `FlashVersion3` UInt16, + `FlashVersion4` UInt16, + `ClientTimeZone` Int16, + `OS` UInt8, + `UserAgent` UInt8, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `NetMajor` UInt8, + `NetMinor` UInt8, + `MobilePhone` UInt8, + `SilverlightVersion1` UInt8, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `JavaEnable` UInt8, + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `BrowserLanguage` UInt16, + `BrowserCountry` UInt16, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `Params` Array(String), + `Goals` Nested( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32), + `WatchIDs` Array(UInt64), + `ParamSumPrice` Int64, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `ClickLogID` UInt64, + `ClickEventID` Int32, + `ClickGoodEvent` Int32, + `ClickEventTime` DateTime, + `ClickPriorityID` Int32, + `ClickPhraseID` Int32, + `ClickPageID` Int32, + `ClickPlaceID` Int32, + `ClickTypeID` Int32, + `ClickResourceID` Int32, + `ClickCost` UInt32, + `ClickClientIP` UInt32, + `ClickDomainID` UInt32, + `ClickURL` String, + `ClickAttempt` UInt8, + `ClickOrderID` UInt32, + `ClickBannerID` UInt32, + `ClickMarketCategoryID` UInt32, + `ClickMarketPP` UInt32, + `ClickMarketCategoryName` String, + `ClickMarketPPName` String, + `ClickAWAPSCampaignName` String, + `ClickPageName` String, + `ClickTargetType` UInt16, + `ClickTargetPhraseID` UInt64, + `ClickContextType` UInt8, + `ClickSelectType` Int8, + `ClickOptions` String, + `ClickGroupBannerID` Int32, + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `FirstVisit` DateTime, + `PredLastVisit` Date, + `LastVisit` Date, + `TotalVisits` UInt32, + `TraficSource` Nested( + ID Int8, + SearchEngineID UInt16, + AdvEngineID UInt8, + PlaceID UInt16, + SocialSourceNetworkID UInt8, + Domain String, + SearchPhrase String, + SocialSourcePage String), 
+    `Attendance` FixedString(16),
+    `CLID` UInt32,
+    `YCLID` UInt64,
+    `NormalizedRefererHash` UInt64,
+    `SearchPhraseHash` UInt64,
+    `RefererDomainHash` UInt64,
+    `NormalizedStartURLHash` UInt64,
+    `StartURLDomainHash` UInt64,
+    `NormalizedEndURLHash` UInt64,
+    `TopLevelDomain` UInt64,
+    `URLScheme` UInt64,
+    `OpenstatServiceNameHash` UInt64,
+    `OpenstatCampaignIDHash` UInt64,
+    `OpenstatAdIDHash` UInt64,
+    `OpenstatSourceIDHash` UInt64,
+    `UTMSourceHash` UInt64,
+    `UTMMediumHash` UInt64,
+    `UTMCampaignHash` UInt64,
+    `UTMContentHash` UInt64,
+    `UTMTermHash` UInt64,
+    `FromHash` UInt64,
+    `WebVisorEnabled` UInt8,
+    `WebVisorActivity` UInt32,
+    `ParsedParams` Nested(
+        Key1 String,
+        Key2 String,
+        Key3 String,
+        Key4 String,
+        Key5 String,
+        ValueDouble Float64),
+    `Market` Nested(
+        Type UInt8,
+        GoalID UInt32,
+        OrderID String,
+        OrderPrice Int64,
+        PP UInt32,
+        DirectPlaceID UInt32,
+        DirectOrderID UInt32,
+        DirectBannerID UInt32,
+        GoodID String,
+        GoodName String,
+        GoodQuantity Int32,
+        GoodPrice Int64),
+    `IslandID` FixedString(16)
+)
+ENGINE = CollapsingMergeTree(Sign)
+PARTITION BY toYYYYMM(StartDate)
+ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
+SAMPLE BY intHash32(UserID)
+SETTINGS index_granularity = 8192
+```
+
+You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
+
+As we can see, `hits_v1` uses the [basic MergeTree engine](../engines/table_engines/mergetree_family/mergetree.md), while `visits_v1` uses the [Collapsing](../engines/table_engines/mergetree_family/collapsingmergetree.md) variant.
+
+### Import Data {#import-data}
+
+Data import to ClickHouse is done via an [INSERT INTO](../sql_reference/statements/insert_into.md) query, like in many other SQL databases. However, the data is usually provided in one of the [supported serialization formats](../interfaces/formats.md) instead of a `VALUES` clause (which is also supported).
+
+The files we downloaded earlier are in the tab-separated format, so here's how to import them via the console client:
+
+``` bash
+clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv
+clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv
+```
+
+ClickHouse has a lot of [settings to tune](../operations/settings/index.md), and one way to specify them in the console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what they mean, and what their defaults are is to query the `system.settings` table:
+
+``` sql
+SELECT name, value, changed, description
+FROM system.settings
+WHERE name LIKE '%max_insert_b%'
+FORMAT TSV
+
+max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
+```
+
+Optionally, you can [OPTIMIZE](../sql_reference/statements/misc.md#misc_operations-optimize) the tables after import. Tables that are configured with an engine from the MergeTree family always do merges of data parts in the background to optimize data storage (or at least check whether it makes sense).
+These queries force the table engine to do the storage optimization right now instead of some time later:
+
+``` bash
+clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
+clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL"
+```
+
+These queries start an I/O- and CPU-intensive operation, so if the table continuously receives new data, it's better to leave it alone and let merges run in the background.
+
+Now we can check whether the table import was successful:
+
+``` bash
+clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1"
+clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1"
+```
+
+## Example Queries {#example-queries}
+
+``` sql
+SELECT
+    StartURL AS URL,
+    AVG(Duration) AS AvgDuration
+FROM tutorial.visits_v1
+WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30'
+GROUP BY URL
+ORDER BY AvgDuration DESC
+LIMIT 10
+```
+
+``` sql
+SELECT
+    sum(Sign) AS visits,
+    sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits,
+    (100. * goal_visits) / visits AS goal_percent
+FROM tutorial.visits_v1
+WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru')
+```
+
+## Cluster Deployment {#cluster-deployment}
+
+A ClickHouse cluster is a homogenous cluster. Steps to set one up:
+
+1. Install ClickHouse server on all machines of the cluster
+2. Set up cluster configs in the configuration files
+3. Create local tables on each instance
+4. Create a [Distributed table](../engines/table_engines/special/distributed.md)
+
+A [Distributed table](../engines/table_engines/special/distributed.md) is actually a kind of “view” to the local tables of a ClickHouse cluster. A SELECT query from a distributed table executes using the resources of all the cluster's shards. You may specify configs for multiple clusters and create multiple distributed tables providing views to different clusters.
+
+Example config for a cluster with three shards, one replica each:
+
+``` xml
+<remote_servers>
+    <perftest_3shards_1replicas>
+        <shard>
+            <replica>
+                <host>example-perftest01j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <replica>
+                <host>example-perftest02j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <replica>
+                <host>example-perftest03j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </perftest_3shards_1replicas>
+</remote_servers>
+```
+
+For further demonstration, let's create a new local table with the same `CREATE TABLE` query that we used for `hits_v1`, but with a different table name:
+
+``` sql
+CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
+```
+
+Creating a distributed table providing a view into the local tables of the cluster:
+
+``` sql
+CREATE TABLE tutorial.hits_all AS tutorial.hits_local
+ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
+```
+
+A common practice is to create similar distributed tables on all machines of the cluster. It allows running distributed queries on any machine of the cluster. There's also an alternative option to create a temporary distributed table for a given SELECT query using the [remote](../sql_reference/table_functions/remote.md) table function.
+
+Let's run an [INSERT SELECT](../sql_reference/statements/insert_into.md) into the distributed table to spread the table to multiple servers.
+
+``` sql
+INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1;
+```
+
+!!! warning "Notice"
+    This approach is not suitable for sharding large tables. There's a separate tool, [clickhouse-copier](../operations/utilities/clickhouse-copier.md), that can re-shard arbitrary large tables.
+
+As you could expect, computationally heavy queries run N times faster if they utilize 3 servers instead of one.
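+
+For example, a quick sketch of a distributed query against the table created above; the count should match what was imported locally:
+
+``` bash
+# Runs on all shards and merges the partial results on the initiating server.
+clickhouse-client --query "SELECT count() FROM tutorial.hits_all"
+```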
+
+In this case, we have used a cluster with 3 shards, and each contains a single replica.
+
+To provide resilience in a production environment, we recommend that each shard contain 2-3 replicas spread between multiple availability zones or datacenters (or at least racks). Note that ClickHouse supports an unlimited number of replicas.
+
+Example config for a cluster of one shard containing three replicas:
+
+``` xml
+<remote_servers>
+    ...
+    <perftest_1shards_3replicas>
+        <shard>
+            <replica>
+                <host>example-perftest01j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example-perftest02j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example-perftest03j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </perftest_1shards_3replicas>
+</remote_servers>
+```
+
+To enable native replication, [ZooKeeper](http://zookeeper.apache.org/) is required. ClickHouse takes care of data consistency on all replicas and runs a restore procedure after failure automatically. It's recommended to deploy the ZooKeeper cluster on separate servers (where no other processes, including ClickHouse, are running).
+
+!!! note "Note"
+    ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended; in this case, ClickHouse won't be able to guarantee data consistency on all replicas. Thus, it becomes the responsibility of your application.
+
+ZooKeeper locations are specified in the configuration file:
+
+``` xml
+<zookeeper>
+    <node>
+        <host>zoo01.yandex.ru</host>
+        <port>2181</port>
+    </node>
+    <node>
+        <host>zoo02.yandex.ru</host>
+        <port>2181</port>
+    </node>
+    <node>
+        <host>zoo03.yandex.ru</host>
+        <port>2181</port>
+    </node>
+</zookeeper>
+```
+
+Also, we need to set macros for identifying each shard and replica that are used on table creation:
+
+``` xml
+<macros>
+    <shard>01</shard>
+    <replica>01</replica>
+</macros>
+```
+
+If there are no replicas at the moment of replicated table creation, a new first replica is instantiated. If there are already live replicas, the new replica clones the data from the existing ones. You have the option to create all replicated tables first and then insert data into them. Another option is to create some replicas and add the others after or during data insertion.
+
+``` sql
+CREATE TABLE tutorial.hits_replica (...)
+ENGINE = ReplicatedMergeTree(
+    '/clickhouse_perftest/tables/{shard}/hits',
+    '{replica}'
+)
+...
+```
+
+Here we use the [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md) table engine. In the parameters, we specify the ZooKeeper path containing the shard and replica identifiers.
+
+``` sql
+INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
+```
+
+Replication operates in multi-master mode. Data can be loaded into any replica, and the system then syncs it with the other instances automatically. Replication is asynchronous, so at a given moment, not all replicas may contain recently inserted data. At least one replica should be up to allow data ingestion. Others will sync up the data and repair consistency once they become active again. Note that this approach allows for the low possibility of a loss of recently inserted data.
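+
+To see how a replica is doing, a sketch like this against the `system.replicas` system table can help; the table name matches the example above, and the column set here is just a useful subset, not the full list:
+
+``` bash
+clickhouse-client --query "
+    SELECT database, table, is_leader, total_replicas, active_replicas
+    FROM system.replicas
+    WHERE table = 'hits_replica'"
+```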
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/tutorial/)
diff --git a/docs/tr/guides/apply_catboost_model.md b/docs/tr/guides/apply_catboost_model.md
new file mode 100644
index 00000000000..92bfac226f2
--- /dev/null
+++ b/docs/tr/guides/apply_catboost_model.md
@@ -0,0 +1,239 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 41
+toc_title: Applying CatBoost Models
+---
+
+# Applying a CatBoost Model in ClickHouse {#applying-catboost-model-in-clickhouse}
+
+[CatBoost](https://catboost.ai) is a free and open-source gradient boosting library developed at [Yandex](https://yandex.com/company/) for machine learning.
+
+With this instruction, you will learn to apply pre-trained models in ClickHouse by running model inference from SQL.
+
+To apply a CatBoost model in ClickHouse:
+
+1. [Create a Table](#create-table).
+2. [Insert the Data to the Table](#insert-data-to-table).
+3. [Integrate CatBoost into ClickHouse](#integrate-catboost-into-clickhouse) (optional step).
+4. [Run the Model Inference from SQL](#run-model-inference).
+
+For more information about training CatBoost models, see [Training and applying models](https://catboost.ai/docs/features/training.html#training).
+
+## Prerequisites {#prerequisites}
+
+If you don't have [Docker](https://docs.docker.com/install/) yet, install it.
+
+!!! note "Note"
+    [Docker](https://www.docker.com) is a software platform that allows you to create containers that isolate a CatBoost and ClickHouse installation from the rest of the system.
+
+Before applying a CatBoost model:
+
+**1.** Pull the [Docker image](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) from the registry:
+
+``` bash
+$ docker pull yandex/tutorial-catboost-clickhouse
+```
+
+This Docker image contains everything you need to run CatBoost and ClickHouse: code, runtime, libraries, environment variables, and configuration files.
+
+**2.** Make sure the Docker image has been successfully pulled:
+
+``` bash
+$ docker image ls
+REPOSITORY                            TAG                 IMAGE ID            CREATED             SIZE
+yandex/tutorial-catboost-clickhouse   latest              622e4d17945b        22 hours ago        1.37GB
+```
+
+**3.** Start a Docker container based on this image:
+
+``` bash
+$ docker run -it -p 8888:8888 yandex/tutorial-catboost-clickhouse
+```
+
+## 1. Create a Table {#create-table}
+
+To create a ClickHouse table for the training sample:
+
+**1.** Start the ClickHouse console client in interactive mode:
+
+``` bash
+$ clickhouse client
+```
+
+!!! note "Note"
+    The ClickHouse server is already running inside the Docker container.
+
+**2.** Create the table using the command:
+
+``` sql
+:) CREATE TABLE amazon_train
+(
+    date Date MATERIALIZED today(),
+    ACTION UInt8,
+    RESOURCE UInt32,
+    MGR_ID UInt32,
+    ROLE_ROLLUP_1 UInt32,
+    ROLE_ROLLUP_2 UInt32,
+    ROLE_DEPTNAME UInt32,
+    ROLE_TITLE UInt32,
+    ROLE_FAMILY_DESC UInt32,
+    ROLE_FAMILY UInt32,
+    ROLE_CODE UInt32
+)
+ENGINE = MergeTree ORDER BY date
+```
+
+**3.** Exit from the ClickHouse console client:
+
+``` sql
+:) exit
+```
+
+## 2. Insert the Data to the Table {#insert-data-to-table}
+
+To insert the data:
+
+**1.** Run the following command:
+
+``` bash
+$ clickhouse client --host 127.0.0.1 --query 'INSERT INTO amazon_train FORMAT CSVWithNames' < ~/amazon/train.csv
+```
+
+**2.** Start the ClickHouse console client in interactive mode:
+
+``` bash
+$ clickhouse client
+```
+
+**3.** Make sure the data has been uploaded:
+
+``` sql
+:) SELECT count() FROM amazon_train
+
+SELECT count()
+FROM amazon_train
+
++-count()-+
+|   65538 |
++-------+
+```
+
+## 3. Integrate CatBoost into ClickHouse {#integrate-catboost-into-clickhouse}
+
+!!! note "Note"
+    **Optional step.** The Docker image contains everything you need to run CatBoost and ClickHouse.
+
+To integrate CatBoost into ClickHouse:
+
+**1.** Build the evaluation library.
+
+The fastest way to evaluate a CatBoost model is to compile the `libcatboostmodel.<so|dll|dylib>` library. For more information about how to build the library, see [CatBoost documentation](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html).
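+
+Since the tutorial Docker image already ships a prebuilt copy of this library, an alternative to compiling it yourself is simply to check that it is in place inside the container (the path below is the one used in the configuration later in this guide):
+
+``` bash
+# Inside the container: the prebuilt evaluation library from the image.
+$ ls -l /home/catboost/data/libcatboostmodel.so
+```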
+
+**2.** Create a new directory anywhere, with any name, for example, `data`, and put the created library in it. The Docker image already contains the library at `data/libcatboostmodel.so`.
+
+**3.** Create a new directory for the model configuration anywhere, with any name, for example, `models`.
+
+**4.** Create a model configuration file with any name, for example, `models/amazon_model.xml`.
+
+**5.** Describe the model configuration:
+
+``` xml
+<models>
+    <model>
+        <!-- Model type. Now catboost only. -->
+        <type>catboost</type>
+        <!-- Model name. -->
+        <name>amazon</name>
+        <!-- Path to trained model. -->
+        <path>/home/catboost/tutorial/catboost_model.bin</path>
+        <!-- Update interval. -->
+        <lifetime>0</lifetime>
+    </model>
+</models>
+```
+
+**6.** Add the path to CatBoost and the model configuration to the ClickHouse configuration:
+
+``` xml
+<catboost_dynamic_library_path>/home/catboost/data/libcatboostmodel.so</catboost_dynamic_library_path>
+<models_config>/home/catboost/models/*_model.xml</models_config>
+```
+
+## 4. Run the Model Inference from SQL {#run-model-inference}
+
+For a test model run, launch the ClickHouse client `$ clickhouse client`.
+
+Let's make sure the model is working:
+
+``` sql
+:) SELECT
+    modelEvaluate('amazon',
+                RESOURCE,
+                MGR_ID,
+                ROLE_ROLLUP_1,
+                ROLE_ROLLUP_2,
+                ROLE_DEPTNAME,
+                ROLE_TITLE,
+                ROLE_FAMILY_DESC,
+                ROLE_FAMILY,
+                ROLE_CODE) > 0 AS prediction,
+    ACTION AS target
+FROM amazon_train
+LIMIT 10
+```
+
+!!! note "Note"
+    The function [modelEvaluate](../sql_reference/functions/other_functions.md#function-modelevaluate) returns a tuple with per-class raw predictions for multiclass models.
+
+Let's predict the probability:
+
+``` sql
+:) SELECT
+    modelEvaluate('amazon',
+                RESOURCE,
+                MGR_ID,
+                ROLE_ROLLUP_1,
+                ROLE_ROLLUP_2,
+                ROLE_DEPTNAME,
+                ROLE_TITLE,
+                ROLE_FAMILY_DESC,
+                ROLE_FAMILY,
+                ROLE_CODE) AS prediction,
+    1. / (1 + exp(-prediction)) AS probability,
+    ACTION AS target
+FROM amazon_train
+LIMIT 10
+```
+
+!!! note "Note"
+    More info about the [exp()](../sql_reference/functions/math_functions.md) function.
+
+Let's calculate LogLoss on the sample:
+
+``` sql
+:) SELECT -avg(tg * log(prob) + (1 - tg) * log(1 - prob)) AS logloss
+FROM
+(
+    SELECT
+        modelEvaluate('amazon',
+                RESOURCE,
+                MGR_ID,
+                ROLE_ROLLUP_1,
+                ROLE_ROLLUP_2,
+                ROLE_DEPTNAME,
+                ROLE_TITLE,
+                ROLE_FAMILY_DESC,
+                ROLE_FAMILY,
+                ROLE_CODE) AS prediction,
+        1. / (1. + exp(-prediction)) AS prob,
+        ACTION AS tg
+    FROM amazon_train
+)
+```
+
+!!! note "Note"
+    More info about the [avg()](../sql_reference/aggregate_functions/reference.md#agg_function-avg) and [log()](../sql_reference/functions/math_functions.md) functions.
+
+[Original article](https://clickhouse.tech/docs/en/guides/apply_catboost_model/)
diff --git a/docs/tr/guides/index.md b/docs/tr/guides/index.md
new file mode 100644
index 00000000000..95ad65443b0
--- /dev/null
+++ b/docs/tr/guides/index.md
@@ -0,0 +1,16 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: Guides
+toc_priority: 38
+toc_title: Overview
+---
+
+# ClickHouse Guides {#clickhouse-guides}
+
+A list of detailed step-by-step instructions that help to solve various tasks using ClickHouse:
+
+- [Tutorial on simple cluster set-up](../getting_started/tutorial.md)
+- [Applying a CatBoost model in ClickHouse](apply_catboost_model.md)
+
+[Original article](https://clickhouse.tech/docs/en/guides/)
diff --git a/docs/tr/images b/docs/tr/images
new file mode 120000
index 00000000000..73937c941ec
--- /dev/null
+++ b/docs/tr/images
@@ -0,0 +1 @@
+../en/images
\ No newline at end of file
diff --git a/docs/tr/index.md b/docs/tr/index.md
new file mode 100644
index 00000000000..5cbf9330750
--- /dev/null
+++ b/docs/tr/index.md
@@ -0,0 +1,95 @@
+---
+toc_priority: 0
+toc_title: Overview
+---
+
+# What Is ClickHouse? {#what-is-clickhouse}
+
+ClickHouse is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).
+
+In a “normal” row-oriented DBMS, data is stored in this order:
+
+| Row | WatchID     | JavaEnable | Title              | GoodEvent | EventTime           |
+|-----|-------------|------------|--------------------|-----------|---------------------|
+| \#0 | 89354350662 | 1          | Investor Relations | 1         | 2016-05-18 05:19:20 |
+| \#1 | 90329509958 | 0          | Contact us         | 1         | 2016-05-18 08:10:20 |
+| \#2 | 89953706054 | 1          | Mission            | 1         | 2016-05-18 07:38:00 |
+| \#N | …           | …          | …                  | …         | …                   |
+
+In other words, all the values related to a row are physically stored next to each other.
+
+Examples of row-oriented DBMSs are MySQL, Postgres, and MS SQL Server.
+
+In a column-oriented DBMS, data is stored like this:
+
+| Row:        | \#0                 | \#1                 | \#2                 | \#N |
+|-------------|---------------------|---------------------|---------------------|-----|
+| WatchID:    | 89354350662         | 90329509958         | 89953706054         | …   |
+| JavaEnable: | 1                   | 0                   | 1                   | …   |
+| Title:      | Investor Relations  | Contact us          | Mission             | …   |
+| GoodEvent:  | 1                   | 1                   | 1                   | …   |
+| EventTime:  | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | …   |
+
+These examples only show the order in which data is arranged. The values from different columns are stored separately, and data from the same column is stored together.
+
+Examples of column-oriented DBMSs: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+.
+
+Different orders for storing data are better suited to different data access scenarios.
+The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; and requirements for latency and throughput for each type of query.
+
+The higher the load on the system, the more important it is to customize the system setup to match the requirements of the usage scenario, and the more fine-grained this customization becomes. There is no system that is equally well-suited to significantly different data access scenarios. If a system is adaptable to a wide set of scenarios, under a high load it will handle all the scenarios equally poorly, or will work well for just one or a few of the possible scenarios.
+
+## Key Properties of the OLAP Scenario {#key-properties-of-olap-scenario}
+
+- The vast majority of requests are for read access.
+- Data is updated in fairly large batches (\> 1000 rows), not by single rows; or it is not updated at all.
+- Data is added to the database but is not modified.
+- For some queries, quite a large number of rows are extracted from the database, but the result is only a few rows and columns.
+- Tables are “wide,” meaning a table contains a large number of columns (dozens).
+- Queries are relatively rare compared to other scenarios (usually 100 queries per server or fewer per second).
+- For simple queries, latencies around 50 ms are allowed.
+- Stored values are fairly small: mostly numbers and short strings (for example, 60 bytes per URL).
+- Processing a single query requires reading a large amount of data (up to billions of rows per second per server).
+- Transactions are not necessary.
+- Data consistency requirements are low.
+- There is usually one very large table; all the others are small.
+- A query result is significantly smaller than the amount of data read. In other words, data is filtered or aggregated from billions of rows, so the result fits in a single server's RAM.
+
+It is easy to see that the OLAP scenario is very different from other popular scenarios (such as Online Transactional Processing, OLTP, or Key-Value databases). So it doesn't make sense to try to use OLTP or Key-Value databases for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases.
+
+## Why Column-Oriented Databases Work Better in the OLAP Scenario {#why-column-oriented-databases-work-better-in-the-olap-scenario}
+
+Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster in processing most queries. The reasons are explained in detail below, but the fact is easier to demonstrate visually:
+
+**Row-oriented DBMS**
+
+![Row-oriented](images/row_oriented.gif#)
+
+**Column-oriented DBMS**
+
+![Column-oriented](images/column_oriented.gif#)
+
+See the difference?
+
+### Input/Output {#inputoutput}
+
+1. For an analytical query, only a small number of table columns need to be read.
+In a column-oriented database, you can read just the data you need. For example, if you need 5 columns out of 100, you can expect a 20-fold reduction in I/O.
+2. Since data is read in packets, it is easier to compress. Data in columns is also easier to compress. This further reduces the I/O volume.
+3. Due to the reduced I/O, more data fits in the system cache.
+
+For example, the query “count the number of records for each advertising platform” requires reading one “advertising platform ID” column, which takes up 1 byte uncompressed. If most of the traffic was not from advertising platforms, you can expect at least 10-fold compression of this column. When using a quick compression algorithm, data decompression is possible at a speed of at least several gigabytes of uncompressed data per second. In other words, this query can be processed at a speed of approximately several billion rows per second on a single server. This speed is actually achieved in practice.
+
+### CPU {#cpu}
+
+Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you don't do this, with any half-decent disk subsystem, the query interpreter inevitably stalls the CPU. It makes sense to both store data in columns and process it, when possible, by columns.
+
+There are two ways to do this:
+
+1. A vector engine. All operations are written for vectors, instead of for separate values. This means you don't need to call operations very often, and dispatching costs are negligible. Operation code contains an optimized internal cycle.
+
+2. Code generation. The code generated for the query has all the indirect calls in it.
+
+This is not done in “normal” databases, because it doesn't make sense when running simple queries. However, there are exceptions. For example, MemSQL uses code generation to reduce latency when processing SQL queries. (For comparison, analytical DBMSs require optimization of throughput, not latency.)
+
+Note that for CPU efficiency, the query language must be declarative (SQL or MDX), or at least a vector (J, K). The query should only contain implicit loops, allowing for optimization.
+
+{## [Original article](https://clickhouse.tech/docs/en/) ##}
diff --git a/docs/tr/interfaces/cli.md b/docs/tr/interfaces/cli.md
new file mode 100644
index 00000000000..c526105dfe7
--- /dev/null
+++ b/docs/tr/interfaces/cli.md
@@ -0,0 +1,149 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 17
+toc_title: Command-Line Client
+---
+
+# Command-line Client {#command-line-client}
+
+ClickHouse provides a native command-line client: `clickhouse-client`. The client supports command-line options and configuration files. For more information, see [Configuring](#interfaces_cli_configuration).
+
+[Install](../getting_started/index.md) it from the `clickhouse-client` package and run it with the command `clickhouse-client`.
+
+``` bash
+$ clickhouse-client
+ClickHouse client version 19.17.1.1579 (official build).
+Connecting to localhost:9000 as user default.
+Connected to ClickHouse server version 19.17.1 revision 54428.
+
+:)
+```
+
+Different client and server versions are compatible with one another, but some features may not be available in older clients. We recommend using the same version of the client as the server app.
+When you try to use a client of an older version, the server `clickhouse-client` displays the message:
+
+    ClickHouse client version is older than ClickHouse server. It may lack support for new features.
+
+## Usage {#cli_usage}
+
+The client can be used in interactive and non-interactive (batch) mode. To use batch mode, specify the ‘query’ parameter, or send data to ‘stdin’ (it verifies that ‘stdin’ is not a terminal), or both. Similar to the HTTP interface, when using the ‘query’ parameter and sending data to ‘stdin’, the request is a concatenation of the ‘query’ parameter, a line feed, and the data in ‘stdin’. This is convenient for large INSERT queries.
+
+Example of using the client to insert data:
+
+``` bash
+$ echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
+
+$ cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
+3, 'some text', '2016-08-14 00:00:00'
+4, 'some more text', '2016-08-14 00:00:01'
+_EOF
+
+$ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
+```
+
+In batch mode, the default data format is TabSeparated. You can set the format in the FORMAT clause of the query.
+
+By default, you can only process a single query in batch mode. To make multiple queries from a “script,” use the `--multiquery` parameter, as in the sketch below. This works for all queries except INSERT. Query results are output consecutively without additional separators. Similarly, to process a large number of queries, you can run ‘clickhouse-client’ for each query. Note that it may take tens of milliseconds to launch the ‘clickhouse-client’ program.
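+
+A quick sketch of `--multiquery` (table-free queries chosen here so it runs anywhere):
+
+``` bash
+# Two queries in one invocation; the results are printed one after another.
+clickhouse-client --multiquery --query "SELECT 1; SELECT 2;"
+```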
+
+In interactive mode, you get a command line where you can enter queries.
+
+If ‘multiline’ is not specified (the default): To run the query, press Enter. The semicolon is not necessary at the end of the query. To enter a multiline query, enter a backslash `\` before the line feed. After you press Enter, you will be asked to enter the next line of the query.
+
+If multiline is specified: To run a query, end it with a semicolon and press Enter. If the semicolon was omitted at the end of the entered line, you will be asked to enter the next line of the query.
+
+Only a single query is run, so everything after the semicolon is ignored.
+
+You can specify `\G` instead of or after the semicolon. This indicates Vertical format. In this format, each value is printed on a separate line, which is convenient for wide tables. This unusual feature was added for compatibility with the MySQL CLI.
+
+The command line is based on ‘replxx’ (similar to ‘readline’). In other words, it uses the familiar keyboard shortcuts and keeps a history. The history is written to `~/.clickhouse-client-history`.
+
+By default, the format used is PrettyCompact. You can change the format in the FORMAT clause of the query, by specifying `\G` at the end of the query, by using the `--format` or `--vertical` argument in the command line, or by using the client configuration file.
+
+To exit the client, press Ctrl+D (or Ctrl+C), or enter one of the following instead of a query: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q”
+
+When processing a query, the client shows:
+
+1. Progress, which is updated no more than 10 times per second (by default). For quick queries, the progress might not have time to be displayed.
+2. The formatted query after parsing, for debugging.
+3. The result in the specified format.
+4. The number of lines in the result, the time passed, and the average speed of query processing.
+
+You can cancel a long query by pressing Ctrl+C. However, you will still need to wait a little for the server to abort the request. It is not possible to cancel a query at certain stages. If you don't wait and press Ctrl+C a second time, the client will exit.
+
+The command-line client allows passing external data (external temporary tables) for querying. For more information, see the section “External data for query processing”.
+
+### Queries with Parameters {#cli-queries-with-parameters}
+
+You can create a query with parameters and pass values to them from the client application. This allows avoiding formatting the query with specific dynamic values on the client side. For example:
+
+``` bash
+$ clickhouse-client --param_parName="[1, 2]"  -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}"
+```
+
+#### Query Syntax {#cli-queries-with-parameters-syntax}
+
+Format a query as usual, then place the values that you want to pass from the app parameters to the query in braces in the following format:
+
+``` sql
+{<name>:<data type>}
+```
+
+- `name` — Placeholder identifier. In the console client it should be used in app parameters as `--param_<name> = value`.
+- `data type` — [Data type](../sql_reference/data_types/index.md) of the app parameter value. For example, a data structure like `(integer, ('string', integer))` can have the `Tuple(UInt8, Tuple(String, UInt8))` data type (you can also use other [integer](../sql_reference/data_types/int_uint.md) types).
+
+#### Example {#example}
+
+``` bash
+$ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}"
+```
+
+## Configuring {#interfaces_cli_configuration}
+
+You can pass parameters to `clickhouse-client` (all parameters have a default value) using:
+
+- The command line
+
+    Command-line options override the default values and settings in configuration files.
+
+- Configuration files.
+
+    Settings in the configuration files override the default values.
+
+### Command-Line Options {#command-line-options}
+
+- `--host, -h` -– The server name, ‘localhost’ by default. You can use either the name or the IPv4 or IPv6 address.
+- `--port` – The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports.
+- `--user, -u` – The username. Default value: default.
+- `--password` – The password. Default value: empty string.
+- `--query, -q` – The query to process when using non-interactive mode.
+- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
+- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
+- `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons.
+- `--format, -f` – Use the specified default format to output the result.
+- `--vertical, -E` – If specified, use the Vertical format by default to output the result. This is the same as ‘--format=Vertical’. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
+- `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.
+- `--stacktrace` – If specified, also print the stack trace if an exception occurs.
+- `--config-file` – The name of the configuration file.
+- `--secure` – If specified, will connect to the server over a secure connection.
+- `--param_<name>` — Value for a [query with parameters](#cli-queries-with-parameters).
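+
+For instance, an illustrative combination of several of these options (the host, port, user, and query values here are placeholders to adapt):
+
+``` bash
+# Connect explicitly and print the result in CSV instead of the default.
+clickhouse-client --host 127.0.0.1 --port 9000 --user default \
+    --format CSV --query "SELECT version()"
+```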
+
+### Configuration Files {#configuration_files}
+
+`clickhouse-client` uses the first existing file of the following:
+
+- Defined in the `--config-file` parameter.
+- `./clickhouse-client.xml`
+- `~/.clickhouse-client/config.xml`
+- `/etc/clickhouse-client/config.xml`
+
+Example of a configuration file:
+
+``` xml
+<config>
+    <user>username</user>
+    <password>password</password>
+    <secure>False</secure>
+</config>
+```
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/cli/)
diff --git a/docs/tr/interfaces/cpp.md b/docs/tr/interfaces/cpp.md
new file mode 100644
index 00000000000..9ebf93286ff
--- /dev/null
+++ b/docs/tr/interfaces/cpp.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 24
+toc_title: C++ Client Library
+---
+
+# C++ Client Library {#c-client-library}
+
+See the README at the [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) repository.
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/cpp/)
diff --git a/docs/tr/interfaces/formats.md b/docs/tr/interfaces/formats.md
new file mode 100644
index 00000000000..f522d697aa1
--- /dev/null
+++ b/docs/tr/interfaces/formats.md
@@ -0,0 +1,1212 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 21
+toc_title: Input and Output Formats
+---
+
+# Formats for Input and Output Data {#formats}
+
+ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read an external dictionary. A format supported for output can be used to arrange the
+results of a `SELECT`, and to perform `INSERT`s into a file-backed table.
+
+The supported formats are:
+
+| Format                                                          | Input | Output |
+|-----------------------------------------------------------------|-------|--------|
+| [TabSeparated](#tabseparated)                                   | ✔     | ✔      |
+| [TabSeparatedRaw](#tabseparatedraw)                             | ✗     | ✔      |
+| [TabSeparatedWithNames](#tabseparatedwithnames)                 | ✔     | ✔      |
+| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔     | ✔      |
+| [Template](#format-template)                                    | ✔     | ✔      |
+| [TemplateIgnoreSpaces](#templateignorespaces)                   | ✔     | ✗      |
+| [CSV](#csv)                                                     | ✔     | ✔      |
+| [CSVWithNames](#csvwithnames)                                   | ✔     | ✔      |
+| [CustomSeparated](#format-customseparated)                      | ✔     | ✔      |
+| [Values](#data-format-values)                                   | ✔     | ✔      |
+| [Vertical](#vertical)                                           | ✗     | ✔      |
+| [VerticalRaw](#verticalraw)                                     | ✗     | ✔      |
+| [JSON](#json)                                                   | ✗     | ✔      |
+| [JSONCompact](#jsoncompact)                                     | ✗     | ✔      |
+| [JSONEachRow](#jsoneachrow)                                     | ✔     | ✔      |
+| [TSKV](#tskv)                                                   | ✔     | ✔      |
+| [Pretty](#pretty)                                               | ✗     | ✔      |
+| [PrettyCompact](#prettycompact)                                 | ✗     | ✔      |
+| [PrettyCompactMonoBlock](#prettycompactmonoblock)               | ✗     | ✔      |
+| [PrettyNoEscapes](#prettynoescapes)                             | ✗     | ✔      |
+| [PrettySpace](#prettyspace)                                     | ✗     | ✔      |
+| [Protobuf](#protobuf)                                           | ✔     | ✔      |
+| [Avro](#data-format-avro)                                       | ✔     | ✔      |
+| [AvroConfluent](#data-format-avro-confluent)                    | ✔     | ✗      |
+| [Parquet](#data-format-parquet)                                 | ✔     | ✔      |
+| [ORC](#data-format-orc)                                         | ✔     | ✗      |
+| [RowBinary](#rowbinary)                                         | ✔     | ✔      |
+| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes)       | ✔     | ✔      |
+| [Native](#native)                                               | ✔     | ✔      |
+| [Null](#null)                                                   | ✗     | ✔      |
+| [XML](#xml)                                                     | ✗     | ✔      |
+| [CapnProto](#capnproto)                                         | ✔     | ✗      |
+
+You can control some format processing parameters with the ClickHouse settings. For more information, read the [Settings](../operations/settings/settings.md) section.
+
+## TabSeparated {#tabseparated}
+
+In TabSeparated format, data is written by row. Each row contains values separated by tabs. Each value is followed by a tab, except the last value in the row, which is followed by a line feed. Strictly Unix line feeds are assumed everywhere. The last row also must contain a line feed at the end. Values are written in text format, without enclosing quotation marks, and with special characters escaped.
+
+This format is also available under the name `TSV`.
+
+The `TabSeparated` format is convenient for processing data using custom programs and scripts. It is used by default in the HTTP interface and in the command-line client's batch mode. This format also allows transferring data between different DBMSs. For example, you can get a dump from MySQL and upload it to ClickHouse, or vice versa.
+
+The `TabSeparated` format supports outputting total values (when using WITH TOTALS) and extreme values (when ‘extremes’ is set to 1). In these cases, the total values and extremes are output after the main data. The main result, total values, and extremes are separated from each other by an empty line. Example:
+
+``` sql
+SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated
+```
+
+``` text
+2014-03-17      1406958
+2014-03-18      1383658
+2014-03-19      1405797
+2014-03-20      1353623
+2014-03-21      1245779
+2014-03-22      1031592
+2014-03-23      1046491
+
+0000-00-00      8873898
+
+2014-03-17      1031592
+2014-03-23      1406958
+```
+
+### Data Formatting {#data-formatting}
+
+Integer numbers are written in decimal form. Numbers can contain an extra “+” character at the beginning (ignored when parsing, and not recorded when formatting). Non-negative numbers can't contain the negative sign.
+When reading, it is allowed to parse an empty string as a zero, or (for signed types) a string consisting of just a minus sign as a zero. Numbers that do not fit into the corresponding data type may be parsed as a different number, without an error message.
+
+Floating-point numbers are written in decimal form. The dot is used as the decimal separator. Exponential entries are supported, as are ‘inf’, ‘+inf’, ‘-inf’, and ‘nan’. An entry of floating-point numbers may begin or end with a decimal point.
+During formatting, accuracy may be lost on floating-point numbers.
+During parsing, it is not strictly required to read the nearest machine-representable number.
+
+Dates are written in YYYY-MM-DD format and parsed in the same format, but with any characters as separators.
+Dates with times are written in the format `YYYY-MM-DD hh:mm:ss` and parsed in the same format, but with any characters as separators.
+This all occurs in the system time zone at the time the client or server starts (depending on which of them formats data). For dates with times, daylight saving time is not specified. So if a dump has times during daylight saving time, the dump does not unequivocally match the data, and parsing will select one of the two times.
+During a read operation, incorrect dates and dates with times can be parsed with natural overflow or as null dates and times, without an error message.
+
+As an exception, parsing dates with times is also supported in Unix timestamp format, if it consists of exactly 10 decimal digits. The result is not time zone-dependent. The formats YYYY-MM-DD hh:mm:ss and NNNNNNNNNN are differentiated automatically.
+
+Strings are output with backslash-escaped special characters. The following escape sequences are used for output: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\'`, `\\`. Parsing also supports the sequences `\a`, `\v`, and `\xHH` (hex escape sequences) and any `\c` sequences, where `c` is any character (these sequences are converted to `c`). Thus, reading data supports formats where a line feed can be written as `\n` or `\`, or as a line feed. For example, the string `Hello world` with a line feed between the words instead of a space can be parsed in any of the following variations:
+
+``` text
+Hello\nworld
+
+Hello\
+world
+```
+
+The second variant is supported because MySQL uses it when writing tab-separated dumps.
+
+The minimum set of characters that you need to escape when passing data in TabSeparated format: tab, line feed (LF) and backslash.
+
+Only a small set of symbols are escaped. You can easily stumble onto a string value that your terminal will ruin in output.
+
+Arrays are written as a list of comma-separated values in square brackets. Number items in the array are formatted as normal. `Date` and `DateTime` types are written in single quotes. Strings are written in single quotes with the same escaping rules as above.
+
+[NULL](../sql_reference/syntax.md) is formatted as `\N`.
+
+Each element of [Nested](../sql_reference/data_types/nested_data_structures/nested.md) structures is represented as an array.
+
+For example:
+
+``` sql
+CREATE TABLE nestedt
+(
+    `id` UInt8,
+    `aux` Nested(
+        a UInt8,
+        b String
+    )
+)
+ENGINE = TinyLog
+```
+
+``` sql
+INSERT INTO nestedt Values ( 1, [1], ['a'])
+```
+
+``` sql
+SELECT * FROM nestedt FORMAT TSV
+```
+
+``` text
+1  [1]    ['a']
+```
+
+## TabSeparatedRaw {#tabseparatedraw}
+
+Differs from the `TabSeparated` format in that the rows are written without escaping.
+This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert into a table).
+
+This format is also available under the name `TSVRaw`.
+
+## TabSeparatedWithNames {#tabseparatedwithnames}
+
+Differs from the `TabSeparated` format in that the column names are written in the first row.
+During parsing, the first row is completely ignored. You can't use column names to determine their position or to check their correctness.
+(Support for parsing the header row may be added in the future.)
+
+This format is also available under the name `TSVWithNames`.
+
+## TabSeparatedWithNamesAndTypes {#tabseparatedwithnamesandtypes}
+
+Differs from the `TabSeparated` format in that the column names are written in the first row, while the column types are in the second row.
+During parsing, the first and second rows are completely ignored.
+
+This format is also available under the name `TSVWithNamesAndTypes`.
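+
+For a quick look at the header layout, here is a sketch that should run on any server (the column aliases are arbitrary):
+
+``` bash
+clickhouse-client --query "SELECT 1 AS x, 'hello' AS s FORMAT TSVWithNamesAndTypes"
+# Expected tab-separated output: a names row, a types row, then the data:
+# x      s
+# UInt8  String
+# 1      hello
+```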
+- `totals` is the row with total values in `format_template_row` format (when using WITH TOTALS)
+- `min` is the row with minimum values in `format_template_row` format (when extremes are set to 1)
+- `max` is the row with maximum values in `format_template_row` format (when extremes are set to 1)
+- `rows` is the total number of output rows
+- `rows_before_limit` is the minimal number of rows there would have been without LIMIT. Output only if the query contains LIMIT. If the query contains GROUP BY, `rows_before_limit_at_least` is the exact number of rows there would have been without a LIMIT.
+- `time` is the request execution time in seconds
+- `rows_read` is the number of rows that have been read
+- `bytes_read` is the number of bytes (uncompressed) that have been read
+
+The placeholders `data`, `totals`, `min` and `max` must not have an escaping rule specified (or `None` must be specified explicitly). The remaining placeholders may have any escaping rule specified.
+If the `format_template_resultset` setting is an empty string, `${data}` is used as the default value.
+For insert queries, the format allows skipping some columns or some fields if a prefix or suffix is present (see example).
+
+Select example:
+
+``` sql
+SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 FORMAT Template SETTINGS
+format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = '\n    '
+```
+
+`/some/path/resultset.format`:
+
+``` text
+<!DOCTYPE HTML>
+<html> <head> <title>Search phrases</title> </head>
+ <body>
+  <table border="1"> <caption>Search phrases</caption>
+    <tr> <th>Search phrase</th> <th>Count</th> </tr>
+    ${data}
+  </table>
+  <table border="1"> <caption>Max</caption>
+    ${max}
+  </table>
+  <b>Processed ${rows_read:XML} rows in ${time:XML} sec</b>
+ </body>
+</html>
+```
+
+`/some/path/row.format`:
+
+``` text
+<tr> <td>${0:XML}</td> <td>${1:XML}</td> </tr>
+```
+
+Result:
+
+``` html
+<!DOCTYPE HTML>
+<html> <head> <title>Search phrases</title> </head>
+ <body>
+  <table border="1"> <caption>Search phrases</caption>
+    <tr> <th>Search phrase</th> <th>Count</th> </tr>
+    <tr> <td></td> <td>8267016</td> </tr>
+    <tr> <td>bathroom interior design</td> <td>2166</td> </tr>
+    <tr> <td>yandex</td> <td>1655</td> </tr>
+    <tr> <td>spring 2014 fashion</td> <td>1549</td> </tr>
+    <tr> <td>freeform photos</td> <td>1480</td> </tr>
+  </table>
+  <table border="1"> <caption>Max</caption>
+    <tr> <td></td> <td>8873898</td> </tr>
+  </table>
+  <b>Processed 3095973 rows in 0.1569913 sec</b>
+ </body>
+</html>
+```
+
+Insert example:
+
+``` text
+Some header
+Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1
+Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1
+Total rows: 2
+```
+
+``` sql
+INSERT INTO UserActivity FORMAT Template SETTINGS
+format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format'
+```
+
+`/some/path/resultset.format`:
+
+``` text
+Some header\n${data}\nTotal rows: ${:CSV}\n
+```
+
+`/some/path/row.format`:
+
+``` text
+Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV}
+```
+
+`PageViews`, `UserID`, `Duration` and `Sign` inside the placeholders are names of columns in the table. Values after `Useless field` in rows and after `\nTotal rows:` in the suffix will be ignored.
+All delimiters in the input data must be strictly equal to the delimiters in the specified format strings.
+
+## TemplateIgnoreSpaces {#templateignorespaces}
+
+This format is suitable only for input.
+Similar to `Template`, but skips whitespace characters between delimiters and values in the input stream. However, if format strings contain whitespace characters, these characters will be expected in the input stream. Also allows specifying empty placeholders (`${}` or `${:None}`) to split a delimiter into separate parts to ignore spaces between them. Such placeholders are used only for skipping whitespace characters.
+It's possible to read `JSON` using this format if the values of columns have the same order in all rows. For example, the following request can be used for inserting data from the output example of the [JSON](#json) format:
+
+``` sql
+INSERT INTO table_name FORMAT TemplateIgnoreSpaces SETTINGS
+format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = ','
+```
+
+`/some/path/resultset.format`:
+
+``` text
+{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}}
+```
+
+`/some/path/row.format`:
+
+``` text
+{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}}
+```
+
+## TSKV {#tskv}
+
+Similar to TabSeparated, but outputs a value in name=value format. Names are escaped the same way as in the TabSeparated format, and the = symbol is also escaped.
+
+``` text
+SearchPhrase=   count()=8267016
+SearchPhrase=bathroom interior design   count()=2166
+SearchPhrase=yandex   count()=1655
+SearchPhrase=2014 spring fashion   count()=1549
+SearchPhrase=freeform photos   count()=1480
+SearchPhrase=angelina jolie   count()=1245
+SearchPhrase=omsk   count()=1112
+SearchPhrase=photos of dog breeds   count()=1091
+SearchPhrase=curtain designs   count()=1064
+SearchPhrase=baku   count()=1000
+```
+
+[NULL](../sql_reference/syntax.md) is formatted as `\N`.
+
+``` sql
+SELECT * FROM t_null FORMAT TSKV
+```
+
+``` text
+x=1    y=\N
+```
+
+When there is a large number of small columns, this format is ineffective, and there is generally no reason to use it. Nevertheless, it is no worse than JSONEachRow in terms of efficiency.
+
+Both data output and parsing are supported in this format. For parsing, any order is supported for the values of different columns. It is acceptable for some values to be omitted – they are treated as equal to their default values. In this case, zeros and blank rows are used as default values. Complex values that could be specified in the table are not supported as defaults.
+
+Parsing allows the presence of the additional field `tskv` without the equal sign or a value. This field is ignored.
+
+## CSV {#csv}
+
+Comma Separated Values format ([RFC](https://tools.ietf.org/html/rfc4180)).
+
+When formatting, strings are enclosed in double quotes. A double quote inside a string is output as two double quotes in a row. There are no other rules for escaping characters. Date and date-time are enclosed in double quotes. Numbers are output without quotes. Values are separated by a delimiter character, which is `,` by default. The delimiter character is defined in the setting [format\_csv\_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter). Rows are separated using the Unix line feed (LF). Arrays are serialized in CSV as follows: first, the array is serialized to a string as in the TabSeparated format, and then the resulting string is output to CSV in double quotes. Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost).
+
+``` bash
+$ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv
+```
+
+\* By default, the delimiter is `,`. See the [format\_csv\_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter) setting for more information.
+
+When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Rows can also be arranged without quotes. In this case, they are parsed up to the delimiter character or line feed (CR or LF). In violation of the RFC, when parsing rows without quotes, the leading and trailing spaces and tabs are ignored. For the line feed, Unix (LF), Windows (CR LF) and Mac OS Classic (CR LF) types are all supported.
+
+Empty unquoted input values are replaced with default values for the respective columns, if
+[input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)
+is enabled.
+
+`NULL` is formatted as `\N` or `NULL` or an empty unquoted string (see the settings [input\_format\_csv\_unquoted\_null\_literal\_as\_null](../operations/settings/settings.md#settings-input_format_csv_unquoted_null_literal_as_null) and [input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)).
+
+The CSV format supports the output of totals and extremes the same way as `TabSeparated`.
+
+## CSVWithNames {#csvwithnames}
+
+Also prints the header row, similar to `TabSeparatedWithNames`.
+
+## CustomSeparated {#format-customseparated}
+
+Similar to [Template](#format-template), but it prints or reads all columns and uses the escaping rule from the setting `format_custom_escaping_rule` and delimiters from the settings `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` and `format_custom_result_after_delimiter`, not from format strings.
+There is also the `CustomSeparatedIgnoreSpaces` format, which is similar to `TemplateIgnoreSpaces`.
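+
+A minimal sketch of how these settings combine, assuming a locally running server (the delimiter and the escaping rule chosen here are arbitrary illustrations, not defaults, and the output line shows what this should print):
+
+``` bash
+# Fields are separated by ';' and escaped with the CSV rules,
+# so the string value comes out double-quoted.
+$ clickhouse-client \
+    --format_custom_escaping_rule='CSV' \
+    --format_custom_field_delimiter=';' \
+    --query="SELECT 1 AS x, 'hello' AS y FORMAT CustomSeparated"
+1;"hello"
+```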
+
+## JSON {#json}
+
+Outputs data in JSON format. Besides data tables, it also outputs column names and types, along with some additional information: the total number of output rows, and the number of rows that could have been output if there weren't a LIMIT. Example:
+
+``` sql
+SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTALS ORDER BY c DESC LIMIT 5 FORMAT JSON
+```
+
+``` json
+{
+    "meta":
+    [
+        {
+            "name": "SearchPhrase",
+            "type": "String"
+        },
+        {
+            "name": "c",
+            "type": "UInt64"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "SearchPhrase": "",
+            "c": "8267016"
+        },
+        {
+            "SearchPhrase": "bathroom interior design",
+            "c": "2166"
+        },
+        {
+            "SearchPhrase": "yandex",
+            "c": "1655"
+        },
+        {
+            "SearchPhrase": "spring 2014 fashion",
+            "c": "1549"
+        },
+        {
+            "SearchPhrase": "freeform photos",
+            "c": "1480"
+        }
+    ],
+
+    "totals":
+    {
+        "SearchPhrase": "",
+        "c": "8873898"
+    },
+
+    "extremes":
+    {
+        "min":
+        {
+            "SearchPhrase": "",
+            "c": "1480"
+        },
+        "max":
+        {
+            "SearchPhrase": "",
+            "c": "8267016"
+        }
+    },
+
+    "rows": 5,
+
+    "rows_before_limit_at_least": 141137
+}
+```
+
+The JSON is compatible with JavaScript. To ensure this, some characters are additionally escaped: the slash `/` is escaped as `\/`; alternative line breaks `U+2028` and `U+2029`, which break some browsers, are escaped as `\uXXXX`. ASCII control characters are escaped: backspace, form feed, line feed, carriage return, and horizontal tab are replaced with `\b`, `\f`, `\n`, `\r`, `\t`, as well as the remaining bytes in the 00-1F range using `\uXXXX` sequences. Invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences. For compatibility with JavaScript, Int64 and UInt64 integers are enclosed in double quotes by default. To remove the quotes, you can set the configuration parameter [output\_format\_json\_quote\_64bit\_integers](../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) to 0.
+
+`rows` – The total number of output rows.
+
+`rows_before_limit_at_least` The minimal number of rows there would have been without LIMIT. Output only if the query contains LIMIT.
+If the query contains GROUP BY, `rows_before_limit_at_least` is the exact number of rows there would have been without a LIMIT.
+
+`totals` – Total values (when using WITH TOTALS).
+
+`extremes` – Extreme values (when extremes are set to 1).
+
+This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert in a table).
+
+ClickHouse supports [NULL](../sql_reference/syntax.md), which is displayed as `null` in the JSON output.
+
+See also the [JSONEachRow](#jsoneachrow) format.
+
+## JSONCompact {#jsoncompact}
+
+Differs from JSON only in that data rows are output in arrays, not in objects.
+
+Example:
+
+``` json
+{
+    "meta":
+    [
+        {
+            "name": "SearchPhrase",
+            "type": "String"
+        },
+        {
+            "name": "c",
+            "type": "UInt64"
+        }
+    ],
+
+    "data":
+    [
+        ["", "8267016"],
+        ["bathroom interior design", "2166"],
+        ["yandex", "1655"],
+        ["fashion trends spring 2014", "1549"],
+        ["freeform photo", "1480"]
+    ],
+
+    "totals": ["","8873898"],
+
+    "extremes":
+    {
+        "min": ["","1480"],
+        "max": ["","8267016"]
+    },
+
+    "rows": 5,
+
+    "rows_before_limit_at_least": 141137
+}
+```
+
+This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert in a table).
+See also the `JSONEachRow` format.
+
+## JSONEachRow {#jsoneachrow}
+
+When using this format, ClickHouse outputs rows as separated, newline-delimited JSON objects, but the data as a whole is not valid JSON.
+
+``` json
+{"SearchPhrase":"curtain designs","count()":"1064"}
+{"SearchPhrase":"baku","count()":"1000"}
+{"SearchPhrase":"","count()":"8267016"}
+```
+
+When inserting the data, you should provide a separate JSON object for each row.
+
+### Inserting Data {#inserting-data}
+
+``` sql
+INSERT INTO UserActivity FORMAT JSONEachRow {"PageViews":5, "UserID":"4324182021466249494", "Duration":146,"Sign":-1} {"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1}
+```
+
+ClickHouse allows:
+
+- Any order of key-value pairs in the object.
+- Omitting some values.
+
+ClickHouse ignores spaces between elements and commas after the objects. You can pass all the objects in one line. You don't have to separate them with line breaks.
+
+**Omitted values processing**
+
+ClickHouse substitutes omitted values with the default values for the corresponding [data types](../sql_reference/data_types/index.md).
+
+If `DEFAULT expr` is specified, ClickHouse uses different substitution rules depending on the [input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) setting.
+
+Consider the following table:
+
+``` sql
+CREATE TABLE IF NOT EXISTS example_table
+(
+    x UInt32,
+    a DEFAULT x * 2
+) ENGINE = Memory;
+```
+
+- If `input_format_defaults_for_omitted_fields = 0`, then the default value for `x` and `a` equals `0` (as the default value for the `UInt32` data type).
+- If `input_format_defaults_for_omitted_fields = 1`, then the default value for `x` equals `0`, but the default value of `a` equals `x * 2`.
+
+!!! note "Warning"
+    When inserting data with `insert_sample_with_metadata = 1`, ClickHouse consumes more computational resources, compared to insertion with `insert_sample_with_metadata = 0`.
+
+### Selecting Data {#selecting-data}
+
+Consider the `UserActivity` table as an example:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+The query `SELECT * FROM UserActivity FORMAT JSONEachRow` returns:
+
+``` text
+{"UserID":"4324182021466249494","PageViews":5,"Duration":146,"Sign":-1}
+{"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1}
+```
+
+Unlike the [JSON](#json) format, there is no substitution of invalid UTF-8 sequences. Values are escaped in the same way as for `JSON`.
+
+!!! note "Note"
+    Any set of bytes can be output in the strings. Use the `JSONEachRow` format if you are sure that the data in the table can be formatted as JSON without losing any information.
+
+### Usage of Nested Structures {#jsoneachrow-nested}
+
+If you have a table with [Nested](../sql_reference/data_types/nested_data_structures/nested.md) data type columns, you can insert JSON data with the same structure. Enable this feature with the [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) setting.
+
+For example, consider the following table:
+
+``` sql
+CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory
+```
+
+As you can see in the `Nested` data type description, ClickHouse treats each component of the nested structure as a separate column (`n.s` and `n.i` for our table).
+You can insert data as follows:
+
+``` sql
+INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]}
+```
+
+To insert data as a hierarchical JSON object, set [input\_format\_import\_nested\_json = 1](../operations/settings/settings.md#settings-input_format_import_nested_json).
+
+``` json
+{
+    "n": {
+        "s": ["abc", "def"],
+        "i": [1, 23]
+    }
+}
+```
+
+Without this setting, ClickHouse throws an exception.
+
+``` sql
+SELECT name, value FROM system.settings WHERE name = 'input_format_import_nested_json'
+```
+
+``` text
+┌─name────────────────────────────┬─value─┐
+│ input_format_import_nested_json │ 0     │
+└─────────────────────────────────┴───────┘
+```
+
+``` sql
+INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}}
+```
+
+``` text
+Code: 117. DB::Exception: Unknown field found while parsing JSONEachRow format: n: (at row 1)
+```
+
+``` sql
+SET input_format_import_nested_json=1
+INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}}
+SELECT * FROM json_each_row_nested
+```
+
+``` text
+┌─n.s───────────┬─n.i────┐
+│ ['abc','def'] │ [1,23] │
+└───────────────┴────────┘
+```
+
+## Native {#native}
+
+The most efficient format. Data is written and read by blocks in binary format. For each block, the number of rows, the number of columns, the column names and types, and the parts of columns in this block are recorded one after another. In other words, this format is “columnar” – it doesn't convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients.
+
+You can use this format to quickly generate dumps that can only be read by the ClickHouse DBMS. It doesn't make sense to work with this format yourself.
+
+## Null {#null}
+
+Nothing is output. However, the query is processed, and when using the command-line client, data is transmitted to the client. This is used for tests, including performance testing.
+Obviously, this format is only appropriate for output, not for parsing.
+
+## Pretty {#pretty}
+
+Outputs data as Unicode-art tables, also using ANSI escape sequences for setting colours in the terminal.
+A full grid of the table is drawn, and each row occupies two lines in the terminal.
+Each result block is output as a separate table. This is necessary so that blocks can be output without buffering results (buffering would be necessary in order to pre-calculate the visible width of all the values).
+
+[NULL](../sql_reference/syntax.md) is output as `ᴺᵁᴸᴸ`.
+
+Example (shown for the [PrettyCompact](#prettycompact) format):
+
+``` sql
+SELECT * FROM t_null
+```
+
+``` text
+┌─x─┬────y─┐
+│ 1 │ ᴺᵁᴸᴸ │
+└───┴──────┘
+```
+
+Rows are not escaped in Pretty\* formats. An example is shown for the [PrettyCompact](#prettycompact) format:
+
+``` sql
+SELECT 'String with \'quotes\' and \t character' AS Escaping_test
+```
+
+``` text
+┌─Escaping_test────────────────────────┐
+│ String with 'quotes' and      character │
+└──────────────────────────────────────┘
+```
+
+To avoid dumping too much data to the terminal, only the first 10,000 rows are printed. If the number of rows is greater than or equal to 10,000, the message “Showed first 10 000” is printed.
+This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert in a table).
+
+The Pretty format supports outputting total values (when using WITH TOTALS) and extremes (when ‘extremes’ is set to 1).
+In these cases, the total values and extreme values are output after the main data, in separate tables. Example (shown for the [PrettyCompact](#prettycompact) format):
+
+``` sql
+SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT PrettyCompact
+```
+
+``` text
+┌──EventDate─┬───────c─┐
+│ 2014-03-17 │ 1406958 │
+│ 2014-03-18 │ 1383658 │
+│ 2014-03-19 │ 1405797 │
+│ 2014-03-20 │ 1353623 │
+│ 2014-03-21 │ 1245779 │
+│ 2014-03-22 │ 1031592 │
+│ 2014-03-23 │ 1046491 │
+└────────────┴─────────┘
+
+Totals:
+┌──EventDate─┬───────c─┐
+│ 0000-00-00 │ 8873898 │
+└────────────┴─────────┘
+
+Extremes:
+┌──EventDate─┬───────c─┐
+│ 2014-03-17 │ 1031592 │
+│ 2014-03-23 │ 1406958 │
+└────────────┴─────────┘
+```
+
+## PrettyCompact {#prettycompact}
+
+Differs from [Pretty](#pretty) in that the grid is drawn between rows and the result is more compact.
+This format is used by default in the command-line client in interactive mode.
+
+## PrettyCompactMonoBlock {#prettycompactmonoblock}
+
+Differs from [PrettyCompact](#prettycompact) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
+
+## PrettyNoEscapes {#prettynoescapes}
+
+Differs from Pretty in that ANSI escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the ‘watch’ command-line utility.
+
+Example:
+
+``` bash
+$ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FORMAT PrettyCompactNoEscapes'"
+```
+
+You can use the HTTP interface for displaying it in the browser.
+
+### PrettyCompactNoEscapes {#prettycompactnoescapes}
+
+The same as the previous setting.
+
+### PrettySpaceNoEscapes {#prettyspacenoescapes}
+
+The same as the previous setting.
+
+## PrettySpace {#prettyspace}
+
+Differs from [PrettyCompact](#prettycompact) in that whitespace (space characters) is used instead of the grid.
+
+## RowBinary {#rowbinary}
+
+Formats and parses data by row in binary format. Rows and values are listed consecutively, without separators. (A byte-level sketch is shown after the next section.)
+This format is less efficient than the Native format since it is row-based.
+
+Integers use fixed-length little-endian representation. For example, UInt64 uses 8 bytes.
+DateTime is represented as UInt32 containing the Unix timestamp as the value.
+Date is represented as a UInt16 object that contains the number of days since 1970-01-01 as the value.
+String is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by the bytes of the string.
+FixedString is represented simply as a sequence of bytes.
+
+Array is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by successive elements of the array.
+
+For [NULL](../sql_reference/syntax.md#null-literal) support, an additional byte containing 1 or 0 is added before each [Nullable](../sql_reference/data_types/nullable.md) value. If 1, then the value is `NULL` and this byte is interpreted as a separate value. If 0, the value after the byte is not `NULL`.
+
+## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}
+
+Similar to [RowBinary](#rowbinary), but with an added header:
+
+- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
+- N `String`s specifying column names
+- N `String`s specifying column types
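+
+Because the encoding is fixed, RowBinary output is easy to inspect byte by byte. A minimal sketch, assuming a locally running server and the common `xxd` hex-dump utility:
+
+``` bash
+# UInt8 occupies exactly one byte per value in RowBinary, with no delimiters,
+# so the three values 1, 2, 3 are emitted as exactly three bytes.
+$ clickhouse-client --query="SELECT toUInt8(number + 1) FROM system.numbers LIMIT 3 FORMAT RowBinary" | xxd -p
+010203
+```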
+
+## Values {#data-format-values}
+
+Prints every row in brackets. Rows are separated by commas. There is no comma after the last row. The values inside the brackets are also comma-separated. Numbers are output in decimal format without quotes. Arrays are output in square brackets. Strings, dates, and dates with times are output in quotes. Escaping rules and parsing are similar to the [TabSeparated](#tabseparated) format. During formatting, extra spaces aren't inserted, but during parsing, they are allowed and skipped (except for spaces inside array values, which aren't allowed). [NULL](../sql_reference/syntax.md) is represented as `NULL`.
+
+The minimum set of characters that you need to escape when passing data in Values format: single quotes and backslashes.
+
+This is the format that is used in `INSERT INTO t VALUES ...`, but you can also use it for formatting query results.
+
+See also: the [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) and [input\_format\_values\_deduce\_templates\_of\_expressions](../operations/settings/settings.md#settings-input_format_values_deduce_templates_of_expressions) settings.
+
+## Vertical {#vertical}
+
+Prints each value on a separate line with the column name specified. This format is convenient for printing just one or a few rows if each row consists of a large number of columns.
+
+[NULL](../sql_reference/syntax.md) is output as `ᴺᵁᴸᴸ`.
+
+Example:
+
+``` sql
+SELECT * FROM t_null FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+x: 1
+y: ᴺᵁᴸᴸ
+```
+
+Rows are not escaped in Vertical format:
+
+``` sql
+SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+test: string with 'quotes' and      with some special
+ characters
+```
+
+This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert in a table).
+
+## VerticalRaw {#verticalraw}
+
+Similar to [Vertical](#vertical), but with escaping disabled. This format is only suitable for outputting query results, not for parsing (receiving data and inserting it in the table).
+
+## XML {#xml}
+
+The XML format is suitable only for output, not for parsing. Example:
+
+``` xml
+<?xml version='1.0' encoding='UTF-8' ?>
+<result>
+    <meta>
+        <columns>
+            <column>
+                <name>SearchPhrase</name>
+                <type>String</type>
+            </column>
+            <column>
+                <name>count()</name>
+                <type>UInt64</type>
+            </column>
+        </columns>
+    </meta>
+    <data>
+        <row>
+            <SearchPhrase></SearchPhrase>
+            <field>8267016</field>
+        </row>
+        <row>
+            <SearchPhrase>bathroom interior design</SearchPhrase>
+            <field>2166</field>
+        </row>
+        <row>
+            <SearchPhrase>yandex</SearchPhrase>
+            <field>1655</field>
+        </row>
+        <row>
+            <SearchPhrase>2014 spring fashion</SearchPhrase>
+            <field>1549</field>
+        </row>
+        <row>
+            <SearchPhrase>freeform photos</SearchPhrase>
+            <field>1480</field>
+        </row>
+        <row>
+            <SearchPhrase>angelina jolie</SearchPhrase>
+            <field>1245</field>
+        </row>
+        <row>
+            <SearchPhrase>omsk</SearchPhrase>
+            <field>1112</field>
+        </row>
+        <row>
+            <SearchPhrase>photos of dog breeds</SearchPhrase>
+            <field>1091</field>
+        </row>
+        <row>
+            <SearchPhrase>curtain designs</SearchPhrase>
+            <field>1064</field>
+        </row>
+        <row>
+            <SearchPhrase>baku</SearchPhrase>
+            <field>1000</field>
+        </row>
+    </data>
+    <rows>10</rows>
+    <rows_before_limit_at_least>141137</rows_before_limit_at_least>
+</result>
+```
+
+If the column name does not have an acceptable format, just ‘field’ is used as the element name. In general, the XML structure follows the JSON structure.
+Just as for JSON, invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences.
+
+In string values, the characters `<` and `&` are escaped as `&lt;` and `&amp;`.
+
+Arrays are output as `<array><elem>Hello</elem><elem>World</elem>...</array>`, and tuples as `<tuple><elem>Hello</elem><elem>World</elem>...</tuple>`.
+
+## CapnProto {#capnproto}
+
+Cap'n Proto is a binary message format similar to Protocol Buffers and Thrift, but not like JSON or MessagePack.
+
+Cap'n Proto messages are strictly typed and not self-describing, meaning they need an external schema description. The schema is applied on the fly and cached for each query.
+
+``` bash
+$ cat capnproto_messages.bin | clickhouse-client --query "INSERT INTO test.hits FORMAT CapnProto SETTINGS format_schema='schema:Message'"
+```
+
+Where `schema.capnp` looks like this:
+
+``` capnp
+struct Message {
+  SearchPhrase @0 :Text;
+  c @1 :Uint64;
+}
+```
+
+Deserialization is effective and usually doesn't increase the system load.
+
+See also [Format Schema](#formatschema).
+
+## Protobuf {#protobuf}
+
+Protobuf is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.
+
+This format requires an external format schema. The schema is cached between queries.
+ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.
+
+Usage examples:
+
+``` sql
+SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType'
+```
+
+``` bash
+cat protobuf_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema='schemafile:MessageType'"
+```
+
+where the file `schemafile.proto` looks like this:
+
+``` capnp
+syntax = "proto3";
+
+message MessageType {
+  string name = 1;
+  string surname = 2;
+  uint32 birthDate = 3;
+  repeated string phoneNumbers = 4;
+};
+```
+
+To find the correspondence between table columns and fields of the Protocol Buffers message type, ClickHouse compares their names.
+This comparison is case-insensitive, and the characters `_` (underscore) and `.` (dot) are considered equal.
+If the types of a column and a field of the Protocol Buffers message differ, the necessary conversion is applied.
+
+Nested messages are supported. For example, for the field `z` in the following message type
+
+``` capnp
+message MessageType {
+  message XType {
+    message YType {
+      int32 z;
+    };
+    repeated YType y;
+  };
+  XType x;
+};
+```
+
+ClickHouse tries to find a column named `x.y.z` (or `x_y_z` or `X.y_Z` and so on).
+Nested messages are suitable for input or output of [nested data structures](../sql_reference/data_types/nested_data_structures/nested.md).
+
+Default values defined in a protobuf schema like this
+
+``` capnp
+syntax = "proto2";
+
+message MessageType {
+  optional int32 result_per_page = 3 [default = 10];
+}
+```
+
+are not applied; the [table defaults](../sql_reference/statements/create.md#create-default-values) are used instead of them.
+
+ClickHouse inputs and outputs protobuf messages in the `length-delimited` format.
+It means that before every message, its length should be written as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
+See also [how to read/write length-delimited protobuf messages in popular languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).
+
+## Avro {#data-format-avro}
+
+[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache's Hadoop project.
+
+The ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files).
+
+### Data Types Matching {#data_types-matching}
+
+The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` and `SELECT` queries.
+
+| Avro data type `INSERT`                     | ClickHouse data type                                                                                               | Avro data type `SELECT`      |
+|---------------------------------------------|--------------------------------------------------------------------------------------------------------------------|------------------------------|
+| `boolean`, `int`, `long`, `float`, `double` | [Int(8/16/32)](../sql_reference/data_types/int_uint.md), [UInt(8/16/32)](../sql_reference/data_types/int_uint.md) | `int`                        |
+| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md)               | `long`                       |
+| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md)                                                                    | `float`                      |
+| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md)                                                                    | `double`                     |
+| `bytes`, `string`, `fixed`, `enum`          | [String](../sql_reference/data_types/string.md)                                                                    | `bytes`                      |
+| `bytes`, `string`, `fixed`                  | [FixedString(N)](../sql_reference/data_types/fixedstring.md)                                                       | `fixed(N)`                   |
+| `enum`                                      | [Enum(8/16)](../sql_reference/data_types/enum.md)                                                                  | `enum`                       |
+| `array(T)`                                  | [Array(T)](../sql_reference/data_types/array.md)                                                                   | `array(T)`                   |
+| `union(null, T)`, `union(T, null)`          | [Nullable(T)](../sql_reference/data_types/nullable.md)                                                             | `union(null, T)`             |
+| `null`                                      | [Nullable(Nothing)](../sql_reference/data_types/special_data_types/nothing.md)                                     | `null`                       |
+| `int (date)` \*                             | [Date](../sql_reference/data_types/date.md)                                                                        | `int (date)` \*              |
+| `long (timestamp-millis)` \*                | [DateTime64(3)](../sql_reference/data_types/datetime.md)                                                           | `long (timestamp-millis)` \* |
+| `long (timestamp-micros)` \*                | [DateTime64(6)](../sql_reference/data_types/datetime.md)                                                           | `long (timestamp-micros)` \* |
+
+\* [Avro logical types](http://avro.apache.org/docs/current/spec.html#Logical+Types)
+
+Unsupported Avro data types: `record` (non-root), `map`
+
+Unsupported Avro logical data types: `uuid`, `time-millis`, `time-micros`, `duration`
+
+### Inserting Data {#inserting-data-1}
+
+To insert data from an Avro file into a ClickHouse table:
+
+``` bash
+$ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avro"
+```
+
+The root schema of the input Avro file must be of `record` type.
+
+To find the correspondence between table columns and fields of the Avro schema, ClickHouse compares their names. This comparison is case-sensitive.
+Unused fields are skipped.
+
+Data types of ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) the data to the corresponding column type.
+
+### Selecting Data {#selecting-data-1}
+
+To select data from a ClickHouse table into an Avro file:
+
+``` bash
+$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro
+```
+
+Column names must:
+
+- start with `[A-Za-z_]`
+- subsequently contain only `[A-Za-z0-9_]`
+
+Output Avro file compression and sync interval can be configured with [output\_format\_avro\_codec](../operations/settings/settings.md#settings-output_format_avro_codec) and [output\_format\_avro\_sync\_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) respectively.
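+
+As a quick end-to-end check of the two commands above (a sketch; `some_table` and `some_table_copy` are placeholder names for tables with compatible structures):
+
+``` bash
+# Dump a table to an Avro file, then load the file into another table.
+$ clickhouse-client --query="SELECT * FROM some_table FORMAT Avro" > data.avro
+$ cat data.avro | clickhouse-client --query="INSERT INTO some_table_copy FORMAT Avro"
+```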
+
+## AvroConfluent {#data-format-avro-confluent}
+
+AvroConfluent supports decoding single-object Avro messages commonly used with [Kafka](https://kafka.apache.org/) and [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html).
+
+Each Avro message embeds a schema id that can be resolved to the actual schema with the help of the Schema Registry.
+
+Schemas are cached once resolved.
+
+The Schema Registry URL is configured with [format\_avro\_schema\_registry\_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url)
+
+### Data Types Matching {#data_types-matching-1}
+
+Same as [Avro](#data-format-avro)
+
+### Usage {#usage}
+
+To quickly verify schema resolution, you can use [kafkacat](https://github.com/edenhill/kafkacat) with [clickhouse-local](../operations/utilities/clickhouse-local.md):
+
+``` bash
+$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table'
+1 a
+2 b
+3 c
+```
+
+To use `AvroConfluent` with [Kafka](../engines/table_engines/integrations/kafka.md):
+
+``` sql
+CREATE TABLE topic1_stream
+(
+    field1 String,
+    field2 String
+)
+ENGINE = Kafka()
+SETTINGS
+kafka_broker_list = 'kafka-broker',
+kafka_topic_list = 'topic1',
+kafka_group_name = 'group1',
+kafka_format = 'AvroConfluent';
+
+SET format_avro_schema_registry_url = 'http://schema-registry';
+
+SELECT * FROM topic1_stream;
+```
+
+!!! note "Warning"
+    The setting `format_avro_schema_registry_url` needs to be configured in `users.xml` to maintain its value after a restart.
+
+## Parquet {#data-format-parquet}
+
+[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
+
+### Data Types Matching {#data_types-matching-2}
+
+The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` and `SELECT` queries.
+
+| Parquet data type (`INSERT`) | ClickHouse data type                                      | Parquet data type (`SELECT`) |
+|------------------------------|-----------------------------------------------------------|------------------------------|
+| `UINT8`, `BOOL`              | [UInt8](../sql_reference/data_types/int_uint.md)          | `UINT8`                      |
+| `INT8`                       | [Int8](../sql_reference/data_types/int_uint.md)           | `INT8`                       |
+| `UINT16`                     | [UInt16](../sql_reference/data_types/int_uint.md)         | `UINT16`                     |
+| `INT16`                      | [Int16](../sql_reference/data_types/int_uint.md)          | `INT16`                      |
+| `UINT32`                     | [UInt32](../sql_reference/data_types/int_uint.md)         | `UINT32`                     |
+| `INT32`                      | [Int32](../sql_reference/data_types/int_uint.md)          | `INT32`                      |
+| `UINT64`                     | [UInt64](../sql_reference/data_types/int_uint.md)         | `UINT64`                     |
+| `INT64`                      | [Int64](../sql_reference/data_types/int_uint.md)          | `INT64`                      |
+| `FLOAT`, `HALF_FLOAT`        | [Float32](../sql_reference/data_types/float.md)           | `FLOAT`                      |
+| `DOUBLE`                     | [Float64](../sql_reference/data_types/float.md)           | `DOUBLE`                     |
+| `DATE32`                     | [Date](../sql_reference/data_types/date.md)               | `UINT16`                     |
+| `DATE64`, `TIMESTAMP`        | [DateTime](../sql_reference/data_types/datetime.md)       | `UINT32`                     |
+| `STRING`, `BINARY`           | [String](../sql_reference/data_types/string.md)           | `STRING`                     |
+| —                            | [FixedString](../sql_reference/data_types/fixedstring.md) | `STRING`                     |
+| `DECIMAL`                    | [Decimal](../sql_reference/data_types/decimal.md)         | `DECIMAL`                    |
+
+ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
+
+Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+
+Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
+
+### Inserting and Selecting Data {#inserting-and-selecting-data}
+
+You can insert Parquet data from a file into a ClickHouse table by the following command:
+
+``` bash
+$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
+```
+
+You can select data from a ClickHouse table and save it into some file in the Parquet format by the following command:
+
+``` bash
+$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq}
+```
+
+To exchange data with Hadoop, you can use the [HDFS table engine](../engines/table_engines/integrations/hdfs.md).
+
+## ORC {#data-format-orc}
+
+[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.
+
+### Data Types Matching {#data_types-matching-3}
+
+The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` queries.
+
+| ORC data type (`INSERT`) | ClickHouse data type                                |
+|--------------------------|-----------------------------------------------------|
+| `UINT8`, `BOOL`          | [UInt8](../sql_reference/data_types/int_uint.md)    |
+| `INT8`                   | [Int8](../sql_reference/data_types/int_uint.md)     |
+| `UINT16`                 | [UInt16](../sql_reference/data_types/int_uint.md)   |
+| `INT16`                  | [Int16](../sql_reference/data_types/int_uint.md)    |
+| `UINT32`                 | [UInt32](../sql_reference/data_types/int_uint.md)   |
+| `INT32`                  | [Int32](../sql_reference/data_types/int_uint.md)    |
+| `UINT64`                 | [UInt64](../sql_reference/data_types/int_uint.md)   |
+| `INT64`                  | [Int64](../sql_reference/data_types/int_uint.md)    |
+| `FLOAT`, `HALF_FLOAT`    | [Float32](../sql_reference/data_types/float.md)     |
+| `DOUBLE`                 | [Float64](../sql_reference/data_types/float.md)     |
+| `DATE32`                 | [Date](../sql_reference/data_types/date.md)         |
+| `DATE64`, `TIMESTAMP`    | [DateTime](../sql_reference/data_types/datetime.md) |
+| `STRING`, `BINARY`       | [String](../sql_reference/data_types/string.md)     |
+| `DECIMAL`                | [Decimal](../sql_reference/data_types/decimal.md)   |
+
+ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
+
+Unsupported ORC data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+
+The data types of ClickHouse table columns don't have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
+
+### Inserting Data {#inserting-data-2}
+
+You can insert ORC data from a file into a ClickHouse table by the following command:
+
+``` bash
+$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
+```
+
+To exchange data with Hadoop, you can use the [HDFS table engine](../engines/table_engines/integrations/hdfs.md).
+
+## Format Schema {#formatschema}
+
+The file name containing the format schema is set by the setting `format_schema`.
+It's required to set this setting when one of the formats `Cap'n Proto` or `Protobuf` is used.
+The format schema is a combination of a file name and the name of a message type in this file, delimited by a colon,
+e.g. `schemafile.proto:MessageType`.
+If the file has the standard extension for the format (for example, `.proto` for `Protobuf`),
+it can be omitted, and in this case the format schema looks like `schemafile:MessageType`.
+
+If you input or output data via the [client](../interfaces/cli.md) in the [interactive mode](../interfaces/cli.md#cli_usage), the file name specified in the format schema
+can contain an absolute path or a path relative to the current directory on the client.
+If you use the client in the [batch mode](../interfaces/cli.md#cli_usage), the path to the schema must be relative, due to security reasons.
+
+If you input or output data via the [HTTP interface](../interfaces/http.md), the file name specified in the format schema
+should be located in the directory specified in [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path)
+in the server configuration.
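+
+For example, the two invocations below are equivalent, since `.proto` is the standard extension for `Protobuf` (a sketch; `schemafile.proto`, `MessageType`, `some_table` and `messages.bin` are placeholders as in the Protobuf examples above):
+
+``` bash
+$ clickhouse-client --query="INSERT INTO some_table FORMAT Protobuf SETTINGS format_schema='schemafile.proto:MessageType'" < messages.bin
+$ clickhouse-client --query="INSERT INTO some_table FORMAT Protobuf SETTINGS format_schema='schemafile:MessageType'" < messages.bin
+```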
+
+## Skipping Errors {#skippingerrors}
+
+Some formats such as `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` and `Protobuf` can skip a broken row if a parsing error occurred and continue parsing from the beginning of the next row. See the [input\_format\_allow\_errors\_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) and
+[input\_format\_allow\_errors\_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) settings.
+Limitations:
+- In case of a parsing error, `JSONEachRow` skips all data until the new line (or EOF), so rows must be delimited by `\n` to count errors correctly.
+- `Template` and `CustomSeparated` use the delimiter after the last column and the delimiter between rows to find the beginning of the next row, so skipping errors works only if at least one of them is not empty.
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/formats/)
diff --git a/docs/tr/interfaces/http.md b/docs/tr/interfaces/http.md
new file mode 100644
index 00000000000..a30e05418c8
--- /dev/null
+++ b/docs/tr/interfaces/http.md
@@ -0,0 +1,511 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 19
+toc_title: "HTTP aray\xFCz\xFC"
+---
+
+# HTTP arayüzü {#http-interface}
+
+HTTP arayüzü, herhangi bir programlama dilinden herhangi bir platformda Clickhouse'u kullanmanızı sağlar. Java ve Perl'den ve kabuk komut dosyalarından çalışmak için kullanıyoruz. Diğer bölümlerde, HTTP arayüzü Perl, Python ve Go'dan kullanılır. HTTP arabirimi yerel arabirimden daha sınırlıdır, ancak daha iyi uyumluluğa sahiptir.
+
+Varsayılan olarak, clickhouse-server, 8123 numaralı bağlantı noktasında HTTP dinler (bu, yapılandırmada değiştirilebilir).
+
+Parametreler olmadan bir GET / request yaparsanız, 200 yanıt kodunu ve tanımlanan dizeyi döndürür [http\_server\_default\_response](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-http_server_default_response) varsayılan değer “Ok.” (sonunda bir çizgi besleme ile)
+
+``` bash
+$ curl 'http://localhost:8123/'
+Ok.
+```
+
+Sağlık kontrol komut GET / ping isteği kullanın. Bu işleyici her zaman döner “Ok.” (sonunda bir çizgi besleme ile). 18.12.13 sürümünden edinilebilir.
+
+``` bash
+$ curl 'http://localhost:8123/ping'
+Ok.
+```
+
+İsteği URL olarak gönder ‘query’ parametre veya bir POST olarak. Veya sorgunun başlangıcını gönder ‘query’ parametre ve postadaki geri kalanı (bunun neden gerekli olduğunu daha sonra açıklayacağız). URL'nin boyutu 16 KB ile sınırlıdır, bu nedenle büyük sorgular gönderirken bunu aklınızda bulundurun.
+
+Başarılı olursa, 200 yanıt Kodu ve yanıt gövdesinde sonucu alırsınız.
+Bir hata oluşursa, 500 yanıt Kodu ve yanıt gövdesinde bir hata açıklaması metni alırsınız.
+
+GET yöntemini kullanırken, ‘readonly’ ayarlanmıştır. Başka bir deyişle, verileri değiştiren sorgular için yalnızca POST yöntemini kullanabilirsiniz. Sorgunun kendisini POST gövdesinde veya URL parametresinde gönderebilirsiniz.
+ +Örnekler: + +``` bash +$ curl 'http://localhost:8123/?query=SELECT%201' +1 + +$ wget -O- -q 'http://localhost:8123/?query=SELECT 1' +1 + +$ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123 +HTTP/1.0 200 OK +Date: Wed, 27 Nov 2019 10:30:18 GMT +Connection: Close +Content-Type: text/tab-separated-values; charset=UTF-8 +X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal +X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f +X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"} + +1 +``` + +Gördüğünüz gibi, curl, boşlukların URL'den kaçması gerektiği konusunda biraz rahatsız edici. +Her ne kadar wget her şeyden kaçsa da, onu kullanmanızı önermiyoruz çünkü keep-alive ve Transfer-Encoding: chunked kullanırken HTTP 1.1 üzerinde iyi çalışmıyor. + +``` bash +$ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @- +1 + +$ echo 'SELECT 1' | curl 'http://localhost:8123/?query=' --data-binary @- +1 + +$ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @- +1 +``` + +Sorgunun bir parçası parametrede gönderilirse ve gönderinin bir parçası ise, bu iki veri parçası arasına bir satır akışı eklenir. +Örnek (bu işe yaramaz): + +``` bash +$ echo 'ECT 1' | curl 'http://localhost:8123/?query=SEL' --data-binary @- +Code: 59, e.displayText() = DB::Exception: Syntax error: failed at position 0: SEL +ECT 1 +, expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception +``` + +Varsayılan olarak, veri TabSeparated biçiminde döndürülür (daha fazla bilgi için bkz: “Formats” bölme). +Başka bir biçim istemek için sorgunun biçim yan tümcesi kullanın. + +``` bash +$ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @- +┏━━━┓ +┃ 1 ┃ +┡━━━┩ +│ 1 │ +└───┘ +``` + +Ekleme sorguları için veri iletmenin POST yöntemi gereklidir. Bu durumda, URL parametresinde sorgunun başlangıcını yazabilir ve eklemek için verileri iletmek için POST'u kullanabilirsiniz. Eklenecek veriler, örneğin Mysql'den sekmeyle ayrılmış bir döküm olabilir. Bu şekilde, INSERT sorgusu MYSQL'DEN load DATA LOCAL INFİLE'IN yerini alır. + +Örnekler: tablo oluşturma: + +``` bash +$ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @- +``` + +Veri ekleme için tanıdık ekleme sorgusunu kullanma: + +``` bash +$ echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @- +``` + +Veriler sorgudan ayrı olarak gönderilebilir: + +``` bash +$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- +``` + +Herhangi bir veri biçimini belirtebilirsiniz. Bu ‘Values’ biçim, T değerlerine INSERT yazarken kullanılanla aynıdır: + +``` bash +$ echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @- +``` + +Sekmeyle ayrılmış bir dökümden veri eklemek için ilgili biçimi belirtin: + +``` bash +$ echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @- +``` + +Tablo içeriğini okuma. Paralel sorgu işleme nedeniyle veriler rastgele sırayla çıktılanır: + +``` bash +$ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t' +7 +8 +9 +10 +11 +12 +1 +2 +3 +4 +5 +6 +``` + +Tabloyu silme. 
+ +``` bash +$ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- +``` + +Veri tablosu döndürmeyen başarılı istekler için boş bir yanıt gövdesi döndürülür. + +Veri iletirken dahili ClickHouse sıkıştırma formatını kullanabilirsiniz. Sıkıştırılmış veriler standart olmayan bir biçime sahiptir ve özel `clickhouse-compressor` onunla çalışmak için program (bu ile yüklü `clickhouse-client` paket). Veri ekleme verimliliğini artırmak için, sunucu tarafı sağlama toplamı doğrulamasını kullanarak devre dışı bırakabilirsiniz. [http\_native\_compression\_disable\_checksumming\_on\_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) ayar. + +Belirt ift ifiyseniz `compress=1` URL'de, sunucu size gönderdiği verileri sıkıştırır. +Belirt ift ifiyseniz `decompress=1` URL'de, sunucu içinde geçirdiğiniz aynı verileri açar. `POST` yöntem. + +Ayrıca kullanmayı seçebilirsiniz [HTTP sıkıştırma](https://en.wikipedia.org/wiki/HTTP_compression). Sıkıştırılmış bir göndermek için `POST` istek, istek başlığını Ekle `Content-Encoding: compression_method`. Clickhouse'un yanıtı sıkıştırması için şunları eklemelisiniz `Accept-Encoding: compression_method`. ClickHouse destekler `gzip`, `br`, ve `deflate` [sıkıştırma yöntemleri](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens). HTTP sıkıştırmasını etkinleştirmek için Clickhouse'u kullanmanız gerekir [enable\_http\_compression](../operations/settings/settings.md#settings-enable_http_compression) ayar. Veri sıkıştırma düzeyini [http\_zlib\_compression\_level](#settings-http_zlib_compression_level) tüm sıkıştırma yöntemleri için ayarlama. + +Bunu, büyük miktarda veri iletirken ağ trafiğini azaltmak veya hemen sıkıştırılmış dökümler oluşturmak için kullanabilirsiniz. + +Sıkıştırma ile veri gönderme örnekleri: + +``` bash +#Sending data to the server: +$ curl -vsS "http://localhost:8123/?enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10' -H 'Accept-Encoding: gzip' + +#Sending data to the client: +$ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gzip' 'http://localhost:8123/' +``` + +!!! note "Not" + Bazı HTTP istemcileri varsayılan olarak sunucudan verileri açabilir ( `gzip` ve `deflate`) ve sıkıştırma ayarlarını doğru kullansanız bile sıkıştırılmış veriler alabilirsiniz. + +Kullanabilirsiniz ‘database’ Varsayılan veritabanını belirtmek için URL parametresi. + +``` bash +$ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @- +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +``` + +Varsayılan olarak, sunucu ayarlarında kayıtlı veritabanı varsayılan veritabanı olarak kullanılır. Varsayılan olarak, bu veritabanı denir ‘default’. Alternatif olarak, her zaman tablo adından önce bir nokta kullanarak veritabanını belirtebilirsiniz. + +Kullanıcı adı ve şifre üç yoldan biriyle belirtilebilir: + +1. HTTP temel kimlik doğrulamasını kullanma. Örnek: + + + +``` bash +$ echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @- +``` + +1. İn the ‘user’ ve ‘password’ URL parametreleri. Örnek: + + + +``` bash +$ echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @- +``` + +1. Kullanım ‘X-ClickHouse-User’ ve ‘X-ClickHouse-Key’ üstbilgi. Örnek: + + + +``` bash +$ echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: password' 'http://localhost:8123/' -d @- +``` + +Kullanıcı adı belirtilmemişse, `default` adı kullanılır. 
Parola belirtilmezse, boş parola kullanılır. +Tek bir sorguyu veya ayarların tüm profillerini işlemek için herhangi bir ayar belirtmek için URL parametrelerini de kullanabilirsiniz. Örnek: http: / / localhost: 8123/?profil = web & max\_rows\_to\_read = 1000000000 & query = seç + 1 + +Daha fazla bilgi için, bkz: [Ayarlar](../operations/settings/index.md) bölme. + +``` bash +$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' --data-binary @- +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +``` + +Diğer parametreler hakkında bilgi için bölüme bakın “SET”. + +Benzer şekilde, http protokolünde ClickHouse oturumlarını kullanabilirsiniz. Bunu yapmak için şunları eklemeniz gerekir: `session_id` İsteğe parametre alın. Oturum kimliği olarak herhangi bir dize kullanabilirsiniz. Varsayılan olarak, oturum 60 saniye hareketsizlik sonra sonlandırılır. Bu zaman aşımını değiştirmek için, `default_session_timeout` sunucu yapılandırmasında ayarlama veya `session_timeout` İsteğe parametre alın. Oturum durumunu kontrol etmek için `session_check=1` parametre. Bir kerede yalnızca bir sorgu, tek bir oturum içinde çalıştırılabilir. + +Bir sorgunun ilerleme durumu hakkında bilgi alabilirsiniz `X-ClickHouse-Progress` yanıt başlıkları. Bunu yapmak için etkinleştir [send\_progress\_in\_http\_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Başlık dizisi örneği: + +``` text +X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128"} +X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128"} +X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128"} +``` + +Olası başlık alanları: + +- `read_rows` — Number of rows read. +- `read_bytes` — Volume of data read in bytes. +- `total_rows_to_read` — Total number of rows to be read. +- `written_rows` — Number of rows written. +- `written_bytes` — Volume of data written in bytes. + +Http bağlantısı kaybolursa çalışan istekler otomatik olarak durmaz. Ayrıştırma ve veri biçimlendirme sunucu tarafında gerçekleştirilir ve ağ kullanarak etkisiz olabilir. +Opsiyonel ‘query\_id’ parametre sorgu kimliği (herhangi bir dize) geçirilebilir. Daha fazla bilgi için bölüme bakın “Settings, replace\_running\_query”. + +Opsiyonel ‘quota\_key’ parametre kota anahtarı (herhangi bir dize) olarak geçirilebilir. Daha fazla bilgi için bölüme bakın “Quotas”. + +HTTP arabirimi, sorgulamak için dış verileri (dış geçici tablolar) geçirmenize izin verir. Daha fazla bilgi için bölüme bakın “External data for query processing”. + +## Yanıt Tamponlama {#response-buffering} + +Sunucu tarafında yanıt arabelleği etkinleştirebilirsiniz. Bu `buffer_size` ve `wait_end_of_query` Bu amaçla URL parametreleri sağlanmıştır. + +`buffer_size` sunucu belleğinde arabellek sonucu bayt sayısını belirler. Sonuç gövdesi bu eşikten büyükse, arabellek HTTP kanalına yazılır ve kalan veriler doğrudan HTTP kanalına gönderilir. + +Tüm yanıtın arabelleğe alındığından emin olmak için `wait_end_of_query=1`. Bu durumda, bellekte depolanan veriler geçici bir sunucu dosyasında arabelleğe alınır. + +Örnek: + +``` bash +$ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary' +``` + +Yanıt Kodu ve HTTP üstbilgileri istemciye gönderildikten sonra bir sorgu işleme hatası oluştu durumları önlemek için arabelleğe alma kullanın. 
Bu durumda, yanıt gövdesinin sonunda bir hata iletisi yazılır ve istemci tarafında hata yalnızca ayrıştırma aşamasında algılanabilir. + +### Parametrelerle sorgular {#cli-queries-with-parameters} + +Parametrelerle bir sorgu oluşturabilir ve karşılık gelen HTTP istek parametrelerinden onlar için değerler geçirebilirsiniz. Daha fazla bilgi için, bkz. [CLI için parametrelerle sorgular](cli.md#cli-queries-with-parameters). + +### Örnek {#example} + +``` bash +$ curl -sS "
    ?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}" +``` + +## Önceden tanımlanmış HTTP arabirimi {#predefined_http_interface} + +ClickHouse HTTP arabirimi üzerinden belirli sorguları destekler. Örneğin, bir tabloya aşağıdaki gibi veri yazabilirsiniz: + +``` bash +$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- +``` + +ClickHouse ayrıca gibi üçüncü parti araçları ile daha kolay entegrasyon yardımcı olabilir önceden tanımlanmış HTTP arayüzünü destekler [PROMETHEUS ihracatçı](https://github.com/percona-lab/clickhouse_exporter). + +Örnek: + +- Her şeyden önce, bu bölümü sunucu yapılandırma dosyasına ekleyin: + + + +``` xml + + + /metrics + GET + + SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n' + + + +``` + +- Artık PROMETHEUS formatında veriler için doğrudan url talep edebilirsiniz: + + + +``` bash +curl -vvv 'http://localhost:8123/metrics' +* Trying ::1... +* Connected to localhost (::1) port 8123 (#0) +> GET /metrics HTTP/1.1 +> Host: localhost:8123 +> User-Agent: curl/7.47.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< Date: Wed, 27 Nov 2019 08:54:25 GMT +< Connection: Keep-Alive +< Content-Type: text/plain; charset=UTF-8 +< X-ClickHouse-Server-Display-Name: i-tl62qd0o +< Transfer-Encoding: chunked +< X-ClickHouse-Query-Id: f39235f6-6ed7-488c-ae07-c7ceafb960f6 +< Keep-Alive: timeout=3 +< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"} +< +# HELP "Query" "Number of executing queries" +# TYPE "Query" counter +"Query" 1 + +# HELP "Merge" "Number of executing background merges" +# TYPE "Merge" counter +"Merge" 0 + +# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)" +# TYPE "PartMutation" counter +"PartMutation" 0 + +# HELP "ReplicatedFetch" "Number of data parts being fetched from replica" +# TYPE "ReplicatedFetch" counter +"ReplicatedFetch" 0 + +# HELP "ReplicatedSend" "Number of data parts being sent to replicas" +# TYPE "ReplicatedSend" counter +"ReplicatedSend" 0 + +* Connection #0 to host localhost left intact +``` + +Örnekten görebileceğiniz gibi, Eğer `` yapılandırmada yapılandırılır.XML dosyası, ClickHouse önceden tanımlanmış türüne alınan HTTP istekleri eşleşecek `` Maç başarılı olursa, ClickHouse ilgili önceden tanımlanmış sorgu yürütecektir. + +Şimdi `` Yapılandır configureılabilir ``, ``, ``, `` ve `` . + +## root\_handler {#root_handler} + +`` kök yolu isteği için belirtilen içeriği döndürür. Belirli dönüş içeriği tarafından yapılandırılır `http_server_default_response` config.xml. belirtilmemişse, iade **Tamam.** + +`http_server_default_response` tanımlanmadı ve Clickhouse'a bir HTTP isteği gönderildi. Sonuç aşağıdaki gibidir: + +``` xml + + + +``` + + $ curl 'http://localhost:8123' + Ok. + +`http_server_default_response` tanımlanır ve Clickhouse'a bir HTTP isteği gönderilir. Sonuç aşağıdaki gibidir: + +``` xml +
+<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+
+<http_handlers>
+    <root_handler/>
+</http_handlers>
+```
+
+    $ curl 'http://localhost:8123'
+    <html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head>%
+
+## ping\_handler {#ping_handler}
+
+`<ping_handler>` can be used to probe the health of the current ClickHouse server. When the ClickHouse HTTP server is healthy, accessing ClickHouse through `<ping_handler>` returns **Ok.**.
+
+Example:
+
+``` xml
+<http_handlers>
+    <ping_handler>/ping</ping_handler>
+</http_handlers>
+```
+
+``` bash
+$ curl 'http://localhost:8123/ping'
+Ok.
+```
+
+## replicas\_status\_handler {#replicas_status_handler}
+
+`<replicas_status_handler>` is used to detect the state of a replica node and return **Ok.** if the replica node has no delay. If there is a delay, it returns the specific delay. The value of `<replicas_status_handler>` supports customization. If you do not specify `<replicas_status_handler>`, the ClickHouse default setting for `<replicas_status_handler>` is **/replicas\_status**.
+
+Example:
+
+``` xml
+<http_handlers>
+    <replicas_status_handler>/replicas_status</replicas_status_handler>
+</http_handlers>
+```
+
+The no-delay case:
+
+``` bash
+$ curl 'http://localhost:8123/replicas_status'
+Ok.
+```
+
+The delayed case:
+
+``` bash
+$ curl 'http://localhost:8123/replicas_status'
+db.stats: Absolute delay: 22. Relative delay: 22.
+```
+
+## predefined\_query\_handler {#predefined_query_handler}
+
+You can configure `<method>`, `<headers>`, `<url>` and `<queries>` in `<predefined_query_handler>`.
+
+`<method>` is responsible for matching the method part of the HTTP request. `<method>` fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration. If it is not defined in the configuration file, it does not match the method part of the HTTP request.
+
+`<url>` is responsible for matching the URL part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2) regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the URL part of the HTTP request.
+
+`<headers>` is responsible for matching the header part of the HTTP request. It is compatible with RE2 regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header part of the HTTP request.
+
+The `<queries>` value is a predefined query of `<predefined_query_handler>`, which ClickHouse executes when an HTTP request is matched; the result of the query is returned. It is a mandatory configuration.
+
+`<predefined_query_handler>` supports setting Settings and query\_params values.
+
+The following example defines the values of the `max_threads` and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.
+
+Example:
+
+``` xml
+<http_handlers>
+    <predefined_query_handler>
+        <method>GET</method>
+        <headers>
+            <XXX>TEST_HEADER_VALUE</XXX>
+            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+        </headers>
+        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <queries>
+            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
+            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+        </queries>
+    </predefined_query_handler>
+</http_handlers>
+```
+
+``` bash
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
+1
+max_alter_threads 2
+```
+
+!!! note "Note"
+    One `<predefined_query_handler>` supports only one `<query>` of an insert type.
+
+## dynamic\_query\_handler {#dynamic_query_handler}
+
+Compared with `<predefined_query_handler>`, `<dynamic_query_handler>` adds the `<query_param_name>` setting.
+
+ClickHouse extracts and executes the value corresponding to `<query_param_name>` in the URL of the HTTP request.
+The ClickHouse default setting for `<query_param_name>` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in.
+
+To experiment with this functionality, the example defines the values of max\_threads and max\_alter\_threads and queries whether the settings were set successfully.
+The difference is that in `<predefined_query_handler>`, the query is written in the configuration file, while in `<dynamic_query_handler>`, the query is written in the form of a parameter of the HTTP request.
+
+Example:
+
+``` xml
+<http_handlers>
+    <dynamic_query_handler>
+        <headers>
+            <XXX>TEST_HEADER_VALUE_DYNAMIC</XXX>
+            <PARAMS_XXX><![CDATA[(?P<param_name_1>[^/]+)(/(?P<param_name_2>[^/]+))?]]></PARAMS_XXX>
+        </headers>
+        <query_param_name>query_param</query_param_name>
+    </dynamic_query_handler>
+</http_handlers>
+```
+
+``` bash
+$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/?query_param=SELECT%20value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D&max_threads=1&max_alter_threads=2&param_name_2=max_alter_threads'
+1
+2
+```
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/)
diff --git a/docs/tr/interfaces/index.md b/docs/tr/interfaces/index.md
new file mode 100644
index 00000000000..6a89d8cce7e
--- /dev/null
+++ b/docs/tr/interfaces/index.md
@@ -0,0 +1,29 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: Arabirimler
+toc_priority: 14
+toc_title: "Giri\u015F"
+---
+
+# Interfaces {#interfaces}
+
+ClickHouse provides two network interfaces (both can optionally be wrapped in TLS for additional security):
+
+- [HTTP](http.md), which is documented and easy to use directly.
+- [Native TCP](tcp.md), which has less overhead.
+
+In most cases it is recommended to use an appropriate tool or library instead of interacting with those directly. The following are officially supported by Yandex:
+
+- [Command-line client](cli.md)
+- [JDBC driver](jdbc.md)
+- [ODBC driver](odbc.md)
+- [C++ client library](cpp.md)
+
+There is also a wide range of third-party libraries for working with ClickHouse:
+
+- [Client libraries](third-party/client_libraries.md)
+- [Integrations](third-party/integrations.md)
+- [Visual interfaces](third-party/gui.md)
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/)
diff --git a/docs/tr/interfaces/jdbc.md b/docs/tr/interfaces/jdbc.md
new file mode 100644
index 00000000000..a7e69550c5a
--- /dev/null
+++ b/docs/tr/interfaces/jdbc.md
@@ -0,0 +1,15 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 22
+toc_title: "JDBC s\xFCr\xFCc\xFCs\xFC"
+---
+
+# JDBC Driver {#jdbc-driver}
+
+- **[Official driver](https://github.com/ClickHouse/clickhouse-jdbc)**
+- Third-party drivers:
+    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
+    - [clickhouse4j](https://github.com/blynkkk/clickhouse4j)
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/jdbc/)
diff --git a/docs/tr/interfaces/mysql.md b/docs/tr/interfaces/mysql.md
new file mode 100644
index 00000000000..4e60430f554
--- /dev/null
+++ b/docs/tr/interfaces/mysql.md
@@ -0,0 +1,49 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 20
+toc_title: "MySQL Aray\xFCz\xFC"
+---
+
+# MySQL Interface {#mysql-interface}
+
+ClickHouse supports the MySQL wire protocol. It can be enabled by the [mysql_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) setting in the configuration file:
+
+``` xml
+<mysql_port>9004</mysql_port>
+```
+
+Example of connecting using the command-line tool `mysql`:
+
+``` bash
+$ mysql --protocol tcp -u default -P 9004
+```
+
+Output if a connection succeeded:
+
+``` text
+Welcome to the MySQL monitor.  Commands end with ; or \g.
+Your MySQL connection id is 4
+Server version: 20.2.1.1-ClickHouse
+
+Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+
+Oracle is a registered trademark of Oracle Corporation and/or its
+affiliates. Other names may be trademarks of their respective
+owners.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+mysql>
+```
+
+For compatibility with all MySQL clients, it is recommended to specify the user password with [double SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) in the configuration file.
+If the user password is specified using [SHA256](../operations/settings/settings_users.md#password_sha256_hex), some clients (mysqljs and old versions of the command-line tool mysql) will not be able to authenticate.
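+
+As a hedged illustration of how such a hash can be produced (a shell sketch, not part of the original page), the following prints a random candidate password and the double-SHA1 hex digest to place into `password_double_sha1_hex`:
+
+``` bash
+# Generate a random password, hash it with SHA-1, convert the hex digest
+# back to binary, and hash it again; the final hex string is the value
+# for <password_double_sha1_hex> in the user configuration.
+PASSWORD=$(base64 < /dev/urandom | head -c8)
+echo "password: $PASSWORD"
+echo -n "$PASSWORD" | sha1sum | tr -d ' -' | xxd -r -p | sha1sum | tr -d ' -'
+```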
+
+Restrictions:
+
+- prepared queries are not supported
+
+- some data types are sent as strings
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/mysql/)
diff --git a/docs/tr/interfaces/odbc.md b/docs/tr/interfaces/odbc.md
new file mode 100644
index 00000000000..3cc0cc35700
--- /dev/null
+++ b/docs/tr/interfaces/odbc.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 23
+toc_title: "ODBC s\xFCr\xFCc\xFCs\xFC"
+---
+
+# ODBC Driver {#odbc-driver}
+
+- [Official driver](https://github.com/ClickHouse/clickhouse-odbc).
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/odbc/)
diff --git a/docs/tr/interfaces/tcp.md b/docs/tr/interfaces/tcp.md
new file mode 100644
index 00000000000..b1f712efd7d
--- /dev/null
+++ b/docs/tr/interfaces/tcp.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 18
+toc_title: Yerel arabirim (TCP)
+---
+
+# Native Interface (TCP) {#native-interface-tcp}
+
+The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, the native ClickHouse protocol does not yet have a formal specification, but it can be reverse-engineered from the ClickHouse source code (starting [around here](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) and/or by intercepting and analyzing TCP traffic.
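+
+As a hedged sketch of the traffic-capture approach (the loopback interface and the default native port 9000 are assumptions about a typical local setup, not something this page mandates), a capture file for later inspection could be produced like this:
+
+``` bash
+# Record native-protocol traffic between a local client and server
+# on the default TCP port 9000 for offline analysis.
+$ sudo tcpdump -i lo -w clickhouse-native.pcap 'tcp port 9000'
+```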
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/tcp/)
diff --git a/docs/tr/interfaces/third-party/client_libraries.md b/docs/tr/interfaces/third-party/client_libraries.md
new file mode 100644
index 00000000000..2b89b1c4520
--- /dev/null
+++ b/docs/tr/interfaces/third-party/client_libraries.md
@@ -0,0 +1,59 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 26
+toc_title: "\u0130stemci Kitapl\u0131klar\u0131"
+---
+
+# Client Libraries from Third-party Developers {#client-libraries-from-third-party-developers}
+
+!!! warning "Warning"
+    Yandex does **not** maintain the libraries listed below and hasn't done any extensive testing to ensure their quality.
+
+- Python
+    - [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)
+    - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver)
+    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
+    - [aiochclient](https://github.com/maximdanilchenko/aiochclient)
+- PHP
+    - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse)
+    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
+    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
+    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
+    - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client)
+    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
+- Go
+    - [clickhouse](https://github.com/kshvakov/clickhouse/)
+    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
+    - [mailru-go-clickhouse](https://github.com/mailru/go-clickhouse)
+    - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse)
+- NodeJs
+    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
+    - [node-clickhouse](https://github.com/apla/node-clickhouse)
+- Perl
+    - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
+    - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
+    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
+- Ruby
+    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
+- R
+    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
+    - [RClickHouse](https://github.com/IMSMWU/RClickHouse)
+- Java
+    - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
+    - [clickhouse-client](https://github.com/Ecwid/clickhouse-client)
+- Scala
+    - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
+- Kotlin
+    - [AORM](https://github.com/TanVD/AORM)
+- C#
+    - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+    - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
+    - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
+- Elixir
+    - [clickhousex](https://github.com/appodeal/clickhousex/)
+- Nim
+    - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/client_libraries/)
diff --git a/docs/tr/interfaces/third-party/gui.md b/docs/tr/interfaces/third-party/gui.md
new file mode 100644
index 00000000000..cb41103563d
--- /dev/null
+++ b/docs/tr/interfaces/third-party/gui.md
@@ -0,0 +1,152 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 28
+toc_title: "G\xF6rsel Aray\xFCzler"
+---
+
+# Visual Interfaces from Third-party Developers {#visual-interfaces-from-third-party-developers}
+
+## Open-Source {#open-source}
+
+### Tabix {#tabix}
+
+Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) project.
+
+Features:
+
+- Works with ClickHouse directly from the browser, without the need to install additional software.
+- Query editor with syntax highlighting.
+- Auto-completion of commands.
+- Tools for graphical analysis of query execution.
+- Color scheme options.
+
+[Tabix documentation](https://tabix.io/doc/).
+
+### HouseOps {#houseops}
+
+[HouseOps](https://github.com/HouseOps/HouseOps) is a UI/IDE for OSX, Linux and Windows.
+
+Features:
+
+- Query builder with syntax highlighting. View the response in a table or JSON view.
+- Export query results as CSV or JSON.
+- List of processes with descriptions. Write mode. Ability to stop (`KILL`) a process.
+- Database graph. Shows all tables and their columns with additional information.
+- A quick view of the column size.
+- Server configuration.
+
+The following features are planned for development:
+
+- Database management.
+- User management.
+- Real-time data analysis.
+- Cluster monitoring.
+- Cluster management.
+- Monitoring replicated and Kafka tables.
+
+### LightHouse {#lighthouse}
+
+[LightHouse](https://github.com/VKCOM/lighthouse) is a lightweight web interface for ClickHouse.
+
+Features:
+
+- Table list with filtering and metadata.
+- Table preview with filtering and sorting.
+- Read-only query execution.
+
+### Redash {#redash}
+
+[Redash](https://github.com/getredash/redash) is a platform for data visualization.
+
+It supports multiple data sources including ClickHouse; Redash can join results of queries from different data sources into one final dataset.
+
+Features:
+
+- Powerful editor of queries.
+- Database explorer.
+- Visualization tools that allow you to represent data in different forms.
+
+### DBeaver {#dbeaver}
+
+[DBeaver](https://dbeaver.io/) - universal desktop database client with ClickHouse support.
+
+Features:
+
+- Query development with syntax highlighting and autocompletion.
+- Table list with filters and metadata search.
+- Table data preview.
+- Full-text search.
+
+### clickhouse-cli {#clickhouse-cli}
+
+[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) is an alternative command-line client for ClickHouse, written in Python 3.
+
+Features:
+
+- Autocompletion.
+- Syntax highlighting for the queries and data output.
+- Pager support for the data output.
+- Custom PostgreSQL-like commands.
+
+### clickhouse-flamegraph {#clickhouse-flamegraph}
+
+[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) is a specialized tool for visualizing the `system.trace_log` as a [flamegraph](http://www.brendangregg.com/flamegraphs.html).
+
+## Commercial {#commercial}
+
+### DataGrip {#datagrip}
+
+[DataGrip](https://www.jetbrains.com/datagrip/) is a database IDE from JetBrains with dedicated support for ClickHouse. It is also embedded in other IntelliJ-based tools: PyCharm, IntelliJ IDEA, GoLand, PhpStorm and others.
+
+Features:
+
+- Very fast code completion.
+- ClickHouse syntax highlighting.
+- Support for features specific to ClickHouse, for example, nested columns and table engines.
+- Data editor.
+- Refactorings.
+- Search and navigation.
+
+### Yandex DataLens {#yandex-datalens}
+
+[Yandex DataLens](https://cloud.yandex.ru/services/datalens) is a service of data visualization and analytics.
+
+Features:
+
+- Wide range of available visualizations, from simple bar charts to complex dashboards.
+- Dashboards can be made publicly available.
+- Support for multiple data sources including ClickHouse.
+- Storage for materialized data based on ClickHouse.
+
+DataLens is [available for free](https://cloud.yandex.com/docs/datalens/pricing) for low-load projects, even for commercial use.
+
+- [DataLens documentation](https://cloud.yandex.com/docs/datalens/).
+- [Tutorial](https://cloud.yandex.com/docs/solutions/datalens/data-from-ch-visualization) on visualizing data from a ClickHouse database.
+
+### Holistics Software {#holistics-software}
+
+[Holistics](https://www.holistics.io/) is a full-stack data platform and business intelligence tool.
+
+Features:
+
+- Automated email, Slack and Google Sheet schedules of reports.
+- SQL editor with visualizations, version control, auto-completion, reusable query components and dynamic filters.
+- Embedded analytics of reports and dashboards via iframe.
+- Data preparation and ETL capabilities.
+- SQL data modelling support for relational mapping of data.
+
+### Looker {#looker}
+
+[Looker](https://looker.com) is a data platform and business intelligence tool with support for 50+ database dialects including ClickHouse. Looker is available as a SaaS platform and self-hosted. Users can use Looker via the browser to explore data, build visualizations and dashboards, schedule reports, and share their insights with colleagues. Looker provides a rich set of tools to embed these features in other applications, and an API
+to integrate data with other applications.
+
+Features:

+- Easy and agile development using LookML, a language which supports curated
+    [Data Modeling](https://looker.com/platform/data-modeling) to support report writers and end users.
+- Powerful workflow integration via Looker's [Data Actions](https://looker.com/platform/actions).
+
+[How to configure ClickHouse in Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse)
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/)
diff --git a/docs/tr/interfaces/third-party/index.md b/docs/tr/interfaces/third-party/index.md
new file mode 100644
index 00000000000..d8332c00c26
--- /dev/null
+++ b/docs/tr/interfaces/third-party/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "\xDC\xE7\xFCnc\xFC Taraf"
+toc_priority: 24
+---
+
+
diff --git a/docs/tr/interfaces/third-party/integrations.md b/docs/tr/interfaces/third-party/integrations.md
new file mode 100644
index 00000000000..2216e68a4c4
--- /dev/null
+++ b/docs/tr/interfaces/third-party/integrations.md
@@ -0,0 +1,99 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 27
+toc_title: Entegrasyonlar
+---
+
+# Integration Libraries from Third-party Developers {#integration-libraries-from-third-party-developers}
+
+!!! warning "Warning"
+    Yandex does **not** maintain the tools and libraries listed below and hasn't done any extensive testing to ensure their quality.
+
+## Infrastructure Products {#infrastructure-products}
+
+- Relational database management systems
+    - [MySQL](https://www.mysql.com)
+        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
+        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
+        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
+    - [PostgreSQL](https://www.postgresql.org)
+        - [clickhousedb_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
+        - [infi.clickhouse_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
+        - [pg2ch](https://github.com/mkabilov/pg2ch)
+        - [clickhouse_fdw](https://github.com/adjust/clickhouse_fdw)
+    - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server)
+        - [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator)
+- Message queues
+    - [Kafka](https://kafka.apache.org)
+        - [clickhouse_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/kshvakov/clickhouse/))
+- Object storages
+    - [S3](https://en.wikipedia.org/wiki/Amazon_S3)
+        - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
+- Container orchestration
+    - [Kubernetes](https://kubernetes.io)
+        - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
+- Configuration management
+    - [puppet](https://puppet.com)
+        - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
+        - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
+- Monitoring
+    - [Graphite](https://graphiteapp.org)
+        - [graphouse](https://github.com/yandex/graphouse)
+        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
+        - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
+        - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) if rules from the [rollup configuration](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) could be applied
+    - [Grafana](https://grafana.com/)
+        - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
+    - [Prometheus](https://prometheus.io/)
+        - [clickhouse_exporter](https://github.com/f1yegor/clickhouse_exporter)
+        - [PromHouse](https://github.com/Percona-Lab/PromHouse)
+        - [clickhouse_exporter](https://github.com/hot-wifi/clickhouse_exporter) (uses [Go client](https://github.com/kshvakov/clickhouse/))
+    - [Nagios](https://www.nagios.org/)
+        - [check_clickhouse](https://github.com/exogroup/check_clickhouse/)
+        - [check_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
+    - [Zabbix](https://www.zabbix.com)
+        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
+    - [Sematext](https://sematext.com/)
+        - [clickhouse integration](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
+- Logging
+    - [rsyslog](https://www.rsyslog.com/)
+        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
+    - [fluentd](https://www.fluentd.org)
+        - [loghouse](https://github.com/flant/loghouse) (for [Kubernetes](https://kubernetes.io))
+    - [logagent](https://www.sematext.com/logagent)
+        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
+- Geo
+    - [MaxMind](https://dev.maxmind.com/geoip/)
+        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
+
+## Programming Language Ecosystems {#programming-language-ecosystems}
+
+- Python
+    - [SQLAlchemy](https://www.sqlalchemy.org)
+        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
+    - [pandas](https://pandas.pydata.org)
+        - [pandahouse](https://github.com/kszucs/pandahouse)
+- PHP
+    - [Doctrine](https://www.doctrine-project.org/)
+        - [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
+- R
+    - [dplyr](https://db.rstudio.com/dplyr/)
+        - [RClickHouse](https://github.com/IMSMWU/RClickHouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
+- Java
+    - [Hadoop](http://hadoop.apache.org)
+        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (uses [JDBC](../../sql_reference/table_functions/jdbc.md))
+- Scala
+    - [Akka](https://akka.io)
+        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
+- C#
+    - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview)
+        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+        - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
+        - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
+        - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations)
+- Elixir
+    - [Ecto](https://github.com/elixir-ecto/ecto)
+        - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto)
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/)
diff --git a/docs/tr/interfaces/third-party/proxy.md b/docs/tr/interfaces/third-party/proxy.md
new file mode 100644
index 00000000000..7ff9fc3642f
--- /dev/null
+++ b/docs/tr/interfaces/third-party/proxy.md
@@ -0,0 +1,46 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 29
+toc_title: Vekiller
+---
+
+# Proxy Servers from Third-party Developers {#proxy-servers-from-third-party-developers}
+
+## chproxy {#chproxy}
+
+[chproxy](https://github.com/Vertamedia/chproxy) is an HTTP proxy and load balancer for the ClickHouse database.
+
+Features:
+
+- Per-user routing and response caching.
+- Flexible limits.
+- Automatic SSL certificate renewal.
+
+Implemented in Go.
+
+## KittenHouse {#kittenhouse}
+
+[KittenHouse](https://github.com/VKCOM/kittenhouse) is designed to be a local proxy between ClickHouse and an application server.
+
+Features:
+
+- In-memory and on-disk data buffering.
+- Per-table routing.
+- Load balancing and health checking.
+
+Implemented in Go.
+
+## ClickHouse-Bulk {#clickhouse-bulk}
+
+[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) is a simple ClickHouse insert collector.
+
+Features:
+
+- Groups requests and sends them by threshold or interval.
+- Multiple remote servers.
+- Basic authentication.
+
+Implemented in Go.
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/proxy/)
diff --git a/docs/tr/introduction/adopters.md b/docs/tr/introduction/adopters.md
new file mode 100644
index 00000000000..0e180e161ec
--- /dev/null
+++ b/docs/tr/introduction/adopters.md
@@ -0,0 +1,83 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 8
+toc_title: Benimseyenler
+---
+
+# ClickHouse Adopters {#clickhouse-adopters}
+
+!!! warning "Warning"
+    The following list of companies using ClickHouse and their success stories is assembled from public sources, and thus might differ from current reality. We'd appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won't have any NDA issues by doing so. Providing updates with publications from other companies is also useful.
+
+| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size\* | Reference |
+|---------|----------|---------|--------------|----------------------------|-----------|
+| [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) |
+| [Aloha Browser](https://alohabrowser.com/) | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) |
+| [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
+| [Appsflyer](https://www.appsflyer.com) | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
+| [ArenaData](https://arenadata.tech/) | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) |
+| [Badoo](https://badoo.com) | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) |
+| [Benocs](https://www.benocs.com/) | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
+| [Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
+| [Bloxy](https://bloxy.info) | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
+| `Dataliance/UltraPower` | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
+| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
+| [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
+| [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
+| [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
+| [Citymobil](https://city-mobil.ru) | Taxi | Analytics | — | — | [Blog post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
+| [ContentSquare](https://contentsquare.com) | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
+| [Cloudflare](https://cloudflare.com) | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
+| [Corunet](https://coru.net/) | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
+| [CraiditX 氪信](https://creditx.com) | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
+| [Criteo/Storetail](https://www.criteo.com/) | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) |
+| [Deutsche Bank](https://db.com) | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
+| [Diva-e](https://www.diva-e.com) | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
+| [Exness](https://www.exness.com) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
+| [Geniee](https://geniee.co.jp) | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
+| [HUYA](https://www.huya.com/) | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
+| [Idealista](https://www.idealista.com) | Real Estate | Analytics | — | — | [Blog post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| [Infovista](https://www.infovista.com/) | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
+| [InnoGames](https://www.innogames.com) | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
+| [Integros](https://integros.com) | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| [Kodiak Data](https://www.kodiakdata.com/) | Clouds | Main product | — | — | [Slides in English, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
+| [Kontur](https://kontur.ru) | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
+| [LifeStreet](https://lifestreet.com/) | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
+| [Mail.ru Cloud Solutions](https://mcs.mail.ru/) | Cloud services | Main product | — | — | [Running a ClickHouse instance, in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
+| [MessageBird](https://www.messagebird.com) | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
+| [MGID](https://www.mgid.com/) | Ad network | Web-analytics | — | — | [Our experience in implementing the analytical DBMS ClickHouse, in Russian](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
+| [OneAPM](https://www.oneapm.com/) | Monitoring and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
+| [Pragma Innovation](http://www.pragma-innovation.fr/) | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
+| [QINGCLOUD](https://www.qingcloud.com/) | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
+| [Qrator](https://qrator.net) | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
+| [Beijing PERCENT Information Technology Co., Ltd.](https://www.percent.cn/) | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
+| [Rambler](https://rambler.ru) | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
+| [Tencent](https://www.tencent.com) | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
+| [Traffic Stars](https://trafficstars.com/) | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
+| [S7 Airlines](https://www.s7.ru) | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
+| [SEMrush](https://www.semrush.com/) | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
+| [scireum GmbH](https://www.scireum.de/) | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
+| [Sentry](https://sentry.io/) | Software developer | Backend for product | — | — | [Blog post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
+| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
+| [seo.do](https://seo.do/) | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
+| [Sina](http://english.sina.com/index.html) | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
+| [SMI2](https://smi2.ru/) | News | Analytics | — | — | [Blog post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
+| [Splunk](https://www.splunk.com/) | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
+| [Spotify](https://www.spotify.com) | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
+| [Tencent](https://www.tencent.com) | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
+| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) |
+| [VKontakte](https://vk.com) | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
+| [Wisebits](https://wisebits.com/) | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| [Xiaoxin Tech.](https://www.xiaoheiban.cn/) | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) |
+| [Ximalaya](https://www.ximalaya.com/) | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) |
+| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
+| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
+| [Yandex Market](https://market.yandex.ru/) | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) |
+| [Yandex Metrica](https://metrica.yandex.com) | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) |
+| [ЦВТ](https://htc-cs.ru/) | Software Development | Metrics, Logging | — | — | [Blog post in Russian, March 2019](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
+| [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
+| [金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
+| [Instana](https://www.instana.com) | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) |
+
+[Original article](https://clickhouse.tech/docs/en/introduction/adopters/)
diff --git a/docs/tr/introduction/distinctive_features.md b/docs/tr/introduction/distinctive_features.md
new file mode 100644
index 00000000000..3b26dea645c
--- /dev/null
+++ b/docs/tr/introduction/distinctive_features.md
@@ -0,0 +1,77 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 4
+toc_title: "Ay\u0131rt Edici \xD6zellikler"
+---
+
+# Distinctive Features of ClickHouse {#distinctive-features-of-clickhouse}
+
+## True Column-Oriented DBMS {#true-column-oriented-dbms}
+
+In a true column-oriented DBMS, no extra data is stored with the values. Among other things, this means that constant-length values must be supported, to avoid storing their length "number" next to the values. As an example, a billion UInt8-type values should consume around 1 GB uncompressed, or this strongly affects CPU use. It is essential to store data compactly (without any "garbage") even when uncompressed, since the speed of decompression (CPU usage) depends mainly on the volume of uncompressed data.
+
+It is worth noting because there are systems that can store values of different columns separately, but that can't effectively process analytical queries due to their optimization for other scenarios. Examples are HBase, BigTable, Cassandra, and HyperTable. In these systems, you would get throughput around a hundred thousand rows per second, but not hundreds of millions of rows per second.
+
+It's also worth noting that ClickHouse is a database management system, not a single database.
+ClickHouse allows creating tables and databases at runtime, loading data, and running queries without reconfiguring and restarting the server.
+
+## Data Compression {#data-compression}
+
+Some column-oriented DBMSs (InfiniDB CE and MonetDB) do not use data compression. However, data compression plays a key role in achieving excellent performance.
+
+## Disk Storage of Data {#disk-storage-of-data}
+
+Keeping data physically sorted by primary key makes it possible to extract data for specific values or value ranges with low latency, in less than a few dozen milliseconds. Some column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. This approach encourages the allocation of a larger hardware budget than is necessary for real-time analysis. ClickHouse is designed to work on regular hard drives, which means the cost per GB of data storage is low, but SSDs and additional RAM are also fully used if available.
+
+## Parallel Processing on Multiple Cores {#parallel-processing-on-multiple-cores}
+
+Large queries are parallelized naturally, taking all the necessary resources available on the current server.
+
+## Distributed Processing on Multiple Servers {#distributed-processing-on-multiple-servers}
+
+Almost none of the columnar DBMSs mentioned above have support for distributed query processing.
+In ClickHouse, data can reside on different shards. Each shard can be a group of replicas used for fault tolerance. All shards are used to run a query in parallel, transparently for the user.
+
+## SQL Support {#sql-support}
+
+ClickHouse supports a declarative query language based on SQL that is identical to the SQL standard in many cases.
+Supported queries include GROUP BY, ORDER BY, subqueries in FROM, IN, and JOIN clauses, and scalar subqueries.
+Dependent subqueries and window functions are not supported.
+
+## Vector Engine {#vector-engine}
+
+Data is not only stored by columns but is processed by vectors (parts of columns), which allows achieving high CPU efficiency.
+
+## Real-time Data Updates {#real-time-data-updates}
+
+ClickHouse supports tables with a primary key. To quickly perform queries on the range of the primary key, the data is sorted incrementally using the merge tree. Due to this, data can continually be added to the table. No locks are taken when new data is ingested.
+
+## Index {#index}
+
+Having the data physically sorted by primary key makes it possible to extract data for specific values or value ranges with low latency, in less than a few dozen milliseconds.
+
+## Suitable for Online Queries {#suitable-for-online-queries}
+
+Low latency means that queries can be processed without delay and without trying to prepare an answer in advance, right at the moment the user interface page is loading. In other words, online.
+
+## Support for Approximated Calculations {#support-for-approximated-calculations}
+
+ClickHouse provides various ways to trade accuracy for performance:
+
+1.  Aggregate functions for approximated calculation of the number of distinct values, medians, and quantiles.
+2.  Running a query based on a part (sample) of the data and getting an approximated result. In this case, proportionally less data is retrieved from the disk.
+3.  Running an aggregation for a limited number of random keys, instead of for all keys. Under certain conditions for key distribution in the data, this provides a reasonably accurate result while using fewer resources.
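+
+As a hedged illustration of the first two points (the `hits` table and `UserID` column are hypothetical placeholders, and `SAMPLE` requires the table to declare a sampling key):
+
+``` bash
+# Approximate distinct count over a 10% sample; uniqCombined is itself an
+# approximate aggregate, so both accuracy trade-offs apply at once.
+$ clickhouse-client --query "SELECT uniqCombined(UserID) FROM hits SAMPLE 1/10"
+```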
+
+## Data Replication and Data Integrity Support {#data-replication-and-data-integrity-support}
+
+ClickHouse uses asynchronous multi-master replication. After being written to any available replica, all the remaining replicas retrieve their copy in the background. The system maintains identical data on different replicas. Recovery after most failures is performed automatically, or semi-automatically in complex cases.
+
+For more information, see the section [Data replication](../engines/table_engines/mergetree_family/replication.md).
+
+## Features that Can Be Considered Disadvantages {#clickhouse-features-that-can-be-considered-disadvantages}
+
+1.  No full-fledged transactions.
+2.  Lack of ability to modify or delete already inserted data with a high rate and low latency. There are batch deletes and updates available to clean up or modify data, for example, to comply with [GDPR](https://gdpr-info.eu).
+3.  The sparse index makes ClickHouse not so suitable for point queries retrieving single rows by their keys.
+
+[Original article](https://clickhouse.tech/docs/en/introduction/distinctive_features/)
diff --git a/docs/tr/introduction/history.md b/docs/tr/introduction/history.md
new file mode 100644
index 00000000000..cfc18df2981
--- /dev/null
+++ b/docs/tr/introduction/history.md
@@ -0,0 +1,56 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 7
+toc_title: "Tarih\xE7e"
+---
+
+# ClickHouse History {#clickhouse-history}
+
+ClickHouse was originally developed to power [Yandex.Metrica](https://metrica.yandex.com/), [the second-largest web analytics platform in the world](http://w3techs.com/technologies/overview/traffic_analysis/all), and continues to be the core component of this system. With more than 13 trillion records in the database and more than 20 billion events daily, ClickHouse allows generating custom reports on the fly directly from non-aggregated data. This article briefly covers the goals of ClickHouse in the early stages of its development.
+
+Yandex.Metrica builds customized reports on the fly based on hits and sessions, with arbitrary segments defined by the user. Doing so often requires building complex aggregates, such as the number of unique users. New data for building a report arrives in real time.
+
+As of April 2014, Yandex.Metrica was tracking about 12 billion events (page views and clicks) daily. All these events must be stored to build custom reports. A single query may require scanning millions of rows within a few hundred milliseconds, or hundreds of millions of rows in just a few seconds.
+
+## Usage in Yandex.Metrica and Other Yandex Services {#usage-in-yandex-metrica-and-other-yandex-services}
+
+ClickHouse serves multiple purposes in Yandex.Metrica.
+Its main task is to build reports in online mode using non-aggregated data. It uses a cluster of 374 servers, which store over 20.3 trillion rows in the database. The volume of compressed data is about 2 PB, without accounting for duplicates and replicas. The volume of uncompressed data (in TSV format) would be approximately 17 PB.
+
+ClickHouse also plays a key role in the following processes:
+
+- Storing data for Session Replay from Yandex.Metrica.
+- Processing intermediate data.
+- Building global reports with Analytics.
+- Running queries for debugging the Yandex.Metrica engine.
+- Analyzing logs from the API and the user interface.
+
+Today, there are multiple dozen ClickHouse installations in other Yandex services and departments: search verticals, e-commerce, advertisement, business analytics, mobile development, personal services, and others.
+
+## Aggregated and Non-aggregated Data {#aggregated-and-non-aggregated-data}
+
+There is a widespread opinion that to calculate statistics effectively, you must aggregate data, since this reduces the volume of data.
+
+But data aggregation comes with a lot of limitations:
+
+- You must have a pre-defined list of the required reports.
+- The user can't make custom reports.
+- When aggregating over a large number of distinct keys, the volume of data is barely reduced, so aggregation is useless.
+- For a large number of reports, there are too many aggregation variations (combinatorial explosion).
+- When aggregating keys with high cardinality (such as URLs), the volume of data is not reduced by much (less than twofold).
+- For this reason, the volume of data with aggregation might grow instead of shrink.
+- Users do not view all the reports we generate for them. A large portion of those calculations is useless.
+- The logical integrity of data may be violated for various aggregations.
+
+If we do not aggregate anything and work with non-aggregated data, this might reduce the volume of calculations.
+
+However, with aggregation, a significant part of the work is taken offline and completed relatively calmly. In contrast, online calculations require calculating as fast as possible, since the user is waiting for the result.
+
+Yandex.Metrica has a specialized system for aggregating data called Metrage, which was used for the majority of reports.
+Starting in 2009, Yandex.Metrica also used a specialized OLAP database for non-aggregated data called OLAPServer, which was previously used for the report builder.
+OLAPServer worked well for non-aggregated data, but it had many restrictions that did not allow it to be used for all reports as desired. These included the lack of support for data types (only numbers), and the inability to incrementally update data in real time (it could only be done by rewriting data daily). OLAPServer is not a DBMS, but a specialized DB.
+
+The initial goal for ClickHouse was to remove the limitations of OLAPServer and solve the problem of working with non-aggregated data for all reports, but over the years, it has grown into a general-purpose database management system suitable for a wide range of analytical tasks.
+
+[Original article](https://clickhouse.tech/docs/en/introduction/history/)
diff --git a/docs/tr/introduction/index.md b/docs/tr/introduction/index.md
new file mode 100644
index 00000000000..9691671aefa
--- /dev/null
+++ b/docs/tr/introduction/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "Giri\u015F"
+toc_priority: 1
+---
+
+
diff --git a/docs/tr/introduction/performance.md b/docs/tr/introduction/performance.md
new file mode 100644
index 00000000000..c07614ba71e
--- /dev/null
+++ b/docs/tr/introduction/performance.md
@@ -0,0 +1,32 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 6
+toc_title: Performans
+---
+
+# Performance {#performance}
+
+According to internal testing results at Yandex, ClickHouse shows the best performance (both the highest throughput for long queries and the lowest latency on short queries) for comparable operating scenarios among systems of its class that were available for testing. You can view the test results on a [separate page](https://clickhouse.tech/benchmark.html).
+
+Numerous independent benchmarks came to similar conclusions. They are not difficult to find using an internet search, or you can see [our small collection of related links](https://clickhouse.tech/#independent-benchmarks).
+
+## Throughput for a Single Large Query {#throughput-for-a-single-large-query}
+
+Throughput can be measured in rows per second or megabytes per second. If the data is placed in the page cache, a query that is not too complex is processed on modern hardware at a speed of approximately 2-10 GB/s of uncompressed data on a single server (for the most straightforward cases, the speed may reach 30 GB/s). If the data is not placed in the page cache, the speed depends on the disk subsystem and the data compression rate. For example, if the disk subsystem allows reading data at 400 MB/s and the data compression rate is 3, the speed is expected to be around 1.2 GB/s. To get the speed in rows per second, divide the speed in bytes per second by the total size of the columns used in the query. For example, if 10 bytes of columns are extracted, the speed is expected to be around 100-200 million rows per second.
+
+The processing speed increases almost linearly for distributed processing, but only if the number of rows resulting from aggregation or sorting is not too large.
+
+## Latency When Processing Short Queries {#latency-when-processing-short-queries}
+
+If a query uses a primary key and does not select too many columns and rows to process (hundreds of thousands), you can expect less than 50 milliseconds of latency (single-digit milliseconds in the best case) if the data is placed in the page cache. Otherwise, latency is mostly dominated by the number of seeks. If you use rotating disk drives, for a system that is not overloaded, the latency can be estimated with this formula: `seek time (10 ms) * count of columns queried * count of data parts`.
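+
+As a hypothetical worked example of this formula: a cold query touching 5 columns in a table with 4 data parts on a drive with 10 ms seek time would be expected to spend roughly 10 ms × 5 × 4 = 200 ms on seeks alone.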
+
+## Throughput When Processing a Large Quantity of Short Queries {#throughput-when-processing-a-large-quantity-of-short-queries}
+
+Under the same conditions, ClickHouse can handle several hundred queries per second on a single server (up to several thousand in the best case). Since this scenario is not typical for analytical DBMSs, we recommend expecting a maximum of 100 queries per second.
+
+## Performance When Inserting Data {#performance-when-inserting-data}
+
+We recommend inserting data in packets of at least 1000 rows, or no more than a single request per second. When inserting into a MergeTree table from a tab-separated dump, the insertion speed can be from 50 to 200 MB/s. If the inserted rows are around 1 KB in size, the speed will be from 50,000 to 200,000 rows per second. If the rows are small, the performance can be higher in rows per second (on Banner System data -`>` 500,000 rows per second; on Graphite data -`>` 1,000,000 rows per second). To improve performance, you can make multiple INSERT queries in parallel, which scales linearly.
+
+[Original article](https://clickhouse.tech/docs/en/introduction/performance/)
diff --git a/docs/tr/operations/access_rights.md b/docs/tr/operations/access_rights.md
new file mode 100644
index 00000000000..b270c0ecbed
--- /dev/null
+++ b/docs/tr/operations/access_rights.md
@@ -0,0 +1,113 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 48
+toc_title: "Eri\u015Fim Haklar\u0131"
+---
+
+# Access Rights {#access-rights}
+
+Users and access rights are set up in the user configuration. This is usually `users.xml`.
+
+Users are recorded in the `users` section. Here is a fragment of the `users.xml` file:
+
+``` xml
+<!-- Users and ACL. -->
+<users>
+    <!-- If the user name is not specified, the 'default' user is used. -->
+    <default>
+        <!-- Password (plaintext or SHA-256 hex); empty here. -->
+        <password></password>
+
+        <!-- The list of networks that access is allowed from,
+             loaded from the 'networks' substitution. -->
+        <networks incl="networks" replace="replace">
+        </networks>
+
+        <!-- Settings profile for the user. -->
+        <profile>default</profile>
+
+        <!-- Quota for the user. -->
+        <quota>default</quota>
+    </default>
+
+    <web>
+        <password></password>
+        <networks incl="networks" replace="replace">
+        </networks>
+        <profile>web</profile>
+        <quota>default</quota>
+
+        <allow_databases>
+            <database>test</database>
+        </allow_databases>
+        <allow_dictionaries>
+            <dictionary>test</dictionary>
+        </allow_dictionaries>
+    </web>
+</users>
+```
+
+You can see a declaration of two users: `default` and `web`. We added the `web` user separately.
+
+The `default` user is chosen in cases when the username is not passed. The `default` user is also used for distributed query processing, if the configuration of the server or cluster doesn't specify the `user` and `password` (see the section on the [Distributed](../engines/table_engines/special/distributed.md) engine).
+
+The user that is used for exchanging information between servers combined in a cluster must not have substantial restrictions or quotas – otherwise, distributed queries will fail.
+
+The password is specified in clear text (not recommended) or in SHA-256. The hash isn't salted. In this regard, you should not consider these passwords as providing security against potential malicious attacks. Rather, they are necessary for protection from employees.
+
+A list of networks that access is allowed from is specified. In this example, the list of networks for both users is loaded from a separate file (`/etc/metrika.xml`) containing the `networks` substitution. Here is a fragment of it:
+
+``` xml
+<yandex>
+    ...
+    <networks>
+        <ip>::/64</ip>
+        <ip>203.0.113.0/24</ip>
+        <ip>2001:DB8::/32</ip>
+        ...
+    </networks>
+</yandex>
+```
+
+You could define this list of networks directly in `users.xml`, or in a file in the `users.d` directory (for more information, see the section "[Configuration files](configuration_files.md#configuration_files)").
+
+The config includes comments explaining how to open access from everywhere.
+
+For use in production, only specify `ip` elements (IP addresses and their masks), since using `host` and `host_regexp` might cause extra latency.
+
+Next, the settings profile for the user is specified (see the section "[Settings profiles](settings/settings_profiles.md)"). You can specify the default profile, `default`. The profile can have any name. You can specify the same profile for different users. The most important thing you can write in the settings profile is `readonly=1`, which ensures read-only access. Then specify the quota to be used (see the section "[Quotas](quotas.md#quotas)"). You can specify the default quota: `default`. It is set in the config by default to only count resource usage, without restricting it. The quota can have any name. You can specify the same quota for different users – in this case, resource usage is calculated for each user individually.
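+
+As a hedged illustration of the `users.d` override mechanism mentioned above (the file name and the user are invented for the example; the `readonly` profile is assumed to exist, as it does in the default `users.xml`), a user can be added without editing the main file:
+
+``` bash
+# Drop-in fragments under users.d are merged into the main user
+# configuration; this sketch adds a read-only user named "analyst".
+$ cat > /etc/clickhouse-server/users.d/analyst.xml <<'EOF'
+<yandex>
+    <users>
+        <analyst>
+            <password></password>
+            <networks><ip>::1</ip></networks>
+            <profile>readonly</profile>
+            <quota>default</quota>
+        </analyst>
+    </users>
+</yandex>
+EOF
+```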
You can specify the same quota for different users – in this case, resource usage is calculated for each user individually.
+
+İsteğe bağlı `<allow_databases>` bölümünde, kullanıcının erişebileceği veritabanlarının bir listesini de belirtebilirsiniz. Varsayılan olarak, tüm veritabanları kullanıcıya açıktır. `default` veritabanını belirtebilirsiniz. Bu durumda, kullanıcı varsayılan olarak bu veritabanına erişim alır.
+
+İsteğe bağlı `<allow_dictionaries>` bölümünde, kullanıcının erişebileceği sözlüklerin bir listesini de belirtebilirsiniz. Varsayılan olarak, tüm sözlükler kullanıcıya açıktır.
+
+`system` veritabanına erişime her zaman izin verilir (çünkü bu veritabanı sorguları işlemek için kullanılır).
+
+Tek tek veritabanlarına erişime izin verilmese bile, kullanıcı `SHOW` sorgularını veya sistem tablolarını kullanarak tüm veritabanlarının ve tabloların bir listesini alabilir.
+
+Veritabanı erişimi, [readonly](settings/permissions_for_queries.md#settings_readonly) ayarı ile ilgili değildir. Bir veritabanına tam erişim, diğerine ise `readonly` erişim veremezsiniz.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/access_rights/)
diff --git a/docs/tr/operations/backup.md b/docs/tr/operations/backup.md
new file mode 100644
index 00000000000..5d1d806e5dd
--- /dev/null
+++ b/docs/tr/operations/backup.md
@@ -0,0 +1,41 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 49
+toc_title: Veri Yedekleme
+---
+
+# Veri Yedekleme {#data-backup}
+
+Karşın [çoğalma](../engines/table_engines/mergetree_family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [50 GB'den fazla veri içeren MergeTree benzeri bir motorla tabloları bırakamazsınız](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Ancak, bu önlemler olası tüm durumları kapsamaz ve atlatılabilir.
+
+Olası insan hatalarını etkili bir şekilde azaltmak için, verilerinizi yedekleme ve geri yükleme stratejinizi **önceden** dikkatle hazırlamanız gerekir.
+
+Her şirketin farklı kaynakları ve iş gereksinimleri vardır; bu nedenle her duruma uyacak evrensel bir ClickHouse yedekleme ve geri yükleme çözümü yoktur. Bir gigabayt veri için işe yarayan, onlarca petabayt için muhtemelen çalışmayacaktır. Aşağıda tartışılacak olan, kendi artıları ve eksileri olan çeşitli olası yaklaşımlar vardır. Çeşitli eksikliklerini telafi etmek için yalnızca bir tane yerine birkaç yaklaşımı birlikte kullanmak iyi bir fikirdir.
+
+!!! note "Not"
+    Bir şeyi yedeklediyseniz ve geri yüklemeyi hiç denemediyseniz, aslında ihtiyacınız olduğunda geri yüklemenin düzgün çalışmayacağını (veya en azından işin tahammül edebileceğinden daha uzun süreceğini) unutmayın. Bu nedenle, seçtiğiniz yedekleme yaklaşımı ne olursa olsun, geri yükleme işlemini de otomatikleştirdiğinizden emin olun ve bunu düzenli olarak yedek bir ClickHouse kümesinde uygulayın.
+
+## Kaynak Verileri Başka Bir Yerde Çoğaltma {#duplicating-source-data-somewhere-else}
+
+Genellikle Clickhouse'a alınan veriler, [Apache Kafka](https://kafka.apache.org) gibi bir tür kalıcı kuyruk yoluyla teslim edilir.
Bu durumda, Clickhouse'a yazılırken aynı veri akışını okuyacak ve bir yerde soğuk depoda depolayacak ek bir abone kümesi yapılandırmak mümkündür. Çoğu şirket zaten bir nesne deposu veya dağıtılmış bir dosya sistemi gibi olabilecek bazı varsayılan önerilen soğuk depolamaya sahiptir [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html). + +## Dosya Sistemi Anlık Görüntüleri {#filesystem-snapshots} + +Bazı yerel dosya sistemleri anlık görüntü işlevselliği sağlar (örneğin, [ZFS](https://en.wikipedia.org/wiki/ZFS)), ancak canlı sorguları sunmak için en iyi seçenek olmayabilir. Olası bir çözüm, bu tür dosya sistemi ile ek kopyalar oluşturmak ve bunları [Dağılı](../engines/table_engines/special/distributed.md) için kullanılan tablolar `SELECT` sorgular. Bu tür yinelemelerdeki anlık görüntüler, verileri değiştiren sorguların erişemeyeceği bir yerde olacaktır. Bonus olarak, bu yinelemeler, sunucu başına daha fazla disk eklenmiş özel donanım yapılandırmalarına sahip olabilir ve bu da uygun maliyetli olabilir. + +## clickhouse-fotokopi makinesi {#clickhouse-copier} + +[clickhouse-fotokopi makinesi](utilities/clickhouse-copier.md) başlangıçta yeniden shard petabyte boyutlu tablolar için oluşturulan çok yönlü bir araçtır. Ayrıca yedekleme için kullanılan ve güvenilir clickhouse tablolar ve kümeler arasında veri kopyalar çünkü amaçlar geri olabilir. + +Daha küçük veri hacimleri için, basit bir `INSERT INTO ... SELECT ...` uzak tablolara da çalışabilir. + +## Parçalar ile manipülasyonlar {#manipulations-with-parts} + +ClickHouse kullanarak sağlar `ALTER TABLE ... FREEZE PARTITION ...` tablo bölümleri yerel bir kopyasını oluşturmak için sorgu. Bu hardlinks kullanarak uygulanır `/var/lib/clickhouse/shadow/` klasör, bu yüzden genellikle eski veriler için ekstra disk alanı tüketmez. Oluşturulan dosyaların kopyaları ClickHouse server tarafından işlenmez, bu yüzden onları orada bırakabilirsiniz: herhangi bir ek harici sistem gerektirmeyen basit bir yedeklemeniz olacak, ancak yine de donanım sorunlarına eğilimli olacaktır. Bu nedenle, bunları uzaktan başka bir konuma kopyalamak ve ardından yerel kopyaları kaldırmak daha iyidir. Dağıtılmış dosya sistemleri ve nesne depoları bunun için hala iyi bir seçenektir, ancak yeterince büyük kapasiteye sahip normal ekli dosya sunucuları da işe yarayabilir (bu durumda aktarım ağ dosya sistemi veya belki de [rsync](https://en.wikipedia.org/wiki/Rsync)). + +Bölüm işlemleriyle ilgili sorgular hakkında daha fazla bilgi için bkz. [ALTER belgeleri](../sql_reference/statements/alter.md#alter_manipulations-with-partitions). + +Bu yaklaşımı otomatikleştirmek için üçüncü taraf bir araç kullanılabilir: [clickhouse-yedekleme](https://github.com/AlexAkulov/clickhouse-backup). + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/backup/) diff --git a/docs/tr/operations/configuration_files.md b/docs/tr/operations/configuration_files.md new file mode 100644 index 00000000000..fda72e6ba21 --- /dev/null +++ b/docs/tr/operations/configuration_files.md @@ -0,0 +1,57 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 50 +toc_title: "Yap\u0131land\u0131rma Dosyalar\u0131" +--- + +# Yapılandırma Dosyaları {#configuration_files} + +ClickHouse Çoklu dosya yapılandırma yönetimini destekler. Ana sunucu yapılandırma dosyası `/etc/clickhouse-server/config.xml`. Diğer dosyalar içinde olmalıdır `/etc/clickhouse-server/config.d` dizin. + +!!! 
note "Not"
+    Tüm yapılandırma dosyaları XML biçiminde olmalıdır. Ayrıca, genellikle aynı kök öğeye (`<yandex>`) sahip olmalıdırlar.
+
+Ana yapılandırma dosyasında belirtilen bazı ayarlar diğer yapılandırma dosyalarında geçersiz kılınabilir. Bunun için, bu yapılandırma dosyalarının öğelerinde `replace` veya `remove` öznitelikleri belirtilebilir.
+
+Her ikisi de belirtilmezse, öğelerin içeriği yinelemeli olarak birleştirilir ve yinelenen alt öğelerin değerleri değiştirilir.
+
+Eğer `replace` belirtilirse, öğenin tamamı belirtilen öğeyle değiştirilir.
+
+Eğer `remove` belirtilirse, öğe silinir.
+
+Yapılandırma ayrıca “substitutions” (ikameler) tanımlayabilir. Bir öğe `incl` özniteliğine sahipse, dosyadaki karşılık gelen ikame, değer olarak kullanılır. Varsayılan olarak, ikameleri içeren dosyanın yolu `/etc/metrika.xml` olur. Bu, sunucu yapılandırmasındaki [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from) öğesiyle değiştirilebilir. İkame değerleri, bu dosyadaki `/yandex/substitution_name` öğelerinde belirtilir. `incl` içinde belirtilen bir ikame mevcut değilse, bu durum günlüğe kaydedilir. Clickhouse'un eksik ikameleri günlüğe kaydetmesini önlemek için `optional="true"` özniteliğini belirtin (örneğin, [makrolar](server_configuration_parameters/settings.md) ayarları için).
+
+İkameler ZooKeeper'dan da yapılabilir. Bunun için `from_zk = "/path/to/node"` özniteliğini belirtin. Öğenin değeri, Zookeeper'daki `/path/to/node` düğümünün içeriğiyle değiştirilir. Ayrıca ZooKeeper düğümüne bir XML alt ağacının tamamını da koyabilirsiniz; bu, kaynak öğeye olduğu gibi eklenir.
+
+`config.xml` dosyasında, kullanıcı ayarlarını, profilleri ve kotaları içeren ayrı bir yapılandırma belirtilebilir. Bu yapılandırmanın göreli yolu `users_config` öğesinde ayarlanır. Varsayılan olarak `users.xml` kullanılır. `users_config` atlanırsa, kullanıcı ayarları, profiller ve kotalar doğrudan `config.xml` içinde belirtilir.
+
+Kullanıcı yapılandırması da `config.xml` ve `config.d/` gibi ayrı dosyalara bölünebilir.
+Dizin adı, `users_config` ayarının `.xml` soneki çıkarılıp yerine `.d` eklenmiş hali olarak tanımlanır.
+`users_config` varsayılan olarak `users.xml` olduğundan, varsayılan olarak `users.d` dizini kullanılır.
+Örneğin, her kullanıcı için şöyle ayrı bir yapılandırma dosyanız olabilir:
+
+``` bash
+$ cat /etc/clickhouse-server/users.d/alice.xml
+```
+
+``` xml
+<yandex>
+    <users>
+        <alice>
+            <profile>analytics</profile>
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+            <password_sha256_hex>...</password_sha256_hex>
+            <quota>analytics</quota>
+        </alice>
+    </users>
+</yandex>
+```
+
+Sunucu ayrıca, her yapılandırma dosyası için başlatma sırasında `file-preprocessed.xml` dosyaları üretir. Bu dosyalar, uygulanmış tüm ikameleri ve geçersiz kılmaları içerir ve bilgi amaçlı kullanım içindir. Yapılandırma dosyalarında ZooKeeper ikameleri kullanılmış ancak sunucu başlangıcında ZooKeeper kullanılamıyorsa, sunucu yapılandırmayı önceden işlenmiş dosyadan yükler.
+
+Sunucu, yapılandırma dosyalarındaki değişikliklerin yanı sıra, ikameleri ve geçersiz kılmaları gerçekleştirirken kullanılan dosyaları ve ZooKeeper düğümlerini izler ve kullanıcılar ile kümeler için ayarları anında yeniden yükler. Bu, sunucuyu yeniden başlatmadan kümeyi, kullanıcıları ve onların ayarlarını değiştirebileceğiniz anlamına gelir.
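+
+Örneğin, ana yapılandırmadaki bir öğe `config.d` dizinindeki bir dosyayla geçersiz kılınabilir. Aşağıdaki parça varsayımsal bir taslaktır: dosya adı ve `max_connections` değeri yalnızca örnek amaçlıdır; `replace` özniteliği, öğenin birleştirilmek yerine tamamen değiştirilmesini sağlar:
+
+``` xml
+<!-- /etc/clickhouse-server/config.d/max_connections.xml (varsayımsal dosya) -->
+<yandex>
+    <!-- Ana config.xml içindeki max_connections öğesinin tamamını değiştirir (örnek değer) -->
+    <max_connections replace="replace">2048</max_connections>
+</yandex>
+```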
+ +[Orijinal makale](https://clickhouse.tech/docs/en/operations/configuration_files/) diff --git a/docs/tr/operations/index.md b/docs/tr/operations/index.md new file mode 100644 index 00000000000..e20eefc1ab0 --- /dev/null +++ b/docs/tr/operations/index.md @@ -0,0 +1,28 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: Harekat +toc_priority: 41 +toc_title: "Giri\u015F" +--- + +# Harekat {#operations} + +ClickHouse işlemleri kılavuzu aşağıdaki ana bölümlerden oluşur: + +- [Gereksinimler](requirements.md) +- [İzleme](monitoring.md) +- [Arıza](troubleshooting.md) +- [Kullanım Önerileri](tips.md) +- [Güncelleme Prosedürü](update.md) +- [Erişim Hakları](access_rights.md) +- [Veri Yedekleme](backup.md) +- [Yapılandırma Dosyaları](configuration_files.md) +- [Kotalar](quotas.md) +- [Sistem Tabloları](system_tables.md) +- [Sunucu Yapılandırma Parametreleri](server_configuration_parameters/index.md) +- [Donanımınızı ClickHouse İle Test Etme](performance_test.md) +- [Ayarlar](settings/index.md) +- [Programlar](utilities/index.md) + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/) diff --git a/docs/tr/operations/monitoring.md b/docs/tr/operations/monitoring.md new file mode 100644 index 00000000000..eb5b7bd6dc8 --- /dev/null +++ b/docs/tr/operations/monitoring.md @@ -0,0 +1,46 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 45 +toc_title: "\u0130zleme" +--- + +# İzleme {#monitoring} + +İzleyebilirsiniz: + +- Donanım kaynaklarının kullanımı. +- ClickHouse sunucu metrikleri. + +## Kaynak Kullanımı {#resource-utilization} + +ClickHouse, donanım kaynaklarının durumunu tek başına izlemez. + +İzleme ayarlamak için önerilir : + +- İşlemcilerde yük ve sıcaklık. + + Kullanabilirsiniz [dmesg](https://en.wikipedia.org/wiki/Dmesg), [turbostat](https://www.linux.org/docs/man8/turbostat.html) ya da diğer aletler. + +- Depolama sistemi, RAM ve ağ kullanımı. + +## ClickHouse Sunucu Metrikleri {#clickhouse-server-metrics} + +ClickHouse sunucu kendini devlet izleme için araçlar gömülü vardır. + +Sunucu olaylarını izlemek için sunucu günlüklerini kullanın. Görmek [kaydedici](server_configuration_parameters/settings.md#server_configuration_parameters-logger) yapılandırma dosyasının bölümü. + +ClickHouse toplar: + +- Sunucunun hesaplama kaynaklarını nasıl kullandığına dair farklı metrikler. +- Sorgu işleme ile ilgili ortak istatistikler. + +Metrikleri şu adreste bulabilirsiniz: [sistem.metrik](../operations/system_tables.md#system_tables-metrics), [sistem.etkinlik](../operations/system_tables.md#system_tables-events), ve [sistem.asynchronous\_metrics](../operations/system_tables.md#system_tables-asynchronous_metrics) Tablolar. + +Clickhouse'u metrikleri dışa aktaracak şekilde yapılandırabilirsiniz [Grafit](https://github.com/graphite-project). Görmek [Graf sectionit bölümü](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) ClickHouse sunucu yapılandırma dosyasında. Metriklerin dışa aktarımını yapılandırmadan önce, grafit'i resmi olarak takip ederek ayarlamanız gerekir [kılavuz](https://graphite.readthedocs.io/en/latest/install.html). + +Clickhouse'u metrikleri dışa aktaracak şekilde yapılandırabilirsiniz [Prometheus](https://prometheus.io). Görmek [Prometheus bölümü](server_configuration_parameters/settings.md#server_configuration_parameters-prometheus) ClickHouse sunucu yapılandırma dosyasında. 
Metriklerin dışa aktarılmasını yapılandırmadan önce, prometheus'u yetkililerini takip ederek ayarlamanız gerekir [kılavuz](https://prometheus.io/docs/prometheus/latest/installation/). + +Ayrıca, http API aracılığıyla sunucu kullanılabilirliğini izleyebilirsiniz. Sen sendd the `HTTP GET` istek için `/ping`. Sunucu mevcutsa, yanıt verir `200 OK`. + +Bir küme yapılandırmasındaki sunucuları izlemek için [max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) parametre ve HTTP kaynağını kullanın `/replicas_status`. Bir istek için `/replicas_status` dönüşler `200 OK` çoğaltma kullanılabilir ve diğer yinelemeler gecikmiş değil. Bir çoğaltma gecikirse, döndürür `503 HTTP_SERVICE_UNAVAILABLE` boşluk hakkında bilgi ile. diff --git a/docs/tr/operations/optimizing_performance/index.md b/docs/tr/operations/optimizing_performance/index.md new file mode 100644 index 00000000000..9d39c082a98 --- /dev/null +++ b/docs/tr/operations/optimizing_performance/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: "Performans\u0131 Optimize Etme" +toc_priority: 52 +--- + + diff --git a/docs/tr/operations/optimizing_performance/sampling_query_profiler.md b/docs/tr/operations/optimizing_performance/sampling_query_profiler.md new file mode 100644 index 00000000000..0d86dfad5ef --- /dev/null +++ b/docs/tr/operations/optimizing_performance/sampling_query_profiler.md @@ -0,0 +1,64 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 54 +toc_title: "Sorgu Profili Olu\u015Fturma" +--- + +# Örnekleme Sorgusu Profiler {#sampling-query-profiler} + +ClickHouse, sorgu yürütülmesini analiz etmeyi sağlayan örnekleme profiler'i çalıştırır. Profiler kullanarak sorgu yürütme sırasında en sık kullanılan kaynak kodu yordamları bulabilirsiniz. Boşta kalma süresi de dahil olmak üzere harcanan CPU zamanını ve duvar saati zamanını izleyebilirsiniz. + +Profiler kullanmak için: + +- Kurulum [trace\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) sunucu yapılandırması bölümü. + + Bu bölüm yapılandırır [trace\_log](../../operations/system_tables.md#system_tables-trace_log) profiler işleyişinin sonuçlarını içeren sistem tablosu. Varsayılan olarak yapılandırılmıştır. Bu tablodaki verilerin yalnızca çalışan bir sunucu için geçerli olduğunu unutmayın. Sunucu yeniden başlatıldıktan sonra ClickHouse tabloyu temizlemez ve depolanan tüm sanal bellek adresi geçersiz hale gelebilir. + +- Kurulum [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) veya [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) ayarlar. Her iki ayar da aynı anda kullanılabilir. + + Bu ayarlar, profiler zamanlayıcılarını yapılandırmanıza izin verir. Bunlar oturum ayarları olduğundan, tüm sunucu, bireysel kullanıcılar veya kullanıcı profilleri, etkileşimli oturumunuz ve her bir sorgu için farklı örnekleme sıklığı elde edebilirsiniz. + +Varsayılan örnekleme frekansı saniyede bir örnektir ve hem CPU hem de gerçek zamanlayıcılar etkindir. Bu frekans, ClickHouse kümesi hakkında yeterli bilgi toplamaya izin verir. Aynı zamanda, bu sıklıkla çalışan profiler, ClickHouse sunucusunun performansını etkilemez. Her bir sorguyu profillemeniz gerekiyorsa, daha yüksek örnekleme frekansı kullanmayı deneyin. 
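+
+Örneğin, her iki zamanlayıcının periyodu bir ayarlar profilinde belirtilebilir. Aşağıdaki parça varsayımsal bir taslaktır; 10000000 ns (10 ms) değeri yalnızca örnek amaçlıdır:
+
+``` xml
+<profiles>
+    <default>
+        <!-- Varsayılan 1 saniye yerine 10 ms'lik örnekleme periyodu (örnek değerler) -->
+        <query_profiler_real_time_period_ns>10000000</query_profiler_real_time_period_ns>
+        <query_profiler_cpu_time_period_ns>10000000</query_profiler_cpu_time_period_ns>
+    </default>
+</profiles>
+```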
+ +Analiz etmek `trace_log` sistem tablosu: + +- Yüklemek `clickhouse-common-static-dbg` paket. Görmek [DEB paketlerinden yükleyin](../../getting_started/install.md#install-from-deb-packages). + +- Tarafından iç gözlem işlevlerine izin ver [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) ayar. + + Güvenlik nedenleriyle, iç gözlem işlevleri varsayılan olarak devre dışı bırakılır. + +- Kullan... `addressToLine`, `addressToSymbol` ve `demangle` [iç gözlem fonksiyonları](../../sql_reference/functions/introspection.md) ClickHouse kodu işlev adları ve konumlarını almak için. Bazı sorgu için bir profil almak için, `trace_log` Tablo. Bireysel fonksiyonları bütün yığın izleri ya da veri toplama yapabilirsiniz. + +Görselleştirmeniz gerekiyorsa `trace_log` bilgi, deneyin [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) ve [speedscope](https://github.com/laplab/clickhouse-speedscope). + +## Örnek {#example} + +Bu örnekte biz: + +- Filtre `trace_log` bir sorgu tanımlayıcısı ve geçerli tarihe göre veri. + +- Yığın izleme ile toplama. + +- İç gözlem işlevlerini kullanarak, bir rapor alacağız: + + - Sembollerin isimleri ve karşılık gelen kaynak kodu işlevleri. + - Bu işlevlerin kaynak kodu konumları. + + + +``` sql +SELECT + count(), + arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym +FROM system.trace_log +WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today()) +GROUP BY trace +ORDER BY count() DESC +LIMIT 10 +``` + +``` text +{% include "operations/performance/sampling_query_profiler_example_result.txt" %} +``` diff --git a/docs/tr/operations/performance/sampling_query_profiler_example_result.txt b/docs/tr/operations/performance/sampling_query_profiler_example_result.txt new file mode 120000 index 00000000000..58c5abe7122 --- /dev/null +++ b/docs/tr/operations/performance/sampling_query_profiler_example_result.txt @@ -0,0 +1 @@ +../../../en/operations/performance/sampling_query_profiler_example_result.txt \ No newline at end of file diff --git a/docs/tr/operations/performance_test.md b/docs/tr/operations/performance_test.md new file mode 100644 index 00000000000..dbfbc39998d --- /dev/null +++ b/docs/tr/operations/performance_test.md @@ -0,0 +1,82 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 54 +toc_title: "Donan\u0131m Test" +--- + +# Donanımınızı ClickHouse ile Test etme {#how-to-test-your-hardware-with-clickhouse} + +Bu talimat ile ClickHouse paketlerinin kurulumu olmadan herhangi bir sunucuda temel ClickHouse performans testi çalıştırabilirsiniz. + +1. Gitmek “commits” sayfa: https://github.com/ClickHouse/ClickHouse/commits/master + +2. Yeşil ile ilk yeşil onay işareti veya kırmızı Haç tıklayın “ClickHouse Build Check” ve tıklayın “Details” link yakın “ClickHouse Build Check”. Bazı taahhütlerde böyle bir bağlantı yoktur, örneğin belgelerle taahhüt eder. Bu durumda, bu bağlantıya sahip en yakın taahhüt seçin. + +3. Bağlantıyı kopyala “clickhouse” amd64 veya aarch64 için ikili. + +4. sunucuya ssh ve wget ile indirin: + + + + # For amd64: + wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse + # For aarch64: + wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse + # Then do: + chmod a+x clickhouse + +1. 
İndir yapılandırmaları: + + + + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml + mkdir config.d + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml + +1. Ben benchmarkch filesmark dosyaları indir: + + + + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh + chmod a+x benchmark-new.sh + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql + +1. İndir göre test verileri [Üye.Metrica veri kümesi](../getting_started/example_datasets/metrica.md) talimat (“hits” 100 milyon satır içeren tablo). + + + + wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz + tar xvf hits_100m_obfuscated_v1.tar.xz -C . + mv hits_100m_obfuscated_v1/* . + +1. Sunucuyu Çalıştır: + + + + ./clickhouse server + +1. Verileri kontrol edin: başka bir terminaldeki sunucuya ssh + + + + ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" + 100000000 + +1. Edit the benchmark-new.sh, değişim `clickhouse-client` -e doğru `./clickhouse client` ve Ekle `–-max_memory_usage 100000000000` parametre. + + + + mcedit benchmark-new.sh + +1. Ben benchmarkch runmark Çalıştır: + + + + ./benchmark-new.sh hits_100m_obfuscated + +1. Donanım yapılandırmanız hakkındaki numaraları ve bilgileri şu adrese gönderin clickhouse-feedback@yandex-team.com + +Tüm sonuçlar burada yayınlanmaktadır: https://clickhouse.teknoloji / benchmark\_hardware.html diff --git a/docs/tr/operations/quotas.md b/docs/tr/operations/quotas.md new file mode 100644 index 00000000000..e6b1de2b7f8 --- /dev/null +++ b/docs/tr/operations/quotas.md @@ -0,0 +1,112 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 51 +toc_title: Kotalar +--- + +# Kotalar {#quotas} + +Kotalar, belirli bir süre boyunca kaynak kullanımını sınırlamanıza veya kaynak kullanımını izlemenize izin verir. +Kotalar genellikle kullanıcı yapılandırmasında ayarlanır ‘users.xml’. + +Sistem ayrıca tek bir sorgunun karmaşıklığını sınırlamak için bir özelliğe sahiptir. Bölümüne bakınız “Restrictions on query complexity”). + +Sorgu karmaşıklığı kısıtlamalarının aksine, kotalar: + +- Tek bir sorguyu sınırlamak yerine, belirli bir süre boyunca çalıştırılabilen sorgu kümesine kısıtlamalar yerleştirin. +- Dağıtılmış sorgu işleme için tüm uzak sunucularda harcanan kaynaklar için hesap. + +Bölümüne bakalım ‘users.xml’ kotaları tanımlayan dosya. + +``` xml + + + + + + + + 3600 + + + 0 + 0 + 0 + 0 + 0 + + +``` + +Varsayılan olarak, kota, kullanımı sınırlamadan her saat için kaynak tüketimini izler. +Her aralık için hesaplanan kaynak tüketimi, her istekten sonra sunucu günlüğüne çıktıdır. + +``` xml + + + + + 3600 + + 1000 + 100 + 1000000000 + 100000000000 + 900 + + + + 86400 + + 10000 + 1000 + 5000000000 + 500000000000 + 7200 + + +``` + +İçin ‘statbox’ kota, kısıtlamalar her saat ve her 24 saat (86.400 saniye) için ayarlanır. Aralık saydım, bir uygulama başlangıç zamanı tamir anda tanımlanmış. Başka bir deyişle, 24 saatlik Aralık mutlaka gece yarısı başlamaz. + +Aralık sona erdiğinde, toplanan tüm değerler temizlenir. 
Bir sonraki saat için kota hesaplaması başlar.
+
+İşte kısıtlanabilecek miktarlar:
+
+`queries` – The total number of requests.
+
+`errors` – The number of queries that threw an exception.
+
+`result_rows` – The total number of rows given as a result.
+
+`read_rows` – The total number of source rows read from tables for running the query on all remote servers.
+
+`execution_time` – The total query execution time, in seconds (wall time).
+
+En az bir zaman aralığı için sınır aşılırsa, hangi kısıtlamanın hangi aralık için aşıldığını ve yeni aralığın ne zaman başlayacağını (sorguların yeniden gönderilebileceği zamanı) belirten bir metinle bir istisna atılır.
+
+Kota, birden fazla anahtar için kaynakların bağımsız olarak raporlanmasını sağlayan “quota key” özelliğini kullanabilir. İşte bunun bir örneği:
+
+``` xml
+<web_global>
+    <keyed />
+</web_global>
+```
+
+Kotalar, yapılandırmanın ‘users’ bölümünde kullanıcılara atanır. “Access rights” bölümüne bakınız.
+
+Dağıtılmış sorgu işleme için, birikmiş tutarlar istekte bulunan sunucuda depolanır. Yani kullanıcı başka bir sunucuya giderse, oradaki kota “start over” yapar (baştan başlar).
+
+Sunucu yeniden başlatıldığında, kotalar sıfırlanır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/quotas/)
diff --git a/docs/tr/operations/requirements.md b/docs/tr/operations/requirements.md
new file mode 100644
index 00000000000..c77159d11d7
--- /dev/null
+++ b/docs/tr/operations/requirements.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 44
+toc_title: Gereksinimler
+---
+
+# Gereksinimler {#requirements}
+
+## CPU {#cpu}
+
+Önceden oluşturulmuş deb paketlerinden kurulum için, x86\_64 mimarisine sahip ve SSE 4.2 yönergelerini destekleyen bir CPU kullanın. Clickhouse'u SSE 4.2'yi desteklemeyen işlemcilerle ya da AArch64 veya PowerPC64LE mimarisinde çalıştırmak için, Clickhouse'u kaynaklardan derlemeniz gerekir.
+
+ClickHouse paralel veri işleme uygular ve mevcut tüm donanım kaynaklarını kullanır. Bir işlemci seçerken, Clickhouse'un çok sayıda çekirdeğe ve daha düşük saat hızına sahip konfigürasyonlarda, daha az çekirdeğe ve daha yüksek saat hızına sahip konfigürasyonlara göre daha verimli çalıştığını göz önünde bulundurun. Örneğin, 2600 MHz'lik 16 çekirdek, 3600 MHz'lik 8 çekirdeğe tercih edilir.
+
+**Turbo Boost** ve **hyper-threading** teknolojilerinin kullanılması tavsiye edilir. Tipik bir iş yükünde performansı önemli ölçüde artırır.
+
+## RAM {#ram}
+
+Önemsiz olmayan sorgular gerçekleştirmek için en az 4 GB RAM kullanmanızı öneririz. ClickHouse sunucusu çok daha az miktarda RAM ile de çalışabilir, ancak sorguları işlemek için bellek gerektirir.
+
+Gerekli RAM hacmi aşağıdakilere bağlıdır:
+
+- Sorguların karmaşıklığı.
+- Sorgularda işlenen veri miktarı.
+
+Gerekli RAM hacmini hesaplamak için, [GROUP BY](../sql_reference/statements/select.md#select-group-by-clause), [DISTINCT](../sql_reference/statements/select.md#select-distinct), [JOIN](../sql_reference/statements/select.md#select-join) ve kullandığınız diğer işlemler için geçici verilerin boyutunu tahmin etmelisiniz.
+
+ClickHouse, geçici veriler için harici bellek kullanabilir. Ayrıntılar için bkz. [Harici bellekte grupla](../sql_reference/statements/select.md#select-group-by-in-external-memory).
+
+## Takas Dosyası {#swap-file}
+
+Üretim ortamları için takas dosyasını devre dışı bırakın.
+
+## Depolama Alt Sistemi {#storage-subsystem}
+
+Clickhouse'u yüklemek için 2 GB boş disk alanına sahip olmanız gerekir.
+ +Verileriniz için gereken depolama hacmi ayrı ayrı hesaplanmalıdır. Değerlendirme şunları içermelidir: + +- Veri hacminin tahmini. + + Verilerin bir örneğini alabilir ve ondan bir satırın ortalama boyutunu alabilirsiniz. Ardından değeri, depolamayı planladığınız satır sayısıyla çarpın. + +- Veri sıkıştırma katsayısı. + + Veri sıkıştırma katsayısını tahmin etmek için, verilerinizin bir örneğini Clickhouse'a yükleyin ve verilerin gerçek boyutunu depolanan tablonun boyutuyla karşılaştırın. Örneğin, clickstream verileri genellikle 6-10 kez sıkıştırılır. + +Saklanacak verilerin son hacmini hesaplamak için, sıkıştırma katsayısını tahmini veri hacmine uygulayın. Verileri birkaç yinelemede depolamayı planlıyorsanız, tahmini birimi yinelemelerin sayısıyla çarpın. + +## Ağ {#network} + +Mümkünse, 10g veya daha yüksek sınıftaki ağları kullanın. + +Ağ bant genişliği, büyük miktarda Ara veriyle dağıtılmış sorguları işlemek için kritik öneme sahiptir. Ayrıca, ağ hızı çoğaltma işlemlerini etkiler. + +## Yazılım {#software} + +ClickHouse öncelikle Linux işletim sistemleri ailesi için geliştirilmiştir. Önerilen Linux dağıtımı Ubuntu'dur. Bu `tzdata` paket sisteme kurulmalıdır. + +ClickHouse diğer işletim sistemi ailelerinde de çalışabilir. Ayrıntıları görün [Başlarken](../getting_started/index.md) belgelerin bölümü. diff --git a/docs/tr/operations/server_configuration_parameters/index.md b/docs/tr/operations/server_configuration_parameters/index.md new file mode 100644 index 00000000000..f1a20b924f0 --- /dev/null +++ b/docs/tr/operations/server_configuration_parameters/index.md @@ -0,0 +1,19 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: "Sunucu Yap\u0131land\u0131rma Parametreleri" +toc_priority: 54 +toc_title: "Giri\u015F" +--- + +# Sunucu yapılandırma parametreleri {#server-settings} + +Bu bölüm, oturum veya sorgu düzeyinde değiştirilemeyen sunucu ayarlarının açıklamalarını içerir. + +Bu ayarlar saklanır `config.xml` ClickHouse sunucusunda dosya. + +Diğer ayarlar aşağıda açıklanmıştır “[Ayarlar](../settings/index.md#settings)” bölme. + +Ayarları incelemeden önce, [Yapılandırma dosyaları](../configuration_files.md#configuration_files) bölüm ve ikame kullanımı (not `incl` ve `optional` öznitelik). + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/) diff --git a/docs/tr/operations/server_configuration_parameters/settings.md b/docs/tr/operations/server_configuration_parameters/settings.md new file mode 100644 index 00000000000..bc0a464c511 --- /dev/null +++ b/docs/tr/operations/server_configuration_parameters/settings.md @@ -0,0 +1,896 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 57 +toc_title: "Sunucu Ayarlar\u0131" +--- + +# Sunucu Ayarları {#server-settings} + +## buıltın\_dıctıonarıes\_reload\_ınterval {#builtin-dictionaries-reload-interval} + +Dahili sözlükleri yeniden yüklemeden önce saniye cinsinden Aralık. + +ClickHouse, her x saniyede bir yerleşik sözlükleri yeniden yükler. Bu, sözlükleri düzenlemeyi mümkün kılar “on the fly” sunucuyu yeniden başlatmadan. + +Varsayılan değer: 3600. + +**Örnek** + +``` xml +3600 +``` + +## sıkıştırma {#server-settings-compression} + +İçin veri sıkıştırma ayarları [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)- motor masaları. + +!!! warning "Uyarıcı" + Sadece ClickHouse kullanmaya başladıysanız kullanmayın. + +Yapılandırma şablonu: + +``` xml + + + ... 
+ ... + ... + + ... + +``` + +`` alanlar: + +- `min_part_size` – The minimum size of a data part. +- `min_part_size_ratio` – The ratio of the data part size to the table size. +- `method` – Compression method. Acceptable values: `lz4` veya `zstd`. + +Birden fazla yapılandırabilirsiniz `` bölmeler. + +Koşullar yerine getirildiğinde eylemler: + +- Bir veri parçası bir koşul kümesiyle eşleşirse, ClickHouse belirtilen sıkıştırma yöntemini kullanır. +- Bir veri parçası birden çok koşul kümesiyle eşleşirse, ClickHouse ilk eşleşen koşul kümesini kullanır. + +Bir veri bölümü için herhangi bir koşul karşılanmazsa, ClickHouse `lz4` sıkıştırma. + +**Örnek** + +``` xml + + + 10000000000 + 0.01 + zstd + + +``` + +## default\_database {#default-database} + +Varsayılan veritabanı. + +Veritabanlarının bir listesini almak için [SHOW DATABASES](../../sql_reference/statements/show.md#show-databases) sorgu. + +**Örnek** + +``` xml +default +``` + +## default\_profile {#default-profile} + +Varsayılan ayarlar profili. + +Ayarlar profilleri parametrede belirtilen dosyada bulunur `user_config`. + +**Örnek** + +``` xml +default +``` + +## dictionaries\_config {#server_configuration_parameters-dictionaries_config} + +Dış sözlükler için yapılandırma dosyasının yolu. + +Yol: + +- Mutlak yolu veya sunucu yapılandırma dosyasına göre yolu belirtin. +- Yol joker karakterler içerebilir \* ve ?. + +Ayrıca bakınız “[Dış söz dictionarieslükler](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)”. + +**Örnek** + +``` xml +*_dictionary.xml +``` + +## dictionaries\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load} + +Sözlüklerin tembel yüklenmesi. + +Eğer `true`, sonra her sözlük ilk kullanımda oluşturulur. Sözlük oluşturma başarısız olursa, sözlüğü kullanan işlev bir özel durum atar. + +Eğer `false`, sunucu başladığında tüm sözlükler oluşturulur ve bir hata varsa, sunucu kapanır. + +Varsayılan değer `true`. + +**Örnek** + +``` xml +true +``` + +## format\_schema\_path {#server_configuration_parameters-format_schema_path} + +Dizin için şemalar gibi giriş verileri için şemaları ile yolu [CapnProto](../../interfaces/formats.md#capnproto) biçimli. + +**Örnek** + +``` xml + + format_schemas/ +``` + +## grafit {#server_configuration_parameters-graphite} + +Veri gönderme [Grafit](https://github.com/graphite-project). + +Ayarlar: + +- host – The Graphite server. +- port – The port on the Graphite server. +- interval – The interval for sending, in seconds. +- timeout – The timeout for sending data, in seconds. +- root\_path – Prefix for keys. +- metrics – Sending data from the [sistem.metrik](../../operations/system_tables.md#system_tables-metrics) Tablo. +- events – Sending deltas data accumulated for the time period from the [sistem.etkinlik](../../operations/system_tables.md#system_tables-events) Tablo. +- events\_cumulative – Sending cumulative data from the [sistem.etkinlik](../../operations/system_tables.md#system_tables-events) Tablo. +- asynchronous\_metrics – Sending data from the [sistem.asynchronous\_metrics](../../operations/system_tables.md#system_tables-asynchronous_metrics) Tablo. + +Birden fazla yapılandırabilirsiniz `` yanlar. Örneğin, bunu farklı aralıklarla farklı veri göndermek için kullanabilirsiniz. + +**Örnek** + +``` xml + + localhost + 42000 + 0.1 + 60 + one_min + true + true + false + true + +``` + +## graphite\_rollup {#server_configuration_parameters-graphite-rollup} + +Grafit için inceltme verileri için ayarlar. + +Daha fazla ayrıntı için bkz. 
[Graphıtemergetree](../../engines/table_engines/mergetree_family/graphitemergetree.md). + +**Örnek** + +``` xml + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + +``` + +## http\_port/https\_port {#http-porthttps-port} + +Http(ler) üzerinden sunucuya bağlanmak için bağlantı noktası. + +Eğer `https_port` belirtilen, [openSSL](#server_configuration_parameters-openssl) yapılandırılmalıdır. + +Eğer `http_port` belirtilmişse, OpenSSL yapılandırması ayarlanmış olsa bile göz ardı edilir. + +**Örnek** + +``` xml +0000 +``` + +## http\_server\_default\_response {#server_configuration_parameters-http_server_default_response} + +ClickHouse HTTP (s) sunucusuna eriştiğinizde varsayılan olarak gösterilen sayfa. +Varsayılan değer “Ok.” (sonunda bir çizgi besleme ile) + +**Örnek** + +Açıyor `https://tabix.io/` eriş whenirken `http://localhost: http_port`. + +``` xml + +
+<http_server_default_response>
+    <![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]>
+</http_server_default_response>
    +``` + +## include\_from {#server_configuration_parameters-include_from} + +Değiştirmeleri ile dosyanın yolu. + +Daha fazla bilgi için bölüme bakın “[Yapılandırma dosyaları](../configuration_files.md#configuration_files)”. + +**Örnek** + +``` xml +/etc/metrica.xml +``` + +## ınterserver\_http\_port {#interserver-http-port} + +ClickHouse sunucuları arasında veri alışverişi için bağlantı noktası. + +**Örnek** + +``` xml +9009 +``` + +## ınterserver\_http\_host {#interserver-http-host} + +Bu sunucuya erişmek için diğer sunucular tarafından kullanılabilecek ana bilgisayar adı. + +Eğer ihmal edilirse, aynı şekilde tanımlanır `hostname-f` komut. + +Belirli bir ağ arayüzünden kopmak için kullanışlıdır. + +**Örnek** + +``` xml +example.yandex.ru +``` + +## ınterserver\_http\_credentials {#server-settings-interserver-http-credentials} + +Sırasında kimlik doğrulaması için kullanılan kullanıcı adı ve şifre [çoğalma](../../engines/table_engines/mergetree_family/replication.md) çoğaltılan \* motorlarla. Bu kimlik bilgileri yalnızca yinelemeler arasındaki iletişim için kullanılır ve ClickHouse istemcileri için kimlik bilgileri ile ilgisizdir. Sunucu, yinelemeleri bağlamak için bu kimlik bilgilerini denetliyor ve diğer yinelemelere bağlanırken aynı kimlik bilgilerini kullanıyor. Bu nedenle, bu kimlik bilgileri kümedeki tüm yinelemeler için aynı şekilde ayarlanmalıdır. +Varsayılan olarak, kimlik doğrulama kullanılmaz. + +Bu bölüm aşağıdaki parametreleri içerir: + +- `user` — username. +- `password` — password. + +**Örnek** + +``` xml + + admin + 222 + +``` + +## keep\_alive\_timeout {#keep-alive-timeout} + +ClickHouse bağlantıyı kapatmadan önce gelen istekleri bekler saniye sayısı. Varsayılan 3 saniye. + +**Örnek** + +``` xml +3 +``` + +## listen\_host {#server_configuration_parameters-listen_host} + +İsteklerin gelebileceği ana bilgisayarlarda kısıtlama. Sunucunun hepsini yanıtlamasını istiyorsanız, belirtin `::`. + +Örnekler: + +``` xml +::1 +127.0.0.1 +``` + +## kaydedici {#server_configuration_parameters-logger} + +Günlük ayarları. + +Anahtarlar: + +- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. +- log – The log file. Contains all the entries according to `level`. +- errorlog – Error log file. +- size – Size of the file. Applies to `log`ve`errorlog`. Dosya ulaştıktan sonra `size`, ClickHouse arşivleri ve yeniden adlandırır ve onun yerine yeni bir günlük dosyası oluşturur. +- count – The number of archived log files that ClickHouse stores. + +**Örnek** + +``` xml + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + +``` + +Syslog yazma da desteklenmektedir. Yapılandırma örneği: + +``` xml + + 1 + +
+        <address>syslog.remote:10514</address>
+        <hostname>myhost.local</hostname>
+        <facility>LOG_LOCAL6</facility>
+        <format>syslog</format>
+    </syslog>
+</logger>
    +``` + +Anahtarlar: + +- use\_syslog — Required setting if you want to write to the syslog. +- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. +- hostname — Optional. The name of the host that logs are sent from. +- facility — [Syslog tesisi anahtar sözcüğü](https://en.wikipedia.org/wiki/Syslog#Facility) ile büyük harf inlerle “LOG\_” önek: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3` vb.). + Varsayılan değer: `LOG_USER` eğer `address` belirtilen, `LOG_DAEMON otherwise.` +- format – Message format. Possible values: `bsd` ve `syslog.` + +## makrolar {#macros} + +Çoğaltılmış tablolar için parametre değiştirmeleri. + +Çoğaltılmış tablolar kullanılmazsa atlanabilir. + +Daha fazla bilgi için bölüme bakın “[Çoğaltılmış tablolar oluşturma](../../engines/table_engines/mergetree_family/replication.md)”. + +**Örnek** + +``` xml + +``` + +## mark\_cache\_size {#server-mark-cache-size} + +Tablo motorları tarafından kullanılan işaretlerin önbelleğinin yaklaşık boyutu (bayt cinsinden) [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) aile. + +Önbellek sunucu için paylaşılır ve bellek gerektiği gibi ayrılır. Önbellek boyutu en az 5368709120 olmalıdır. + +**Örnek** + +``` xml +5368709120 +``` + +## max\_concurrent\_queries {#max-concurrent-queries} + +Aynı anda işlenen isteklerin maksimum sayısı. + +**Örnek** + +``` xml +100 +``` + +## max\_connections {#max-connections} + +En fazla gelen bağlantı sayısı. + +**Örnek** + +``` xml +4096 +``` + +## max\_open\_files {#max-open-files} + +Maksimum açık dosya sayısı. + +Varsayılan olarak: `maximum`. + +Biz beri Mac OS X bu seçeneği kullanmanızı öneririz `getrlimit()` işlev yanlış bir değer döndürür. + +**Örnek** + +``` xml +262144 +``` + +## max\_table\_size\_to\_drop {#max-table-size-to-drop} + +Tabloları silme konusunda kısıtlama. + +Eğer bir boyutu [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) tablo aşıyor `max_table_size_to_drop` (bayt cinsinden), bir bırakma sorgusu kullanarak silemezsiniz. + +ClickHouse sunucusunu yeniden başlatmadan tabloyu silmeniz gerekiyorsa, `/flags/force_drop_table` dosya ve bırakma sorgusunu çalıştırın. + +Varsayılan değer: 50 GB. + +0 değeri, herhangi bir kısıtlama olmaksızın tüm tabloları silebileceğiniz anlamına gelir. + +**Örnek** + +``` xml +0 +``` + +## merge\_tree {#server_configuration_parameters-merge_tree} + +Tablolar için ince ayar [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +Daha fazla bilgi için bkz: MergeTreeSettings.h başlık dosyası. + +**Örnek** + +``` xml + + 5 + +``` + +## openSSL {#server_configuration_parameters-openssl} + +SSL istemci / sunucu yapılandırması. + +SSL desteği tarafından sağlanmaktadır `libpoco` kitaplık. Arayüz dosyada açıklanmıştır [SSLManager.sa](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) + +Sunucu/istemci ayarları için tuşlar: + +- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. +- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` sertifika içerir. +- caConfig – The path to the file or directory that contains trusted root certificates. +- verificationMode – The method for checking the node's certificates. Details are in the description of the [Bağlama](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) sınıf. 
Olası değerler: `none`, `relaxed`, `strict`, `once`. +- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value. +- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| +- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. +- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Kabul edilebilir değerler: `true`, `false`. +- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. Bu parametre her zaman sunucu oturumu önbelleğe alır ve istemci önbellekleme istedi, sorunları önlemek yardımcı olduğundan önerilir. Varsayılan değer: `${application.name}`. +- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions. +- sessionTimeout – Time for caching the session on the server. +- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. +- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. +- fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS. +- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. +- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- disableProtocols – Protocols that are not allowed to use. +- preferServerCiphers – Preferred server ciphers on the client. + +**Ayarlar örneği:** + +``` xml + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + +``` + +## part\_log {#server_configuration_parameters-part-log} + +İlişkili olayları günlüğe kaydetme [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). Örneğin, veri ekleme veya birleştirme. Birleştirme algoritmalarını simüle etmek ve özelliklerini karşılaştırmak için günlüğü kullanabilirsiniz. Birleştirme işlemini görselleştirebilirsiniz. + +Sorgular günlüğe kaydedilir [sistem.part\_log](../../operations/system_tables.md#system_tables-part-log) tablo, ayrı bir dosyada değil. Bu tablonun adını aşağıdaki tabloda yapılandırabilirsiniz: `table` parametre (aşağıya bakınız). + +Günlüğü yapılandırmak için aşağıdaki parametreleri kullanın: + +- `database` – Name of the database. +- `table` – Name of the system table. +- `partition_by` – Sets a [özel bölümleme anahtarı](../../engines/table_engines/mergetree_family/custom_partitioning_key.md). +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +**Örnek** + +``` xml + + system + part_log
+    <partition_by>toMonday(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</part_log>
    +``` + +## yol {#server_configuration_parameters-path} + +Veri içeren dizinin yolu. + +!!! note "Not" + Sondaki eğik çizgi zorunludur. + +**Örnek** + +``` xml +/var/lib/clickhouse/ +``` + +## prometheus {#server_configuration_parameters-prometheus} + +Kazıma için metrik verilerini açığa çıkarma [Prometheus](https://prometheus.io). + +Ayarlar: + +- `endpoint` – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. +- `port` – Port for `endpoint`. +- `metrics` – Flag that sets to expose metrics from the [sistem.metrik](../system_tables.md#system_tables-metrics) Tablo. +- `events` – Flag that sets to expose metrics from the [sistem.etkinlik](../system_tables.md#system_tables-events) Tablo. +- `asynchronous_metrics` – Flag that sets to expose current metrics values from the [sistem.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics) Tablo. + +**Örnek** + +``` xml + + /metrics + 8001 + true + true + true + +``` + +## query\_log {#server_configuration_parameters-query-log} + +İle alınan günlük sorgu settinglarının ayarlanması [log\_queries = 1](../settings/settings.md) ayar. + +Sorgular günlüğe kaydedilir [sistem.query\_log](../../operations/system_tables.md#system_tables-query_log) tablo, ayrı bir dosyada değil. Tablonun adını değiştirebilirsiniz. `table` parametre (aşağıya bakınız). + +Günlüğü yapılandırmak için aşağıdaki parametreleri kullanın: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [özel bölümleme anahtarı](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) bir masa için. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +Tablo yoksa, ClickHouse bunu oluşturur. ClickHouse sunucusu güncelleştirildiğinde sorgu günlüğünün yapısı değiştiyse, eski yapıya sahip tablo yeniden adlandırılır ve otomatik olarak yeni bir tablo oluşturulur. + +**Örnek** + +``` xml + + system + query_log
+    <partition_by>toMonday(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</query_log>
    +``` + +## query\_thread\_log {#server_configuration_parameters-query-thread-log} + +İle alınan sorguların günlük iş parçacıklarının ayarlanması [log\_query\_threads = 1](../settings/settings.md#settings-log-query-threads) ayar. + +Sorgular günlüğe kaydedilir [sistem.query\_thread\_log](../../operations/system_tables.md#system_tables-query-thread-log) tablo, ayrı bir dosyada değil. Tablonun adını değiştirebilirsiniz. `table` parametre (aşağıya bakınız). + +Günlüğü yapılandırmak için aşağıdaki parametreleri kullanın: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [özel bölümleme anahtarı](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) bir sistem tablosu için. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +Tablo yoksa, ClickHouse bunu oluşturur. Sorgu iş parçacığı günlüğü yapısını değiştirdiyseniz ClickHouse sunucu güncelleştirildi, tablo eski yapısı ile yeniden adlandırılır ve yeni bir tablo otomatik olarak oluşturulur. + +**Örnek** + +``` xml + + system + query_thread_log
+    <partition_by>toMonday(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</query_thread_log>
    +``` + +## trace\_log {#server_configuration_parameters-trace_log} + +İçin ayarlar [trace\_log](../../operations/system_tables.md#system_tables-trace_log) sistem tablosu çalışması. + +Parametre: + +- `database` — Database for storing a table. +- `table` — Table name. +- `partition_by` — [Özel bölümleme anahtarı](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) bir sistem tablosu için. +- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. + +Varsayılan sunucu yapılandırma dosyası `config.xml` aşağıdaki ayarlar bölümünü içerir: + +``` xml + + system + trace_log
+    <partition_by>toYYYYMM(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</trace_log>
    +``` + +## query\_masking\_rules {#query-masking-rules} + +Regexp tabanlı kurallar, sorgulara ve tüm günlük iletilerine sunucu günlüklerinde depolamadan önce uygulanacak, +`system.query_log`, `system.text_log`, `system.processes` tablo ve istemciye gönderilen günlüklerde. Önlem allowseyi sağlayan +SQL sorgularından hassas veri sızıntısı (isimler, e-postalar, kişisel +kimlik veya kredi kartı numaraları) günlükleri için. + +**Örnek** + +``` xml + + + hide SSN + (^|\D)\d{3}-\d{2}-\d{4}($|\D) + 000-00-0000 + + +``` + +Config alanları: +- `name` - kuralın adı (isteğe bağlı) +- `regexp` - Re2 uyumlu düzenli ifade (zorunlu) +- `replace` - hassas veriler için ikame dizesi (isteğe bağlı, varsayılan olarak-altı Yıldız İşareti) + +Maskeleme kuralları tüm sorguya uygulanır (hatalı biçimlendirilmiş / ayrıştırılamayan sorgulardan hassas verilerin sızıntılarını önlemek için). + +`system.events` tablo sayacı var `QueryMaskingRulesMatch` hangi sorgu maskeleme kuralları maçları genel bir numarası var. + +Dağıtılmış sorgular için her sunucu ayrı ayrı yapılandırılmalıdır, aksi takdirde alt sorgular diğerine iletilir +düğümler maskeleme olmadan saklanır. + +## remote\_servers {#server-settings-remote-servers} + +Tarafından kullanılan küm ofelerin yapılandırması [Dağılı](../../engines/table_engines/special/distributed.md) tablo motoru ve `cluster` tablo işlevi. + +**Örnek** + +``` xml + +``` + +Değeri için `incl` öznitelik, bölümüne bakın “[Yapılandırma dosyaları](../configuration_files.md#configuration_files)”. + +**Ayrıca Bakınız** + +- [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) + +## saat dilimi {#server_configuration_parameters-timezone} + +Sunucunun saat dilimi. + +UTC saat dilimi veya coğrafi konum (örneğin, Afrika / Abidjan) için bir IANA tanımlayıcısı olarak belirtilir. + +Saat dilimi, datetime alanları metin biçimine (ekranda veya dosyada yazdırıldığında) çıktığında ve datetime'ı bir dizeden alırken dize ve DateTime biçimleri arasındaki dönüşümler için gereklidir. Ayrıca, saat dilimi, giriş parametrelerinde saat dilimini almadıkları takdirde saat ve tarih ile çalışan işlevlerde kullanılır. + +**Örnek** + +``` xml +Europe/Moscow +``` + +## tcp\_port {#server_configuration_parameters-tcp_port} + +TCP protokolü üzerinden istemcilerle iletişim kurmak için bağlantı noktası. + +**Örnek** + +``` xml +9000 +``` + +## tcp\_port\_secure {#server_configuration_parameters-tcp_port-secure} + +İstemcilerle güvenli iletişim için TCP bağlantı noktası. İle kullanın [OpenSSL](#server_configuration_parameters-openssl) ayarlar. + +**Olası değerler** + +Pozitif tamsayı. + +**Varsayılan değer** + +``` xml +9440 +``` + +## mysql\_port {#server_configuration_parameters-mysql_port} + +MySQL protokolü üzerinden istemcilerle iletişim kurmak için bağlantı noktası. + +**Olası değerler** + +Pozitif tamsayı. + +Örnek + +``` xml +9004 +``` + +## tmp\_path {#server-settings-tmp_path} + +Büyük sorguları işlemek için geçici veri yolu. + +!!! note "Not" + Sondaki eğik çizgi zorunludur. + +**Örnek** + +``` xml +/var/lib/clickhouse/tmp/ +``` + +## tmp\_policy {#server-settings-tmp-policy} + +Politika dan [`storage_configuration`](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) geçici dosyaları saklamak için. +Set değilse [`tmp_path`](#server-settings-tmp_path) kullanılır, aksi takdirde göz ardı edilir. + +!!! 
note "Not" + - `move_factor` göz ardı edilir +- `keep_free_space_bytes` göz ardı edilir +- `max_data_part_size_bytes` göz ardı edilir +- bu Politikada tam olarak bir cilt olmalı + +## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} + +Tablo motorları tarafından kullanılan sıkıştırılmamış veriler için önbellek boyutu (bayt cinsinden) [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +Sunucu için bir paylaşılan önbellek var. Bellek talep üzerine tahsis edilir. Seçenek varsa önbellek kullanılır [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) etkindir. + +Sıkıştırılmamış önbellek, tek tek durumlarda çok kısa sorgular için avantajlıdır. + +**Örnek** + +``` xml +8589934592 +``` + +## user\_files\_path {#server_configuration_parameters-user_files_path} + +Kullanıcı dosyaları ile dizin. Tablo işlevinde kullanılır [Dosya()](../../sql_reference/table_functions/file.md). + +**Örnek** + +``` xml +/var/lib/clickhouse/user_files/ +``` + +## users\_config {#users-config} + +İçeren dosyanın yolu: + +- Kullanıcı yapılandırmaları. +- Erişim hakları. +- Ayarlar profilleri. +- Kota ayarları. + +**Örnek** + +``` xml +users.xml +``` + +## zookeeper {#server-settings_zookeeper} + +ClickHouse ile etkileşim sağlayan ayarları içerir [ZooKeeper](http://zookeeper.apache.org/) küme. + +ClickHouse, çoğaltılmış tabloları kullanırken kopyaların meta verilerini depolamak için ZooKeeper kullanır. Çoğaltılmış tablolar kullanılmazsa, parametrelerin bu bölümü atlanabilir. + +Bu bölüm aşağıdaki parametreleri içerir: + +- `node` — ZooKeeper endpoint. You can set multiple endpoints. + + Mesela: + + + +``` xml + + example_host + 2181 + +``` + + The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster. + +- `session_timeout` — Maximum timeout for the client session in milliseconds. +- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) bu, ClickHouse sunucusu tarafından kullanılan znodes için kök olarak kullanılır. İsteğe bağlı. +- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. + +**Örnek yapılandırma** + +``` xml + + + example1 + 2181 + + + example2 + 2181 + + 30000 + 10000 + + /path/to/zookeeper/node + + user:password + +``` + +**Ayrıca Bakınız** + +- [Çoğalma](../../engines/table_engines/mergetree_family/replication.md) +- [ZooKeeper programcı Kılavuzu](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) + +## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} + +ZooKeeper veri parçası başlıkları için depolama yöntemi. + +Bu ayar yalnızca `MergeTree` aile. Belirt specifiedilebilir: + +- Küresel olarak [merge\_tree](#server_configuration_parameters-merge_tree) bu bölüm `config.xml` Dosya. + + ClickHouse sunucudaki tüm tablolar için ayarı kullanır. Ayarı istediğiniz zaman değiştirebilirsiniz. Mevcut tablolar, ayar değiştiğinde davranışlarını değiştirir. + +- Her tablo için. + + Bir tablo oluştururken, karşılık gelen [motor ayarı](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). Genel ayar değişse bile, bu ayara sahip varolan bir tablonun davranışı değişmez. + +**Olası değerler** + +- 0 — Functionality is turned off. +- 1 — Functionality is turned on. 
+ +Eğer `use_minimalistic_part_header_in_zookeeper = 1`, sonraları [çoğaltıyordu](../../engines/table_engines/mergetree_family/replication.md) tablolar, veri parçalarının başlıklarını tek bir `znode`. Tablo çok sayıda sütun içeriyorsa, bu depolama yöntemi Zookeeper'da depolanan verilerin hacmini önemli ölçüde azaltır. + +!!! attention "Dikkat" + Uyguladıktan sonra `use_minimalistic_part_header_in_zookeeper = 1`, ClickHouse sunucusunu bu ayarı desteklemeyen bir sürüme düşüremezsiniz. Bir kümedeki sunucularda ClickHouse yükseltirken dikkatli olun. Tüm sunucuları bir kerede yükseltmeyin. Clickhouse'un yeni sürümlerini bir test ortamında veya bir kümenin yalnızca birkaç sunucusunda test etmek daha güvenlidir. + + Data part headers already stored with this setting can't be restored to their previous (non-compact) representation. + +**Varsayılan değer:** 0. + +## disable\_internal\_dns\_cache {#server-settings-disable-internal-dns-cache} + +İç DNS önbelleğini devre dışı bırakır. Sistemlerinde ClickHouse işletim için tavsiye +Kubernetes gibi sık sık değişen altyapı ile. + +**Varsayılan değer:** 0. + +## dns\_cache\_update\_period {#server-settings-dns-cache-update-period} + +ClickHouse iç DNS önbelleğinde saklanan IP adreslerini güncelleme süresi (saniye cinsinden). +Güncelleştirme, ayrı bir sistem iş parçacığında zaman uyumsuz olarak gerçekleştirilir. + +**Varsayılan değer**: 15. + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/) diff --git a/docs/tr/operations/settings/constraints_on_settings.md b/docs/tr/operations/settings/constraints_on_settings.md new file mode 100644 index 00000000000..a9319c2df69 --- /dev/null +++ b/docs/tr/operations/settings/constraints_on_settings.md @@ -0,0 +1,75 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 62 +toc_title: "Ayarlardaki k\u0131s\u0131tlamalar" +--- + +# Ayarlardaki kısıtlamalar {#constraints-on-settings} + +Ayarlardaki kısıtlamalar, `profiles` bu bölüm `user.xml` yapılandırma dosyası ve kullanıcıların bazı ayarları değiştirmelerini yasakla `SET` sorgu. +Kısıtlamalar aşağıdaki gibi tanımlanır: + +``` xml + + + + + lower_boundary + + + upper_boundary + + + lower_boundary + upper_boundary + + + + + + + +``` + +Kullanıcı kısıtlamaları ihlal etmeye çalışırsa, bir istisna atılır ve ayar değiştirilmez. +Desteklenen üç tür kısıtlama vardır: `min`, `max`, `readonly`. Bu `min` ve `max` kısıtlamalar, sayısal bir ayar için üst ve alt sınırları belirtir ve birlikte kullanılabilir. Bu `readonly` kısıtlama, kullanıcının karşılık gelen ayarı hiç değiştiremeyeceğini belirtir. + +**Örnek:** İzin vermek `users.xml` hatları içerir: + +``` xml + + + 10000000000 + 0 + ... + + + 5000000000 + 20000000000 + + + + + + + +``` + +Aşağıdaki sorgular tüm atma istisnaları: + +``` sql +SET max_memory_usage=20000000001; +SET max_memory_usage=4999999999; +SET force_index_by_date=1; +``` + +``` text +Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be greater than 20000000000. +Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be less than 5000000000. +Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed. +``` + +**Not:** bu `default` profil özel işleme sahiptir: tüm kısıtlamalar için tanımlanan `default` profil varsayılan kısıtlamalar haline gelir, bu nedenle bu kullanıcılar için açıkça geçersiz kılınana kadar tüm kullanıcıları kısıtlarlar. 
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/)
diff --git a/docs/tr/operations/settings/index.md b/docs/tr/operations/settings/index.md
new file mode 100644
index 00000000000..72270b8a397
--- /dev/null
+++ b/docs/tr/operations/settings/index.md
@@ -0,0 +1,32 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: Ayarlar
+toc_priority: 55
+toc_title: "Giri\u015F"
+---
+
+# Ayarlar {#settings}
+
+Aşağıda açıklanan tüm ayarları yapmanın birden çok yolu vardır.
+Ayarlar katmanlar halinde yapılandırılır; sonraki her katman, önceki ayarları yeniden tanımlar.
+
+Öncelik sırasına göre ayarları yapılandırma yolları:
+
+- Sunucu yapılandırma dosyası `users.xml` içindeki ayarlar.
+
+    Bunları `users.xml` dosyasının `profiles` öğesi içinde ayarlayın.
+
+- Oturum ayarları.
+
+    Etkileşimli modda ClickHouse konsol istemcisinden `SET setting=value` gönderin.
+    Benzer şekilde, HTTP protokolünde ClickHouse oturumlarını kullanabilirsiniz. Bunun için `session_id` HTTP parametresini belirtmeniz gerekir.
+
+- Sorgu ayarları.
+
+    - ClickHouse konsol istemcisini etkileşimli olmayan modda başlatırken `--setting=value` başlangıç parametresini ayarlayın.
+    - HTTP API'sini kullanırken CGI parametrelerini geçirin (`URL?setting_1=value&setting_2=value...`).
+
+Yalnızca sunucu yapılandırma dosyasında yapılabilecek ayarlar bu bölümde yer almaz.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/settings/)
diff --git a/docs/tr/operations/settings/permissions_for_queries.md b/docs/tr/operations/settings/permissions_for_queries.md
new file mode 100644
index 00000000000..2f65630604c
--- /dev/null
+++ b/docs/tr/operations/settings/permissions_for_queries.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 58
+toc_title: "Sorgular i\xE7in izinler"
+---
+
+# Sorgular için izinler {#permissions_for_queries}
+
+Clickhouse'daki sorgular birkaç türe ayrılabilir:
+
+1. Veri okuma sorguları: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`.
+2. Veri yazma sorguları: `INSERT`, `OPTIMIZE`.
+3. Ayar değiştirme sorguları: `SET`, `USE`.
+4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) sorguları: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP`, `TRUNCATE`.
+5. `KILL QUERY`.
+
+Aşağıdaki ayarlar, kullanıcı izinlerini sorgu türüne göre düzenler:
+
+- [readonly](#settings_readonly) — Restricts permissions for all types of queries except DDL queries.
+- [allow\_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries.
+
+`KILL QUERY`, herhangi bir ayarla yürütülebilir.
+
+## readonly {#settings_readonly}
+
+Veri okuma, veri yazma ve ayar değiştirme sorguları için izinleri kısıtlar.
+
+Sorguların türlere nasıl ayrıldığı için [yukarıya](#permissions_for_queries) bakın.
+
+Olası değerler:
+
+- 0 — All queries are allowed.
+- 1 — Only read data queries are allowed.
+- 2 — Read data and change settings queries are allowed.
+
+`readonly = 1` ayarlandıktan sonra, kullanıcı geçerli oturumda `readonly` ve `allow_ddl` ayarlarını değiştiremez.
+
+[HTTP arayüzünde](../../interfaces/http.md) `GET` yöntemi kullanıldığında, `readonly = 1` otomatik olarak ayarlanır. Verileri değiştirmek için `POST` yöntemini kullanın.
+
+`readonly = 1` ayarı, kullanıcının tüm ayarları değiştirmesini yasaklar. Kullanıcının yalnızca belirli ayarları değiştirmesini
+yasaklamanın bir yolu da vardır; ayrıntılar için bkz. [ayarlardaki kısıtlamalar](constraints_on_settings.md).
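+
+Beklenen davranışı gösteren varsayımsal bir oturum taslağı (hata kodu ve metni sürüme göre değişebilir):
+
+``` sql
+SET readonly = 1;
+SELECT count() FROM system.tables;  -- okuma sorgusu çalışır
+SET max_threads = 8;                -- istisna beklenir: Cannot modify 'max_threads' setting in readonly mode
+```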
+
+Varsayılan değer: 0
+
+## allow\_ddl {#settings_allow_ddl}
+
+[DDL](https://en.wikipedia.org/wiki/Data_definition_language) sorgularına izin verir veya bunları reddeder.
+
+Sorguların türlere nasıl ayrıldığı için [yukarıya](#permissions_for_queries) bakın.
+
+Olası değerler:
+
+- 0 — DDL queries are not allowed.
+- 1 — DDL queries are allowed.
+
+Geçerli oturumda `allow_ddl = 0` ise `SET allow_ddl = 1` sorgusunu yürütemezsiniz.
+
+Varsayılan değer: 1
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/)
diff --git a/docs/tr/operations/settings/query_complexity.md b/docs/tr/operations/settings/query_complexity.md
new file mode 100644
index 00000000000..54958b786ab
--- /dev/null
+++ b/docs/tr/operations/settings/query_complexity.md
@@ -0,0 +1,302 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 59
+toc_title: "Sorgu karma\u015F\u0131kl\u0131\u011F\u0131 \xFCzerindeki k\u0131s\u0131\
+  tlamalar"
+---
+
+# Sorgu Karmaşıklığı Üzerindeki Kısıtlamalar {#restrictions-on-query-complexity}
+
+Sorgu karmaşıklığı üzerindeki kısıtlamalar, ayarların bir parçasıdır.
+Kullanıcı arabiriminden daha güvenli yürütme sağlamak için kullanılırlar.
+Hemen hemen tüm kısıtlamalar yalnızca `SELECT` için geçerlidir. Dağıtılmış sorgu işlemede kısıtlamalar her sunucuda ayrı ayrı uygulanır.
+
+ClickHouse kısıtlamaları her satır için değil, veri parçaları için denetler. Bu, kısıtlama değerini bir veri parçasının boyutu kadar aşabileceğiniz anlamına gelir.
+
+“Bir şeyin maksimum miktarı” türündeki kısıtlamalar 0 değerini alabilir; bu, “sınırsız” anlamına gelir.
+Çoğu kısıtlamanın ayrıca bir ‘overflow\_mode’ ayarı vardır; bu, sınır aşıldığında ne yapılacağını belirler.
+Bu ayar iki değerden birini alabilir: `throw` veya `break`. Toplama üzerindeki kısıtlamalar (group\_by\_overflow\_mode) ayrıca `any` değerini alabilir.
+
+`throw` – Throw an exception (default).
+
+`break` – Stop executing the query and return the partial result, as if the source data ran out.
+
+`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don't add new keys to the set.
+
+## max\_memory\_usage {#settings_max_memory_usage}
+
+Tek bir sunucuda bir sorguyu çalıştırmak için kullanılabilecek en fazla RAM miktarı.
+
+Varsayılan yapılandırma dosyasında bu üst sınır 10 GB'dir.
+
+Bu ayar, kullanılabilir bellek hacmini veya makinedeki toplam bellek hacmini dikkate almaz.
+Kısıtlama, tek bir sunucu içindeki tek bir sorgu için geçerlidir.
+Her sorgunun geçerli bellek tüketimini görmek için `SHOW PROCESSLIST` kullanabilirsiniz.
+Ayrıca, her sorgu için en yüksek bellek tüketimi izlenir ve günlüğe yazılır.
+
+Bellek kullanımı, belirli toplama işlevlerinin durumları için izlenmez.
+
+`String` ve `Array` argümanları üzerinde çalışan `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` toplama işlevlerinin durumları için bellek kullanımı tam olarak izlenmez.
+
+Bellek tüketimi ayrıca `max_memory_usage_for_user` ve `max_memory_usage_for_all_queries` parametreleriyle de sınırlıdır.
+
+## max\_memory\_usage\_for\_user {#max-memory-usage-for-user}
+
+Tek bir sunucuda bir kullanıcının sorgularını çalıştırmak için kullanılabilecek en fazla RAM miktarı.
+
+Varsayılan değerler [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L288) içinde tanımlanmıştır. Varsayılan olarak miktar sınırlı değildir (`max_memory_usage_for_user = 0`).
+
+Ayrıca bkz. [max\_memory\_usage](#settings_max_memory_usage) açıklaması.
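+
+Örneğin, her iki sınır da oturum düzeyinde ayarlanabilir (değerler yalnızca örnek amaçlıdır):
+
+``` sql
+SET max_memory_usage_for_user = 20000000000; -- kullanıcı başına ~20 GB (örnek değer)
+SET max_memory_usage = 10000000000;          -- sorgu başına ~10 GB (örnek değer)
+-- sınır aşılırsa sorgu MEMORY_LIMIT_EXCEEDED istisnasıyla durdurulur
+```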
+
+## max\_memory\_usage\_for\_all\_queries {#max-memory-usage-for-all-queries}
+
+Tek bir sunucuda tüm sorguları çalıştırmak için kullanılabilecek en fazla RAM miktarı.
+
+Varsayılan değerler [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L289) içinde tanımlanmıştır. Varsayılan olarak miktar sınırlı değildir (`max_memory_usage_for_all_queries = 0`).
+
+Ayrıca bkz. [max\_memory\_usage](#settings_max_memory_usage) açıklaması.
+
+## max\_rows\_to\_read {#max-rows-to-read}
+
+Aşağıdaki kısıtlamalar her satır yerine her blokta denetlenebilir. Yani kısıtlamalar bir miktar aşılabilir.
+Bir sorgu birden çok iş parçacığında çalıştırılırken, aşağıdaki kısıtlamalar her iş parçacığı için ayrı ayrı uygulanır.
+
+Bir sorgu çalıştırılırken bir tablodan okunabilecek en fazla satır sayısı.
+
+## max\_bytes\_to\_read {#max-bytes-to-read}
+
+Bir sorgu çalıştırılırken bir tablodan okunabilecek en fazla bayt sayısı (sıkıştırılmamış veri).
+
+## read\_overflow\_mode {#read-overflow-mode}
+
+Okunan veri hacmi sınırlardan birini aştığında ne yapılacağı: ‘throw’ veya ‘break’. Varsayılan olarak, throw.
+
+## max\_rows\_to\_group\_by {#settings-max-rows-to-group-by}
+
+Toplamadan alınan en fazla benzersiz anahtar sayısı. Bu ayar, toplama sırasında bellek tüketimini sınırlamanızı sağlar.
+
+## group\_by\_overflow\_mode {#group-by-overflow-mode}
+
+Toplama için benzersiz anahtar sayısı sınırı aştığında ne yapılacağı: ‘throw’, ‘break’ veya ‘any’. Varsayılan olarak, throw.
+‘any’ değerini kullanmak, GROUP BY'nin bir yaklaşıklamasını çalıştırmanızı sağlar. Bu yaklaşıklamanın kalitesi, verilerin istatistiksel niteliğine bağlıdır.
+
+## max\_bytes\_before\_external\_group\_by {#settings-max_bytes_before_external_group_by}
+
+`GROUP BY` yan tümcelerinin harici bellekte çalıştırılmasını etkinleştirir veya devre dışı bırakır. Bkz. [Harici bellekte GROUP BY](../../sql_reference/statements/select.md#select-group-by-in-external-memory).
+
+Olası değerler:
+
+- Tek bir [GROUP BY](../../sql_reference/statements/select.md#select-group-by-clause) işleminin kullanabileceği en fazla RAM hacmi (bayt cinsinden).
+- 0 — `GROUP BY` harici bellekte devre dışı.
+
+Varsayılan değer: 0.
+
+## max\_rows\_to\_sort {#max-rows-to-sort}
+
+Sıralamadan önceki en fazla satır sayısı. Bu, sıralama sırasında bellek tüketimini sınırlamanızı sağlar.
+
+## max\_bytes\_to\_sort {#max-bytes-to-sort}
+
+Sıralamadan önceki en fazla bayt sayısı.
+
+## sort\_overflow\_mode {#sort-overflow-mode}
+
+Sıralamadan önce alınan satır sayısı sınırlardan birini aşarsa ne yapılacağı: ‘throw’ veya ‘break’. Varsayılan olarak, throw.
+
+## max\_result\_rows {#setting-max_result_rows}
+
+Sonuçtaki satır sayısını sınırlar. Alt sorgularda ve dağıtılmış bir sorgunun parçaları çalıştırılırken uzak sunucularda da denetlenir.
+
+## max\_result\_bytes {#max-result-bytes}
+
+Sonuçtaki bayt sayısını sınırlar. Önceki ayarla aynı şekilde çalışır.
+
+## result\_overflow\_mode {#result-overflow-mode}
+
+Sonucun hacmi sınırlardan birini aşarsa ne yapılacağı: ‘throw’ veya ‘break’. Varsayılan olarak, throw.
+
+‘break’ kullanmak LIMIT kullanmaya benzer. `Break` yürütmeyi yalnızca blok düzeyinde keser. Bu, döndürülen satır sayısının [max\_result\_rows](#setting-max_result_rows) değerinden büyük ve [max\_block\_size](settings.md#setting-max_block_size) değerinin bir katı olduğu ve [max\_threads](settings.md#settings-max_threads) değerine bağlı olduğu anlamına gelir.
+ +Örnek: + +``` sql +SET max_threads = 3, max_block_size = 3333; +SET max_result_rows = 3334, result_overflow_mode = 'break'; + +SELECT * +FROM numbers_mt(100000) +FORMAT Null; +``` + +Sonuç: + +``` text +6666 rows in set. ... +``` + +## max\_execution\_time {#max-execution-time} + +Saniye cinsinden maksimum sorgu yürütme süresi. +Şu anda, sıralama aşamalarından biri için veya toplama işlevlerini birleştirirken ve sonlandırırken kontrol edilmez. + +## timeout\_overflow\_mode {#timeout-overflow-mode} + +Sorgu daha uzun çalıştırılırsa ne yapmalı ‘max\_execution\_time’: ‘throw’ veya ‘break’. Varsayılan olarak, atın. + +## min\_execution\_speed {#min-execution-speed} + +Saniyede satırlarda minimum yürütme hızı. Her veri bloğunda ne zaman kontrol edildi ‘timeout\_before\_checking\_execution\_speed’ doluyor. Yürütme hızı düşükse, bir istisna atılır. + +## min\_execution\_speed\_bytes {#min-execution-speed-bytes} + +Saniyede en az yürütme bayt sayısı. Her veri bloğunda ne zaman kontrol edildi ‘timeout\_before\_checking\_execution\_speed’ doluyor. Yürütme hızı düşükse, bir istisna atılır. + +## max\_execution\_speed {#max-execution-speed} + +Saniyede en fazla yürütme satırı sayısı. Her veri bloğunda ne zaman kontrol edildi ‘timeout\_before\_checking\_execution\_speed’ doluyor. Yürütme hızı yüksekse, yürütme hızı azaltılır. + +## max\_execution\_speed\_bytes {#max-execution-speed-bytes} + +Saniyede en fazla yürütme bayt sayısı. Her veri bloğunda ne zaman kontrol edildi ‘timeout\_before\_checking\_execution\_speed’ doluyor. Yürütme hızı yüksekse, yürütme hızı azaltılır. + +## timeout\_before\_checking\_execution\_speed {#timeout-before-checking-execution-speed} + +Yürütme hızının çok yavaş olmadığını kontrol eder (en az ‘min\_execution\_speed’), saniye içinde belirtilen süre dolduktan sonra. + +## max\_columns\_to\_read {#max-columns-to-read} + +Tek bir sorguda bir tablodan okunabilen sütun sayısı. Bir sorgu daha fazla sayıda sütun okuma gerektiriyorsa, bir özel durum atar. + +## max\_temporary\_columns {#max-temporary-columns} + +Sabit sütunlar da dahil olmak üzere bir sorgu çalıştırırken aynı anda RAM'de tutulması gereken geçici sütun sayısı. Bundan daha fazla geçici sütun varsa, bir istisna atar. + +## max\_temporary\_non\_const\_columns {#max-temporary-non-const-columns} + +Aynı şey ‘max\_temporary\_columns’, ancak sabit sütunları saymadan. +Bir sorgu çalıştırırken sabit sütunların oldukça sık oluşturulduğunu, ancak yaklaşık sıfır bilgi işlem kaynağı gerektirdiğini unutmayın. + +## max\_subquery\_depth {#max-subquery-depth} + +Alt sorguların maksimum yuvalama derinliği. Alt sorgular daha derinse, bir istisna atılır. Varsayılan olarak, 100. + +## max\_pipeline\_depth {#max-pipeline-depth} + +Maksimum boru hattı derinliği. Sorgu işleme sırasında her veri bloğunun geçtiği dönüşümlerin sayısına karşılık gelir. Tek bir sunucunun sınırları içinde sayılır. Boru hattı derinliği büyükse, bir istisna atılır. Varsayılan olarak, 1000. + +## max\_ast\_depth {#max-ast-depth} + +Sorgu sözdizimsel ağacının en fazla yuvalama derinliği. Aşılırsa, bir istisna atılır. +Şu anda, ayrıştırma sırasında değil, yalnızca sorguyu ayrıştırdıktan sonra kontrol edilir. Yani, ayrıştırma sırasında çok derin bir sözdizimsel ağaç oluşturulabilir, ancak sorgu başarısız olur. Varsayılan olarak, 1000. + +## max\_ast\_elements {#max-ast-elements} + +Sorgu sözdizimsel ağacındaki en fazla öğe sayısı. Aşılırsa, bir istisna atılır. +Önceki ayarla aynı şekilde, yalnızca sorguyu ayrıştırdıktan sonra kontrol edilir. 
Varsayılan olarak, 50.000.
+
+## max\_rows\_in\_set {#max-rows-in-set}
+
+Bir alt sorgudan oluşturulan IN yan tümcesindeki bir veri kümesi için en fazla satır sayısı.
+
+## max\_bytes\_in\_set {#max-bytes-in-set}
+
+Bir alt sorgudan oluşturulan IN yan tümcesindeki bir kümenin kullanabileceği en fazla bayt sayısı (sıkıştırılmamış veri).
+
+## set\_overflow\_mode {#set-overflow-mode}
+
+Veri miktarı sınırlardan birini aştığında ne yapılacağı: ‘throw’ veya ‘break’. Varsayılan olarak, throw.
+
+## max\_rows\_in\_distinct {#max-rows-in-distinct}
+
+DISTINCT kullanılırken en fazla farklı satır sayısı.
+
+## max\_bytes\_in\_distinct {#max-bytes-in-distinct}
+
+DISTINCT kullanılırken bir karma tablonun kullanabileceği en fazla bayt sayısı.
+
+## distinct\_overflow\_mode {#distinct-overflow-mode}
+
+Veri miktarı sınırlardan birini aştığında ne yapılacağı: ‘throw’ veya ‘break’. Varsayılan olarak, throw.
+
+## max\_rows\_to\_transfer {#max-rows-to-transfer}
+
+GLOBAL IN kullanılırken uzak bir sunucuya geçirilebilecek veya geçici bir tabloya kaydedilebilecek en fazla satır sayısı.
+
+## max\_bytes\_to\_transfer {#max-bytes-to-transfer}
+
+GLOBAL IN kullanılırken uzak bir sunucuya geçirilebilecek veya geçici bir tabloya kaydedilebilecek en fazla bayt sayısı (sıkıştırılmamış veri).
+
+## transfer\_overflow\_mode {#transfer-overflow-mode}
+
+Veri miktarı sınırlardan birini aştığında ne yapılacağı: ‘throw’ veya ‘break’. Varsayılan olarak, throw.
+
+## max\_rows\_in\_join {#settings-max_rows_in_join}
+
+Tablolar birleştirilirken kullanılan karma tablodaki satır sayısını sınırlar.
+
+Bu ayar, [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) işlemleri ve [Join](../../engines/table_engines/special/join.md) tablo motoru için geçerlidir.
+
+Bir sorgu birden çok birleştirme içeriyorsa, ClickHouse bu ayarı her ara sonuç için denetler.
+
+Sınıra ulaşıldığında ClickHouse farklı eylemlerle devam edebilir. Eylemi seçmek için [join\_overflow\_mode](#settings-join_overflow_mode) ayarını kullanın.
+
+Olası değerler:
+
+- Pozitif tamsayı.
+- 0 — Unlimited number of rows.
+
+Varsayılan değer: 0.
+
+## max\_bytes\_in\_join {#settings-max_bytes_in_join}
+
+Tablolar birleştirilirken kullanılan karma tablonun bayt cinsinden boyutunu sınırlar.
+
+Bu ayar, [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) işlemleri ve [Join tablo motoru](../../engines/table_engines/special/join.md) için geçerlidir.
+
+Sorgu birleştirmeler içeriyorsa, ClickHouse bu ayarı her ara sonuç için denetler.
+
+Sınıra ulaşıldığında ClickHouse farklı eylemlerle devam edebilir. Eylemi seçmek için [join\_overflow\_mode](#settings-join_overflow_mode) ayarını kullanın.
+
+Olası değerler:
+
+- Pozitif tamsayı.
+- 0 — Memory control is disabled.
+
+Varsayılan değer: 0.
+
+## join\_overflow\_mode {#settings-join_overflow_mode}
+
+Aşağıdaki birleştirme sınırlarından herhangi birine ulaşıldığında Clickhouse'un gerçekleştireceği eylemi tanımlar:
+
+- [max\_bytes\_in\_join](#settings-max_bytes_in_join)
+- [max\_rows\_in\_join](#settings-max_rows_in_join)
+
+Olası değerler:
+
+- `THROW` — ClickHouse throws an exception and breaks operation.
+- `BREAK` — ClickHouse breaks operation and doesn't throw an exception.
+
+Varsayılan değer: `THROW`.
+
+**Ayrıca Bakınız**
+
+- [JOIN yan tümcesi](../../sql_reference/statements/select.md#select-join)
+- [Join tablo motoru](../../engines/table_engines/special/join.md)
+
+## max\_partitions\_per\_insert\_block {#max-partitions-per-insert-block}
+
+Eklenen tek bir bloktaki en fazla bölüm sayısını sınırlar.
+
+- Pozitif tamsayı.
+- 0 — Unlimited number of partitions. + +Varsayılan değer: 100. + +**Ayrıntı** + +Veri eklerken, ClickHouse eklenen bloktaki bölüm sayısını hesaplar. Bölüm sayısı fazla ise `max_partitions_per_insert_block`, ClickHouse aşağıdaki metinle bir özel durum atar: + +> “Too many partitions for single INSERT block (more than” + toString (max\_parts) + “). The limit is controlled by ‘max\_partitions\_per\_insert\_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).” + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/settings/query_complexity/) diff --git a/docs/tr/operations/settings/settings.md b/docs/tr/operations/settings/settings.md new file mode 100644 index 00000000000..48e48ca00a6 --- /dev/null +++ b/docs/tr/operations/settings/settings.md @@ -0,0 +1,1235 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 60 +toc_title: Ayarlar +--- + +# Ayarlar {#settings} + +## distributed\_product\_mode {#distributed-product-mode} + +Davranışını değiştirir [dağıtılmış alt sorgular](../../sql_reference/statements/select.md). + +ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table. + +Kısıtlama: + +- Yalnızca ın ve JOIN alt sorguları için uygulanır. +- Yalnızca FROM bölümü birden fazla parça içeren dağıtılmış bir tablo kullanıyorsa. +- Alt sorgu birden fazla parça içeren dağıtılmış bir tablo ile ilgiliyse. +- Bir tablo için kullanılmaz-değerli [uzak](../../sql_reference/table_functions/remote.md) işlev. + +Olası değerler: + +- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” özel). +- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.` +- `global` — Replaces the `IN`/`JOIN` ile sorgu `GLOBAL IN`/`GLOBAL JOIN.` +- `allow` — Allows the use of these types of subqueries. + +## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression} + +Yüklemi pushdown açar `SELECT` sorgular. + +Yüklemi pushdown, dağıtılmış sorgular için ağ trafiğini önemli ölçüde azaltabilir. + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 1. + +Kullanma + +Aşağıdaki sorguları düşünün: + +1. `SELECT count() FROM test_table WHERE date = '2018-10-10'` +2. `SELECT count() FROM (SELECT * FROM test_table) WHERE date = '2018-10-10'` + +Eğer `enable_optimize_predicate_expression = 1`, daha sonra bu sorguların yürütme süresi eşittir çünkü ClickHouse geçerlidir `WHERE` işlerken alt sorguya. + +Eğer `enable_optimize_predicate_expression = 0`, daha sonra ikinci sorgunun yürütme süresi çok daha uzundur, çünkü `WHERE` yan tümcesi alt sorgu tamamlandıktan sonra tüm veriler için geçerlidir. + +## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} + +Güncelleştirilmiş veriler mevcut değilse, bir sorgu için güncel olmayan bir yineleme zorlar. 
Görmek [Çoğalma](../../engines/table_engines/mergetree_family/replication.md). + +ClickHouse, tablonun eski kopyalarından en alakalı olanı seçer. + +Yaparken kullanılır `SELECT` çoğaltılmış tablolara işaret eden dağıtılmış bir tablodan. + +Varsayılan olarak, 1 (etkin). + +## force\_index\_by\_date {#settings-force_index_by_date} + +Dizin tarihe göre kullanılamıyorsa, sorgu yürütülmesini devre dışı bırakır. + +MergeTree ailesindeki tablolarla çalışır. + +Eğer `force_index_by_date=1`, ClickHouse sorgunun veri aralıklarını kısıtlamak için kullanılabilecek bir tarih anahtarı koşulu olup olmadığını denetler. Uygun bir koşul yoksa, bir istisna atar. Ancak, koşul okumak için veri miktarını azaltır olup olmadığını denetlemez. Örneğin, durum `Date != ' 2000-01-01 '` tablodaki tüm verilerle eşleştiğinde bile kabul edilebilir (yani, sorguyu çalıştırmak tam bir tarama gerektirir). MergeTree tablolarındaki veri aralıkları hakkında daha fazla bilgi için bkz. [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +## force\_primary\_key {#force-primary-key} + +Birincil anahtar tarafından dizin oluşturma mümkün değilse, sorgu yürütülmesini devre dışı bırakır. + +MergeTree ailesindeki tablolarla çalışır. + +Eğer `force_primary_key=1`, ClickHouse, sorgunun veri aralıklarını kısıtlamak için kullanılabilecek bir birincil anahtar koşulu olup olmadığını denetler. Uygun bir koşul yoksa, bir istisna atar. Ancak, koşul okumak için veri miktarını azaltır olup olmadığını denetlemez. MergeTree tablolarındaki veri aralıkları hakkında daha fazla bilgi için bkz. [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +## format\_schema {#format-schema} + +Bu parametre, aşağıdaki gibi bir şema tanımı gerektiren biçimler kullanırken kullanışlıdır [Cap'n Proto](https://capnproto.org/) veya [Protobuf](https://developers.google.com/protocol-buffers/). Değer biçime bağlıdır. + +## fsync\_metadata {#fsync-metadata} + +Etkinleştirir veya devre dışı bırakır [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) yazarken `.sql` eğe. Varsayılan olarak etkin. + +Sunucu, sürekli olarak oluşturulan ve yok edilen milyonlarca küçük tabloya sahipse, onu devre dışı bırakmak mantıklıdır. + +## enable\_http\_compression {#settings-enable_http_compression} + +Bir HTTP isteğine yanıt olarak veri sıkıştırmasını etkinleştirir veya devre dışı bırakır. + +Daha fazla bilgi için, okuyun [HTTP arayüzü açıklaması](../../interfaces/http.md). + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 0. + +## http\_zlib\_compression\_level {#settings-http_zlib_compression_level} + +Eğer bir HTTP isteğine yanıt veri sıkıştırma düzeyini ayarlar [enable\_http\_compression = 1](#settings-enable_http_compression). + +Olası değerler: 1'den 9'a kadar olan sayılar. + +Varsayılan değer: 3. + +## http\_native\_compression\_disable\_checksumming\_on\_decompress {#settings-http_native_compression_disable_checksumming_on_decompress} + +İstemciden HTTP POST verilerini açarken sağlama toplamı doğrulamasını etkinleştirir veya devre dışı bırakır. Sadece ClickHouse yerel sıkıştırma formatı için kullanılır (ile kullanılmaz `gzip` veya `deflate`). + +Daha fazla bilgi için, okuyun [HTTP arayüzü açıklaması](../../interfaces/http.md). + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 0. 
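+
+HTTP yanıt sıkıştırmasını oturum düzeyinde açmak için varsayımsal bir taslak (istemcinin ayrıca `Accept-Encoding` başlığını göndermesi gerekir):
+
+``` sql
+SET enable_http_compression = 1;
+SET http_zlib_compression_level = 6; -- 1 ile 9 arası; yalnızca örnek bir değer
+```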
+ +## send\_progress\_in\_http\_headers {#settings-send_progress_in_http_headers} + +Etkinleştirir veya devre dışı bırakır `X-ClickHouse-Progress` HTTP yanıt başlıkları `clickhouse-server` yanıtlar. + +Daha fazla bilgi için, okuyun [HTTP arayüzü açıklaması](../../interfaces/http.md). + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 0. + +## max\_http\_get\_redirects {#setting-max_http_get_redirects} + +Maksimum http get yönlendirme atlama sayısını sınırlar [URL](../../engines/table_engines/special/url.md)- motor masaları. Ayarı tablolar iki tür tarafından oluşturulan bu geçerlidir: [CREATE TABLE](../../sql_reference/statements/create.md#create-table-query) sorgu ve [url](../../sql_reference/table_functions/url.md) tablo işlevi. + +Olası değerler: + +- Herhangi bir pozitif tamsayı şerbetçiotu sayısı. +- 0 — No hops allowed. + +Varsayılan değer: 0. + +## ınput\_format\_allow\_errors\_num {#settings-input_format_allow_errors_num} + +Metin biçimlerinden (CSV, TSV, vb.) okurken kabul edilebilir hataların maksimum sayısını ayarlar.). + +Varsayılan değer 0'dır. + +Her zaman ile eşleştirmek `input_format_allow_errors_ratio`. + +Satırları okurken bir hata oluştu, ancak hata sayacı hala daha az `input_format_allow_errors_num`, ClickHouse satırı yok sayar ve bir sonrakine geçer. + +Eğer her ikisi de `input_format_allow_errors_num` ve `input_format_allow_errors_ratio` aşıldı, ClickHouse bir istisna atar. + +## ınput\_format\_allow\_errors\_ratio {#settings-input_format_allow_errors_ratio} + +Metin biçimlerinden (CSV, TSV, vb.) okurken izin verilen maksimum hata yüzdesini ayarlar.). +Hataların yüzdesi 0 ile 1 arasında kayan nokta sayısı olarak ayarlanır. + +Varsayılan değer 0'dır. + +Her zaman ile eşleştirmek `input_format_allow_errors_num`. + +Satırları okurken bir hata oluştu, ancak hata sayacı hala daha az `input_format_allow_errors_ratio`, ClickHouse satırı yok sayar ve bir sonrakine geçer. + +Eğer her ikisi de `input_format_allow_errors_num` ve `input_format_allow_errors_ratio` aşıldı, ClickHouse bir istisna atar. + +## ınput\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions} + +Hızlı akış ayrıştırıcısı verileri ayrıştıramazsa, tam SQL ayrıştırıcısını etkinleştirir veya devre dışı bırakır. Bu ayar yalnızca için kullanılır [Değerler](../../interfaces/formats.md#data-format-values) veri ekleme sırasında biçimlendirin. Sözdizimi ayrıştırma hakkında daha fazla bilgi için bkz: [Sözdizimi](../../sql_reference/syntax.md) bölme. + +Olası değerler: + +- 0 — Disabled. + + Bu durumda, biçimlendirilmiş veri sağlamanız gerekir. Görmek [Biçimliler](../../interfaces/formats.md) bölme. + +- 1 — Enabled. + + Bu durumda, bir SQL ifadesini bir değer olarak kullanabilirsiniz, ancak veri ekleme bu şekilde çok daha yavaştır. Yalnızca biçimlendirilmiş veri eklerseniz, ClickHouse ayar değeri 0 gibi davranır. + +Varsayılan değer: 1. + +Kullanım örneği + +Ekle [DateTime](../../sql_reference/data_types/datetime.md) farklı ayarlarla değer yazın. + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Exception on client: +Code: 27. DB::Exception: Cannot parse input: expected ) before: now()): (at row 1) +``` + +``` sql +SET input_format_values_interpret_expressions = 1; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Ok. +``` + +Son sorgu Aşağıdakilere eşdeğerdir: + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t SELECT now() +``` + +``` text +Ok. 
+``` + +## ınput\_format\_values\_deduce\_templates\_of\_expressions {#settings-input_format_values_deduce_templates_of_expressions} + +SQL deyimleri için şablon kesintisini etkinleştirir veya devre dışı bırakır [Değerler](../../interfaces/formats.md#data-format-values) biçimli. Bu ayrıştırma ve ifadeleri yorumlama sağlar `Values` ardışık satırlardaki ifadeler aynı yapıya sahipse çok daha hızlı. ClickHouse, bir ifadenin şablonunu çıkarmaya, bu şablonu kullanarak aşağıdaki satırları ayrıştırmaya ve ifadeyi başarılı bir şekilde ayrıştırılmış satırların bir yığınında değerlendirmeye çalışır. + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 1. + +Aşağıdaki sorgu için: + +``` sql +INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (upper('Values')), ... +``` + +- Eğer `input_format_values_interpret_expressions=1` ve `format_values_deduce_templates_of_expressions=0`, ifadeler her satır için ayrı ayrı yorumlanır (bu çok sayıda satır için çok yavaştır). +- Eğer `input_format_values_interpret_expressions=0` ve `format_values_deduce_templates_of_expressions=1`, birinci, ikinci ve üçüncü satırlardaki ifadeler şablon kullanılarak ayrıştırılır `lower(String)` ve birlikte yorumlanır, ileri satırdaki ifade başka bir şablonla ayrıştırılır (`upper(String)`). +- Eğer `input_format_values_interpret_expressions=1` ve `format_values_deduce_templates_of_expressions=1`, önceki durumda olduğu gibi aynı, ama aynı zamanda şablon anlamak mümkün değilse ayrı ayrı ifadeleri yorumlama geri dönüş sağlar. + +## ınput\_format\_values\_accurate\_types\_of\_literals {#settings-input-format-values-accurate-types-of-literals} + +Bu ayar yalnızca şu durumlarda kullanılır `input_format_values_deduce_templates_of_expressions = 1`. Bu, bazı sütunların ifadelerinin aynı yapıya sahip olması, ancak farklı türlerde sayısal değişmezler içermesi olabilir, örneğin + +``` sql +(..., abs(0), ...), -- UInt64 literal +(..., abs(3.141592654), ...), -- Float64 literal +(..., abs(-1), ...), -- Int64 literal +``` + +Olası değerler: + +- 0 — Disabled. + + In this case, ClickHouse may use a more general type for some literals (e.g., `Float64` veya `Int64` yerine `UInt64` için `42`), ancak taşma ve hassasiyet sorunlarına neden olabilir. + +- 1 — Enabled. + + Bu durumda, ClickHouse gerçek literal türünü denetler ve karşılık gelen türde bir ifade şablonu kullanır. Bazı durumlarda, ifade değerlendirmesini önemli ölçüde yavaşlatabilir `Values`. + +Varsayılan değer: 1. + +## ınput\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields} + +Yaparken `INSERT` sorgular, atlanmış giriş sütun değerlerini ilgili sütunların varsayılan değerleriyle değiştirin. Bu seçenek yalnızca aşağıdakiler için geçerlidir [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) ve [TabSeparated](../../interfaces/formats.md#tabseparated) biçimliler. + +!!! note "Not" + Bu seçenek etkinleştirildiğinde, genişletilmiş tablo meta verileri sunucudan istemciye gönderilir. Sunucuda ek bilgi işlem kaynakları tüketir ve performansı azaltabilir. + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 1. + +## ınput\_format\_tsv\_empty\_as\_default {#settings-input-format-tsv-empty-as-default} + +Etkinleştirildiğinde, TSV'DEKİ boş giriş alanlarını varsayılan değerlerle değiştirin. Karmaşık varsayılan ifadeler için `input_format_defaults_for_omitted_fields` de etkin olmalıdır. + +Varsayılan olarak devre dışı. 
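+
+Varsayımsal bir taslak (tablo ve değerler yalnızca örnek amaçlıdır): TSV girdisinde boş bırakılan alan, sütunun varsayılan değeriyle doldurulur:
+
+``` sql
+CREATE TABLE demo (s String, x UInt32 DEFAULT 42) ENGINE = Memory;
+SET input_format_tsv_empty_as_default = 1;
+SET input_format_defaults_for_omitted_fields = 1; -- karmaşık varsayılan ifadeler için gereklidir
+-- Aşağıdaki veri satırında x alanı boştur (satır sonundaki sekmeden sonra hiçbir şey yok);
+-- ayar etkin olduğundan x sütunu 42 varsayılan değerini alır.
+INSERT INTO demo FORMAT TSV
+merhaba	
+```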
+ +## ınput\_format\_null\_as\_default {#settings-input-format-null-as-default} + +Giriş verileri içeriyorsa, varsayılan değerleri kullanarak etkinleştirir veya devre dışı bırakır `NULL`, ancak ilgili sütunun veri türü değil `Nullable(T)` (Metin Giriş biçimleri için). + +## ınput\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields} + +Etkinleştirir veya ek veri ekleme atlama devre dışı bırakır. + +Veri yazarken, giriş verileri hedef tabloda bulunmayan sütunlar içeriyorsa, ClickHouse bir özel durum atar. Atlama etkinleştirilirse, ClickHouse ek veri eklemez ve bir istisna atmaz. + +Desteklenen formatlar: + +- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) +- [CSVWithNames](../../interfaces/formats.md#csvwithnames) +- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) +- [TSKV](../../interfaces/formats.md#tskv) + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 0. + +## ınput\_format\_ımport\_nested\_json {#settings-input_format_import_nested_json} + +Json verilerinin iç içe nesnelerle eklenmesini etkinleştirir veya devre dışı bırakır. + +Desteklenen formatlar: + +- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 0. + +Ayrıca bakınız: + +- [İç içe yapıların kullanımı](../../interfaces/formats.md#jsoneachrow-nested) ile... `JSONEachRow` biçimli. + +## ınput\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header} + +Veri eklerken sütun sırasını denetlemeyi etkinleştirir veya devre dışı bırakır. + +Ekleme performansını artırmak için, giriş verilerinin sütun sırasının hedef tablodaki ile aynı olduğundan eminseniz, bu denetimi devre dışı bırakmanızı öneririz. + +Desteklenen formatlar: + +- [CSVWithNames](../../interfaces/formats.md#csvwithnames) +- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) + +Olası değerler: + +- 0 — Disabled. +- 1 — Enabled. + +Varsayılan değer: 1. + +## date\_time\_input\_format {#settings-date_time_input_format} + +Tarih ve saat metin gösterimi bir ayrıştırıcı seçme sağlar. + +Ayar için geçerli değildir [tarih ve saat fonksiyonları](../../sql_reference/functions/date_time_functions.md). + +Olası değerler: + +- `'best_effort'` — Enables extended parsing. + + ClickHouse temel ayrıştırmak `YYYY-MM-DD HH:MM:SS` format ve tüm [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) tarih ve saat biçimleri. Mesela, `'2018-06-08T01:02:03.000Z'`. + +- `'basic'` — Use basic parser. + + ClickHouse sadece temel ayrıştırmak `YYYY-MM-DD HH:MM:SS` biçimli. Mesela, `'2019-08-20 10:18:56'`. + +Varsayılan değer: `'basic'`. + +Ayrıca bakınız: + +- [DateTime veri türü.](../../sql_reference/data_types/datetime.md) +- [Tarihler ve saatler ile çalışmak için fonksiyonlar.](../../sql_reference/functions/date_time_functions.md) + +## join\_default\_strictness {#settings-join_default_strictness} + +Ayarlar varsayılan strictness için [Maddeleri KATILIN ](../../sql_reference/statements/select.md#select-join). + +Olası değerler: + +- `ALL` — If the right table has several matching rows, ClickHouse creates a [Kartezyen ürün](https://en.wikipedia.org/wiki/Cartesian_product) eşleşen satırlardan. Bu normaldir `JOIN` standart SQL'DEN davranış. +- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` ve `ALL` aynı. +- `ASOF` — For joining sequences with an uncertain match. 
+- `Empty string` — If `ALL` veya `ANY` sorguda belirtilmezse, ClickHouse bir özel durum atar. + +Varsayılan değer: `ALL`. + +## join\_any\_take\_last\_row {#settings-join_any_take_last_row} + +İle birleştirme işlemlerinin davranışını değiştirir `ANY` katılık. + +!!! warning "Dikkat" + Bu ayar yalnızca aşağıdakiler için geçerlidir `JOIN` ile işlemler [Katmak](../../engines/table_engines/special/join.md) motor tabloları. + +Olası değerler: + +- 0 — If the right table has more than one matching row, only the first one found is joined. +- 1 — If the right table has more than one matching row, only the last one found is joined. + +Varsayılan değer: 0. + +Ayrıca bakınız: + +- [Jo](../../sql_reference/statements/select.md#select-join) +- [Jo tablein table engine](../../engines/table_engines/special/join.md) +- [join\_default\_strictness](#settings-join_default_strictness) + +## join\_use\_nulls {#join_use_nulls} + +Türünü ayarlar [JOIN](../../sql_reference/statements/select.md) davranış. Tabloları birleştirirken boş hücreler görünebilir. ClickHouse bu ayara göre onları farklı şekilde doldurur. + +Olası değerler: + +- 0 — The empty cells are filled with the default value of the corresponding field type. +- 1 — `JOIN` standart SQL ile aynı şekilde davranır. Karşılık gelen alanın türü dönüştürülür [Nullable](../../sql_reference/data_types/nullable.md#data_type-nullable) ve boş hücreler ile doldurulur [NULL](../../sql_reference/syntax.md). + +Varsayılan değer: 0. + +## max\_block\_size {#setting-max_block_size} + +Clickhouse'da, veriler bloklarla (sütun parçaları kümeleri) işlenir. Tek bir blok için dahili işlem döngüleri yeterince verimlidir, ancak her blokta gözle görülür harcamalar vardır. Bu `max_block_size` ayar, blokun boyutunun (satır sayımında) tablolardan yükleneceği bir öneridir. Blok boyutu çok küçük olmamalı, böylece her bloktaki harcamalar hala fark edilebilir, ancak çok büyük olmamalı, böylece ilk blok hızla işlendikten sonra tamamlanan limitli sorgu çok büyük olmamalıdır. Amaç, birden çok iş parçacığında çok sayıda sütun ayıklarken çok fazla bellek tüketmekten kaçınmak ve en azından bazı önbellek konumlarını korumaktır. + +Varsayılan değer: 65,536. + +Blok boyutu `max_block_size` her zaman tablodan yüklenmez. Daha az verinin alınması gerektiği açıksa, daha küçük bir blok işlenir. + +## preferred\_block\_size\_bytes {#preferred-block-size-bytes} + +Olarak aynı amaç için kullanılır `max_block_size`, ancak önerilen blok boyutunu bayt cinsinden, bloktaki satır sayısına uyarlayarak ayarlar. +Ancak, blok boyutu daha fazla olamaz `max_block_size` satırlar. +Varsayılan olarak: 1.000.000. Sadece MergeTree motorlarından okurken çalışır. + +## merge\_tree\_mın\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read} + +Bir dosyadan okunacak satır sayısı ise [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) tablo aşıyor `merge_tree_min_rows_for_concurrent_read` daha sonra ClickHouse, bu dosyadan birkaç iş parçacığı üzerinde eşzamanlı bir okuma gerçekleştirmeye çalışır. + +Olası değerler: + +- Herhangi bir pozitif tamsayı. + +Varsayılan değer: 163840. + +## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read} + +Eğer bir dosyadan okunacak bayt sayısı [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)- motor tablosu `merge_tree_min_bytes_for_concurrent_read`, daha sonra ClickHouse, bu dosyadan aynı anda birkaç iş parçacığında okumaya çalışır. 
+ +Olası değer: + +- Herhangi bir pozitif tamsayı. + +Varsayılan değer: 251658240. + +## merge\_tree\_mın\_rows\_for\_seek {#setting-merge-tree-min-rows-for-seek} + +Bir dosyada okunacak iki veri bloğu arasındaki mesafe daha az ise `merge_tree_min_rows_for_seek` satırlar, daha sonra ClickHouse dosyayı aramaz, ancak verileri sırayla okur. + +Olası değerler: + +- Herhangi bir pozitif tamsayı. + +Varsayılan değer: 0. + +## merge\_tree\_min\_bytes\_for\_seek {#setting-merge-tree-min-bytes-for-seek} + +Bir dosyada okunacak iki veri bloğu arasındaki mesafe daha az ise `merge_tree_min_bytes_for_seek` bayt, daha sonra ClickHouse sırayla böylece ekstra arama kaçınarak, her iki blok içeren bir dosya aralığını okur. + +Olası değerler: + +- Herhangi bir pozitif tamsayı. + +Varsayılan değer: 0. + +## merge\_tree\_coarse\_index\_granularity {#setting-merge-tree-coarse-index-granularity} + +Veri ararken, ClickHouse dizin dosyasındaki veri işaretlerini denetler. ClickHouse gerekli tuşların bazı aralıklarda olduğunu bulursa, bu aralığı `merge_tree_coarse_index_granularity` subranges ve gerekli anahtarları orada yinelemeli olarak arar. + +Olası değerler: + +- Herhangi bir pozitif bile tamsayı. + +Varsayılan değer: 8. + +## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache} + +ClickHouse daha fazla okumak gerekiyorsa `merge_tree_max_rows_to_use_cache` bir sorgudaki satırlar, sıkıştırılmamış blokların önbelleğini kullanmaz. + +Sıkıştırılmamış blokların önbelleği, sorgular için ayıklanan verileri depolar. ClickHouse, tekrarlanan küçük sorgulara verilen yanıtları hızlandırmak için bu önbelleği kullanır. Bu ayar, önbelleğin büyük miktarda veri okuyan sorgularla çöpe atmasını önler. Bu [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) sunucu ayarı, sıkıştırılmamış blokların önbelleğinin boyutunu tanımlar. + +Olası değerler: + +- Herhangi bir pozitif tamsayı. + +Default value: 128 ✕ 8192. + +## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache} + +ClickHouse daha fazla okumak gerekiyorsa `merge_tree_max_bytes_to_use_cache` bir sorguda bayt, sıkıştırılmamış blokların önbelleğini kullanmaz. + +Sıkıştırılmamış blokların önbelleği, sorgular için ayıklanan verileri depolar. ClickHouse, tekrarlanan küçük sorgulara verilen yanıtları hızlandırmak için bu önbelleği kullanır. Bu ayar, önbelleğin büyük miktarda veri okuyan sorgularla çöpe atmasını önler. Bu [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) sunucu ayarı, sıkıştırılmamış blokların önbelleğinin boyutunu tanımlar. + +Olası değer: + +- Herhangi bir pozitif tamsayı. + +Varsayılan değer: 2013265920. + +## min\_bytes\_to\_use\_direct\_io {#settings-min-bytes-to-use-direct-io} + +Depolama diskine Doğrudan G/Ç erişimi kullanmak için gereken minimum veri hacmi. + +ClickHouse, tablolardan veri okurken bu ayarı kullanır. Okunacak tüm verilerin toplam depolama hacmi aşarsa `min_bytes_to_use_direct_io` bayt, daha sonra ClickHouse ile depolama diskinden veri okur `O_DIRECT` seçenek. + +Olası değerler: + +- 0 — Direct I/O is disabled. +- Pozitif tamsayı. + +Varsayılan değer: 0. + +## log\_queries {#settings-log-queries} + +Sorgu günlüğü ayarlama. + +Bu kurulum ile Clickhouse'a gönderilen sorgular, [query\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-log) sunucu yapılandırma parametresi. 
+ +Örnek: + +``` text +log_queries=1 +``` + +## log\_queries\_min\_type {#settings-log-queries-min-type} + +`query_log` giriş yapmak için en az tür. + +Olası değerler: +- `QUERY_START` (`=1`) +- `QUERY_FINISH` (`=2`) +- `EXCEPTION_BEFORE_START` (`=3`) +- `EXCEPTION_WHILE_PROCESSING` (`=4`) + +Varsayılan değer: `QUERY_START`. + +Entiries gider hangi sınırlamak için kullanılabilir `query_log`, sadece hatalarda ilginç olduğunuzu söyleyin, o zaman kullanabilirsiniz `EXCEPTION_WHILE_PROCESSING`: + +``` text +log_queries_min_type='EXCEPTION_WHILE_PROCESSING' +``` + +## log\_query\_threads {#settings-log-query-threads} + +Sorgu iş parçacığı günlüğü ayarlama. + +Bu kurulum ile ClickHouse tarafından çalıştırılan sorguların konuları, [query\_thread\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) sunucu yapılandırma parametresi. + +Örnek: + +``` text +log_query_threads=1 +``` + +## max\_ınsert\_block\_size {#settings-max_insert_block_size} + +Bir tabloya eklemek için oluşturulacak blokların boyutu. +Bu ayar yalnızca sunucu blokları oluşturduğu durumlarda geçerlidir. +Örneğin, HTTP arabirimi üzerinden bir ekleme için sunucu veri biçimini ayrıştırır ve belirtilen boyuttaki blokları oluşturur. +Ancak, clickhouse-client kullanırken, istemci verileri kendisi ayrıştırır ve ‘max\_insert\_block\_size’ sunucudaki ayar, eklenen blokların boyutunu etkilemez. +Veri SELECT sonra oluşturulan aynı blokları kullanarak eklendiğinden, INSERT SELECT kullanırken ayarı da bir amacı yoktur. + +Varsayılan değer: 1.048,576. + +Varsayılan biraz daha fazla `max_block_size`. Bunun nedeni, bazı tablo motorlarının (`*MergeTree`) oldukça büyük bir varlık olan eklenen her blok için diskte bir veri parçası oluşturun. Benzer bir şekilde, `*MergeTree` tablolar ekleme sırasında verileri sıralar ve yeterince büyük bir blok boyutu RAM'de daha fazla veriyi sıralamaya izin verir. + +## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries} + +Dağıtılmış sorgular için gecikmeli yinelemeleri devre dışı bırakır. Görmek [Çoğalma](../../engines/table_engines/mergetree_family/replication.md). + +Saati saniye olarak ayarlar. Bir çoğaltma ayarlanan değerden daha fazla kalıyorsa, Bu çoğaltma kullanılmaz. + +Varsayılan değer: 300. + +Yaparken kullanılır `SELECT` çoğaltılmış tablolara işaret eden dağıtılmış bir tablodan. + +## max\_threads {#settings-max_threads} + +Uzak sunuculardan veri almak için iş parçacıkları hariç olmak üzere sorgu işleme iş parçacıklarının maksimum sayısı (bkz. ‘max\_distributed\_connections’ parametre). + +Bu parametre, paralel olarak sorgu işleme ardışık düzeninin aynı aşamalarını gerçekleştiren iş parçacıkları için geçerlidir. +Örneğin, bir tablodan okurken, ifadeleri işlevlerle değerlendirmek mümkün ise, en azından paralel olarak grup için where ve pre-aggregate ile filtreleyin ‘max\_threads’ konu sayısı, daha sonra ‘max\_threads’ kullanılır. + +Varsayılan değer: fiziksel CPU çekirdeği sayısı. + +Bir kerede bir sunucuda normal olarak birden az SELECT sorgusu çalıştırılırsa, bu parametreyi gerçek işlemci çekirdeği sayısından biraz daha küçük bir değere ayarlayın. + +Bir sınır nedeniyle hızlı bir şekilde tamamlanan sorgular için, daha düşük bir ‘max\_threads’. Örneğin, gerekli sayıda giriş her blokta ve max\_threads = 8'de bulunuyorsa, sadece bir tane okumak için yeterli olsa da, 8 blok alınır. + +Daha küçük `max_threads` değer, daha az bellek tüketilir. 
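+
+Örneğin, tek bir sorgunun kullanacağı iş parçacığı sayısını sınırlamak için (değerler yalnızca örnek amaçlıdır):
+
+``` sql
+SET max_threads = 4;
+-- toplama en fazla 4 iş parçacığıyla paralel olarak yürütülür
+SELECT sum(number) FROM numbers_mt(1000000000);
+```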
+ +## max\_ınsert\_threads {#settings-max-insert-threads} + +Çalıştırılacak maksimum iş parçacığı sayısı `INSERT SELECT` sorgu. + +Olası değerler: + +- 0 (or 1) — `INSERT SELECT` paralel infaz yok. +- Pozitif tamsayı. 1'den büyük. + +Varsayılan değer: 0. + +Paralellik `INSERT SELECT` etkisi vardır sadece eğer `SELECT` bölüm paralel olarak yürütülür, bkz [max\_threads](#settings-max_threads) ayar. +Daha yüksek değerler daha yüksek bellek kullanımına yol açacaktır. + +## max\_compress\_block\_size {#max-compress-block-size} + +Bir tabloya yazmak için sıkıştırmadan önce sıkıştırılmamış veri bloklarının en büyük boyutu. Varsayılan olarak, 1.048.576 (1 MiB). Boyut azaltılırsa, sıkıştırma oranı önemli ölçüde azalır, önbellek konumu nedeniyle sıkıştırma ve dekompresyon hızı biraz artar ve bellek tüketimi azalır. Bu ayarı değiştirmek için genellikle herhangi bir neden yoktur. + +Sıkıştırma için blokları (bayttan oluşan bir bellek yığını) sorgu işleme için bloklarla (bir tablodan satır kümesi) karıştırmayın. + +## min\_compress\_block\_size {#min-compress-block-size} + +İçin [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)" Tablolar. Sorguları işlerken gecikmeyi azaltmak için, boyutu en az bir sonraki işareti yazarken bir blok sıkıştırılır ‘min\_compress\_block\_size’. Varsayılan olarak, 65.536. + +Sıkıştırılmamış veriler daha az ise, bloğun gerçek boyutu ‘max\_compress\_block\_size’, bu değerden daha az değildir ve bir işaret için veri hacminden daha az değildir. + +Bir örneğe bakalım. Varsaymak ‘index\_granularity’ tablo oluşturma sırasında 8192 olarak ayarlandı. + +Bir uint32 tipi sütun yazıyoruz (değer başına 4 bayt). 8192 satır yazarken, toplam 32 KB veri olacaktır. Min\_compress\_block\_size = 65.536 olduğundan, her iki işaret için sıkıştırılmış bir blok oluşturulacaktır. + +Dize türüne sahip bir URL sütunu yazıyoruz (değer başına ortalama 60 bayt boyutu). 8192 satır yazarken, ortalama 500 KB veri biraz daha az olacaktır. Bu 65,536'dan fazla olduğu için, her işaret için sıkıştırılmış bir blok oluşturulacaktır. Bu durumda, diskteki verileri tek bir işaret aralığında okurken, ekstra veriler sıkıştırılmaz. + +Bu ayarı değiştirmek için genellikle herhangi bir neden yoktur. + +## max\_query\_size {#settings-max_query_size} + +SQL ayrıştırıcısı ile ayrıştırmak için RAM'e alınabilecek bir sorgunun en büyük kısmı. +INSERT sorgusu, bu kısıtlamaya dahil olmayan ayrı bir akış ayrıştırıcısı (o(1) RAM tüketir) tarafından işlenen INSERT için veri de içerir. + +Varsayılan değer: 256 KiB. + +## ınteractive\_delay {#interactive-delay} + +İstek yürütülmesinin iptal edilip edilmediğini kontrol etmek ve ilerlemeyi göndermek için mikrosaniye cinsinden Aralık. + +Varsayılan değer: 100.000 (iptal için denetler ve ilerleme saniyede on kez gönderir). + +## connect\_timeout, receıve\_tımeout, send\_timeout {#connect-timeout-receive-timeout-send-timeout} + +İstemci ile iletişim kurmak için kullanılan sokette saniye cinsinden zaman aşımları. + +Varsayılan değer: 10, 300, 300. + +## cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close} + +Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. + +Varsayılan değer: 0 + +## poll\_interval {#poll-interval} + +Belirtilen saniye sayısı için bir bekleme döngüsünde kilitleyin. + +Varsayılan değer: 10. 
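+
+Geçerli oturumda etkin olan zaman aşımı değerleri, örneğin `system.settings` tablosundan denetlenebilir (varsayımsal bir taslak):
+
+``` sql
+SELECT name, value, changed
+FROM system.settings
+WHERE name IN ('connect_timeout', 'receive_timeout', 'send_timeout');
+```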
+
+## max\_distributed\_connections {#max-distributed-connections}
+
+Tek bir dağıtılmış tabloya yönelik tek bir sorgunun dağıtılmış işlenmesi için uzak sunucularla kurulan eşzamanlı bağlantıların en fazla sayısı. Kümedeki sunucu sayısından az olmayan bir değer ayarlamanızı öneririz.
+
+Varsayılan değer: 1024.
+
+Aşağıdaki parametreler yalnızca dağıtılmış tablolar oluşturulurken (ve bir sunucu başlatılırken) kullanılır; bu nedenle bunları çalışma zamanında değiştirmek için bir neden yoktur.
+
+## distributed\_connections\_pool\_size {#distributed-connections-pool-size}
+
+Tüm sorguların tek bir dağıtılmış tabloya dağıtılmış işlenmesi için uzak sunucularla kurulan eşzamanlı bağlantıların en fazla sayısı. Kümedeki sunucu sayısından az olmayan bir değer ayarlamanızı öneririz.
+
+Varsayılan değer: 1024.
+
+## connect\_timeout\_with\_failover\_ms {#connect-timeout-with-failover-ms}
+
+Küme tanımında ‘shard’ ve ‘replica’ bölümleri kullanılıyorsa, dağıtılmış bir tablo motoru için uzak bir sunucuya bağlanırken milisaniye cinsinden zaman aşımı.
+Bağlantı başarısız olursa, çeşitli kopyalara bağlanmak için birkaç deneme yapılır.
+
+Varsayılan değer: 50.
+
+## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
+
+Dağıtılmış tablo motoru için her kopyayla bağlantı deneme sayısı.
+
+Varsayılan değer: 3.
+
+## extremes {#extremes}
+
+Aşırı değerlerin (bir sorgu sonucunun sütunlarındaki minimum ve maksimum değerler) sayılıp sayılmayacağı. 0 veya 1 kabul eder. Varsayılan olarak, 0 (devre dışı).
+Daha fazla bilgi için bkz. “Extreme values” bölümü.
+
+## use\_uncompressed\_cache {#setting-use_uncompressed_cache}
+
+Sıkıştırılmamış blokların önbelleğinin kullanılıp kullanılmayacağı. 0 veya 1 kabul eder. Varsayılan olarak, 0 (devre dışı).
+Sıkıştırılmamış önbelleği kullanmak (yalnızca MergeTree ailesindeki tablolar için), çok sayıda kısa sorguyla çalışırken gecikmeyi önemli ölçüde azaltabilir ve verimi artırabilir. Sık sık kısa istek gönderen kullanıcılar için bu ayarı etkinleştirin. Ayrıca şuna dikkat edin: [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted.
+
+En azından biraz büyük bir veri hacmi (bir milyon satır veya daha fazla) okuyan sorgular için sıkıştırılmamış önbellek, gerçekten küçük sorgulara yer açmak amacıyla otomatik olarak devre dışı bırakılır. Bu, ‘use\_uncompressed\_cache’ ayarını her zaman 1 olarak ayarlanmış tutabileceğiniz anlamına gelir.
+
+## replace\_running\_query {#replace-running-query}
+
+HTTP arayüzü kullanılırken ‘query\_id’ parametresi geçirilebilir. Bu, sorgu tanımlayıcısı olarak hizmet eden herhangi bir dizedir.
+Aynı kullanıcıdan aynı ‘query\_id’ değerine sahip bir sorgu o anda zaten varsa, davranış ‘replace\_running\_query’ parametresine bağlıdır.
+
+`0` (default) – Throw an exception (don't allow the query to run if a query with the same ‘query\_id’ zaten çalışıyorsa).
+
+`1` – Cancel the old query and start running the new one.
+
+Yandex.Metrica, segmentasyon koşulları için öneriler uygulamak amacıyla bu parametreyi 1 olarak ayarlar. Bir sonraki karakter girildikten sonra eski sorgu henüz tamamlanmamışsa iptal edilmelidir.
+
+## stream\_flush\_interval\_ms {#stream-flush-interval-ms}
+
+Zaman aşımı durumunda veya bir iş parçacığı [max\_insert\_block\_size](#settings-max_insert_block_size) satır oluşturduğunda, akışlı tablolar için geçerlidir.
+ +Varsayılan değer 7500'dür. + +Küçük değer, daha sık veri tablosuna temizlendi. Değeri çok düşük ayarlamak, düşük performansa yol açar. + +## dengeleme {#settings-load_balancing} + +Dağıtılmış sorgu işleme için kullanılan yinelemeler seçimi algoritmasını belirtir. + +ClickHouse kopyaları seçme aşağıdaki algoritmaları destekler: + +- [Rastgele](#load_balancing-random) (varsayılan olarak) +- [En yakın hostnamename](#load_balancing-nearest_hostname) +- [Sıralı](#load_balancing-in_order) +- [İlk veya rastgele](#load_balancing-first_or_random) + +### Rastgele (varsayılan olarak) {#load_balancing-random} + +``` sql +load_balancing = random +``` + +Her yineleme için hata sayısı sayılır. Sorgu, en az hata ile çoğaltmaya gönderilir ve bunlardan birkaçı varsa, bunlardan herhangi birine gönderilir. +Dezavantajları: sunucu yakınlık hesaba değil; kopyaları farklı veri varsa, aynı zamanda farklı veri alırsınız. + +### En Yakın Hostnamename {#load_balancing-nearest_hostname} + +``` sql +load_balancing = nearest_hostname +``` + +The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server's hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames). + +Örneğin, example01-01-1 ve example01-01-2.yandex.ru bir pozisyonda farklıdır, örneği01-01-1 ve örneği01-02-2 iki yerde farklılık gösterir. +Bu yöntem ilkel görünebilir, ancak ağ topolojisi hakkında harici veri gerektirmez ve IPv6 adreslerimiz için karmaşık olan IP adreslerini karşılaştırmaz. + +Bu nedenle, eşdeğer yinelemeler varsa, isme göre en yakın olanı tercih edilir. +Aynı sunucuya bir sorgu gönderirken, arızaların yokluğunda, dağıtılmış bir sorgunun da aynı sunuculara gideceğini varsayabiliriz. Bu nedenle, yinelemelere farklı veriler yerleştirilse bile, sorgu çoğunlukla aynı sonuçları döndürür. + +### Sıralı {#load_balancing-in_order} + +``` sql +load_balancing = in_order +``` + +Yapılandırmada belirtilen hataları aynı sayıda yinelemeler aynı sırayla erişilir. +Bu yöntem, tam olarak hangi kopyanın tercih edildiğini bildiğinizde uygundur. + +### İlk veya rastgele {#load_balancing-first_or_random} + +``` sql +load_balancing = first_or_random +``` + +İlk kullanılamaz, bu algoritma kümesindeki ilk yineleme veya rasgele bir yineleme seçer. Çapraz çoğaltma topolojisi kurulumlarında etkilidir, ancak diğer yapılandırmalarda işe yaramaz. + +Bu `first_or_random` algoritma sorunu çözer `in_order` algoritma. İle `in_order`, bir çoğaltma aşağı giderse, kalan yinelemeler normal trafik miktarını işlerken bir sonraki bir çift yük alır. Kullanırken `first_or_random` algoritma, yük hala mevcut olan kopyalar arasında eşit olarak dağıtılır. + +## prefer\_localhost\_replica {#settings-prefer-localhost-replica} + +Etkinleştirir / devre dışı bırakır tercih kullanarak localhost çoğaltma dağıtılmış sorguları işlerken. + +Olası değerler: + +- 1 — ClickHouse always sends a query to the localhost replica if it exists. +- 0 — ClickHouse uses the balancing strategy specified by the [dengeleme](#settings-load_balancing) ayar. + +Varsayılan değer: 1. + +!!! 
warning "Uyarıcı" + Kullanıyorsanız bu ayarı devre dışı bırakın [max\_parallel\_replicas](#settings-max_parallel_replicas). + +## totals\_mode {#totals-mode} + +MAX\_ROWS\_TO\_GROUP\_BY ve group\_by\_overflow\_mode = ‘any’ Bulunmak. +Bölümüne bakınız “WITH TOTALS modifier”. + +## totals\_auto\_threshold {#totals-auto-threshold} + +İçin eşik `totals_mode = 'auto'`. +Bölümüne bakınız “WITH TOTALS modifier”. + +## max\_parallel\_replicas {#settings-max_parallel_replicas} + +Bir sorgu yürütülürken her parça için en fazla yineleme sayısı. +Tutarlılık için (aynı veri bölünmesinin farklı bölümlerini elde etmek için), bu seçenek yalnızca örnekleme anahtarı ayarlandığında çalışır. +Çoğaltma gecikme denetlenmez. + +## derlemek {#compile} + +Sorguların derlenmesini etkinleştirin. Varsayılan olarak, 0 (devre dışı). + +Derleme yalnızca sorgu işleme boru hattının bir parçası için kullanılır: toplamanın ilk aşaması için (GROUP BY). +Potansiyel hattın bu bölümü derlenmişse, sorgu, kısa döngüleri ve inlining toplu işlev çağrılarının dağıtımı nedeniyle daha hızlı çalışabilir. Birden çok basit toplama işlevine sahip sorgular için maksimum performans artışı (nadir durumlarda dört kata kadar daha hızlı) görülür. Tipik olarak, performans kazancı önemsİzdİr. Çok nadir durumlarda, sorgu yürütülmesini yavaşlatabilir. + +## min\_count\_to\_compile {#min-count-to-compile} + +Derleme çalıştırmadan önce derlenmiş bir kod yığını potansiyel olarak kaç kez kullanılır. Varsayılan olarak, 3. +For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values ​​starting with 1. Compilation normally takes about 5-10 seconds. +Değer 1 veya daha fazla ise, derleme zaman uyumsuz olarak ayrı bir iş parçacığında oluşur. Sonuç, şu anda çalışmakta olan sorgular da dahil olmak üzere hazır olduğu anda kullanılacaktır. + +Derlenmiş kod, sorguda kullanılan toplama işlevlerinin her farklı birleşimi ve GROUP BY yan tümcesindeki anahtarların türü için gereklidir. +The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted. + +## output\_format\_json\_quote\_64bit\_integers {#session_settings-output_format_json_quote_64bit_integers} + +Değer doğruysa, json\* Int64 ve Uİnt64 formatlarını kullanırken tamsayılar tırnak içinde görünür (çoğu JavaScript uygulamasıyla uyumluluk için); aksi takdirde, tamsayılar tırnak işaretleri olmadan çıktılanır. + +## format\_csv\_delimiter {#settings-format_csv_delimiter} + +Karakter CSV verilerinde bir sınırlayıcı olarak yorumlanır. Varsayılan olarak, sınırlayıcı `,`. + +## ınput\_format\_csv\_unquoted\_null\_literal\_as\_null {#settings-input_format_csv_unquoted_null_literal_as_null} + +CSV giriş biçimi sağlar veya unquoted ayrıştırma devre dışı bırakır için `NULL` literal olarak (eşanlamlı `\N`). + +## output\_format\_csv\_crlf\_end\_of\_line {#settings-output-format-csv-crlf-end-of-line} + +Unix stili (LF) yerine CSV'DE DOS/Windows stili çizgi ayırıcı (CRLF) kullanın. + +## output\_format\_tsv\_crlf\_end\_of\_line {#settings-output-format-tsv-crlf-end-of-line} + +Unıx stili (LF) yerine TSV'DE DOC/Windows stili çizgi ayırıcı (CRLF) kullanın. + +## insert\_quorum {#settings-insert_quorum} + +Çekirdek yazma sağlar. 
+
+- Eğer `insert_quorum < 2` ise, çekirdek yazma devre dışı bırakılır.
+- Eğer `insert_quorum >= 2` ise, çekirdek yazma etkindir.
+
+Varsayılan değer: 0.
+
+Nisap (quorum) yazmaları
+
+`INSERT`, yalnızca ClickHouse verileri `insert_quorum_timeout` süresi içinde `insert_quorum` sayıda yinelemeye doğru şekilde yazmayı başardığında başarılı olur. Herhangi bir nedenle başarılı yazma yapılan yineleme sayısı `insert_quorum` değerine ulaşmazsa, yazma başarısız olarak kabul edilir ve ClickHouse, verilerin zaten yazıldığı tüm yinelemelerden eklenen bloğu siler.
+
+Nisaptaki tüm yinelemeler tutarlıdır, yani önceki tüm `INSERT` sorgularının verilerini içerir. `INSERT` sırası doğrusallaştırılmıştır.
+
+`insert_quorum` ile yazılan verileri okurken [select\_sequential\_consistency](#settings-select_sequential_consistency) seçeneğini kullanabilirsiniz.
+
+ClickHouse şu durumlarda bir istisna oluşturur:
+
+- Sorgu sırasında kullanılabilir yinelemelerin sayısı `insert_quorum` değerinden az ise.
+- Önceki blok henüz yinelemelerin `insert_quorum` kadarına eklenmemişken veri yazma girişiminde bulunulursa. Bu durum, bir kullanıcı bir önceki `INSERT` sorgusu `insert_quorum` ile tamamlanmadan yeni bir `INSERT` gerçekleştirmeye çalışırsa oluşabilir.
+
+Ayrıca bakınız:
+
+- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)
+- [select\_sequential\_consistency](#settings-select_sequential_consistency)
+
+## insert\_quorum\_timeout {#settings-insert_quorum_timeout}
+
+Saniye cinsinden çekirdek yazma zaman aşımı. Zaman aşımı geçtiyse ve yazma henüz gerçekleşmediyse, ClickHouse bir istisna oluşturur ve istemcinin aynı bloğu aynı veya başka bir yinelemeye yazmak için sorguyu yinelemesi gerekir.
+
+Varsayılan değer: 60 saniye.
+
+Ayrıca bakınız:
+
+- [insert\_quorum](#settings-insert_quorum)
+- [select\_sequential\_consistency](#settings-select_sequential_consistency)
+
+## select\_sequential\_consistency {#settings-select_sequential_consistency}
+
+`SELECT` sorguları için sıralı tutarlılığı etkinleştirir veya devre dışı bırakır:
+
+Olası değerler:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Varsayılan değer: 0.
+
+Kullanma
+
+Sıralı tutarlılık etkinleştirildiğinde, ClickHouse istemcinin `SELECT` sorgusunu yalnızca `insert_quorum` ile yürütülen önceki tüm `INSERT` sorgularının verilerini içeren yinelemelerde çalıştırmasına izin verir. İstemci kısmi bir yinelemeye başvurursa, ClickHouse bir istisna oluşturur. SELECT sorgusu, yinelemelerin çekirdeğine henüz yazılmamış verileri içermez.
+
+Ayrıca bakınız:
+
+- [insert\_quorum](#settings-insert_quorum)
+- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)
+
+## insert\_deduplicate {#settings-insert-deduplicate}
+
+`INSERT` için blok tekilleştirmesini etkinleştirir veya devre dışı bırakır (Replicated\* tablolar için).
+
+Olası değerler:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Varsayılan değer: 1.
+
+Varsayılan olarak, `INSERT` deyimiyle çoğaltılmış tablolara eklenen bloklar tekilleştirilir (bkz. [Data Replication](../engines/table_engines/mergetree_family/replication.md)).
+
+## deduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views}
+
+Replicated\* tablolardan veri alan materialized görünümler için tekilleştirme denetimini etkinleştirir veya devre dışı bırakır.
+
+Olası değerler:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Varsayılan değer: 0.
+
+Kullanma
+
+Varsayılan olarak, tekilleştirme materialized görünümler için gerçekleştirilmez; yukarı akışta, kaynak tabloda yapılır.
+Eklenen bir blok, kaynak tablodaki tekilleştirme nedeniyle atlanırsa, ekli materialized görünümlere ekleme olmaz. 
Bu davranış, eklenen blokların materialized görünüm toplamasından sonra aynı olduğu, ancak kaynak tabloya farklı eklerden türetildiği durumlar için, yüksek oranda toplanmış verilerin materialized görünümlere eklenmesini sağlamak için vardır. +Aynı zamanda, bu davranış “breaks” `INSERT` idempotency. Eğer bir `INSERT` ana tabloya başarılı oldu ve `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper) a client will get an error and can retry the operation. However, the materialized view won't receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` bu davranışı değiştirmeye izin verir. Yeniden denemede, somutlaştırılmış bir görünüm tekrar ekleme işlemini alacak ve tekilleştirme kontrolünü kendi başına gerçekleştirecektir, +kaynak tablo için onay sonucunu yoksayar ve ilk hata nedeniyle kaybedilen satırları ekler. + +## max\_network\_bytes {#settings-max-network-bytes} + +Alınan veya bir sorgu yürütülürken ağ üzerinden iletilen veri birimi (bayt cinsinden) sınırlar. Bu ayar, her bir sorgu için geçerlidir. + +Olası değerler: + +- Pozitif tamsayı. +- 0 — Data volume control is disabled. + +Varsayılan değer: 0. + +## max\_network\_bandwidth {#settings-max-network-bandwidth} + +Ağ üzerinden veri alışverişinin hızını saniyede bayt cinsinden sınırlar. Bu ayar her sorgu için geçerlidir. + +Olası değerler: + +- Pozitif tamsayı. +- 0 — Bandwidth control is disabled. + +Varsayılan değer: 0. + +## max\_network\_bandwidth\_for\_user {#settings-max-network-bandwidth-for-user} + +Ağ üzerinden veri alışverişinin hızını saniyede bayt cinsinden sınırlar. Bu ayar, tek bir kullanıcı tarafından gerçekleştirilen tüm aynı anda çalışan sorgular için geçerlidir. + +Olası değerler: + +- Pozitif tamsayı. +- 0 — Control of the data speed is disabled. + +Varsayılan değer: 0. + +## max\_network\_bandwidth\_for\_all\_users {#settings-max-network-bandwidth-for-all-users} + +Verilerin ağ üzerinden saniyede bayt olarak değiştirildiği hızı sınırlar. Bu ayar, sunucuda aynı anda çalışan tüm sorgular için geçerlidir. + +Olası değerler: + +- Pozitif tamsayı. +- 0 — Control of the data speed is disabled. + +Varsayılan değer: 0. + +## count\_distinct\_implementation {#settings-count_distinct_implementation} + +Aşağıdakilerden hang theisinin `uniq*` işlevleri gerçekleştirmek için kullanılmalıdır [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count) yapma. + +Olası değerler: + +- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq) +- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined) +- [uniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64) +- [uniqHLL12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12) +- [uniqExact](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact) + +Varsayılan değer: `uniqExact`. + +## skip\_unavailable\_shards {#settings-skip_unavailable_shards} + +Etkinleştirir veya sessizce kullanılamaz kırıkları atlama devre dışı bırakır. + +Tüm kopyaları kullanılamıyorsa, Shard kullanılamaz olarak kabul edilir. Aşağıdaki durumlarda bir yineleme kullanılamaz: + +- ClickHouse herhangi bir nedenle kopya bağlanamıyor. + + Bir kopyaya bağlanırken, ClickHouse birkaç deneme gerçekleştirir. Tüm bu girişimler başarısız olursa, çoğaltma kullanılamaz kabul edilir. 
+ +- Çoğaltma DNS üzerinden çözülemez. + + Çoğaltmanın ana bilgisayar adı DNS aracılığıyla çözümlenemezse, aşağıdaki durumları gösterebilir: + + - Çoğaltma ana bilgisayar DNS kaydı yok. Dinamik DNS'YE sahip sistemlerde oluşabilir, örneğin, [Kubernetes](https://kubernetes.io), burada düğümler kesinti sırasında çözülmez olabilir ve bu bir hata değildir. + + - Yapılandırma hatası. ClickHouse yapılandırma dosyası yanlış bir ana bilgisayar adı içerir. + +Olası değerler: + +- 1 — skipping enabled. + + Bir parça kullanılamıyorsa, ClickHouse kısmi verilere dayalı bir sonuç döndürür ve düğüm kullanılabilirliği sorunlarını bildirmez. + +- 0 — skipping disabled. + + Bir shard kullanılamıyorsa, ClickHouse bir özel durum atar. + +Varsayılan değer: 0. + +## optimize\_skip\_unused\_shards {#settings-optimize_skip_unused_shards} + +Prewhere/WHERE (verilerin sharding anahtarı tarafından dağıtıldığını varsayar, aksi takdirde hiçbir şey yapmaz). + +Varsayılan değer: 0 + +## force\_optimize\_skip\_unused\_shards {#settings-force_optimize_skip_unused_shards} + +Sorgu yürütülmesini etkinleştirir veya devre dışı bırakır [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) etkin ve kullanılmayan kırıkları atlama mümkün değildir. Atlama mümkün değilse ve ayar etkinse özel durum atılır. + +Olası değerler: + +- 0-Devre Dışı (at notmayın) +- 1-sorgu yürütülmesini yalnızca tablonun sharding anahtarı varsa devre dışı bırakın +- 2-devre dışı sorgu yürütme ne olursa olsun sharding anahtar tablo için tanımlanır + +Varsayılan değer: 0 + +## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested} + +Sıfırlamak [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) iç içe geçmiş için `Distributed` Tablo + +Olası değerler: + +- 1 — Enabled. +- 0 — Disabled. + +Varsayılan değer: 0. + +## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop} + +Bir özel durum atmayı etkinleştirir veya devre dışı bırakır. [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) sorgu birleştirme gerçekleştirmedi. + +Varsayılan olarak, `OPTIMIZE` eğer hiç bir şey yapmamış olsa bile, başarılı bir şekilde verir. Bu ayar, bu durumları ayırt etmenizi ve bir özel durum iletisinde nedeni almanızı sağlar. + +Olası değerler: + +- 1 — Throwing an exception is enabled. +- 0 — Throwing an exception is disabled. + +Varsayılan değer: 0. + +## distributed\_replica\_error\_half\_life {#settings-distributed_replica_error_half_life} + +- Türü: saniye +- Varsayılan değer: 60 saniye + +Dağıtılmış tablolardaki hataların ne kadar hızlı sıfırlandığını denetler. Bir yineleme bir süre için kullanılamıyorsa, 5 hataları biriktirir ve distributed\_replica\_error\_half\_lıfe 1 saniye olarak ayarlanır, sonra yineleme son hatadan sonra normal 3 saniye olarak kabul edilir. + +Ayrıca bakınız: + +- [Masa motoru Dağıt Distributedıldı](../../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap) + +## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap} + +- Tür: imzasız int +- Varsayılan değer: 1000 + +Her yineleme hata sayısı çok fazla hata biriken tek bir yineleme engelleyerek, bu değerle kaplıdır. 
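+
+For illustration, the capped per-replica error counters can be inspected through the `system.clusters` table described later in this document; a minimal sketch, assuming a hypothetical cluster named `test_cluster`:
+
+``` sql
+-- a sketch: inspect accumulated errors per replica ('test_cluster' is a hypothetical name)
+SELECT host_name, errors_count, estimated_recovery_time
+FROM system.clusters
+WHERE cluster = 'test_cluster'
+```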
+ +Ayrıca bakınız: + +- [Masa motoru Dağıt Distributedıldı](../../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life) + +## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms} + +İçin taban aralığı [Dağılı](../../engines/table_engines/special/distributed.md) veri göndermek için tablo motoru. Gerçek Aralık, hatalar durumunda katlanarak büyür. + +Olası değerler: + +- Milisaniye pozitif tamsayı sayısı. + +Varsayılan değer: 100 milisaniye. + +## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms} + +İçin Maksimum Aralık [Dağılı](../../engines/table_engines/special/distributed.md) veri göndermek için tablo motoru. Sınırları içinde belirlenen Aralık üstel büyüme [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) ayar. + +Olası değerler: + +- Milisaniye pozitif tamsayı sayısı. + +Varsayılan değer: 30000 milisaniye (30 saniye). + +## distributed\_directory\_monitor\_batch\_ınserts {#distributed_directory_monitor_batch_inserts} + +Eklenen verilerin toplu olarak gönderilmesini etkinleştirir / devre dışı bırakır. + +Toplu gönderme etkinleştirildiğinde, [Dağılı](../../engines/table_engines/special/distributed.md) table engine, eklenen verilerin birden çok dosyasını ayrı ayrı göndermek yerine tek bir işlemde göndermeye çalışır. Toplu gönderme, sunucu ve ağ kaynaklarını daha iyi kullanarak küme performansını artırır. + +Olası değerler: + +- 1 — Enabled. +- 0 — Disabled. + +Varsayılan değer: 0. + +## os\_thread\_priority {#setting-os-thread-priority} + +Önceliği ayarlar ([güzel](https://en.wikipedia.org/wiki/Nice_(Unix))) sorguları yürüten iş parçacıkları için. İşletim sistemi Zamanlayıcısı, kullanılabilir her CPU çekirdeğinde çalışacak bir sonraki iş parçacığını seçerken bu önceliği dikkate alır. + +!!! warning "Uyarıcı" + Bu ayarı kullanmak için, `CAP_SYS_NICE` özellik. Bu `clickhouse-server` paket kurulum sırasında kurar. Bazı sanal ortamlar ayarlamanıza izin vermez `CAP_SYS_NICE` özellik. Bu durumda, `clickhouse-server` Başlangıçta bu konuda bir mesaj gösterir. + +Olası değerler: + +- Aralıktaki değerleri ayarlayabilirsiniz `[-20, 19]`. + +Daha düşük değerler daha yüksek öncelik anlamına gelir. Düşük olan iplikler `nice` öncelik değerleri, yüksek değerlere sahip iş parçacıklarından daha sık yürütülür. Yüksek değerler, uzun süren etkileşimli olmayan sorgular için tercih edilir, çünkü geldiklerinde kısa etkileşimli sorgular lehine kaynakları hızlı bir şekilde bırakmalarına izin verir. + +Varsayılan değer: 0. + +## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns} + +Gerçek bir saat zamanlayıcı için süreyi ayarlar [sorgu profiler](../../operations/optimizing_performance/sampling_query_profiler.md). Gerçek saat zamanlayıcı duvar saati zaman sayar. + +Olası değerler: + +- Nanosaniye cinsinden pozitif tam sayı. + + Önerilen değerler: + + - 10000000 (100 times a second) nanoseconds and less for single queries. + - 1000000000 (once a second) for cluster-wide profiling. + +- Zamanlayıcıyı kapatmak için 0. + +Tür: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +Varsayılan değer: 1000000000 nanosaniye (saniyede bir kez). 
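+
+For illustration, a minimal sketch of changing the timer period for the current session, using the recommended single-query value from the list above:
+
+``` sql
+-- a sketch: sample the real clock timer 100 times a second for this session
+SET query_profiler_real_time_period_ns = 10000000
+```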
+ +Ayrıca bakınız: + +- Sistem tablosu [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns} + +Bir CPU saat süreölçerinin dönemini ayarlar. [sorgu profiler](../../operations/optimizing_performance/sampling_query_profiler.md). Bu zamanlayıcı sadece CPU süresini sayar. + +Olası değerler: + +- Nanosaniye pozitif tamsayı sayısı. + + Önerilen değerler: + + - 10000000 (100 times a second) nanoseconds and more for single queries. + - 1000000000 (once a second) for cluster-wide profiling. + +- Zamanlayıcıyı kapatmak için 0. + +Tür: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +Varsayılan değer: 1000000000 nanosaniye. + +Ayrıca bakınız: + +- Sistem tablosu [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## allow\_introspection\_functions {#settings-allow_introspection_functions} + +Devre dışı bırakmayı etkinleştirir [ıntrospections fonksiyonları](../../sql_reference/functions/introspection.md) sorgu profilleme için. + +Olası değerler: + +- 1 — Introspection functions enabled. +- 0 — Introspection functions disabled. + +Varsayılan değer: 0. + +**Ayrıca Bakınız** + +- [Örnekleme Sorgusu Profiler](../optimizing_performance/sampling_query_profiler.md) +- Sistem tablosu [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## ınput\_format\_parallel\_parsing {#input-format-parallel-parsing} + +- Tipi: bool +- Varsayılan değer: True + +Veri biçimlerinin paralel ayrıştırma sırasını koruyarak etkinleştirin. Sadece TSV, TKSV, CSV ve JSONEachRow formatları için desteklenir. + +## min\_chunk\_bytes\_for\_parallel\_parsing {#min-chunk-bytes-for-parallel-parsing} + +- Tür: imzasız int +- Varsayılan değer: 1 MiB + +Her iş parçacığının paralel olarak ayrıştırılacağı bayt cinsinden minimum yığın boyutu. + +## output\_format\_avro\_codec {#settings-output_format_avro_codec} + +Çıkış Avro dosyası için kullanılan sıkıştırma codec ayarlar. + +Tipi: dize + +Olası değerler: + +- `null` — No compression +- `deflate` — Compress with Deflate (zlib) +- `snappy` — Compress with [Çabuk](https://google.github.io/snappy/) + +Varsayılan değer: `snappy` (varsa) veya `deflate`. + +## output\_format\_avro\_sync\_interval {#settings-output_format_avro_sync_interval} + +Çıkış Avro dosyası için senkronizasyon işaretçileri arasında minimum veri boyutunu (bayt cinsinden) ayarlar. + +Tür: imzasız int + +Olası değerler: 32 (32 bayt) - 1073741824 (1 GiB) + +Varsayılan değer: 32768 (32 KiB) + +## format\_avro\_schema\_registry\_url {#settings-format_avro_schema_registry_url} + +Sets Confluent Schema Registry URL to use with [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) biçimli + +Type: URL + +Varsayılan değer: boş + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/tr/operations/settings/settings_profiles.md b/docs/tr/operations/settings/settings_profiles.md new file mode 100644 index 00000000000..318276ab6c8 --- /dev/null +++ b/docs/tr/operations/settings/settings_profiles.md @@ -0,0 +1,71 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 61 +toc_title: Ayarlar Profilleri +--- + +# Ayarlar Profilleri {#settings-profiles} + +Ayarlar profili, aynı ad altında gruplandırılmış ayarlar topluluğudur. Her ClickHouse kullanıcısının bir profili vardır. +Bir profildeki tüm ayarları uygulamak için `profile` ayar. + +Örnek: + +Yüklemek `web` profilli. 
+ +``` sql +SET profile = 'web' +``` + +Ayarlar profilleri kullanıcı yapılandırma dosyasında bildirilir. Bu genellikle `users.xml`. + +Örnek: + +``` xml + + + + + + 8 + + + + + 1000000000 + 100000000000 + + 1000000 + any + + 1000000 + 1000000000 + + 100000 + 100000000 + break + + 600 + 1000000 + 15 + + 25 + 100 + 50 + + 2 + 25 + 50 + 100 + + 1 + + +``` + +Örnek iki profili belirtir: `default` ve `web`. Bu `default` profilin özel bir amacı vardır: her zaman mevcut olmalı ve sunucuyu başlatırken uygulanır. Diğer bir deyişle, `default` profil varsayılan ayarları içerir. Bu `web` profil kullanılarak ayarlanabilir düzenli bir profil `SET` sorgu veya bir HTTP sorgusunda bir URL parametresi kullanma. + +Ayarlar profilleri birbirinden miras alabilir. Kalıtım kullanmak için, bir veya birden fazla belirtiniz `profile` ayarlar profilde listelenen diğer ayarlardan önce. Farklı profillerde bir ayar tanımlandığında, en son tanımlı kullanılır. + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/) diff --git a/docs/tr/operations/settings/settings_users.md b/docs/tr/operations/settings/settings_users.md new file mode 100644 index 00000000000..0bc2b5ac1a5 --- /dev/null +++ b/docs/tr/operations/settings/settings_users.md @@ -0,0 +1,148 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 63 +toc_title: "Kullan\u0131c\u0131 Ayarlar\u0131" +--- + +# Kullanıcı Ayarları {#user-settings} + +Bu `users` bu bölüm `user.xml` yapılandırma dosyası kullanıcı ayarlarını içerir. + +Bu yapı `users` bölme: + +``` xml + + + + + + + + + + + profile_name + + default + + + + + expression + + + + + + +``` + +### home/şifre {#user-namepassword} + +Şifre düz metin veya SHA256 (hex formatında) belirtilebilir. + +- Düz metin içinde bir şifre atamak için (**tavsiye edilmez**bir koyun `password` öğe. + + Mesela, `qwerty`. Şifre boş bırakılabilir. + + + +- SHA256 karmasını kullanarak bir şifre atamak için, bir `password_sha256_hex` öğe. + + Mesela, `65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5`. + + Kabuktan bir parola oluşturma örneği: + + PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-' + + Sonucun ilk satırı şifredir. İkinci satır karşılık gelen SHA256 karmasıdır. + + + +- MySQL istemcileri ile uyumluluk için, şifre çift SHA1 karma belirtilebilir. İçine yerleştirin `password_double_sha1_hex` öğe. + + Mesela, `08b4a0f1de6ad37da17359e592c8d74788a83eb0`. + + Kabuktan bir parola oluşturma örneği: + + PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-' + + Sonucun ilk satırı şifredir. İkinci satır karşılık gelen çift SHA1 karmasıdır. + +### kullanıcı\_adı / ağlar {#user-namenetworks} + +Kullanıcının ClickHouse sunucusuna bağlanabileceği ağların listesi. + +Listenin her öğesi aşağıdaki formlardan birine sahip olabilir: + +- `` — IP address or network mask. + + Örnekler: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`. + +- `` — Hostname. + + Örnek: `example01.host.ru`. + + Erişimi denetlemek için bir DNS sorgusu gerçekleştirilir ve döndürülen tüm IP adresleri eş adresiyle karşılaştırılır. + +- `` — Regular expression for hostnames. 
+ + Örnek, `^example\d\d-\d\d-\d\.host\.ru$` + + Erişimi kontrol etmek için, bir [DNS ptr sorgusu](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) eş adresi için gerçekleştirilir ve sonra belirtilen regexp uygulanır. Daha sonra PTR sorgusunun sonuçları için başka bir DNS sorgusu gerçekleştirilir ve alınan tüm adresler eş adresine karşılaştırılır. Regexp'nin $ile bitmesini şiddetle tavsiye ederiz. + +Sunucu yeniden başlatılıncaya kadar DNS isteklerinin tüm sonuçları önbelleğe alınır. + +**Örnekler** + +Herhangi bir ağdan kullanıcı için erişimi açmak için şunları belirtin: + +``` xml +::/0 +``` + +!!! warning "Uyarıcı" + Düzgün yapılandırılmış bir güvenlik duvarınız yoksa veya sunucu doğrudan internete bağlı değilse, herhangi bir ağdan erişimi açmak güvensizdir. + +Erişimi yalnızca localhost'tan açmak için şunları belirtin: + +``` xml +::1 +127.0.0.1 +``` + +### kullanıcı\_adı / profil {#user-nameprofile} + +Kullanıcı için bir ayarlar profili atayabilirsiniz. Ayarlar profilleri ayrı bir bölümde yapılandırılır `users.xml` Dosya. Daha fazla bilgi için, bkz. [Ayarların profilleri](settings_profiles.md). + +### user\_name / kota {#user-namequota} + +Kotalar, belirli bir süre boyunca kaynak kullanımını izlemenize veya sınırlamanıza izin verir. Kotalar yapılandırılır `quotas` +bu bölüm `users.xml` yapılandırma dosyası. + +Kullanıcı için ayarlanmış bir kotalar atayabilirsiniz. Kotalar yapılandırmasının ayrıntılı bir açıklaması için bkz. [Kotalar](../quotas.md#quotas). + +### user\_name / veritabanları {#user-namedatabases} + +Bu bölümde, ClickHouse tarafından döndürülen satırları sınırlayabilirsiniz `SELECT` geçerli kullanıcı tarafından yapılan sorgular, böylece temel satır düzeyinde güvenlik uygular. + +**Örnek** + +Aşağıdaki yapılandırma bu kullanıcıyı zorlar `user1` sadece satırları görebilirsiniz `table1` sonucu olarak `SELECT` sorgular, burada değeri `id` alan 1000'dir. + +``` xml + + + + + id = 1000 + + + + +``` + +Bu `filter` bir sonuç veren herhangi bir ifade olabilir [Uİnt8](../../sql_reference/data_types/int_uint.md)- tip değeri. Genellikle karşılaştırmalar ve mantıksal operatörler içerir. Satır fromlardan `database_name.table1` burada filtre sonuçları 0 için bu kullanıcı için döndürülür. Filtreleme ile uyumsuz `PREWHERE` işlemler ve devre dışı bırakır `WHERE→PREWHERE` optimizasyon. + +[Orijinal makale](https://clickhouse.tech/docs/en/operations/settings/settings_users/) diff --git a/docs/tr/operations/system_tables.md b/docs/tr/operations/system_tables.md new file mode 100644 index 00000000000..17fd176cc58 --- /dev/null +++ b/docs/tr/operations/system_tables.md @@ -0,0 +1,1166 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 52 +toc_title: "Sistem Tablolar\u0131" +--- + +# Sistem Tabloları {#system-tables} + +Sistem tabloları, sistemin işlevselliğinin bir kısmını uygulamak ve sistemin nasıl çalıştığı hakkında bilgilere erişim sağlamak için kullanılır. +Bir sistem tablosunu silemezsiniz (ancak ayırma işlemini gerçekleştirebilirsiniz). +Sistem tablolarında diskte veri bulunan dosyalar veya meta verilere sahip dosyalar yoktur. Sunucu, başlatıldığında tüm sistem tablolarını oluşturur. +Sistem tabloları salt okunur. +Bulun theurlar. ‘system’ veritabanı. + +## sistem.asynchronous\_metrics {#system_tables-asynchronous_metrics} + +Arka planda periyodik olarak hesaplanan metrikleri içerir. Örneğin, kullanılan RAM miktarı. + +Sütun: + +- `metric` ([Dize](../sql_reference/data_types/string.md)) — Metric name. 
+- `value` ([Float64](../sql_reference/data_types/float.md)) — Metric value. + +**Örnek** + +``` sql +SELECT * FROM system.asynchronous_metrics LIMIT 10 +``` + +``` text +┌─metric──────────────────────────────────┬──────value─┐ +│ jemalloc.background_thread.run_interval │ 0 │ +│ jemalloc.background_thread.num_runs │ 0 │ +│ jemalloc.background_thread.num_threads │ 0 │ +│ jemalloc.retained │ 422551552 │ +│ jemalloc.mapped │ 1682989056 │ +│ jemalloc.resident │ 1656446976 │ +│ jemalloc.metadata_thp │ 0 │ +│ jemalloc.metadata │ 10226856 │ +│ UncompressedCacheCells │ 0 │ +│ MarkCacheFiles │ 0 │ +└─────────────────────────────────────────┴────────────┘ +``` + +**Ayrıca Bakınız** + +- [İzleme](monitoring.md) — Base concepts of ClickHouse monitoring. +- [sistem.metrik](#system_tables-metrics) — Contains instantly calculated metrics. +- [sistem.etkinlik](#system_tables-events) — Contains a number of events that have occurred. +- [sistem.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. + +## sistem.kümeler {#system-clusters} + +Yapılandırma dosyasında bulunan kümeler ve içindeki sunucular hakkında bilgi içerir. + +Sütun: + +- `cluster` (String) — The cluster name. +- `shard_num` (UInt32) — The shard number in the cluster, starting from 1. +- `shard_weight` (UInt32) — The relative weight of the shard when writing data. +- `replica_num` (UInt32) — The replica number in the shard, starting from 1. +- `host_name` (String) — The host name, as specified in the config. +- `host_address` (String) — The host IP address obtained from DNS. +- `port` (UInt16) — The port to use for connecting to the server. +- `user` (String) — The name of the user for connecting to the server. +- `errors_count` (Uİnt32) - bu ana bilgisayarın çoğaltma ulaşamadı sayısı. +- `estimated_recovery_time` (Uİnt32) - çoğaltma hata sayısı sıfırlanana kadar saniye kaldı ve normale döndü olarak kabul edilir. + +Lütfen unutmayın `errors_count` küme için sorgu başına bir kez güncelleştirilir, ancak `estimated_recovery_time` isteğe bağlı olarak yeniden hesaplanır. Yani sıfır olmayan bir durum olabilir `errors_count` ve sıfır `estimated_recovery_time`, sonraki sorgu sıfır olacak `errors_count` ve hiçbir hata yokmuş gibi çoğaltma kullanmayı deneyin. + +**Ayrıca bakınız** + +- [Masa motoru Dağıt Distributedıldı](../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_cap ayarı](settings/settings.md#settings-distributed_replica_error_cap) +- [distributed\_replica\_error\_half\_life ayarı](settings/settings.md#settings-distributed_replica_error_half_life) + +## sistem.sütun {#system-columns} + +Tüm tablolardaki sütunlar hakkında bilgi içerir. + +Benzer bilgileri almak için bu tabloyu kullanabilirsiniz [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table) sorgu, ancak aynı anda birden çok tablo için. + +Bu `system.columns` tablo aşağıdaki sütunları içerir (sütun türü parantez içinde gösterilir): + +- `database` (String) — Database name. +- `table` (String) — Table name. +- `name` (String) — Column name. +- `type` (String) — Column type. +- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) varsayılan değer veya tanımlanmamışsa boş bir dize için. +- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined. +- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes. 
+- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes. +- `marks_bytes` (UInt64) — The size of marks, in bytes. +- `comment` (String) — Comment on the column, or an empty string if it is not defined. +- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression. +- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression. +- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. +- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. + +## sistem.katılımcılar {#system-contributors} + +Katkıda bulunanlar hakkında bilgi içerir. Rastgele sırayla tüm constributors. Sipariş, sorgu yürütme zamanında rasgele olur. + +Sütun: + +- `name` (String) — Contributor (author) name from git log. + +**Örnek** + +``` sql +SELECT * FROM system.contributors LIMIT 10 +``` + +``` text +┌─name─────────────┐ +│ Olga Khvostikova │ +│ Max Vetrov │ +│ LiuYangkuan │ +│ svladykin │ +│ zamulla │ +│ Šimon Podlipský │ +│ BayoNet │ +│ Ilya Khomutov │ +│ Amy Krishnevsky │ +│ Loud_Scream │ +└──────────────────┘ +``` + +Tabloda kendinizi bulmak için bir sorgu kullanın: + +``` sql +SELECT * FROM system.contributors WHERE name='Olga Khvostikova' +``` + +``` text +┌─name─────────────┐ +│ Olga Khvostikova │ +└──────────────────┘ +``` + +## sistem.veritabanılar {#system-databases} + +Bu tablo, adı verilen tek bir dize sütunu içerir ‘name’ – the name of a database. +Sunucunun bildiği her veritabanı, tabloda karşılık gelen bir girdiye sahiptir. +Bu sistem tablosu uygulamak için kullanılır `SHOW DATABASES` sorgu. + +## sistem.detached\_parts {#system_tables-detached_parts} + +Müstakil parçaları hakkında bilgiler içerir [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) Tablolar. Bu `reason` sütun, parçanın neden ayrıldığını belirtir. Kullanıcı tarafından ayrılmış parçalar için sebep boştur. Bu tür parçalar ile eklenebilir [ALTER TABLE ATTACH PARTITION\|PART](../sql_reference/statements/alter.md#alter_attach-partition) komut. Diğer sütunların açıklaması için bkz. [sistem.parçalar](#system_tables-parts). Bölüm adı geçersiz ise, bazı sütunların değerleri olabilir `NULL`. Bu tür parçalar ile silinebilir [ALTER TABLE DROP DETACHED PART](../sql_reference/statements/alter.md#alter_drop-detached). + +## sistem.sözlükler {#system_tables-dictionaries} + +Hakkında bilgi içerir [dış söz dictionarieslükler](../sql_reference/dictionaries/external_dictionaries/external_dicts.md). + +Sütun: + +- `database` ([Dize](../sql_reference/data_types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries. +- `name` ([Dize](../sql_reference/data_types/string.md)) — [Sözlük adı](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md). +- `status` ([Enum8](../sql_reference/data_types/enum.md)) — Dictionary status. Possible values: + - `NOT_LOADED` — Dictionary was not loaded because it was not used. + - `LOADED` — Dictionary loaded successfully. + - `FAILED` — Unable to load the dictionary as a result of an error. + - `LOADING` — Dictionary is loading now. + - `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../sql_reference/statements/system.md#query_language-system-reload-dictionary) sorgu, zaman aşımı, sözlük yapılandırması değişti). 
+ - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now. +- `origin` ([Dize](../sql_reference/data_types/string.md)) — Path to the configuration file that describes the dictionary. +- `type` ([Dize](../sql_reference/data_types/string.md)) — Type of a dictionary allocation. [Sözlükleri bellekte saklama](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md). +- `key` — [Anahtar tipi](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-key): Sayısal Tuş ([Uİnt64](../sql_reference/data_types/int_uint.md#uint-ranges)) or Сomposite key ([Dize](../sql_reference/data_types/string.md)) — form “(type 1, type 2, …, type n)”. +- `attribute.names` ([Dizi](../sql_reference/data_types/array.md)([Dize](../sql_reference/data_types/string.md))) — Array of [öznitelik adları](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes) sözlük tarafından sağlanmıştır. +- `attribute.types` ([Dizi](../sql_reference/data_types/array.md)([Dize](../sql_reference/data_types/string.md))) — Corresponding array of [öznitelik türleri](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes) sözlük tarafından sağlanmaktadır. +- `bytes_allocated` ([Uİnt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary. +- `query_count` ([Uİnt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot. +- `hit_rate` ([Float64](../sql_reference/data_types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache. +- `element_count` ([Uİnt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Number of items stored in the dictionary. +- `load_factor` ([Float64](../sql_reference/data_types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). +- `source` ([Dize](../sql_reference/data_types/string.md)) — Text describing the [veri kaynağı](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md) sözlük için. +- `lifetime_min` ([Uİnt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Minimum [ömür](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) bellekteki sözlüğün ardından ClickHouse sözlüğü yeniden yüklemeye çalışır (eğer `invalidate_query` ayarlanır, daha sonra sadece değiştiyse). Saniyeler içinde ayarlayın. +- `lifetime_max` ([Uİnt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Maximum [ömür](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) bellekteki sözlüğün ardından ClickHouse sözlüğü yeniden yüklemeye çalışır (eğer `invalidate_query` ayarlanır, daha sonra sadece değiştiyse). Saniyeler içinde ayarlayın. +- `loading_start_time` ([DateTime](../sql_reference/data_types/datetime.md)) — Start time for loading the dictionary. +- `last_successful_update_time` ([DateTime](../sql_reference/data_types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes. +- `loading_duration` ([Float32](../sql_reference/data_types/float.md)) — Duration of a dictionary loading. 
+- `last_exception` ([Dize](../sql_reference/data_types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. + +**Örnek** + +Sözlüğü yapılandırın. + +``` sql +CREATE DICTIONARY dictdb.dict +( + `key` Int64 DEFAULT -1, + `value_default` String DEFAULT 'world', + `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)' +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb')) +LIFETIME(MIN 0 MAX 1) +LAYOUT(FLAT()) +``` + +Sözlüğün yüklendiğinden emin olun. + +``` sql +SELECT * FROM system.dictionaries +``` + +``` text +┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┌──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐ +│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │ +└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ +``` + +## sistem.etkinlik {#system_tables-events} + +Sistemde meydana gelen olayların sayısı hakkında bilgi içerir. Örneğin, tabloda kaç tane bulabilirsiniz `SELECT` ClickHouse sunucusu başladığından beri sorgular işlendi. + +Sütun: + +- `event` ([Dize](../sql_reference/data_types/string.md)) — Event name. +- `value` ([Uİnt64](../sql_reference/data_types/int_uint.md)) — Number of events occurred. +- `description` ([Dize](../sql_reference/data_types/string.md)) — Event description. + +**Örnek** + +``` sql +SELECT * FROM system.events LIMIT 5 +``` + +``` text +┌─event─────────────────────────────────┬─value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Query │ 12 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ +│ SelectQuery │ 8 │ Same as Query, but only for SELECT queries. │ +│ FileOpen │ 73 │ Number of files opened. │ +│ ReadBufferFromFileDescriptorRead │ 155 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. │ +│ ReadBufferFromFileDescriptorReadBytes │ 9931 │ Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. 
│ +└───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [sistem.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [sistem.metrik](#system_tables-metrics) — Contains instantly calculated metrics. +- [sistem.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [İzleme](monitoring.md) — Base concepts of ClickHouse monitoring. + +## sistem.işlevler {#system-functions} + +Normal ve toplama işlevleri hakkında bilgi içerir. + +Sütun: + +- `name`(`String`) – The name of the function. +- `is_aggregate`(`UInt8`) — Whether the function is aggregate. + +## sistem.graphite\_retentions {#system-graphite-retentions} + +Parametreleri hakkında bilgi içerir [graphite\_rollup](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) tablo usedlarında kullanılan [\* Graphıtemergetree](../engines/table_engines/mergetree_family/graphitemergetree.md) motorlar. + +Sütun: + +- `config_name` (Dize) - `graphite_rollup` parametre adı. +- `regexp` (String) - metrik adı için bir desen. +- `function` (String) - toplama işlevinin adı. +- `age` (Uint64) - saniye cinsinden verilerin minimum yaş. +- `precision` (Uİnt64) - verilerin yaşını saniyeler içinde tam olarak tanımlamak için. +- `priority` (Uİnt16) - desen önceliği. +- `is_default` (Uİnt8) - desenin varsayılan olup olmadığı. +- `Tables.database` (Array (String)) - kullanılan veritabanı tablolarının adlarının dizisi `config_name` parametre. +- `Tables.table` (Array (String)) - kullanılan tablo adları dizisi `config_name` parametre. + +## sistem.birleştiriyor {#system-merges} + +Mergetree ailesindeki tablolar için şu anda işlemde olan birleştirme ve parça mutasyonları hakkında bilgi içerir. + +Sütun: + +- `database` (String) — The name of the database the table is in. +- `table` (String) — Table name. +- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started. +- `progress` (Float64) — The percentage of completed work from 0 to 1. +- `num_parts` (UInt64) — The number of pieces to be merged. +- `result_part_name` (String) — The name of the part that will be formed as the result of merging. +- `is_mutation` (Uİnt8 ) - 1 Bu işlem bir parça mutasyonu ise. +- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks. +- `total_size_marks` (UInt64) — The total number of marks in the merged parts. +- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed. +- `rows_read` (UInt64) — Number of rows read. +- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed. +- `rows_written` (UInt64) — Number of rows written. + +## sistem.metrik {#system_tables-metrics} + +Anında hesaplanan veya geçerli bir değere sahip olabilir metrikleri içerir. Örneğin, aynı anda işlenen sorguların sayısı veya geçerli yineleme gecikmesi. Bu tablo her zaman güncel. + +Sütun: + +- `metric` ([Dize](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — Metric value. +- `description` ([Dize](../sql_reference/data_types/string.md)) — Metric description. 
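+
+Individual counters can be read with an ordinary `SELECT`; a minimal sketch, assuming the `Query` metric shown in the example output below:
+
+``` sql
+-- a sketch: read one metric by name ('Query' appears in the example output below)
+SELECT value FROM system.metrics WHERE metric = 'Query'
+```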
+ +Desteklenen metriklerin listesi [src / ortak / CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) ClickHouse kaynak dosyası. + +**Örnek** + +``` sql +SELECT * FROM system.metrics LIMIT 10 +``` + +``` text +┌─metric─────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Query │ 1 │ Number of executing queries │ +│ Merge │ 0 │ Number of executing background merges │ +│ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ +│ ReplicatedFetch │ 0 │ Number of data parts being fetched from replicas │ +│ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ +│ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ +│ BackgroundPoolTask │ 0 │ Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping) │ +│ BackgroundSchedulePoolTask │ 0 │ Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc. │ +│ DiskSpaceReservedForMerge │ 0 │ Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts. │ +│ DistributedSend │ 0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode. │ +└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [sistem.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [sistem.etkinlik](#system_tables-events) — Contains a number of events that occurred. +- [sistem.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [İzleme](monitoring.md) — Base concepts of ClickHouse monitoring. + +## sistem.metric\_log {#system_tables-metric_log} + +Tablolardan metrik değerlerinin geçmişini içerir `system.metrics` ve `system.events`, periyodik olarak diske boşaltılır. +Metrik geçmişi koleksiyonunu açmak için `system.metric_log`, oluşturmak `/etc/clickhouse-server/config.d/metric_log.xml` aşağıdaki içerik ile: + +``` xml + + + system + metric_log
    + 7500 + 1000 +
    +
    +``` + +**Örnek** + +``` sql +SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical; +``` + +``` text +Row 1: +────── +event_date: 2020-02-18 +event_time: 2020-02-18 07:15:33 +milliseconds: 554 +ProfileEvent_Query: 0 +ProfileEvent_SelectQuery: 0 +ProfileEvent_InsertQuery: 0 +ProfileEvent_FileOpen: 0 +ProfileEvent_Seek: 0 +ProfileEvent_ReadBufferFromFileDescriptorRead: 1 +ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0 +ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0 +ProfileEvent_WriteBufferFromFileDescriptorWrite: 1 +ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0 +ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56 +... +CurrentMetric_Query: 0 +CurrentMetric_Merge: 0 +CurrentMetric_PartMutation: 0 +CurrentMetric_ReplicatedFetch: 0 +CurrentMetric_ReplicatedSend: 0 +CurrentMetric_ReplicatedChecks: 0 +... +``` + +**Ayrıca bakınız** + +- [sistem.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [sistem.etkinlik](#system_tables-events) — Contains a number of events that occurred. +- [sistem.metrik](#system_tables-metrics) — Contains instantly calculated metrics. +- [İzleme](monitoring.md) — Base concepts of ClickHouse monitoring. + +## sistem.şiir {#system-numbers} + +Bu tablo adında tek bir uint64 sütunu içerir ‘number’ bu sıfırdan başlayarak hemen hemen tüm doğal sayıları içerir. +Bu tabloyu testler için veya kaba kuvvet araması yapmanız gerekiyorsa kullanabilirsiniz. +Bu tablodan okumalar parallelized değil. + +## sistem.numbers\_mt {#system-numbers-mt} + +Olarak aynı ‘system.numbers’ ancak okumalar paralelleştirilmiştir. Sayılar herhangi bir sırayla iade edilebilir. +Testler için kullanılır. + +## sistem.bir {#system-one} + +Bu tablo, tek bir satır içeren tek bir satır içerir ‘dummy’ 0 değerini içeren uint8 sütunu. +SELECT sorgusu FROM yan tümcesi belirtmezse, bu tablo kullanılır. +Bu, diğer Dbms'lerde bulunan ikili tabloya benzer. + +## sistem.parçalar {#system_tables-parts} + +Bölümleri hakkında bilgi içerir [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) Tablolar. + +Her satır bir veri bölümünü açıklar. + +Sütun: + +- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql_reference/statements/alter.md#query_language_queries_alter) sorgu. + + Biçimliler: + + - `YYYYMM` ay otomatik bölümleme için. + - `any_string` el ile bölümleme yaparken. + +- `name` (`String`) – Name of the data part. + +- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging. + +- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` dizin ayrıntısına göre (genellikle 8192) (bu ipucu uyarlanabilir ayrıntı için çalışmaz). + +- `rows` (`UInt64`) – The number of rows. + +- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes. + +- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `marks_bytes` (`UInt64`) – The size of the file with marks. + +- `modification_time` (`DateTime`) – The time the directory with the data part was modified. 
This usually corresponds to the time of data part creation.\| + +- `remove_time` (`DateTime`) – The time when the data part became inactive. + +- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges. + +- `min_date` (`Date`) – The minimum value of the date key in the data part. + +- `max_date` (`Date`) – The maximum value of the date key in the data part. + +- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part. + +- `max_time`(`DateTime`) – The maximum value of the date and time key in the data part. + +- `partition_id` (`String`) – ID of the partition. + +- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging. + +- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging. + +- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts. + +- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`). + +- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values. + +- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values. + +- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition) + +- `database` (`String`) – Name of the database. + +- `table` (`String`) – Name of the table. + +- `engine` (`String`) – Name of the table engine without parameters. + +- `path` (`String`) – Absolute path to the folder with data part files. + +- `disk` (`String`) – Name of a disk that stores the data part. + +- `hash_of_all_files` (`String`) – [sifash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) sıkıştırılmış dosyaların. + +- `hash_of_uncompressed_files` (`String`) – [sifash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) sıkıştırılmamış dosyaların (işaretli dosyalar, dizin dosyası vb.)). + +- `uncompressed_hash_of_compressed_files` (`String`) – [sifash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) sıkıştırılmış dosyalardaki verilerin sıkıştırılmamış gibi. + +- `bytes` (`UInt64`) – Alias for `bytes_on_disk`. + +- `marks_size` (`UInt64`) – Alias for `marks_bytes`. + +## sistem.part\_log {#system_tables-part-log} + +Bu `system.part_log` tablo yalnızca aşağıdaki durumlarda oluşturulur: [part\_log](server_configuration_parameters/settings.md#server_configuration_parameters-part-log) sunucu ayarı belirtilir. + +Bu tablo ile oluşan olaylar hakkında bilgi içerir [veri parçaları](../engines/table_engines/mergetree_family/custom_partitioning_key.md) in the [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) veri ekleme veya birleştirme gibi aile tabloları. + +Bu `system.part_log` tablo aşağıdaki sütunları içerir: + +- `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values: + - `NEW_PART` — Inserting of a new data part. + - `MERGE_PARTS` — Merging of data parts. + - `DOWNLOAD_PART` — Downloading a data part. 
+ - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition). + - `MUTATE_PART` — Mutating of a data part. + - `MOVE_PART` — Moving the data part from the one disk to another one. +- `event_date` (Date) — Event date. +- `event_time` (DateTime) — Event time. +- `duration_ms` (UInt64) — Duration. +- `database` (String) — Name of the database the data part is in. +- `table` (String) — Name of the table the data part is in. +- `part_name` (String) — Name of the data part. +- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ bölümleme tarafından ise değer `tuple()`. +- `rows` (UInt64) — The number of rows in the data part. +- `size_in_bytes` (UInt64) — Size of the data part in bytes. +- `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge). +- `bytes_uncompressed` (UInt64) — Size of uncompressed bytes. +- `read_rows` (UInt64) — The number of rows was read during the merge. +- `read_bytes` (UInt64) — The number of bytes was read during the merge. +- `error` (UInt16) — The code number of the occurred error. +- `exception` (String) — Text message of the occurred error. + +Bu `system.part_log` tablo ilk veri ekleme sonra oluşturulur `MergeTree` Tablo. + +## sistem.işleyişler {#system_tables-processes} + +Bu sistem tablosu uygulamak için kullanılır `SHOW PROCESSLIST` sorgu. + +Sütun: + +- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` kullanan. Alan, bu sorgunun başlattığı bir sorgu için değil, belirli bir sorgunun kullanıcı adını içerir. +- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` sorgu istek sahibi sunucuda. +- `elapsed` (Float64) – The time in seconds since request execution started. +- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known. +- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) ayar. +- `query` (String) – The query text. For `INSERT`, eklemek için veri içermez. +- `query_id` (String) – Query ID, if defined. + +## sistem.text\_log {#system-tables-text-log} + +Günlük girişleri içerir. Bu tabloya giden günlük seviyesi ile sınırlı olabilir `text_log.level` sunucu ayarı. + +Sütun: + +- `event_date` (`Date`)- Giriş tarihi. +- `event_time` (`DateTime`)- Giriş zamanı. +- `microseconds` (`UInt32`)- Girişin mikrosaniye. +- `thread_name` (String) — Name of the thread from which the logging was done. +- `thread_id` (UInt64) — OS thread ID. +- `level` (`Enum8`)- Giriş seviyesi. 
+ - `'Fatal' = 1` + - `'Critical' = 2` + - `'Error' = 3` + - `'Warning' = 4` + - `'Notice' = 5` + - `'Information' = 6` + - `'Debug' = 7` + - `'Trace' = 8` +- `query_id` (`String`)- Sorgunun kimliği. +- `logger_name` (`LowCardinality(String)`) - Name of the logger (i.e. `DDLWorker`) +- `message` (`String`)- Mesajın kendisi . +- `revision` (`UInt32`)- ClickHouse revizyon. +- `source_file` (`LowCardinality(String)`)- Günlüğü yapıldığı kaynak dosya. +- `source_line` (`UInt64`)- Kaynak satır hangi günlüğü yapıldı. + +## sistem.query\_log {#system_tables-query_log} + +Sorguların yürütülmesi hakkında bilgi içerir. Her sorgu için, işlem başlangıç saatini, işlem süresini, hata mesajlarını ve diğer bilgileri görebilirsiniz. + +!!! note "Not" + Tablo için giriş verileri içermiyor `INSERT` sorgular. + +ClickHouse bu tabloyu yalnızca [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) sunucu parametresi belirtilir. Bu parametre, günlük aralığı veya sorguların oturum açacağı tablonun adı gibi günlük kurallarını ayarlar. + +Sorgu günlüğünü etkinleştirmek için, [log\_queries](settings/settings.md#settings-log-queries) parametre 1. Ayrıntılar için, bkz. [Ayarlar](settings/settings.md) bölme. + +Bu `system.query_log` tablo iki tür sorgu kaydeder: + +1. Doğrudan istemci tarafından çalıştırılan ilk sorgular. +2. Diğer sorgular tarafından başlatılan alt sorgular (dağıtılmış sorgu yürütme için). Bu tür sorgular için, üst sorgular hakkında bilgi `initial_*` sütun. + +Sütun: + +- `type` (`Enum8`) — Type of event that occurred when executing the query. Values: + - `'QueryStart' = 1` — Successful start of query execution. + - `'QueryFinish' = 2` — Successful end of query execution. + - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution. + - `'ExceptionWhileProcessing' = 4` — Exception during the query execution. +- `event_date` (Date) — Query starting date. +- `event_time` (DateTime) — Query starting time. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` sorgular, yazılı satır sayısı. Diğer sorgular için sütun değeri 0'dır. +- `written_bytes` (UInt64) — For `INSERT` sorgular, yazılı bayt sayısı. Diğer sorgular için sütun değeri 0'dır. +- `result_rows` (UInt64) — Number of rows in the result. +- `result_bytes` (UInt64) — Number of bytes in the result. +- `memory_usage` (UInt64) — Memory consumption by the query. +- `query` (String) — Query string. +- `exception` (String) — Exception message. +- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). 
+- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: + - 1 — TCP. + - 2 — HTTP. +- `os_user` (String) — OS's username who runs [clickhouse-müşteri](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi çalıştırılır. +- `client_name` (String) — The [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemci adı. +- `client_revision` (UInt32) — Revision of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi. +- `client_version_major` (UInt32) — Major version of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi. +- `client_version_minor` (UInt32) — Minor version of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi. +- `client_version_patch` (UInt32) — Patch component of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemci sürümü. +- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: + - 0 — The query was launched from the TCP interface. + - 1 — `GET` yöntem kullanılmıştır. + - 2 — `POST` yöntem kullanılmıştır. +- `http_user_agent` (String) — The `UserAgent` başlık http isteğinde geçti. +- `quota_key` (String) — The “quota key” belirtilen [kotalar](quotas.md) ayarı (bakınız `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [sistem.etkinlik](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` sütun. +- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parametre 1. +- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` sütun. + +Her sorgu bir veya iki satır oluşturur `query_log` tablo, sorgunun durumuna bağlı olarak: + +1. Sorgu yürütme başarılı olursa, tip 1 ve 2 ile iki olay oluşturulur (bkz. `type` sütun). +2. Sorgu işleme sırasında bir hata oluştu, iki olay türleri 1 ve 4 oluşturulur. +3. Sorguyu başlatmadan önce bir hata oluşmuşsa, 3 tipi olan tek bir olay oluşturulur. + +Varsayılan olarak, günlükleri 7.5 saniye aralıklarla tabloya eklenir. Bu aralığı ayarlayabilirsiniz [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) sunucu ayarı (bkz. `flush_interval_milliseconds` parametre). Günlükleri zorla bellek arabelleğinden tabloya temizlemek için `SYSTEM FLUSH LOGS` sorgu. + +Tablo elle silindiğinde, otomatik olarak anında oluşturulur. Önceki tüm günlüklerin silineceğini unutmayın. + +!!! note "Not" + Günlüklerin depolama süresi sınırsızdır. Günlükler tablodan otomatik olarak silinmez. Eski günlüklerin kaldırılmasını kendiniz düzenlemeniz gerekir. + +İçin keyfi bir bölümleme anahtarı belirtebilirsiniz `system.query_log` tablo içinde [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) sunucu ayarı (bkz. `partition_by` parametre). 
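+
+Örneğin, günün en uzun süren beş sorgusunu görmek için aşağıdaki gibi bir sorgu kullanılabilir (küçük bir taslak; sorgu günlüğünün etkinleştirilmiş olduğu varsayılır, çıktı sunucunuzdaki yüke bağlıdır):
+
+``` sql
+SELECT
+    event_time,
+    query_duration_ms,
+    read_rows,
+    memory_usage,
+    query
+FROM system.query_log
+WHERE type = 'QueryFinish' AND event_date = today()
+ORDER BY query_duration_ms DESC
+LIMIT 5
+```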
+ +## sistem.query\_thread\_log {#system_tables-query-thread-log} + +Tablo, her sorgu yürütme iş parçacığı hakkında bilgi içerir. + +ClickHouse bu tabloyu yalnızca [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) sunucu parametresi belirtilir. Bu parametre, günlük aralığı veya sorguların oturum açacağı tablonun adı gibi günlük kurallarını ayarlar. + +Sorgu günlüğünü etkinleştirmek için, [log\_query\_threads](settings/settings.md#settings-log-query-threads) parametre 1. Ayrıntılar için, bkz. [Ayarlar](settings/settings.md) bölme. + +Sütun: + +- `event_date` (Date) — the date when the thread has finished execution of the query. +- `event_time` (DateTime) — the date and time when the thread has finished execution of the query. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` sorgular, yazılı satır sayısı. Diğer sorgular için sütun değeri 0'dır. +- `written_bytes` (UInt64) — For `INSERT` sorgular, yazılı bayt sayısı. Diğer sorgular için sütun değeri 0'dır. +- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in context of this thread. +- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in context of this thread. +- `thread_name` (String) — Name of the thread. +- `thread_number` (UInt32) — Internal thread ID. +- `os_thread_id` (Int32) — OS thread ID. +- `master_thread_id` (UInt64) — OS initial ID of initial thread. +- `query` (String) — Query string. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: + - 1 — TCP. + - 2 — HTTP. +- `os_user` (String) — OS's username who runs [clickhouse-müşteri](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi çalıştırılır. +- `client_name` (String) — The [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemci adı. +- `client_revision` (UInt32) — Revision of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi. +- `client_version_major` (UInt32) — Major version of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi. +- `client_version_minor` (UInt32) — Minor version of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemcisi. +- `client_version_patch` (UInt32) — Patch component of the [clickhouse-müşteri](../interfaces/cli.md) veya başka bir TCP istemci sürümü. 
+- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
+    - 0 — The query was launched from the TCP interface.
+    - 1 — `GET` yöntemi kullanılmıştır.
+    - 2 — `POST` yöntemi kullanılmıştır.
+- `http_user_agent` (String) — HTTP isteğinde geçirilen `UserAgent` başlığı.
+- `quota_key` (String) — [Kotalar](quotas.md) ayarında belirtilen “quota key” (bkz. `keyed`).
+- `revision` (UInt32) — ClickHouse revision.
+- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [sistem.etkinlik](#system_tables-events)
+- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` sütunu.
+
+Varsayılan olarak, günlükler 7,5 saniye aralıklarla tabloya eklenir. Bu aralığı [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) sunucu ayarında ayarlayabilirsiniz (bkz. `flush_interval_milliseconds` parametresi). Günlükleri bellek arabelleğinden tabloya zorla temizlemek için `SYSTEM FLUSH LOGS` sorgusunu kullanın.
+
+Tablo elle silindiğinde, otomatik olarak anında yeniden oluşturulur. Önceki tüm günlüklerin silineceğini unutmayın.
+
+!!! note "Not"
+    Günlüklerin depolama süresi sınırsızdır. Günlükler tablodan otomatik olarak silinmez. Eski günlüklerin kaldırılmasını kendiniz düzenlemeniz gerekir.
+
+`system.query_thread_log` tablosu için [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) sunucu ayarında keyfi bir bölümleme anahtarı belirtebilirsiniz (bkz. `partition_by` parametresi).
+
+## sistem.trace\_log {#system_tables-trace_log}
+
+Örnekleme sorgu profilcisi tarafından toplanan yığın izlemeleri içerir.
+
+ClickHouse bu tabloyu, [trace\_log](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) sunucu yapılandırma bölümü ayarlandığında oluşturur. Ayrıca [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) ve [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) ayarları da ayarlanmalıdır.
+
+Günlükleri analiz etmek için `addressToLine`, `addressToSymbol` ve `demangle` iç gözlem fonksiyonlarını kullanın.
+
+Sütun:
+
+- `event_date`([Tarihli](../sql_reference/data_types/date.md)) — Date of sampling moment.
+
+- `event_time`([DateTime](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment.
+
+- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision.
+
+    Sunucuya `clickhouse-client` ile bağlanırken, `Connected to ClickHouse server version 19.18.1 revision 54429.` benzeri bir dize görürsünüz. Bu alan sunucunun `version` değerini değil `revision` değerini içerir.
+
+- `timer_type`([Enum8](../sql_reference/data_types/enum.md)) — Timer type:
+
+    - `Real` duvar saati zamanını temsil eder.
+    - `CPU` CPU süresini temsil eder.
+
+- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier.
+
+- `query_id`([Dize](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) sistem tablosu.
+
+- `trace`([Dizi(UInt64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process.
+
+**Örnek**
+
+``` sql
+SELECT * FROM system.trace_log LIMIT 1 \G
+```
+
+``` text
+Row 1:
+──────
+event_date: 2019-11-15
+event_time: 2019-11-15 15:09:38
+revision: 54428
+timer_type: Real
+thread_number: 48
+query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915
+trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
+```
+
+## sistem.yinelemeler {#system_tables-replicas}
+
+Yerel sunucuda bulunan çoğaltılmış tablolar için bilgi ve durum içerir.
+Bu tablo izleme için kullanılabilir. Tablo, her Replicated\* tablo için bir satır içerir.
+
+Örnek:
+
+``` sql
+SELECT *
+FROM system.replicas
+WHERE table = 'visits'
+FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+database: merge
+table: visits
+engine: ReplicatedCollapsingMergeTree
+is_leader: 1
+can_become_leader: 1
+is_readonly: 0
+is_session_expired: 0
+future_parts: 1
+parts_to_check: 0
+zookeeper_path: /clickhouse/tables/01-06/visits
+replica_name: example01-06-1.yandex.ru
+replica_path: /clickhouse/tables/01-06/visits/replicas/example01-06-1.yandex.ru
+columns_version: 9
+queue_size: 1
+inserts_in_queue: 0
+merges_in_queue: 1
+part_mutations_in_queue: 0
+queue_oldest_time: 2020-02-20 08:34:30
+inserts_oldest_time: 0000-00-00 00:00:00
+merges_oldest_time: 2020-02-20 08:34:30
+part_mutations_oldest_time: 0000-00-00 00:00:00
+oldest_part_to_get:
+oldest_part_to_merge_to: 20200220_20284_20840_7
+oldest_part_to_mutate_to:
+log_max_index: 596273
+log_pointer: 596274
+last_queue_update: 2020-02-20 08:34:32
+absolute_delay: 0
+total_replicas: 2
+active_replicas: 2
+```
+
+Sütun:
+
+- `database` (`String`)- Veritabanı adı
+- `table` (`String`)- Tablo adı
+- `engine` (`String`)- Tablo motoru adı
+- `is_leader` (`UInt8`)- Kopyanın lider olup olmadığı.
+    Bir seferde sadece bir kopya lider olabilir. Lider, gerçekleştirilecek arka plan birleştirmelerini seçmekten sorumludur.
+    Yazma işlemlerinin, lider olup olmadığına bakılmaksızın, kullanılabilir durumda olan ve ZK'da oturumu bulunan herhangi bir yinelemeye yapılabileceğini unutmayın.
+- `can_become_leader` (`UInt8`)- Yinelemenin lider olarak seçilip seçilemeyeceği.
+- `is_readonly` (`UInt8`)- Yinelemenin salt okunur modda olup olmadığı.
+    Yapılandırmada ZooKeeper bölümleri yoksa, ZooKeeper'da oturumlar yeniden başlatılırken bilinmeyen bir hata oluştuysa ve ZooKeeper'daki oturum yeniden başlatılırken bu mod açılır.
+- `is_session_expired` (`UInt8`)- ZooKeeper ile oturumun süresi doldu. Temelde aynı `is_readonly`.
+- `future_parts` (`UInt32`)- Henüz yapılmamış ekler veya birleştirmelerin sonucu olarak görünecek veri parçalarının sayısı.
+- `parts_to_check` (`UInt32`)- Doğrulama için kuyruktaki veri parçalarının sayısı. Hasar görmüş olabileceğinden şüphe varsa, bir parça doğrulama kuyruğuna konur.
+- `zookeeper_path` (`String`)- ZooKeeper'daki tablo verilerinin yolu.
+- `replica_name` (`String`)- ZooKeeper'daki çoğaltma adı. Aynı tablonun farklı kopyaları farklı adlara sahiptir.
+- `replica_path` (`String`)- ZooKeeper'daki çoğaltma verilerinin yolu. ‘zookeeper\_path/replicas/replica\_name’ birleştirilmesiyle aynıdır.
+- `columns_version` (`Int32`)- Tablo yapısının sürüm numarası. ALTER'ın kaç kez gerçekleştirildiğini gösterir. Kopyaların farklı sürümleri varsa, bazı kopyaların tüm değişiklikleri henüz uygulamadığı anlamına gelir.
+- `queue_size` (`UInt32`)- Yapılması beklenen işlemler için sıranın büyüklüğü. İşlemler; veri bloklarının eklenmesini, birleştirmeleri ve diğer bazı eylemleri içerir. Genellikle `future_parts` ile örtüşür.
+- `inserts_in_queue` (`UInt32`)- Yapılması gereken veri bloğu eklerinin sayısı. Eklemeler genellikle oldukça hızlı bir şekilde çoğaltılır. Bu sayı büyükse, bir şeylerin yanlış olduğu anlamına gelir.
+- `merges_in_queue` (`UInt32`)- Yapılmasını bekleyen birleştirme sayısı. Bazen birleştirmeler uzun sürer, bu nedenle bu değer uzun süre sıfırdan büyük olabilir.
+- `part_mutations_in_queue` (`UInt32`)- Yapılması beklenen mutasyon sayısı.
+- `queue_oldest_time` (`DateTime`) - Eğer `queue_size` 0'dan büyükse, en eski işlemin sıraya ne zaman eklendiğini gösterir.
+- `inserts_oldest_time` (`DateTime`) - Bkz. `queue_oldest_time`
+- `merges_oldest_time` (`DateTime`) - Bkz. `queue_oldest_time`
+- `part_mutations_oldest_time` (`DateTime`) - Bkz. `queue_oldest_time`
+
+Sonraki 4 sütun, yalnızca ZK ile aktif bir oturum varsa sıfır olmayan bir değere sahiptir.
+
+- `log_max_index` (`UInt64`)- Genel faaliyet günlüğündeki maksimum giriş numarası.
+- `log_pointer` (`UInt64`)- Çoğaltmanın yürütme kuyruğuna kopyalanan genel faaliyet günlüğündeki maksimum giriş numarası, artı bir. Eğer `log_pointer`, `log_max_index` değerinden küçükse bir şeyler yanlış demektir.
+- `last_queue_update` (`DateTime`)- Kuyruğun en son güncellendiği zaman.
+- `absolute_delay` (`UInt64`)- Geçerli kopyanın saniye cinsinden gecikmesi.
+- `total_replicas` (`UInt8`)- Bu tablonun bilinen kopyalarının toplam sayısı.
+- `active_replicas` (`UInt8`)- ZooKeeper'da oturumu olan bu tablonun kopyalarının sayısı (yani, çalışan kopyaların sayısı).
+
+Tüm sütunları talep ederseniz, her satır için ZooKeeper'dan birkaç okuma yapıldığından tablo biraz yavaş çalışabilir.
+Son 4 sütunu (log\_max\_index, log\_pointer, total\_replicas, active\_replicas) istemezseniz, tablo hızlı çalışır.
+
+Örneğin, her şeyin düzgün çalışıp çalışmadığını şu şekilde kontrol edebilirsiniz:
+
+``` sql
+SELECT
+    database,
+    table,
+    is_leader,
+    is_readonly,
+    is_session_expired,
+    future_parts,
+    parts_to_check,
+    columns_version,
+    queue_size,
+    inserts_in_queue,
+    merges_in_queue,
+    log_max_index,
+    log_pointer,
+    total_replicas,
+    active_replicas
+FROM system.replicas
+WHERE
+    is_readonly
+    OR is_session_expired
+    OR future_parts > 20
+    OR parts_to_check > 10
+    OR queue_size > 20
+    OR inserts_in_queue > 10
+    OR log_max_index - log_pointer > 10
+    OR total_replicas < 2
+    OR active_replicas < total_replicas
+```
+
+Bu sorgu hiçbir şey döndürmezse, her şeyin yolunda olduğu anlamına gelir.
+
+## sistem.ayarlar {#system-tables-system-settings}
+
+Geçerli kullanıcı için oturum ayarları hakkında bilgi içerir.
+
+Sütun:
+
+- `name` ([Dize](../sql_reference/data_types/string.md)) — Setting name.
+- `value` ([Dize](../sql_reference/data_types/string.md)) — Setting value.
+- `changed` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value.
+- `description` ([Dize](../sql_reference/data_types/string.md)) — Short setting description.
+- `min` ([Nullable](../sql_reference/data_types/nullable.md)([Dize](../sql_reference/data_types/string.md))) — Minimum value of the setting, if any is set via [kısıtlamalar](settings/constraints_on_settings.md#constraints-on-settings). Ayarın minimum değeri yoksa, [NULL](../sql_reference/syntax.md#null-literal) içerir.
+- `max` ([Nullable](../sql_reference/data_types/nullable.md)([Dize](../sql_reference/data_types/string.md))) — Maximum value of the setting, if any is set via [kısıtlamalar](settings/constraints_on_settings.md#constraints-on-settings). Ayarın maksimum değeri yoksa, şunları içerir [NULL](../sql_reference/syntax.md#null-literal). +- `readonly` ([Uİnt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting: + - `0` — Current user can change the setting. + - `1` — Current user can't change the setting. + +**Örnek** + +Aşağıdaki örnek, adı içeren ayarlar hakkında bilgi almak gösterilmiştir `min_i`. + +``` sql +SELECT * +FROM system.settings +WHERE name LIKE '%min_i%' +``` + +``` text +┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐ +│ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +│ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +└─────────────────────────────────────────────┴───────────┴─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘ +``` + +Kullanımı `WHERE changed` örneğin, kontrol etmek istediğinizde yararlı olabilir: + +- Olsun yapılandırma dosyaları, ayarları doğru şekilde yüklenmiş ve kullanımdadır. +- Geçerli oturumda değişen ayarlar. + + + +``` sql +SELECT * FROM system.settings WHERE changed AND name='load_balancing' +``` + +**Ayrıca bakınız** + +- [Ayarlar](settings/index.md#settings) +- [Sorgular için izinler](settings/permissions_for_queries.md#settings_readonly) +- [Ayarlardaki kısıtlamalar](settings/constraints_on_settings.md) + +## sistem.table\_engines {#system.table_engines} + +``` text +┌─name───────────────────┬─value───────┐ +│ max_threads │ 8 │ +│ use_uncompressed_cache │ 0 │ +│ load_balancing │ random │ +│ max_memory_usage │ 10000000000 │ +└────────────────────────┴─────────────┘ +``` + +## sistem.merge\_tree\_settings {#system-merge_tree_settings} + +İçin ayarlar hakkında bilgi içerir `MergeTree` Tablolar. + +Sütun: + +- `name` (String) — Setting name. +- `value` (String) — Setting value. +- `description` (String) — Setting description. +- `type` (String) — Setting type (implementation specific string value). +- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. + +## sistem.table\_engines {#system-table-engines} + +Sunucu tarafından desteklenen tablo motorlarının açıklamasını ve özellik destek bilgilerini içerir. + +Bu tablo aşağıdaki sütunları içerir (sütun türü parantez içinde gösterilir): + +- `name` (String) — The name of table engine. +- `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` yan. 
+- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [endeksleri atlama](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
+- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl).
+- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` ve `SAMPLE_BY`.
+- `supports_replication` (UInt8) — Flag that indicates if table engine supports [veri çoğaltma](../engines/table_engines/mergetree_family/replication.md).
+- `supports_deduplication` (UInt8) — Flag that indicates if table engine supports data deduplication.
+
+Örnek:
+
+``` sql
+SELECT *
+FROM system.table_engines
+WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
+```
+
+``` text
+┌─name──────────────────────────┬─supports_settings─┬─supports_skipping_indices─┬─supports_sort_order─┬─supports_ttl─┬─supports_replication─┬─supports_deduplication─┐
+│ Kafka                         │                 1 │                         0 │                   0 │            0 │                    0 │                      0 │
+│ MergeTree                     │                 1 │                         1 │                   1 │            1 │                    0 │                      0 │
+│ ReplicatedCollapsingMergeTree │                 1 │                         1 │                   1 │            1 │                    1 │                      1 │
+└───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘
+```
+
+**Ayrıca bakınız**
+
+- MergeTree ailesi [sorgu yan tümceleri](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses)
+- Kafka [ayarlar](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
+- Join [ayarlar](../engines/table_engines/special/join.md#join-limitations-and-settings)
+
+## sistem.Tablolar {#system-tables}
+
+Sunucunun bildiği her tablonun meta verilerini içerir. Müstakil tablolar `system.tables` içinde gösterilmez.
+
+Bu tablo aşağıdaki sütunları içerir (sütun türü parantez içinde gösterilir):
+
+- `database` (String) — The name of the database the table is in.
+
+- `name` (String) — Table name.
+
+- `engine` (String) — Table engine name (without parameters).
+
+- `is_temporary` (UInt8) - Tablonun geçici olup olmadığını gösteren bayrak.
+
+- `data_path` (String) - Dosya sistemindeki tablo verilerinin yolu.
+
+- `metadata_path` (String) - Dosya sistemindeki tablo meta verilerinin yolu.
+
+- `metadata_modification_time` (DateTime) - Tablo meta verilerinin son değişiklik zamanı.
+
+- `dependencies_database` (Array(String)) - Veritabanı bağımlılıkları.
+
+- `dependencies_table` (Array(String)) - Tablo bağımlılıkları (geçerli tabloya dayalı [MaterializedView](../engines/table_engines/special/materializedview.md) tabloları).
+
+- `create_table_query` (String) - Tabloyu oluşturmak için kullanılan sorgu.
+
+- `engine_full` (String) - Tablo motorunun parametreleri.
+
+- `partition_key` (String) - Tabloda belirtilen bölüm anahtarı ifadesi.
+
+- `sorting_key` (String) - Tabloda belirtilen sıralama anahtarı ifadesi.
+
+- `primary_key` (String) - Tabloda belirtilen birincil anahtar ifadesi.
+
+- `sampling_key` (String) - Tabloda belirtilen örnekleme anahtarı ifadesi.
+
+- `storage_policy` (String) - Depolama politikası:
+
+    - [MergeTree](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes)
+    - [Dağılı](../engines/table_engines/special/distributed.md#distributed)
+
+- `total_rows` (Nullable(UInt64)) - Tablodaki tam satır sayısını hızlı bir şekilde belirlemek mümkünse toplam satır sayısı, aksi takdirde `Null` (temel alınan `Buffer` tablosu dahil).
+
+- `total_bytes` (Nullable(UInt64)) - Depolama alanındaki tablo için tam bayt sayısını hızlı bir şekilde belirlemek mümkünse toplam bayt sayısı, aksi takdirde `Null` (herhangi bir temel depolamayı **içermez**).
+
+    - If the table stores data on disk, returns used space on disk (i.e. compressed).
+    - Tablo verileri bellekte depolarsa, bellekte kullanılan bayt sayısını yaklaşık olarak döndürür.
+
+Bu `system.tables` tablosu `SHOW TABLES` sorgusunun uygulanmasında kullanılır.
+
+## sistem.zookeeper {#system-zookeeper}
+
+ZooKeeper yapılandırılmamışsa, tablo yoktur. Yapılandırmada tanımlanan ZooKeeper kümesinden veri okumayı sağlar.
+Sorguda, ‘path’ üzerinde eşitlik koşulu içeren bir WHERE maddesi olmalıdır. Bu, verilerini almak istediğiniz alt düğümlerin ZooKeeper yoludur.
+
+Sorgu `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'`, `/clickhouse` düğümündeki tüm alt düğümler için veri çıkarır.
+Tüm kök düğümler için veri çıkışı yapmak için, path = ‘/’ yazın.
+‘path’ içinde belirtilen yol yoksa bir istisna atılır.
+
+Sütun:
+
+- `name` (String) — The name of the node.
+- `path` (String) — The path to the node.
+- `value` (String) — Node value.
+- `dataLength` (Int32) — Size of the value.
+- `numChildren` (Int32) — Number of descendants.
+- `czxid` (Int64) — ID of the transaction that created the node.
+- `mzxid` (Int64) — ID of the transaction that last changed the node.
+- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants.
+- `ctime` (DateTime) — Time of node creation.
+- `mtime` (DateTime) — Time of the last modification of the node.
+- `version` (Int32) — Node version: the number of times the node was changed.
+- `cversion` (Int32) — Number of added or removed descendants.
+- `aversion` (Int32) — Number of changes to the ACL.
+- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.
+
+Örnek:
+
+``` sql
+SELECT *
+FROM system.zookeeper
+WHERE path = '/clickhouse/tables/01-08/visits/replicas'
+FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+name: example01-08-1.yandex.ru
+value:
+czxid: 932998691229
+mzxid: 932998691229
+ctime: 2015-03-27 16:49:51
+mtime: 2015-03-27 16:49:51
+version: 0
+cversion: 47
+aversion: 0
+ephemeralOwner: 0
+dataLength: 0
+numChildren: 7
+pzxid: 987021031383
+path: /clickhouse/tables/01-08/visits/replicas
+
+Row 2:
+──────
+name: example01-08-2.yandex.ru
+value:
+czxid: 933002738135
+mzxid: 933002738135
+ctime: 2015-03-27 16:57:01
+mtime: 2015-03-27 16:57:01
+version: 0
+cversion: 37
+aversion: 0
+ephemeralOwner: 0
+dataLength: 0
+numChildren: 7
+pzxid: 987021252247
+path: /clickhouse/tables/01-08/visits/replicas
+```
+
+## sistem.mutasyonlar {#system_tables-mutations}
+
+MergeTree tablolarının [mutasyonları](../sql_reference/statements/alter.md#alter-mutations) ve bunların ilerlemesi hakkında bilgi içerir. Her mutasyon komutu tek bir satırla temsil edilir. Tablo aşağıdaki sütunlara sahiptir:
+
+**veritabanı**, **Tablo** - Mutasyonun uygulandığı veritabanı ve tablonun adı.
+
+**mutation\_id** - Mutasyonun kimliği. Çoğaltılmış tablolar için bu kimlikler, ZooKeeper'daki `<table_path_in_zookeeper>/mutations/` dizinindeki znode adlarına karşılık gelir. Yinelenmemiş tablolar için kimlikler, tablonun veri dizinindeki dosya adlarına karşılık gelir.
+
+**komut** - Mutasyon komut dizgesi (sorgunun `ALTER TABLE [db.]table` kısmından sonrası).
+
+**create\_time** - Bu mutasyon komutunun yürütülmek üzere gönderildiği zaman.
+
+**block\_numbers.partition\_id**, **block\_numbers.number** - İç içe geçmiş bir sütun. Çoğaltılmış tabloların mutasyonları için her bölüm için bir kayıt içerir: bölüm kimliği ve mutasyon tarafından elde edilen blok numarası (her bölümde, yalnızca bu bölümdeki mutasyon tarafından elde edilen blok numarasından daha küçük numaralı bloklar içeren parçalar mutasyona uğrar). Çoğaltılmamış tablolarda, tüm bölümlerdeki blok numaraları tek bir sıra oluşturur. Bu, çoğaltılmamış tabloların mutasyonları için sütunun, mutasyon tarafından elde edilen tek bir blok numarasına sahip bir kayıt içereceği anlamına gelir.
+
+**parts\_to\_do** - Mutasyonun bitmesi için mutasyona uğraması gereken veri parçalarının sayısı.
+
+**is\_done** - Mutasyonun bitip bitmediği. `parts_to_do = 0` olsa bile, mutasyona uğraması gereken yeni bir veri parçası oluşturacak uzun süren bir ekleme nedeniyle çoğaltılmış bir tablonun mutasyonu henüz bitmemiş olabilir.
+
+Bazı parçaların mutasyona uğratılmasıyla ilgili sorunlar varsa, aşağıdaki sütunlar ek bilgi içerir:
+
+**latest\_failed\_part** - Mutasyona uğratılamayan en son parçanın adı.
+
+**latest\_fail\_time** - En son parça mutasyonu başarısızlığının zamanı.
+
+**latest\_fail\_reason** - En son parça mutasyonu başarısızlığına neden olan istisna mesajı.
+
+## sistem.diskler {#system_tables-disks}
+
+[Sunucu yapılandırmasında](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure) tanımlanan diskler hakkında bilgi içerir.
+
+Sütun:
+
+- `name` ([Dize](../sql_reference/data_types/string.md)) — Name of a disk in the server configuration.
+- `path` ([Dize](../sql_reference/data_types/string.md)) — Path to the mount point in the file system.
+- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Free space on disk in bytes.
+- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Disk volume in bytes.
+- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` disk yapılandırması parametresi.
+
+## sistem.storage\_policies {#system_tables-storage_policies}
+
+[Sunucu yapılandırmasında](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure) tanımlanan depolama ilkeleri ve birimleri hakkında bilgi içerir.
+
+Sütun:
+
+- `policy_name` ([Dize](../sql_reference/data_types/string.md)) — Name of the storage policy.
+- `volume_name` ([Dize](../sql_reference/data_types/string.md)) — Volume name defined in the storage policy.
+- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — Volume order number in the configuration.
+- `disks` ([Array(String)](../sql_reference/data_types/array.md)) — Disk names, defined in the storage policy.
+- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
+- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse starts to move data to the next volume in order.
+
+Depolama ilkesi birden fazla birim içeriyorsa, her birim için bilgiler tablonun ayrı satırlarında saklanır.
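+
+Örneğin, tanımlı ilkeleri ve disklerini listelemek için aşağıdaki gibi bir sorgu kullanılabilir (taslak bir örnek; çıktı tamamen sunucu yapılandırmanıza bağlıdır):
+
+``` sql
+SELECT
+    policy_name,
+    volume_name,
+    disks,
+    formatReadableSize(max_data_part_size) AS max_part_size
+FROM system.storage_policies
+```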
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/system_tables/)
diff --git a/docs/tr/operations/tips.md b/docs/tr/operations/tips.md
new file mode 100644
index 00000000000..e6b02a239c5
--- /dev/null
+++ b/docs/tr/operations/tips.md
@@ -0,0 +1,251 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 58
+toc_title: "Kullan\u0131m \xD6nerileri"
+---
+
+# Kullanım Önerileri {#usage-recommendations}
+
+## CPU Ölçekleme Vali {#cpu-scaling-governor}
+
+Her zaman `performance` ölçekleme valisini kullanın. `on-demand` ölçekleme valisi, sürekli yüksek talep altında çok daha kötü çalışır.
+
+``` bash
+$ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
+```
+
+## CPU sınırlamaları {#cpu-limitations}
+
+İşlemciler aşırı ısınabilir. CPU'nun saat hızının aşırı ısınma nedeniyle sınırlanıp sınırlanmadığını görmek için `dmesg` kullanın.
+Kısıtlama, veri merkezi düzeyinde harici olarak da ayarlanabilir. Yük altında izlemek için `turbostat` kullanabilirsiniz.
+
+## RAM {#ram}
+
+Küçük miktarlarda veri için (~200 GB'a kadar sıkıştırılmış), veri hacmi kadar bellek kullanmak en iyisidir.
+Büyük miktarda veri için ve etkileşimli (çevrimiçi) sorguları işlerken, sıcak veri alt kümesi sayfaların önbelleğine sığacak şekilde makul miktarda RAM (128 GB veya daha fazla) kullanmalısınız.
+Sunucu başına ~50 TB veri hacimleri için bile, 128 GB RAM kullanmak, 64 GB'ye kıyasla sorgu performansını önemli ölçüde artırır.
+
+Overcommit'i devre dışı bırakmayın. `cat /proc/sys/vm/overcommit_memory` değeri 0 veya 1 olmalıdır. Çalıştırın:
+
+``` bash
+$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
+```
+
+## Büyük Sayfalar {#huge-pages}
+
+Şeffaf büyük sayfaları (transparent huge pages) her zaman devre dışı bırakın. Bellek ayırıcılarına müdahale ederek önemli performans düşüşüne neden olur.
+
+``` bash
+$ echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
+```
+
+Bellek yönetimi için çekirdekte harcanan zamanı izlemek üzere `perf top` kullanın.
+Kalıcı büyük sayfaların da tahsis edilmesine gerek yoktur.
+
+## Depolama Alt Sistemi {#storage-subsystem}
+
+Bütçeniz SSD kullanmanıza izin veriyorsa, SSD kullanın.
+Değilse sabit disk kullanın. 7200 RPM SATA HDD'ler yeterlidir.
+
+Bağlı disk raflarına sahip az sayıda sunucu yerine, yerel sabit diskli çok sayıda sunucuyu tercih edin.
+Ancak nadiren sorgulanan arşivleri saklamak için raflar da iş görür.
+
+## RAID {#raid}
+
+HDD kullanırken, bunları RAID-10, RAID-5, RAID-6 veya RAID-50 olarak birleştirebilirsiniz.
+Linux için yazılım RAID daha iyidir (`mdadm` ile). LVM kullanmanızı önermiyoruz.
+RAID-10 oluştururken `far` düzenini seçin.
+Bütçeniz izin veriyorsa, RAID-10'u seçin.
+
+4'ten fazla diskiniz varsa, RAID-5 yerine RAID-6 (tercih edilen) veya RAID-50 kullanın.
+RAID-5, RAID-6 veya RAID-50 kullanırken, varsayılan değer genellikle en iyi seçenek olmadığından daima stripe\_cache\_size değerini artırın.
+
+``` bash
+$ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size
+```
+
+Formülü kullanarak cihaz sayısından ve blok boyutundan tam sayıyı hesaplayın: `2 * num_devices * chunk_size_in_bytes / 4096`.
+
+Tüm RAID yapılandırmaları için 1024 KB blok boyutu yeterlidir.
+Blok boyutunu asla çok küçük veya çok büyük ayarlamayın.
+
+SSD'DE RAID-0 kullanabilirsiniz.
+RAID kullanımı ne olursa olsun, her zaman veri güvenliği için çoğaltma kullanın.
+
+NCQ'yu uzun bir kuyrukla etkinleştirin. HDD için CFQ zamanlayıcısını, SSD için noop'u seçin. ‘readahead’ ayarını azaltmayın.
+HDD için yazma önbelleğini etkinleştirin.
+
+## Dosya Sistemi {#file-system}
+
+Ext4 en güvenilir seçenektir. Bağlama seçeneklerini `noatime, nobarrier` olarak ayarlayın.
+XFS de uygundur, ancak ClickHouse ile iyice test edilmemiştir.
+Diğer çoğu dosya sistemi de iyi çalışmalıdır. Gecikmeli tahsisli dosya sistemleri daha iyi çalışır.
+
+## Linux Çekirdeği {#linux-kernel}
+
+Eski bir Linux çekirdeği kullanmayın.
+
+## Ağ {#network}
+
+IPv6 kullanıyorsanız, rota önbelleğinin boyutunu artırın.
+3.2 öncesi Linux çekirdeklerinin IPv6 uygulamasında çok sayıda sorun vardı.
+
+Mümkünse en az 10 GB ağ kullanın. 1 Gb de çalışacak, ancak onlarca terabayt veri içeren kopyaları yamalamak veya büyük miktarda ara veriyle dağıtılmış sorguları işlemek için çok daha kötü olacaktır.
+
+## ZooKeeper {#zookeeper}
+
+Muhtemelen başka amaçlar için zaten ZooKeeper kullanıyorsunuz. Aşırı yüklü değilse, aynı ZooKeeper kurulumunu kullanabilirsiniz.
+
+It's best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated.
+
+Sonuç sıralı düğümler için yanlış olacağından, farklı ZooKeeper kümeleri arasında veri aktarmak için el ile yazılmış komut dosyalarını asla kullanmamalısınız. Aynı nedenle “zkcopy” yardımcı programını da asla kullanmayın: https://github.com/ksprojects/zkcopy/issues/15
+
+Varolan bir ZooKeeper kümesini ikiye bölmek istiyorsanız, doğru yol, yinelemelerinin sayısını artırmak ve sonra iki bağımsız küme olarak yeniden yapılandırmaktır.
+
+ZooKeeper'ı ClickHouse ile aynı sunucularda çalıştırmayın; çünkü ZooKeeper gecikmeye karşı çok hassastır ve ClickHouse mevcut tüm sistem kaynaklarını kullanabilir.
+
+Varsayılan ayarlarla ZooKeeper saatli bir bombadır:
+
+> ZooKeeper sunucusu, varsayılan yapılandırmayı kullanırken eski anlık görüntülerden ve günlüklerden dosyaları silmez (bkz. autopurge) ve bu operatörün sorumluluğundadır.
+
+Bu bombanın etkisiz hale getirilmesi gerekir.
+
+Aşağıdaki ZooKeeper (3.5.1) yapılandırması, 20 Mayıs 2017 itibarıyla Yandex.Metrica üretim ortamında kullanılmaktadır:
+
+zoo.cfg:
+
+``` bash
+# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
+
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=30000
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=10
+
+maxClientCnxns=2000
+
+maxSessionTimeout=60000000
+# the directory where the snapshot is stored.
+dataDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '}}' }}/data
+# Place the dataLogDir to a separate physical disc for better performance
+dataLogDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '}}' }}/logs
+
+autopurge.snapRetainCount=10
+autopurge.purgeInterval=1
+
+
+# To avoid seeks ZooKeeper allocates space in the transaction log file in
+# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
+# for changing the size of the blocks is to reduce the block size if snapshots
+# are taken more often. (Also, see snapCount).
+preAllocSize=131072
+
+# Clients can submit requests faster than ZooKeeper can process them,
+# especially if there are a lot of clients. To prevent ZooKeeper from running
+# out of memory due to queued requests, ZooKeeper will throttle clients so that
+# there is no more than globalOutstandingLimit outstanding requests in the
+# system. The default limit is 1,000. ZooKeeper logs transactions to a
+# transaction log. After snapCount transactions are written to a log file a
+# snapshot is started and a new transaction log file is started. The default
+# snapCount is 10,000.
+snapCount=3000000
+
+# If this option is defined, requests will be will logged to a trace file named
+# traceFile.year.month.day.
+#traceFile=
+
+# Leader accepts client connections. Default value is "yes". The leader machine
+# coordinates updates. For higher update throughput at the slight expense of
+# read throughput the leader can be configured to not accept clients and focus
+# on coordination.
+leaderServes=yes
+
+standaloneEnabled=false
+dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo.cfg.dynamic
+```
+
+Java sürümü:
+
+``` text
+Java(TM) SE Runtime Environment (build 1.8.0_25-b17)
+Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode)
+```
+
+JVM parametreleri:
+
+``` bash
+NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}
+ZOOCFGDIR=/etc/$NAME/conf
+
+# TODO this is really ugly
+# How to find out, which jars are needed?
+# seems, that log4j requires the log4j.properties file to be in the classpath
+CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper"
+
+ZOOCFG="$ZOOCFGDIR/zoo.cfg"
+ZOO_LOG_DIR=/var/log/$NAME
+USER=zookeeper
+GROUP=zookeeper
+PIDDIR=/var/run/$NAME
+PIDFILE=$PIDDIR/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+JAVA=/usr/bin/java
+ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
+JMXLOCALONLY=false
+JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \
+    -Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '}}' }} \
+    -Xloggc:/var/log/$NAME/zookeeper-gc.log \
+    -XX:+UseGCLogFileRotation \
+    -XX:NumberOfGCLogFiles=16 \
+    -XX:GCLogFileSize=16M \
+    -verbose:gc \
+    -XX:+PrintGCTimeStamps \
+    -XX:+PrintGCDateStamps \
+    -XX:+PrintGCDetails \
+    -XX:+PrintTenuringDistribution \
+    -XX:+PrintGCApplicationStoppedTime \
+    -XX:+PrintGCApplicationConcurrentTime \
+    -XX:+PrintSafepointStatistics \
+    -XX:+UseParNewGC \
+    -XX:+UseConcMarkSweepGC \
+-XX:+CMSParallelRemarkEnabled"
+```
+
+Salt init:
+
+``` text
+description "zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} centralized coordination service"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+limit nofile 8192 8192
+
+pre-start script
+    [ -r "/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment" ] || exit 0
+    . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment
+    [ -d $ZOO_LOG_DIR ] || mkdir -p $ZOO_LOG_DIR
+    chown $USER:$GROUP $ZOO_LOG_DIR
+end script
+
+script
+    . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment
+    [ -r /etc/default/zookeeper ] && . /etc/default/zookeeper
+    if [ -z "$JMXDISABLE" ]; then
+        JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY"
+    fi
+    exec start-stop-daemon --start -c $USER --exec $JAVA --name zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} \
+        -- -cp $CLASSPATH $JAVA_OPTS -Dzookeeper.log.dir=${ZOO_LOG_DIR} \
+        -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
+end script
+```
+
+{## [Orijinal makale](https://clickhouse.tech/docs/en/operations/tips/) ##}
diff --git a/docs/tr/operations/troubleshooting.md b/docs/tr/operations/troubleshooting.md
new file mode 100644
index 00000000000..f16a59767d6
--- /dev/null
+++ b/docs/tr/operations/troubleshooting.md
@@ -0,0 +1,146 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 46
+toc_title: "Ar\u0131za"
+---
+
+# Arıza {#troubleshooting}
+
+- [Kurulum](#troubleshooting-installation-errors)
+- [Sunucuya bağlanma](#troubleshooting-accepts-no-connections)
+- [Sorgu işleme](#troubleshooting-does-not-process-queries)
+- [Sorgu işleme verimliliği](#troubleshooting-too-slow)
+
+## Kurulum {#troubleshooting-installation-errors}
+
+### Apt-get ile ClickHouse deposundan Deb paketleri alınamıyor {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get}
+
+- Güvenlik Duvarı ayarlarını kontrol edin.
+- Depoya herhangi bir nedenle erişemiyorsanız, paketleri [Başlarken](../getting_started/index.md) makalesinde açıklandığı gibi indirin ve `sudo dpkg -i <packages>` komutunu kullanarak manuel olarak yükleyin. Ayrıca `tzdata` paketine de ihtiyacınız olacak.
+
+## Sunucuya bağlanma {#troubleshooting-accepts-no-connections}
+
+Olası sorunlar:
+
+- Sunucu çalışmıyor.
+- Beklenmeyen veya yanlış yapılandırma parametreleri.
+
+### Sunucu Çalışmıyor {#server-is-not-running}
+
+**Sunucunun çalışıp çalışmadığını kontrol edin**
+
+Komut:
+
+``` bash
+$ sudo service clickhouse-server status
+```
+
+Sunucu çalışmıyorsa, komutla başlatın:
+
+``` bash
+$ sudo service clickhouse-server start
+```
+
+**Günlükleri kontrol et**
+
+`clickhouse-server` ana günlüğü varsayılan olarak `/var/log/clickhouse-server/clickhouse-server.log` içindedir.
+
+Sunucu başarıyla başlatıldıysa, şu dizeleri görmelisiniz:
+
+- ` Application: starting up.` — Server started.
+- ` Application: Ready for connections.` — Server is running and ready for connections.
+
+Eğer `clickhouse-server` bir yapılandırma hatası nedeniyle başlatılamadıysa, bir hata açıklaması içeren `<Error>` dizesini görmelisiniz. Mesela:
+
+``` text
+2019.01.11 15:23:25.549505 [ 45 ] {} ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused
+```
+
+Dosyanın sonunda bir hata görmüyorsanız, şu dizeden başlayarak tüm dosyaya bakın:
+
+``` text
+ Application: starting up.
+```
+
+Sunucuda ikinci bir `clickhouse-server` örneği başlatmaya çalışırsanız, aşağıdaki günlüğü görürsünüz:
+
+``` text
+2019.01.11 15:25:11.151730 [ 1 ] {} : Starting ClickHouse 19.1.0 with revision 54413
+2019.01.11 15:25:11.154578 [ 1 ] {} Application: starting up
+2019.01.11 15:25:11.156361 [ 1 ] {} StatusFile: Status file ./status already exists - unclean restart. Contents:
+PID: 8510
+Started at: 2019-01-11 15:24:23
+Revision: 54413
+
+2019.01.11 15:25:11.156673 [ 1 ] {} Application: DB::Exception: Cannot lock file ./status. Another server instance in same directory is already running.
+2019.01.11 15:25:11.156682 [ 1 ] {} Application: shutting down
+2019.01.11 15:25:11.156686 [ 1 ] {} Application: Uninitializing subsystem: Logging Subsystem
+2019.01.11 15:25:11.156716 [ 2 ] {} BaseDaemon: Stop SignalListener thread
+```
+
+**system.d günlüklerine bakın**
+
+`clickhouse-server` günlüklerinde yararlı bir bilgi bulamazsanız veya hiç günlük yoksa, `system.d` günlüklerini şu komutu kullanarak görüntüleyebilirsiniz:
+
+``` bash
+$ sudo journalctl -u clickhouse-server
+```
+
+**Clickhouse-server'ı etkileşimli modda başlatın**
+
+``` bash
+$ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml
+```
+
+Bu komut, sunucuyu otomatik başlatma komut dosyasının standart parametreleriyle etkileşimli bir uygulama olarak başlatır. Bu modda `clickhouse-server`, tüm olay iletilerini konsola yazdırır.
+
+### Yapılandırma Parametreleri {#configuration-parameters}
+
+Kontrol:
+
+- Docker ayarları.
+
+    Bir IPv6 ağında Docker'da ClickHouse çalıştırıyorsanız, `network=host` ayarlandığından emin olun.
+
+- Bitiş noktası ayarları.
+
+    [listen\_host](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) ve [tcp\_port](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port) ayarlarını kontrol edin.
+
+    ClickHouse sunucusu varsayılan olarak yalnızca localhost bağlantılarını kabul eder.
+
+- HTTP protokolü ayarları.
+
+    HTTP API protokol ayarlarını denetleyin.
+
+- Güvenli bağlantı ayarları.
+
+    Kontrol:
+
+    - [tcp\_port\_secure](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) ayarı.
+    - [SSL sertifikaları](server_configuration_parameters/settings.md#server_configuration_parameters-openssl) için ayarlar.
+
+    Bağlanırken uygun parametreleri kullanın. Örneğin, `clickhouse_client` ile `port_secure` parametresini kullanın.
+
+- Kullanıcı ayarları.
+
+    Yanlış kullanıcı adı veya parola kullanıyor olabilirsiniz.
+
+## Sorgu İşleme {#troubleshooting-does-not-process-queries}
+
+ClickHouse sorguyu işleyemiyorsa, istemciye bir hata açıklaması gönderir. `clickhouse-client` içinde hatanın açıklamasını konsolda alırsınız. HTTP arabirimini kullanıyorsanız, ClickHouse hata açıklamasını yanıt gövdesinde gönderir. Mesela:
+
+``` bash
+$ curl 'http://localhost:8123/' --data-binary "SELECT a"
+Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception
+```
+
+`clickhouse-client`'ı `stack-trace` parametresiyle başlatırsanız, ClickHouse hata açıklamasıyla birlikte sunucu yığın izini döndürür.
+
+Bozuk bir bağlantı hakkında bir mesaj görebilirsiniz. Bu durumda sorguyu yineleyebilirsiniz. Sorguyu her çalıştırdığınızda bağlantı kesiliyorsa, sunucu günlüklerini hatalara karşı kontrol edin.
+
+## Sorgu işleme verimliliği {#troubleshooting-too-slow}
+
+Clickhouse'un çok yavaş çalıştığını görürseniz, sorgularınız için sunucu kaynaklarındaki ve ağdaki yükü profillemeniz gerekir.
+
+Sorguları profillemek için clickhouse-benchmark yardımcı programını kullanabilirsiniz. Saniyede işlenen sorgu sayısını, saniyede işlenen satır sayısını ve sorgu işleme sürelerinin yüzdeliklerini gösterir.
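+
+Sorgu günlüğü etkinse, yavaş sorguları sunucu tarafında `system.query_log` tablosu üzerinden de inceleyebilirsiniz; örneğin, bugünkü sorgu sürelerinin dağılımı için taslak bir sorgu:
+
+``` sql
+SELECT
+    count() AS queries,
+    quantiles(0.5, 0.9, 0.99)(query_duration_ms) AS duration_ms_quantiles
+FROM system.query_log
+WHERE type = 'QueryFinish' AND event_date = today()
+```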
diff --git a/docs/tr/operations/update.md b/docs/tr/operations/update.md new file mode 100644 index 00000000000..1529729321e --- /dev/null +++ b/docs/tr/operations/update.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 47 +toc_title: "ClickHouse G\xFCncelleme" +--- + +# ClickHouse Güncelleme {#clickhouse-update} + +ClickHouse DEB paketlerinden yüklüyse, sunucuda aşağıdaki komutları çalıştırın: + +``` bash +$ sudo apt-get update +$ sudo apt-get install clickhouse-client clickhouse-server +$ sudo service clickhouse-server restart +``` + +Önerilen deb paketleri dışında bir şey kullanarak ClickHouse yüklediyseniz, uygun güncelleştirme yöntemini kullanın. + +ClickHouse dağıtılmış bir güncelleştirmeyi desteklemiyor. İşlem, her ayrı sunucuda ardışık olarak gerçekleştirilmelidir. Bir kümedeki tüm sunucuları aynı anda güncelleştirmeyin veya küme Bir süre kullanılamaz. diff --git a/docs/tr/operations/utilities/clickhouse-benchmark.md b/docs/tr/operations/utilities/clickhouse-benchmark.md new file mode 100644 index 00000000000..952852d880e --- /dev/null +++ b/docs/tr/operations/utilities/clickhouse-benchmark.md @@ -0,0 +1,156 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 61 +toc_title: clickhouse-benchmark +--- + +# clickhouse-benchmark {#clickhouse-benchmark} + +Bir ClickHouse sunucusuna bağlanır ve art arda belirtilen sorguları gönderir. + +Sözdizimi: + +``` bash +$ echo "single query" | clickhouse-benchmark [keys] +``` + +veya + +``` bash +$ clickhouse-benchmark [keys] <<< "single query" +``` + +Bir dizi sorgu göndermek istiyorsanız, Bir metin dosyası oluşturun ve her sorguyu bu dosyadaki tek tek dizeye yerleştirin. Mesela: + +``` sql +SELECT * FROM system.numbers LIMIT 10000000 +SELECT 1 +``` + +Sonra bu dosyayı standart bir girişe geçirin `clickhouse-benchmark`. + +``` bash +clickhouse-benchmark [keys] < queries_file +``` + +## Anahtarlar {#clickhouse-benchmark-keys} + +- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` aynı anda gönderir. Varsayılan değer: 1. +- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1. +- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. İçin [karşılaştırma modu](#clickhouse-benchmark-comparison-mode) birden fazla kullanabilirsiniz `-h` anahtarlar. +- `-p N`, `--port=N` — Server port. Default value: 9000. For the [karşılaştırma modu](#clickhouse-benchmark-comparison-mode) birden fazla kullanabilirsiniz `-p` anahtarlar. +- `-i N`, `--iterations=N` — Total number of queries. Default value: 0. +- `-r`, `--randomize` — Random order of queries execution if there is more then one input query. +- `-s`, `--secure` — Using TLS connection. +- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` belirtilen zaman sınırına ulaşıldığında sorgu göndermeyi durdurur. Varsayılan değer: 0 (zaman sınırı devre dışı). +- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [karşılaştırma modu](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` gerçekleştirir [Bağımsız iki örnek öğrencinin t-testi](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) iki dağıtımın seçilen güven düzeyi ile farklı olup olmadığını belirlemek için sınayın. 
+- `--cumulative` — Printing cumulative data instead of data per interval. +- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`. +- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` belirtilen json dosyasına bir rapor verir. +- `--user=USERNAME` — ClickHouse user name. Default value: `default`. +- `--password=PSWD` — ClickHouse user password. Default value: empty string. +- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-bencmark` çıkışlar özel durumların izlerini yığın. +- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns answer to `clickhouse-benchmark` belirtilen aşamada. Olası değerler: `complete`, `fetch_columns`, `with_mergeable_state`. Varsayılan değer: `complete`. +- `--help` — Shows the help message. + +Bazı uygulamak istiyorsanız [ayarlar](../../operations/settings/index.md) sorgular için bunları bir anahtar olarak geçirin `--= SETTING_VALUE`. Mesela, `--max_memory_usage=1048576`. + +## Çıktı {#clickhouse-benchmark-output} + +Varsayılan olarak, `clickhouse-benchmark` her biri için raporlar `--delay` aralıklı. + +Rapor örneği: + +``` text +Queries executed: 10. + +localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675. + +0.000% 0.145 sec. +10.000% 0.146 sec. +20.000% 0.146 sec. +30.000% 0.146 sec. +40.000% 0.147 sec. +50.000% 0.148 sec. +60.000% 0.148 sec. +70.000% 0.148 sec. +80.000% 0.149 sec. +90.000% 0.150 sec. +95.000% 0.150 sec. +99.000% 0.150 sec. +99.900% 0.150 sec. +99.990% 0.150 sec. +``` + +Raporda bulabilirsiniz: + +- Sorgu sayısı `Queries executed:` alan. + +- İçeren durum dizesi (sırayla): + + - ClickHouse sunucusunun bitiş noktası. + - İşlenen sorgu sayısı. + - QPS: qps: kaç sorgu sunucusu saniyede belirtilen bir süre boyunca gerçekleştirilen `--delay` değişken. + - RPS: kaç satır sunucu saniyede belirtilen bir süre boyunca okuma `--delay` değişken. + - MıB / s: kaç mebibytes sunucu saniyede belirtilen bir süre boyunca okuma `--delay` değişken. + - sonuç RPS: sunucu tarafından belirtilen bir süre boyunca saniyede bir sorgunun sonucuna kaç satır yerleştirilir `--delay` değişken. + - sonuç MıB / s. kaç mebibytes sunucu tarafından belirtilen bir dönemde saniyede bir sorgu sonucu yerleştirilir `--delay` değişken. + +- Sorgu yürütme süresi yüzdelik. + +## Karşılaştırma modu {#clickhouse-benchmark-comparison-mode} + +`clickhouse-benchmark` iki çalışan ClickHouse sunucuları için performansları karşılaştırabilirsiniz. + +Karşılaştırma modunu kullanmak için, her iki sunucunun bitiş noktalarını iki çift `--host`, `--port` anahtarlar. Anahtarlar argüman listesindeki konuma göre eşleşti, ilk `--host` ilk ile eşleştirilir `--port` ve böyle devam eder. `clickhouse-benchmark` her iki sunucuya da bağlantılar kurar, sonra sorgular gönderir. Her sorgu rastgele seçilen bir sunucuya gönderilir. Sonuçlar her sunucu için ayrı ayrı gösterilir. + +## Örnek {#clickhouse-benchmark-example} + +``` bash +$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10 +``` + +``` text +Loaded 1 queries. + +Queries executed: 6. + +localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459. + +0.000% 0.159 sec. +10.000% 0.159 sec. +20.000% 0.159 sec. +30.000% 0.160 sec. +40.000% 0.160 sec. +50.000% 0.162 sec. +60.000% 0.164 sec. +70.000% 0.165 sec. +80.000% 0.166 sec. +90.000% 0.166 sec. +95.000% 0.167 sec. 
+99.000% 0.167 sec.
+99.900% 0.167 sec.
+99.990% 0.167 sec.
+
+
+
+Queries executed: 10.
+
+localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
+
+0.000% 0.159 sec.
+10.000% 0.159 sec.
+20.000% 0.160 sec.
+30.000% 0.163 sec.
+40.000% 0.164 sec.
+50.000% 0.165 sec.
+60.000% 0.166 sec.
+70.000% 0.166 sec.
+80.000% 0.167 sec.
+90.000% 0.167 sec.
+95.000% 0.170 sec.
+99.000% 0.172 sec.
+99.900% 0.172 sec.
+99.990% 0.172 sec.
+```
diff --git a/docs/tr/operations/utilities/clickhouse-copier.md b/docs/tr/operations/utilities/clickhouse-copier.md
new file mode 100644
index 00000000000..932519086c1
--- /dev/null
+++ b/docs/tr/operations/utilities/clickhouse-copier.md
@@ -0,0 +1,176 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 59
+toc_title: clickhouse-fotokopi makinesi
+---
+
+# clickhouse-fotokopi makinesi {#clickhouse-copier}
+
+Bir kümedeki tablolardan başka bir (veya aynı) kümedeki tablolara veri kopyalar.
+
+Aynı işi gerçekleştirmek için farklı sunucularda birden fazla `clickhouse-copier` örneği çalıştırabilirsiniz. Süreçleri senkronize etmek için ZooKeeper kullanılır.
+
+Başladıktan sonra, `clickhouse-copier`:
+
+- ZooKeeper'a bağlanır ve şunları alır:
+
+    - Kopyalama işleri.
+    - Kopyalama işlerinin durumu.
+
+- İşleri gerçekleştirir.
+
+    Her çalışan süreç, kaynak kümenin “en yakın” parçasını seçer ve verileri hedef kümeye kopyalar; gerekirse verileri yeniden parçalara ayırır.
+
+`clickhouse-copier` ZooKeeper'daki değişiklikleri izler ve bunları anında uygular.
+
+Ağ trafiğini azaltmak için `clickhouse-copier`'ı kaynak verilerin bulunduğu sunucuda çalıştırmanızı öneririz.
+
+## Clickhouse-fotokopi makinesini çalıştırma {#running-clickhouse-copier}
+
+Yardımcı program el ile çalıştırılmalıdır:
+
+``` bash
+$ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir
+```
+
+Parametreler:
+
+- `daemon` — Starts `clickhouse-copier` daemon modunda.
+- `config` — The path to the `zookeeper.xml` ZooKeeper bağlantı parametreleri ile dosya.
+- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` süreçleri ve depolama görevleri. Görevler saklanır `$task-path/description`.
+- `task-file` — Optional path to file with task configuration for initial upload to ZooKeeper.
+- `task-upload-force` — Force upload `task-file` düğüm zaten var olsa bile.
+- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` oluşturuyor `clickhouse-copier_YYYYMMHHSS_<PID>` içinde alt dizinler `$base-dir`. Bu parametre atlanırsa, dizinler `clickhouse-copier`'ın başlatıldığı dizinde oluşturulur.
+
+## Zookeeper.xml biçimi {#format-of-zookeeper-xml}
+
+``` xml
+<yandex>
+    <logger>
+        <level>trace</level>
+        <size>100M</size>
+        <count>3</count>
+    </logger>
+
+    <zookeeper>
+        <node index="1">
+            <host>127.0.0.1</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+</yandex>
+```
+
+## Kopyalama görevlerinin yapılandırması {#configuration-of-copying-tasks}
+
+``` xml
+<yandex>
+    <!-- Configuration of clusters as in an ordinary server config -->
+    <remote_servers>
+        <source_cluster>
+            <shard>
+                <internal_replication>false</internal_replication>
+                    <replica>
+                        <host>127.0.0.1</host>
+                        <port>9000</port>
+                    </replica>
+            </shard>
+            ...
+        </source_cluster>
+
+        <destination_cluster>
+        ...
+        </destination_cluster>
+    </remote_servers>
+
+    <!-- How many simultaneously active workers are possible.
+         If you run more workers superfluous workers will sleep. -->
+    <max_workers>2</max_workers>
+
+    <!-- Setting used to fetch (pull) data from source cluster tables -->
+    <settings_pull>
+        <readonly>1</readonly>
+    </settings_pull>

+    <!-- Setting used to insert (push) data to destination cluster tables -->
+    <settings_push>
+        <readonly>0</readonly>
+    </settings_push>
+
+    <!-- Common setting for fetch (pull) and insert (push) operations.
+         They are overlaid by <settings_pull/> and <settings_push/> respectively. -->
+    <settings>
+        <connect_timeout>3</connect_timeout>
+        <!-- Sync insert is set forcibly, leave it here just in case. -->
+        <insert_distributed_sync>1</insert_distributed_sync>
+    </settings>
+
+    <!-- Copying tasks description. -->
+    <tables>
+        <!-- A table task that copies one table. -->
+        <table_hits>
+            <!-- Source cluster name (from <remote_servers/> section) and tables in it that should be copied -->
+            <cluster_pull>source_cluster</cluster_pull>
+            <database_pull>test</database_pull>
+            <table_pull>hits</table_pull>
+
+            <!-- Destination cluster name and tables in which the data should be inserted -->
+            <cluster_push>destination_cluster</cluster_push>
+            <database_push>test</database_push>
+            <table_push>hits2</table_push>
+
+            <!-- Engine of destination tables -->
+            <engine>
+            ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}')
+            PARTITION BY toMonday(date)
+            ORDER BY (CounterID, EventDate)
+            </engine>
+
+            <!-- Sharding key used to insert data to destination cluster -->
+            <sharding_key>jumpConsistentHash(intHash64(UserID), 2)</sharding_key>
+
+            <!-- Optional expression that filters data while pulling it from source servers -->
+            <where_condition>CounterID != 0</where_condition>
+
+            <!-- Optional section: the list of partitions that should be copied -->
+            <enabled_partitions>
+                <partition>'2018-02-26'</partition>
+                <partition>'2018-03-05'</partition>
+                ...
+            </enabled_partitions>
+        </table_hits>
+
+        <table_visits>
+        ...
+        </table_visits>
+    </tables>
+</yandex>
+```
+
+`clickhouse-copier`, `/task/path/description` içindeki değişiklikleri izler ve onları anında uygular. Örneğin, `max_workers` değerini değiştirirseniz, görevleri çalıştıran süreçlerin sayısı da değişir.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/)
diff --git a/docs/tr/operations/utilities/clickhouse-local.md b/docs/tr/operations/utilities/clickhouse-local.md
new file mode 100644
index 00000000000..5e47459b670
--- /dev/null
+++ b/docs/tr/operations/utilities/clickhouse-local.md
@@ -0,0 +1,81 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 60
+toc_title: clickhouse-yerel
+---
+
+# clickhouse-yerel {#clickhouse-local}
+
+Bu `clickhouse-local` program, ClickHouse sunucusunu dağıtmak ve yapılandırmak zorunda kalmadan yerel dosyalar üzerinde hızlı işlem yapmanızı sağlar.
+
+Tabloları temsil eden verileri kabul eder ve bunları [ClickHouse SQL lehçesi](../../sql_reference/index.md) ile sorgular.
+
+`clickhouse-local` ClickHouse server ile aynı çekirdeği kullanır, bu nedenle özelliklerin çoğunu ve aynı format ve tablo motorlarını destekler.
+
+Varsayılan olarak `clickhouse-local`, aynı ana bilgisayardaki verilere erişemez, ancak `--config-file` değişkeni ile sunucu yapılandırmasının yüklenmesini destekler.
+
+!!! warning "Uyarıcı"
+    Üretim sunucusu yapılandırmasını `clickhouse-local` içine yüklemek tavsiye edilmez, çünkü insan hatası durumunda veriler zarar görebilir.
+
+## Kullanma {#usage}
+
+Temel kullanım:
+
+``` bash
+$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
+```
+
+Değişkenler:
+
+- `-S`, `--structure` — table structure for input data.
+- `-if`, `--input-format` — input format, `TSV` varsayılan olarak.
+- `-f`, `--file` — path to data, `stdin` varsayılan olarak.
+- `-q` `--query` — `;` ayırıcısıyla yürütülecek sorgular.
+- `-N`, `--table` — table name where to put output data, `table` varsayılan olarak.
+- `-of`, `--format`, `--output-format` — output format, `TSV` varsayılan olarak.
+- `--stacktrace` — whether to dump debug output in case of exception.
+- `--verbose` — more details on query execution.
+- `-s` — disables `stderr` logging.
+- `--config-file` — path to configuration file in same format as for ClickHouse server, by default the configuration empty.
+- `--help` — arguments references for `clickhouse-local`.
+
+Ayrıca, `--config-file` yerine daha yaygın olarak kullanılan her ClickHouse yapılandırma değişkeni için argümanlar vardır.
+
+## Örnekler {#examples}
+
+``` bash
+$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
+Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
+1 2
+3 4
+```
+
+Önceki örnek şununla aynıdır:
+
+``` bash
+$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
+Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
+1 2
+3 4
+```
+
+Şimdi her Unix kullanıcısı için bellek kullanımını çıkaralım:
+
+``` bash
+$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
+```
+
+``` text
+Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec.
+┏━━━━━━━━━━┳━━━━━━━━━━┓
+┃ user     ┃ memTotal ┃
+┡━━━━━━━━━━╇━━━━━━━━━━┩
+│ bayonet  │    113.5 │
+├──────────┼──────────┤
+│ root     │      8.8 │
+├──────────┼──────────┤
+...
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/) diff --git a/docs/tr/operations/utilities/index.md b/docs/tr/operations/utilities/index.md new file mode 100644 index 00000000000..17d3849cc6a --- /dev/null +++ b/docs/tr/operations/utilities/index.md @@ -0,0 +1,15 @@ +--- +machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: Programlar
+toc_priority: 56
+toc_title: "Genel bak\u0131\u015F"
+---
+
+# ClickHouse Programları {#clickhouse-utility}
+
+- [clickhouse-yerel](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this.
+- [clickhouse-fotokopi makinesi](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
+- [clickhouse-benchmark](clickhouse-benchmark.md) — Loads server with the custom queries and settings.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/operations/utils/) diff --git a/docs/tr/sql_reference/aggregate_functions/combinators.md b/docs/tr/sql_reference/aggregate_functions/combinators.md new file mode 100644 index 00000000000..693bf5e0348 --- /dev/null +++ b/docs/tr/sql_reference/aggregate_functions/combinators.md @@ -0,0 +1,166 @@ +--- +machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 37
+toc_title: "Toplama fonksiyonu birle\u015Ftiriciler"
+---
+
+# Toplama Fonksiyonu Birleştiricileri {#aggregate_functions_combinators}
+
+Bir toplama işlevinin adına bir sonek eklenebilir. Bu, toplama işlevinin çalışma şeklini değiştirir.
+
+## -If {#agg-functions-combinator-if}
+
+The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (UInt8 type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings).
+
+Örnekler: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` ve böyle devam eder.
+
+Koşullu toplama işlevleriyle, alt sorgular ve `JOIN`'ler kullanmadan aynı anda birkaç koşul için toplamları hesaplayabilirsiniz. Örneğin, Yandex.Metrica'da koşullu toplama işlevleri, segment karşılaştırma işlevselliğini uygulamak için kullanılır.
+
+## -Array {#agg-functions-combinator-array}
+
+-Array soneki herhangi bir toplama işlevine eklenebilir. Bu durumda, toplama işlevi ‘T’ türünde bağımsız değişkenler yerine ‘Array(T)’ türünde (diziler) bağımsız değişkenler alır. Toplama işlevi birden çok bağımsız değişken kabul ederse, bunlar eşit uzunlukta diziler olmalıdır. Dizileri işlerken, toplama işlevi tüm dizi öğeleri üzerinde orijinal toplama işlevi gibi çalışır.
+
+Örnek 1: `sumArray(arr)` - Tüm ‘arr’ dizilerinin tüm öğelerini toplar. Bu örnek, daha basit şöyle de yazılabilirdi: `sum(arraySum(arr))`.
+
+Örnek 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ arrays. Bu daha kolay bir şekilde de yapılabilir: `uniq(arrayJoin(arr))`, ancak bir sorguya ‘arrayJoin’ eklemek her zaman mümkün değildir.
+
+-If ve -Array birleştirilebilir. Ancak, önce ‘Array’, sonra ‘If’ gelmelidir. Örnekler: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Bu sıralama nedeniyle, ‘cond’ argümanı bir dizi olmayacaktır.
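+
+As a quick, self-contained sketch of how the `-Array` and `-If` combinators compose (the inline rows below are made up purely for illustration):
+
+``` sql
+SELECT
+    sumArray(arr) AS total,          -- sums every element of every row's array
+    uniqArrayIf(arr, flag = 1) AS u  -- unique elements, counted only over rows where flag = 1
+FROM
+(
+    SELECT [1, 2, 2] AS arr, 1 AS flag
+    UNION ALL
+    SELECT [3, 4, 5] AS arr, 0 AS flag
+)
+```
+
+Here `total` is 17 (all six elements summed) and `u` is 2 (the unique elements of `[1, 2, 2]` only): the `If` part filters whole rows first, and the `Array` part then unpacks each remaining array.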
+
+## -State {#agg-functions-combinator-state}
+
+Bu birleştiriciyi uygularsanız, toplama işlevi elde edilen değeri döndürmez (örneğin, [uniq](reference.md#agg_function-uniq) fonksiyonu için benzersiz değerlerin sayısını); bunun yerine toplamanın bir ara durumunu döndürür (`uniq` için bu, benzersiz değerlerin sayısını hesaplamak için kullanılan karma tablodur). Bu, daha fazla işlem için kullanılabilen veya toplamayı daha sonra bitirmek üzere bir tabloda saklanabilen bir `AggregateFunction(...)` değeridir.
+
+Bu durumlarla çalışmak için şunları kullanın:
+
+- [AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) tablo motoru.
+- [finalizeAggregation](../../sql_reference/functions/other_functions.md#function-finalizeaggregation) işlevi.
+- [runningAccumulate](../../sql_reference/functions/other_functions.md#function-runningaccumulate) işlevi.
+- [-Merge](#aggregate_functions_combinators-merge) birleştiricisi.
+- [-MergeState](#aggregate_functions_combinators-mergestate) birleştiricisi.
+
+## -Merge {#aggregate_functions_combinators-merge}
+
+Bu birleştiriciyi uygularsanız, toplama işlevi ara toplama durumunu bağımsız değişken olarak alır, toplama işlemini tamamlamak için durumları birleştirir ve elde edilen değeri döndürür.
+
+## -MergeState {#aggregate_functions_combinators-mergestate}
+
+Ara toplama durumlarını -Merge birleştiricisi ile aynı şekilde birleştirir. Bununla birlikte, elde edilen değeri döndürmez; -State birleştiricisine benzer bir ara toplama durumu döndürür.
+
+## -ForEach {#agg-functions-combinator-foreach}
+
+Tablolar için bir toplama işlevini, karşılık gelen dizi öğelerini toplayan ve bir dizi sonuç döndüren, diziler için bir toplama işlevine dönüştürür. Mesela, `sumForEach`, `[1, 2]`, `[3, 4, 5]` ve `[6, 7]` dizileri için, karşılık gelen dizi öğelerini topladıktan sonra `[10, 13, 5]` sonucunu döndürür.
+
+## -OrDefault {#agg-functions-combinator-ordefault}
+
+Toplanacak hiçbir şey yoksa, toplama işlevinin dönüş türünün varsayılan değerini döndürür.
+
+``` sql
+SELECT avg(number), avgOrDefault(number) FROM numbers(0)
+```
+
+``` text
+┌─avg(number)─┬─avgOrDefault(number)─┐
+│         nan │                    0 │
+└─────────────┴──────────────────────┘
+```
+
+## -OrNull {#agg-functions-combinator-ornull}
+
+Toplanacak hiçbir şey yoksa `NULL` döndürür. Dönüş sütunu null yapılabilir (nullable) olur.
+
+``` sql
+SELECT avg(number), avgOrNull(number) FROM numbers(0)
+```
+
+``` text
+┌─avg(number)─┬─avgOrNull(number)─┐
+│         nan │              ᴺᵁᴸᴸ │
+└─────────────┴───────────────────┘
+```
+
+-OrDefault ve -OrNull, diğer birleştiricilerle birleştirilebilir. Bu, toplama işlevi boş girdiyi kabul etmediğinde yararlıdır.
+
+``` sql
+SELECT avgOrNullIf(x, x > 10)
+FROM
+(
+    SELECT toDecimal32(1.23, 2) AS x
+)
+```
+
+``` text
+┌─avgOrNullIf(x, greater(x, 10))─┐
+│                           ᴺᵁᴸᴸ │
+└────────────────────────────────┘
+```
+
+## -Resample {#agg-functions-combinator-resample}
+
+Verileri gruplara ayırmanızı ve ardından bu gruplardaki verileri ayrı ayrı toplamanızı sağlar. Gruplar, bir sütundaki değerlerin aralıklara bölünmesiyle oluşturulur.
+
+``` sql
+<aggFunction>Resample(start, end, step)(<aggFunction_params>, resampling_key)
+```
+
+**Parametre**
+
+- `start` — `resampling_key` değerleri için gereken aralığın tamamının başlangıç değeri.
+- `end` — `resampling_key` değerleri için gereken aralığın tamamının bitiş değeri. Aralığın tamamı `end` değerini içermez: `[start, end)`.
+- `step` — Aralığın tamamını alt aralıklara ayırmak için kullanılan adım. `aggFunction`, bu alt aralıkların her biri üzerinde bağımsız olarak yürütülür.
+- `resampling_key` — Değerleri, verileri aralıklara ayırmak için kullanılan sütun.
+- `aggFunction_params` — `aggFunction` parametreleri.
+
+**Döndürülen değerler**
+
+- Her alt aralık için `aggFunction` sonuçlarından oluşan dizi.
+
+**Örnek**
+
+Aşağıdaki verilere sahip `people` tablosunu düşünün:
+
+``` text
+┌─name───┬─age─┬─wage─┐
+│ John   │  16 │   10 │
+│ Alice  │  30 │   15 │
+│ Mary   │  35 │    8 │
+│ Evelyn │  48 │ 11.5 │
+│ David  │  62 │  9.9 │
+│ Brian  │  60 │   16 │
+└────────┴─────┴──────┘
+```
+
+Yaşları `[30,60)` ve `[60,75)` aralıklarında olan kişilerin isimlerini alalım. Yaş için tamsayı temsilini kullandığımızdan, `[30, 59]` ve `[60, 74]` aralıklarındaki yaşları elde ederiz.
+
+İsimleri bir dizide toplamak için [groupArray](reference.md#agg_function-grouparray) toplama işlevini kullanırız. Bir argüman alır; bizim durumumuzda bu, `name` sütunudur. `groupArrayResample` fonksiyonu, isimleri yaşa göre toplamak için `age` sütununu kullanmalıdır. Gerekli aralıkları tanımlamak için `groupArrayResample` işlevine `30, 75, 30` argümanlarını geçiririz.
+
+``` sql
+SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
+```
+
+``` text
+┌─groupArrayResample(30, 75, 30)(name, age)─────┐
+│ [['Alice','Mary','Evelyn'],['David','Brian']] │
+└───────────────────────────────────────────────┘
+```
+
+Sonuçları inceleyelim.
+
+`John`, çok genç olduğu için örneklemin dışındadır. Diğer kişiler belirtilen yaş aralıklarına göre dağıtılmıştır.
+
+Şimdi belirtilen yaş aralıklarındaki toplam kişi sayısını ve ortalama ücretlerini sayalım.
+
+``` sql
+SELECT
+    countResample(30, 75, 30)(name, age) AS amount,
+    avgResample(30, 75, 30)(wage, age) AS avg_wage
+FROM people
+```
+
+``` text
+┌─amount─┬─avg_wage──────────────────┐
+│ [3,2]  │ [11.5,12.949999809265137] │
+└────────┴───────────────────────────┘
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) diff --git a/docs/tr/sql_reference/aggregate_functions/index.md b/docs/tr/sql_reference/aggregate_functions/index.md new file mode 100644 index 00000000000..b1699ab5b62 --- /dev/null +++ b/docs/tr/sql_reference/aggregate_functions/index.md @@ -0,0 +1,62 @@ +--- +machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "Toplama Fonksiyonlar\u0131"
+toc_priority: 33
+toc_title: "Giri\u015F"
+---
+
+# Toplama fonksiyonları {#aggregate-functions}
+
+Toplama fonksiyonları, veritabanı uzmanlarının beklediği [normal](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) şekilde çalışır.
+
+ClickHouse ayrıca şunları da destekler:
+
+- Sütunlara ek olarak başka parametreler de kabul eden [parametrik agrega fonksiyonları](parametric_functions.md#aggregate_functions_parametric).
+- Toplama işlevlerinin davranışını değiştiren [birleştiriciler](combinators.md#aggregate_functions_combinators).
+
+## NULL işleme {#null-processing}
+
+Toplama sırasında tüm `NULL` değerler atlanır.
+
+**Örnekler:**
+
+Bu tabloyu düşünün:
+
+``` text
+┌─x─┬────y─┐
+│ 1 │    2 │
+│ 2 │ ᴺᵁᴸᴸ │
+│ 3 │    2 │
+│ 3 │    3 │
+│ 3 │ ᴺᵁᴸᴸ │
+└───┴──────┘
+```
+
+Diyelim ki `y` sütunundaki değerleri toplamanız gerekiyor:
+
+``` sql
+SELECT sum(y) FROM t_null_big
+```
+
+    ┌─sum(y)─┐
+    │      7 │
+    └────────┘
+
+`sum` fonksiyonu `NULL` değerini `0` olarak yorumlar. Özellikle bu, işlev tüm değerleri `NULL` olan bir seçimin girdisini alırsa sonucun `NULL` değil `0` olacağı anlamına gelir.
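+
+A minimal sketch of that edge case (the subquery just manufactures a single all-`NULL` input; the type is an arbitrary choice for the illustration):
+
+``` sql
+SELECT sum(x)
+FROM
+(
+    SELECT CAST(NULL AS Nullable(UInt8)) AS x  -- the only input value is NULL
+)
+```
+
+    ┌─sum(x)─┐
+    │      0 │
+    └────────┘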
+ +Şimdi kullanabilirsiniz `groupArray` bir dizi oluşturmak için işlev `y` sütun: + +``` sql +SELECT groupArray(y) FROM t_null_big +``` + +``` text +┌─groupArray(y)─┐ +│ [2,2,3] │ +└───────────────┘ +``` + +`groupArray` içermez `NULL` elde edilen dizi. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/tr/sql_reference/aggregate_functions/parametric_functions.md b/docs/tr/sql_reference/aggregate_functions/parametric_functions.md new file mode 100644 index 00000000000..557b844ef00 --- /dev/null +++ b/docs/tr/sql_reference/aggregate_functions/parametric_functions.md @@ -0,0 +1,499 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 38 +toc_title: "Parametrik agrega fonksiyonlar\u0131" +--- + +# Parametrik Agrega Fonksiyonları {#aggregate_functions_parametric} + +Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. + +## çubuk {#histogram} + +Uyarlanabilir bir histogram hesaplar. Kesin sonuçları garanti etmez. + +``` sql +histogram(number_of_bins)(values) +``` + +İşlevleri kullanır [Bir Akış Paralel Karar Ağacı Algoritması](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). Histogram kutularının sınırları, yeni veriler bir işleve girdiğinde ayarlanır. Ortak durumda, kutu genişlikleri eşit değildir. + +**Parametre** + +`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. +`values` — [İfade](../syntax.md#syntax-expressions) giriş değerleri ile sonuçlanır. + +**Döndürülen değerler** + +- [Dizi](../../sql_reference/data_types/array.md) -den [Demetler](../../sql_reference/data_types/tuple.md) aşağıdaki format oftan: + + ``` + [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] + ``` + + - `lower` — Lower bound of the bin. + - `upper` — Upper bound of the bin. + - `height` — Calculated height of the bin. + +**Örnek** + +``` sql +SELECT histogram(5)(number + 1) +FROM ( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─histogram(5)(plus(number, 1))───────────────────────────────────────────┐ +│ [(1,4.5,4),(4.5,8.5,4),(8.5,12.75,4.125),(12.75,17,4.625),(17,20,3.25)] │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +Bir histogram ile görselleştirebilirsiniz [bar](../../sql_reference/functions/other_functions.md#function-bar) fonksiyon, örneğin: + +``` sql +WITH histogram(5)(rand() % 100) AS hist +SELECT + arrayJoin(hist).3 AS height, + bar(height, 0, 6, 5) AS bar +FROM +( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─height─┬─bar───┐ +│ 2.125 │ █▋ │ +│ 3.25 │ ██▌ │ +│ 5.625 │ ████▏ │ +│ 5.625 │ ████▏ │ +│ 3.375 │ ██▌ │ +└────────┴───────┘ +``` + +Bu durumda, histogram kutusu kenarlıklarını bilmediğinizi unutmamalısınız. + +## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} + +Dizinin desenle eşleşen bir olay zinciri içerip içermediğini denetler. + +``` sql +sequenceMatch(pattern)(timestamp, cond1, cond2, ...) +``` + +!!! warning "Uyarıcı" + Aynı saniyede meydana gelen olaylar sonucu etkileyen tanımsız bir sırada sırayla yatıyordu. + +**Parametre** + +- `pattern` — Pattern string. 
See [Desen sözdizimi](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` ve `DateTime`. Ayrıca desteklenen herhangi birini kullanabilirsiniz [Uİnt](../../sql_reference/data_types/int_uint.md) veri türleri. + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. En fazla 32 koşul argümanını iletebilirsiniz. İşlev yalnızca bu koşullarda açıklanan olayları dikkate alır. Sıra, bir koşulda açıklanmayan veriler içeriyorsa, işlev bunları atlar. + +**Döndürülen değerler** + +- 1, Eğer desen eşleşti. +- Desen eşleşmezse 0. + +Tür: `UInt8`. + + +**Desen sözdizimi** + +- `(?N)` — Matches the condition argument at position `N`. Şartlar numaralandırılmıştır `[1, 32]` Aralık. Mesela, `(?1)` argü theman thela eşleş their `cond1` parametre. + +- `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern. + +- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` birbirinden 1800 saniyeden fazla meydana gelen olayları eşleşir. Bu olaylar arasında herhangi bir olayın keyfi bir sayısı olabilir. Kullanabilirsiniz `>=`, `>`, `<`, `<=` operatörler. + +**Örnekler** + +Verileri göz önünde bulundurun `t` Tablo: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +└──────┴────────┘ +``` + +Sorguyu gerçekleştir: + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 1 │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +İşlev, 2 numarasının 1 numarayı takip ettiği olay zincirini buldu. Sayı bir olay olarak tanımlanmadığı için aralarında 3 sayısını atladı. Örnekte verilen olay zincirini ararken bu numarayı dikkate almak istiyorsak, bunun için bir koşul oluşturmalıyız. + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 3))─┐ +│ 0 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +Bu durumda, işlev desenle eşleşen olay zincirini bulamadı, çünkü 3 numaralı olay 1 ile 2 arasında gerçekleşti. Aynı durumda 4 numaralı koşulu kontrol edersek, sıra desenle eşleşir. + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 4))─┐ +│ 1 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [sequenceCount](#function-sequencecount) + +## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} + +Desenle eşleşen olay zincirlerinin sayısını sayar. İşlev, çakışmayan olay zincirlerini arar. Geçerli zincir eşleştirildikten sonra bir sonraki zinciri aramaya başlar. + +!!! warning "Uyarıcı" + Aynı saniyede meydana gelen olaylar sonucu etkileyen tanımsız bir sırada sırayla yatıyordu. + +``` sql +sequenceCount(pattern)(timestamp, cond1, cond2, ...) +``` + +**Parametre** + +- `pattern` — Pattern string. See [Desen sözdizimi](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` ve `DateTime`. 
Ayrıca desteklenen [UInt](../../sql_reference/data_types/int_uint.md) veri türlerinden herhangi birini de kullanabilirsiniz.
+
+- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. En fazla 32 koşul argümanı iletebilirsiniz. İşlev yalnızca bu koşullarda açıklanan olayları dikkate alır. Sıra, bir koşulda açıklanmayan veriler içeriyorsa, işlev bunları atlar.
+
+**Döndürülen değerler**
+
+- Eşleşen, çakışmayan olay zincirlerinin sayısı.
+
+Tür: `UInt64`.
+
+**Örnek**
+
+`t` tablosundaki verileri göz önünde bulundurun:
+
+``` text
+┌─time─┬─number─┐
+│    1 │      1 │
+│    2 │      3 │
+│    3 │      2 │
+│    4 │      1 │
+│    5 │      3 │
+│    6 │      2 │
+└──────┴────────┘
+```
+
+Aralarında herhangi bir sayıda başka sayı varken, 2 sayısının 1 sayısından sonra kaç kez geldiğini sayalım:
+
+``` sql
+SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t
+```
+
+``` text
+┌─sequenceCount('(?1).*(?2)')(time, equals(number, 1), equals(number, 2))─┐
+│                                                                       2 │
+└─────────────────────────────────────────────────────────────────────────┘
+```
+
+**Ayrıca Bakınız**
+
+- [sequenceMatch](#function-sequencematch)
+
+## windowFunnel {#windowfunnel}
+
+Kayan bir zaman penceresinde olay zincirlerini arar ve zincirden gerçekleşen en fazla ardışık olay sayısını hesaplar.
+
+Fonksiyon şu algoritmaya göre çalışır:
+
+- İşlev, zincirdeki ilk koşulu tetikleyen verileri arar ve olay sayacını 1'e ayarlar. Kayan pencerenin başladığı an budur.
+
+- Zincirdeki olaylar pencere içinde sırayla gerçekleşirse sayaç artırılır. Olayların sırası bozulursa sayaç artırılmaz.
+
+- Verilerde çeşitli tamamlanma noktalarında birden çok olay zinciri varsa, işlev yalnızca en uzun zincirin boyutunu çıktılar.
+
+**Sözdizimi**
+
+``` sql
+windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
+```
+
+**Parametre**
+
+- `window` — Length of the sliding window in seconds.
+- `mode` — İsteğe bağlı bir argüman.
+    - `'strict'` — `'strict'` ayarlanırsa, windowFunnel() koşulları yalnızca benzersiz değerlere uygular.
+- `timestamp` — Name of the column containing the timestamp. Data types supported: [Tarihli](../../sql_reference/data_types/date.md), [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) ve diğer işaretsiz tamsayı türleri (timestamp `UInt64` türünde olsa bile, değeri Int64 maksimumu olan 2^63 - 1'i aşamaz).
+- `cond` — Conditions or data describing the chain of events. [UInt8](../../sql_reference/data_types/int_uint.md).
+
+**Döndürülen değer**
+
+Kayan zaman penceresi içinde zincirden ardışık olarak tetiklenen koşulların maksimum sayısı.
+Seçimdeki tüm zincirler analiz edilir.
+
+Tür: `Integer`.
+
+**Örnek**
+
+Kullanıcının çevrimiçi mağazada bir telefon seçip iki kez sipariş vermesi için belirli bir sürenin yeterli olup olmadığını belirleyin.
+
+Aşağıdaki olay zincirini ele alalım:
+
+1. Kullanıcı mağazadaki hesabına giriş yaptı (`eventID = 1003`).
+2. Kullanıcı bir telefon aradı (`eventID = 1007, product = 'phone'`).
+3. Kullanıcı sipariş verdi (`eventID = 1009`).
+4. Kullanıcı tekrar sipariş verdi (`eventID = 1010`).
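+
+The query below reads from a `trend` table that the document never defines; a minimal sketch that would reproduce the four input rows could look like this (the column types and the `MergeTree` ordering key are assumptions):
+
+``` sql
+-- Hypothetical DDL matching the columns used by the example
+CREATE TABLE trend
+(
+    event_date Date,
+    user_id UInt64,
+    timestamp DateTime,
+    eventID UInt16,
+    product String
+)
+ENGINE = MergeTree()
+ORDER BY (event_date, user_id);
+
+INSERT INTO trend VALUES
+    ('2019-01-28', 1, '2019-01-29 10:00:00', 1003, 'phone'),
+    ('2019-01-31', 1, '2019-01-31 09:00:00', 1007, 'phone'),
+    ('2019-01-30', 1, '2019-01-30 08:00:00', 1009, 'phone'),
+    ('2019-02-01', 1, '2019-02-01 08:00:00', 1010, 'phone');
+```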
+ +Giriş tablosu: + +``` text +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-28 │ 1 │ 2019-01-29 10:00:00 │ 1003 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-31 │ 1 │ 2019-01-31 09:00:00 │ 1007 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-30 │ 1 │ 2019-01-30 08:00:00 │ 1009 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-02-01 │ 1 │ 2019-02-01 08:00:00 │ 1010 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +``` + +Kullanıcının ne kadar uzakta olduğunu öğrenin `user_id` 2019 yılının Ocak-Şubat aylarında bir dönemde zincirden geçebilir. + +Sorgu: + +``` sql +SELECT + level, + count() AS c +FROM +( + SELECT + user_id, + windowFunnel(6048000000000000)(timestamp, eventID = 1003, eventID = 1009, eventID = 1007, eventID = 1010) AS level + FROM trend + WHERE (event_date >= '2019-01-01') AND (event_date <= '2019-02-02') + GROUP BY user_id +) +GROUP BY level +ORDER BY level ASC +``` + +Sonuç: + +``` text +┌─level─┬─c─┐ +│ 4 │ 1 │ +└───────┴───┘ +``` + +## saklama {#retention} + +İşlev, bağımsız değişken olarak 1'den 32'ye kadar bir dizi koşul türünü alır `UInt8` bu, etkinlik için belirli bir koşulun karşılanıp karşılanmadığını gösterir. +Herhangi bir koşul bir argüman olarak belirtilebilir (aşağıdaki gibi [WHERE](../../sql_reference/statements/select.md#select-where)). + +İlk hariç, koşullar çiftler halinde geçerlidir: birinci ve ikinci doğruysa, ikincinin sonucu, birinci ve fird doğruysa, üçüncüsü doğru olacaktır. + +**Sözdizimi** + +``` sql +retention(cond1, cond2, ..., cond32); +``` + +**Parametre** + +- `cond` — an expression that returns a `UInt8` sonuç (1 veya 0). + +**Döndürülen değer** + +1 veya 0 dizisi. + +- 1 — condition was met for the event. +- 0 — condition wasn't met for the event. + +Tür: `UInt8`. + +**Örnek** + +Hesaplamanın bir örneğini düşünelim `retention` site trafiğini belirlemek için işlev. + +**1.** Сreate a table to illustrate an example. + +``` sql +CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory; + +INSERT INTO retention_test SELECT '2020-01-01', number FROM numbers(5); +INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10); +INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15); +``` + +Giriş tablosu: + +Sorgu: + +``` sql +SELECT * FROM retention_test +``` + +Sonuç: + +``` text +┌───────date─┬─uid─┐ +│ 2020-01-01 │ 0 │ +│ 2020-01-01 │ 1 │ +│ 2020-01-01 │ 2 │ +│ 2020-01-01 │ 3 │ +│ 2020-01-01 │ 4 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-02 │ 0 │ +│ 2020-01-02 │ 1 │ +│ 2020-01-02 │ 2 │ +│ 2020-01-02 │ 3 │ +│ 2020-01-02 │ 4 │ +│ 2020-01-02 │ 5 │ +│ 2020-01-02 │ 6 │ +│ 2020-01-02 │ 7 │ +│ 2020-01-02 │ 8 │ +│ 2020-01-02 │ 9 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-03 │ 0 │ +│ 2020-01-03 │ 1 │ +│ 2020-01-03 │ 2 │ +│ 2020-01-03 │ 3 │ +│ 2020-01-03 │ 4 │ +│ 2020-01-03 │ 5 │ +│ 2020-01-03 │ 6 │ +│ 2020-01-03 │ 7 │ +│ 2020-01-03 │ 8 │ +│ 2020-01-03 │ 9 │ +│ 2020-01-03 │ 10 │ +│ 2020-01-03 │ 11 │ +│ 2020-01-03 │ 12 │ +│ 2020-01-03 │ 13 │ +│ 2020-01-03 │ 14 │ +└────────────┴─────┘ +``` + +**2.** Kullanıcıları benzersiz kimliğe göre grupla `uid` kullanarak `retention` işlev. 
+ +Sorgu: + +``` sql +SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r +FROM retention_test +WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') +GROUP BY uid +ORDER BY uid ASC +``` + +Sonuç: + +``` text +┌─uid─┬─r───────┐ +│ 0 │ [1,1,1] │ +│ 1 │ [1,1,1] │ +│ 2 │ [1,1,1] │ +│ 3 │ [1,1,1] │ +│ 4 │ [1,1,1] │ +│ 5 │ [0,0,0] │ +│ 6 │ [0,0,0] │ +│ 7 │ [0,0,0] │ +│ 8 │ [0,0,0] │ +│ 9 │ [0,0,0] │ +│ 10 │ [0,0,0] │ +│ 11 │ [0,0,0] │ +│ 12 │ [0,0,0] │ +│ 13 │ [0,0,0] │ +│ 14 │ [0,0,0] │ +└─────┴─────────┘ +``` + +**3.** Günde toplam site ziyaret sayısını hesaplayın. + +Sorgu: + +``` sql +SELECT + sum(r[1]) AS r1, + sum(r[2]) AS r2, + sum(r[3]) AS r3 +FROM +( + SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r + FROM retention_test + WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') + GROUP BY uid +) +``` + +Sonuç: + +``` text +┌─r1─┬─r2─┬─r3─┐ +│ 5 │ 5 │ 5 │ +└────┴────┴────┘ +``` + +Nerede: + +- `r1`- 2020-01-01 sırasında siteyi ziyaret eden tekil ziyaretçi sayısı ( `cond1` koşul). +- `r2`- 2020-01-01 ve 2020-01-02 arasında belirli bir süre boyunca siteyi ziyaret eden tekil ziyaretçi sayısı (`cond1` ve `cond2` şartlar). +- `r3`- 2020-01-01 ve 2020-01-03 arasında belirli bir süre boyunca siteyi ziyaret eden tekil ziyaretçi sayısı (`cond1` ve `cond3` şartlar). + +## uniqUpTo(N) (x) {#uniquptonx} + +Calculates the number of different argument values ​​if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1. + +Küçük Ns ile kullanım için tavsiye, kadar 10. N'nin maksimum değeri 100'dür. + +Bir toplama işlevinin durumu için, 1 + n \* bir bayt değerinin boyutuna eşit bellek miktarını kullanır. +Dizeler için, 8 baytlık kriptografik olmayan bir karma saklar. Yani, hesaplama dizeler için yaklaşık olarak hesaplanır. + +İşlev ayrıca birkaç argüman için de çalışır. + +Büyük bir N değeri kullanıldığında ve benzersiz değerlerin sayısı n'den biraz daha az olduğu durumlar dışında mümkün olduğunca hızlı çalışır. + +Kullanım örneği: + +``` text +Problem: Generate a report that shows only keywords that produced at least 5 unique users. +Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) + +## sumMapFiltered (keys\_to\_keep) (anahtarlar, değerler) {#summapfilteredkeys-to-keepkeys-values} + +Aynı davranış [sumMap](reference.md#agg_functions-summap) dışında bir dizi anahtar parametre olarak geçirilir. Bu, özellikle yüksek bir Anahtarlık ile çalışırken yararlı olabilir. diff --git a/docs/tr/sql_reference/aggregate_functions/reference.md b/docs/tr/sql_reference/aggregate_functions/reference.md new file mode 100644 index 00000000000..d52b82be996 --- /dev/null +++ b/docs/tr/sql_reference/aggregate_functions/reference.md @@ -0,0 +1,1878 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 36 +toc_title: "Ba\u015Fvurma" +--- + +# Fonksiyon Referans {#function-reference} + +## sayma {#agg_function-count} + +Satır veya NOT-NULL değerleri sayar. + +ClickHouse için aşağıdaki sözdizimleri destekler `count`: +- `count(expr)` veya `COUNT(DISTINCT expr)`. +- `count()` veya `COUNT(*)`. Bu `count()` sözdizimi ClickHouse özeldir. + +**Parametre** + +Fonksiyon alabilir: + +- Sıfır parametreler. +- Bir [ifade](../syntax.md#syntax-expressions). 
+ +**Döndürülen değer** + +- Fonksiyon parametreleri olmadan çağrılırsa, satır sayısını sayar. +- Eğer... [ifade](../syntax.md#syntax-expressions) geçirilir, daha sonra işlev bu ifadenin kaç kez NOT null döndürdüğünü sayar. İfad aede bir [Nullable](../../sql_reference/data_types/nullable.md)- type değeri, sonra sonucu `count` kalır değil `Nullable`. İfade döndürülürse işlev 0 döndürür `NULL` tüm satırlar için. + +Her iki durumda da döndürülen değerin türü [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Ayrıntı** + +ClickHouse destekler `COUNT(DISTINCT ...)` sözdizimi. Bu yapının davranışı Aşağıdakilere bağlıdır [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) ayar. Aşağıdakilerden hang theisini tanımlar [uniq\*](#agg_function-uniq) fonksiyonlar işlemi gerçekleştirmek için kullanılır. Varsayılan değer [uniqExact](#agg_function-uniqexact) işlev. + +Bu `SELECT count() FROM table` tablodaki girdi sayısı ayrı olarak depolanmadığı için sorgu en iyi duruma getirilmez. Tablodan küçük bir sütun seçer ve içindeki değerlerin sayısını sayar. + +**Örnekler** + +Örnek 1: + +``` sql +SELECT count() FROM t +``` + +``` text +┌─count()─┐ +│ 5 │ +└─────────┘ +``` + +Örnek 2: + +``` sql +SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' +``` + +``` text +┌─name──────────────────────────┬─value─────┐ +│ count_distinct_implementation │ uniqExact │ +└───────────────────────────────┴───────────┘ +``` + +``` sql +SELECT count(DISTINCT num) FROM t +``` + +``` text +┌─uniqExact(num)─┐ +│ 3 │ +└────────────────┘ +``` + +Bu örnek gösteriyor ki `count(DISTINCT num)` tarafından gerçekleştirilir `uniqExact` fonksiyonu göre `count_distinct_implementation` ayar değeri. + +## herhangi(x) {#agg_function-any} + +İlk karşılaşılan değeri seçer. +Sorgu herhangi bir sırada ve hatta her seferinde farklı bir sırada çalıştırılabilir, bu nedenle bu işlevin sonucu belirsizdir. +Belirli bir sonuç elde etmek için ‘min’ veya ‘max’ fonksiyon yerine ‘any’. + +Bazı durumlarda, yürütme sırasına güvenebilirsiniz. Bu, select ORDER BY kullanan bir alt sorgudan geldiğinde durumlar için geçerlidir. + +Ne zaman bir `SELECT` sorgu vardır `GROUP BY` yan tümce veya en az bir toplama işlevi, ClickHouse (Mysql'in aksine), tüm ifadelerin `SELECT`, `HAVING`, ve `ORDER BY` anahtar functionslardan veya toplama işlev .lerinden hesaplan .malıdır. Başka bir deyişle, tablodan seçilen her sütun, anahtarlarda veya toplama işlevlerinde kullanılmalıdır. Mysql'de olduğu gibi davranış elde etmek için, diğer sütunları `any` toplama işlevi. + +## anyHeavy (x) {#anyheavyx} + +Kullanarak sık oluşan bir değer seçer [ağır vurucular](http://www.cs.umd.edu/~samir/498/karp.pdf) algoritma. Sorgunun yürütme iş parçacığı her durumda yarısından fazlasını oluşan bir değer varsa, bu değer döndürülür. Normalde, sonuç belirsizdir. + +``` sql +anyHeavy(column) +``` + +**Değişkenler** + +- `column` – The column name. + +**Örnek** + +Tak thee the [OnTime](../../getting_started/example_datasets/ontime.md) veri kümesi ve herhangi bir sık oluşan değeri seçin `AirlineID` sütun. + +``` sql +SELECT anyHeavy(AirlineID) AS res +FROM ontime +``` + +``` text +┌───res─┐ +│ 19690 │ +└───────┘ +``` + +## anyLast(x) {#anylastx} + +Karşılaşılan son değeri seçer. +Sonuç için olduğu kadar belirsiz `any` işlev. + +## groupBitAnd {#groupbitand} + +Bitwise uygular `AND` sayı serisi için. + +``` sql +groupBitAnd(expr) +``` + +**Parametre** + +`expr` – An expression that results in `UInt*` tür. 
+ +**Dönüş değeri** + +Bu değer `UInt*` tür. + +**Örnek** + +Test verileri: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +Sorgu: + +``` sql +SELECT groupBitAnd(num) FROM t +``` + +Nerede `num` test verileri ile sütundur. + +Sonuç: + +``` text +binary decimal +00000100 = 4 +``` + +## groupBitOr {#groupbitor} + +Bitwise uygular `OR` sayı serisi için. + +``` sql +groupBitOr(expr) +``` + +**Parametre** + +`expr` – An expression that results in `UInt*` tür. + +**Dönüş değeri** + +Bu değer `UInt*` tür. + +**Örnek** + +Test verileri: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +Sorgu: + +``` sql +SELECT groupBitOr(num) FROM t +``` + +Nerede `num` test verileri ile sütundur. + +Sonuç: + +``` text +binary decimal +01111101 = 125 +``` + +## groupBitXor {#groupbitxor} + +Bitwise uygular `XOR` sayı serisi için. + +``` sql +groupBitXor(expr) +``` + +**Parametre** + +`expr` – An expression that results in `UInt*` tür. + +**Dönüş değeri** + +Bu değer `UInt*` tür. + +**Örnek** + +Test verileri: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +Sorgu: + +``` sql +SELECT groupBitXor(num) FROM t +``` + +Nerede `num` test verileri ile sütundur. + +Sonuç: + +``` text +binary decimal +01101000 = 104 +``` + +## groupBitmap {#groupbitmap} + +İşaretsiz tamsayı sütun, Uınt64 tür iade önem, gelen bit eşlem veya Toplama hesaplamaları suffix ekleme -Devlet, sonra iade [bitmap nesnesi](../../sql_reference/functions/bitmap_functions.md). + +``` sql +groupBitmap(expr) +``` + +**Parametre** + +`expr` – An expression that results in `UInt*` tür. + +**Dönüş değeri** + +Bu değer `UInt64` tür. + +**Örnek** + +Test verileri: + +``` text +UserID +1 +1 +2 +3 +``` + +Sorgu: + +``` sql +SELECT groupBitmap(UserID) as num FROM t +``` + +Sonuç: + +``` text +num +3 +``` + +## min (x) {#agg_function-min} + +Minimum hesaplar. + +## max (x) {#agg_function-max} + +Maksimum hesaplar. + +## argMin (arg, val) {#agg-function-argmin} + +Hesaplar ‘arg’ minimum değer ‘val’ değer. Birkaç farklı değer varsa ‘arg’ minimum değerler için ‘val’, karşılaşılan bu değerlerin ilki çıktıdır. + +**Örnek:** + +``` text +┌─user─────┬─salary─┐ +│ director │ 5000 │ +│ manager │ 3000 │ +│ worker │ 1000 │ +└──────────┴────────┘ +``` + +``` sql +SELECT argMin(user, salary) FROM salary +``` + +``` text +┌─argMin(user, salary)─┐ +│ worker │ +└──────────────────────┘ +``` + +## argMax (arg, val) {#agg-function-argmax} + +Hesaplar ‘arg’ maksimum değer ‘val’ değer. Birkaç farklı değer varsa ‘arg’ maksimum değerler için ‘val’, karşılaşılan bu değerlerin ilki çıktıdır. + +## s (um (x) {#agg_function-sum} + +Toplamı hesaplar. +Sadece sayılar için çalışır. + +## sumWithOverflow(x) {#sumwithoverflowx} + +Giriş parametreleri için olduğu gibi sonuç için aynı veri türünü kullanarak sayıların toplamını hesaplar. Toplam bu veri türü için en büyük değeri aşarsa, işlev bir hata döndürür. + +Sadece sayılar için çalışır. + +## sumMap (anahtar, değer) {#agg_functions-summap} + +Toplam thelar ‘value’ belirtilen tuş accordinglara göre dizi ‘key’ dizi. +Eleman sayısı ‘key’ ve ‘value’ toplam her satır için aynı olmalıdır. +Returns a tuple of two arrays: keys in sorted order, and values ​​summed for the corresponding keys. 
+ +Örnek: + +``` sql +CREATE TABLE sum_map( + date Date, + timeslot DateTime, + statusMap Nested( + status UInt16, + requests UInt64 + ) +) ENGINE = Log; +INSERT INTO sum_map VALUES + ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); +SELECT + timeslot, + sumMap(statusMap.status, statusMap.requests) +FROM sum_map +GROUP BY timeslot +``` + +``` text +┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐ +│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │ +│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │ +└─────────────────────┴──────────────────────────────────────────────┘ +``` + +## skewPop {#skewpop} + +Hesaplar [çarpıklık](https://en.wikipedia.org/wiki/Skewness) bir sıra. + +``` sql +skewPop(expr) +``` + +**Parametre** + +`expr` — [İfade](../syntax.md#syntax-expressions) bir numara döndürüyor. + +**Döndürülen değer** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**Örnek** + +``` sql +SELECT skewPop(value) FROM series_with_value_column +``` + +## skewSamp {#skewsamp} + +Hesaplar [örnek çarpıklık](https://en.wikipedia.org/wiki/Skewness) bir sıra. + +Bir rassal değişkenin çarpıklığının tarafsız bir tahminini temsil eder. + +``` sql +skewSamp(expr) +``` + +**Parametre** + +`expr` — [İfade](../syntax.md#syntax-expressions) bir numara döndürüyor. + +**Döndürülen değer** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). Eğer `n <= 1` (`n` örnek boyutudur), daha sonra işlev döner `nan`. + +**Örnek** + +``` sql +SELECT skewSamp(value) FROM series_with_value_column +``` + +## kurtPop {#kurtpop} + +Hesaplar [kurtosis](https://en.wikipedia.org/wiki/Kurtosis) bir sıra. + +``` sql +kurtPop(expr) +``` + +**Parametre** + +`expr` — [İfade](../syntax.md#syntax-expressions) bir numara döndürüyor. + +**Döndürülen değer** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**Örnek** + +``` sql +SELECT kurtPop(value) FROM series_with_value_column +``` + +## kurtSamp {#kurtsamp} + +Hesaplar [örnek kurtoz](https://en.wikipedia.org/wiki/Kurtosis) bir sıra. + +Eğer geçen değerleri örnek oluşturur, eğer bir rassal değişken kurtosis tarafsız bir tahmini temsil eder. + +``` sql +kurtSamp(expr) +``` + +**Parametre** + +`expr` — [İfade](../syntax.md#syntax-expressions) bir numara döndürüyor. + +**Döndürülen değer** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). Eğer `n <= 1` (`n` örnek bir boyutudur), daha sonra işlev döner `nan`. + +**Örnek** + +``` sql +SELECT kurtSamp(value) FROM series_with_value_column +``` + +## timeSeriesGroupSum(uıd, zaman damgası, değer) {#agg-function-timeseriesgroupsum} + +`timeSeriesGroupSum` örnek zaman damgası değil hizalama farklı zaman serileri toplayabilir. +İki örnek zaman damgası arasında doğrusal enterpolasyon kullanacak ve daha sonra zaman serilerini birlikte toplayacaktır. + +- `uid` zaman serisi benzersiz kimliği mi, `UInt64`. +- `timestamp` milisaniye veya mikrosaniye desteklemek için Int64 türüdür. +- `value` metr .iktir. + +İşlev, tuples dizisini döndürür `(timestamp, aggregated_value)` çiftliler. + +Bu işlevi kullanmadan önce emin olun `timestamp` artan düzende. 
+ +Örnek: + +``` text +┌─uid─┬─timestamp─┬─value─┐ +│ 1 │ 2 │ 0.2 │ +│ 1 │ 7 │ 0.7 │ +│ 1 │ 12 │ 1.2 │ +│ 1 │ 17 │ 1.7 │ +│ 1 │ 25 │ 2.5 │ +│ 2 │ 3 │ 0.6 │ +│ 2 │ 8 │ 1.6 │ +│ 2 │ 12 │ 2.4 │ +│ 2 │ 18 │ 3.6 │ +│ 2 │ 24 │ 4.8 │ +└─────┴───────────┴───────┘ +``` + +``` sql +CREATE TABLE time_series( + uid UInt64, + timestamp Int64, + value Float64 +) ENGINE = Memory; +INSERT INTO time_series VALUES + (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5), + (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8); + +SELECT timeSeriesGroupSum(uid, timestamp, value) +FROM ( + SELECT * FROM time_series order by timestamp ASC +); +``` + +Ve sonuç olacak: + +``` text +[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)] +``` + +## timeSeriesGroupRateSum(uıd, ts, val) {#agg-function-timeseriesgroupratesum} + +Benzer şekilde timeSeriesGroupRateSum, timeSeriesGroupRateSum zaman serisi ve daha sonra toplam oranları birlikte oranını hesaplar. +Ayrıca, bu işlevi kullanmadan önce zaman damgası yükseliş sırasına göre olmalıdır. + +Bu işlevi kullanın, yukarıdaki sonuç olacaktır: + +``` text +[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)] +``` + +## avg (x) {#agg_function-avg} + +Ortalama hesaplar. +Sadece sayılar için çalışır. +Sonuç Her zaman Float64. + +## avgWeighted {#avgweighted} + +Hesaplar [ağırlıklı aritmetik ortalama](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean). + +**Sözdizimi** + +``` sql +avgWeighted(x, weight) +``` + +**Parametre** + +- `x` — Values. [Tamsayı](../data_types/int_uint.md) veya [kayan nokta](../data_types/float.md). +- `weight` — Weights of the values. [Tamsayı](../data_types/int_uint.md) veya [kayan nokta](../data_types/float.md). + +Türü `x` ve `weight` aynı olmalıdır. + +**Döndürülen değer** + +- Ağırlıklı ortalama. +- `NaN`. Tüm ağırlıklar 0'a eşitse. + +Tür: [Float64](../data_types/float.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT avgWeighted(x, w) +FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2)) +``` + +Sonuç: + +``` text +┌─avgWeighted(x, weight)─┐ +│ 8 │ +└────────────────────────┘ +``` + +## uniq {#agg_function-uniq} + +Bağımsız değişken farklı değerlerin yaklaşık sayısını hesaplar. + +``` sql +uniq(x[, ...]) +``` + +**Parametre** + +Fonksiyon değişken sayıda parametre alır. Parametreler olabilir `Tuple`, `Array`, `Date`, `DateTime`, `String` veya sayısal türleri. + +**Döndürülen değer** + +- A [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip numarası. + +**Uygulama Detayları** + +İşlev: + +- Toplamdaki tüm parametreler için bir karma hesaplar, daha sonra hesaplamalarda kullanır. + +- Bir adaptif örnekleme algoritması kullanır. Hesaplama durumu için işlev, 65536'ya kadar öğe karma değerlerinin bir örneğini kullanır. + + This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions. + +- Sonucu deterministically sağlar (sorgu işleme sırasına bağlı değildir). + +Bu işlevi hemen hemen tüm senaryolarda kullanmanızı öneririz. + +**Ayrıca Bakınız** + +- [uniqCombined](#agg_function-uniqcombined) +- [uniqCombined64](#agg_function-uniqcombined64) +- [uniqHLL12](#agg_function-uniqhll12) +- [uniqExact](#agg_function-uniqexact) + +## uniqCombined {#agg_function-uniqcombined} + +Farklı bağımsız değişken değerlerinin yaklaşık sayısını hesaplar. 
+ +``` sql +uniqCombined(HLL_precision)(x[, ...]) +``` + +Bu `uniqCombined` fonksiyon, farklı değerlerin sayısını hesaplamak için iyi bir seçimdir. + +**Parametre** + +Fonksiyon değişken sayıda parametre alır. Parametreler olabilir `Tuple`, `Array`, `Date`, `DateTime`, `String` veya sayısal türleri. + +`HLL_precision` hücre sayısının baz-2 logaritmasıdır. [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). İsteğe bağlı olarak işlevi kullanabilirsiniz `uniqCombined(x[, ...])`. İçin varsayılan değer `HLL_precision` etkin bir şekilde 96 KiB alan olan 17'dir (2^17 hücre, her biri 6 bit). + +**Döndürülen değer** + +- Numara [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip numarası. + +**Uygulama Detayları** + +İşlev: + +- Bir karma hesaplar (64-bit karma için `String` ve 32-bit aksi halde) agregadaki tüm parametreler için, hesaplamalarda kullanır. + +- Bir hata düzeltme tablosu ile dizi, karma tablo ve HyperLogLog: üç algoritmaları bir arada kullanır. + + For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory. + +- Sonucu deterministically sağlar (sorgu işleme sırasına bağlı değildir). + +!!! note "Not" + Olmayan için 32-bit karma kullandığından-`String` tipi, sonuç cardinalities önemli ölçüde daha büyük için çok yüksek hata olacak `UINT_MAX` (birkaç on milyarlarca farklı değerden sonra hata hızla artacaktır), bu durumda kullanmanız gerekir [uniqCombined64](#agg_function-uniqcombined64) + +İle karşılaştırıldığında [uniq](#agg_function-uniq) fonksiyonu, `uniqCombined`: + +- Birkaç kez daha az bellek tüketir. +- Birkaç kat daha yüksek doğrulukla hesaplar. +- Genellikle biraz daha düşük performansa sahiptir. Bazı senaryolarda, `uniqCombined` daha iyi performans gösterebilir `uniq` örneğin, ağ üzerinden çok sayıda toplama durumu ileten dağıtılmış sorgularla. + +**Ayrıca Bakınız** + +- [uniq](#agg_function-uniq) +- [uniqCombined64](#agg_function-uniqcombined64) +- [uniqHLL12](#agg_function-uniqhll12) +- [uniqExact](#agg_function-uniqexact) + +## uniqCombined64 {#agg_function-uniqcombined64} + +Aynı olarak [uniqCombined](#agg_function-uniqcombined), ancak tüm veri türleri için 64 bit karma kullanır. + +## uniqHLL12 {#agg_function-uniqhll12} + +Farklı argüman değerlerinin yaklaşık sayısını hesaplar [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algoritma. + +``` sql +uniqHLL12(x[, ...]) +``` + +**Parametre** + +Fonksiyon değişken sayıda parametre alır. Parametreler olabilir `Tuple`, `Array`, `Date`, `DateTime`, `String` veya sayısal türleri. + +**Döndürülen değer** + +- A [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip numarası. + +**Uygulama Detayları** + +İşlev: + +- Toplamdaki tüm parametreler için bir karma hesaplar, daha sonra hesaplamalarda kullanır. + +- Farklı bağımsız değişken değerlerinin sayısını yaklaştırmak için HyperLogLog algoritmasını kullanır. + + 212 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements). + +- Belirli sonucu sağlar (sorgu işleme sırasına bağlı değildir). + +Bu işlevi kullanmanızı önermiyoruz. 
Çoğu durumda, kullan [uniq](#agg_function-uniq) veya [uniqCombined](#agg_function-uniqcombined) işlev. + +**Ayrıca Bakınız** + +- [uniq](#agg_function-uniq) +- [uniqCombined](#agg_function-uniqcombined) +- [uniqExact](#agg_function-uniqexact) + +## uniqExact {#agg_function-uniqexact} + +Farklı bağımsız değişken değerlerinin tam sayısını hesaplar. + +``` sql +uniqExact(x[, ...]) +``` + +Kullan... `uniqExact` kesinlikle kesin bir sonuca ihtiyacınız varsa işlev. Aksi takdirde kullanın [uniq](#agg_function-uniq) işlev. + +Bu `uniqExact` fonksiyonu daha fazla bellek kullanır `uniq`, çünkü farklı değerlerin sayısı arttıkça devletin büyüklüğü sınırsız büyümeye sahiptir. + +**Parametre** + +Fonksiyon değişken sayıda parametre alır. Parametreler olabilir `Tuple`, `Array`, `Date`, `DateTime`, `String` veya sayısal türleri. + +**Ayrıca Bakınız** + +- [uniq](#agg_function-uniq) +- [uniqCombined](#agg_function-uniqcombined) +- [uniqHLL12](#agg_function-uniqhll12) + +## groupArray (x), groupArray (max\_size)(x) {#agg_function-grouparray} + +Bağımsız değişken değerleri dizisi oluşturur. +Değerler diziye herhangi bir (belirsiz) sırayla eklenebilir. + +İkinci versiyonu (ile `max_size` parametre), elde edilen dizinin boyutunu sınırlar `max_size` öğeler. +Mesela, `groupArray (1) (x)` eşdeğ toer equivalentdir `[any (x)]`. + +Bazı durumlarda, hala yürütme sırasına güvenebilirsiniz. Bu, aşağıdaki durumlar için geçerlidir `SELECT` kullanan bir alt sorgudan gelir `ORDER BY`. + +## grouparrayınsertat (değer, konum) {#grouparrayinsertatvalue-position} + +Belirtilen konumda diziye bir değer ekler. + +!!! note "Not" + Bu işlev, SQL dizileri için geleneksel tek tabanlı konumların aksine sıfır tabanlı konumlar kullanır. + +Accepts the value and position as input. If several values ​​are inserted into the same position, any of them might end up in the resulting array (the first one will be used in the case of single-threaded execution). If no value is inserted into a position, the position is assigned the default value. + +İsteğe bağlı parametreler: + +- Boş pozisyonlarda ikame için varsayılan değer. +- Elde edilen dizinin uzunluğu. Bu, tüm toplama anahtarları için aynı boyuttaki dizileri almanızı sağlar. Bu parametreyi kullanırken, varsayılan değer belirtilmelidir. + +## groupArrayMovingSum {#agg_function-grouparraymovingsum} + +Giriş değerlerinin hareketli toplamını hesaplar. + +``` sql +groupArrayMovingSum(numbers_for_summing) +groupArrayMovingSum(window_size)(numbers_for_summing) +``` + +İşlev, pencere boyutunu bir parametre olarak alabilir. Belirtilmemiş bırakılırsa, işlev, sütundaki satır sayısına eşit pencere boyutunu alır. + +**Parametre** + +- `numbers_for_summing` — [İfade](../syntax.md#syntax-expressions) sayısal veri türü değeri ile sonuçlanır. +- `window_size` — Size of the calculation window. + +**Döndürülen değerler** + +- Giriş verileri ile aynı boyut ve türdeki dizi. 
+ +**Örnek** + +Örnek tablo: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +Sorgu: + +``` sql +SELECT + groupArrayMovingSum(int) AS I, + groupArrayMovingSum(float) AS F, + groupArrayMovingSum(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingSum(2)(int) AS I, + groupArrayMovingSum(2)(float) AS F, + groupArrayMovingSum(2)(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +## groupArrayMovingAvg {#agg_function-grouparraymovingavg} + +Giriş değerlerinin hareketli ortalamasını hesaplar. + +``` sql +groupArrayMovingAvg(numbers_for_summing) +groupArrayMovingAvg(window_size)(numbers_for_summing) +``` + +İşlev, pencere boyutunu bir parametre olarak alabilir. Belirtilmemiş bırakılırsa, işlev, sütundaki satır sayısına eşit pencere boyutunu alır. + +**Parametre** + +- `numbers_for_summing` — [İfade](../syntax.md#syntax-expressions) sayısal veri türü değeri ile sonuçlanır. +- `window_size` — Size of the calculation window. + +**Döndürülen değerler** + +- Giriş verileri ile aynı boyut ve türdeki dizi. + +İşlev kullanır [sıfıra doğru yuvarlama](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). Sonuç veri türü için önemsiz ondalık basamaklar keser. + +**Örnek** + +Örnek tablo `b`: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +Sorgu: + +``` sql +SELECT + groupArrayMovingAvg(int) AS I, + groupArrayMovingAvg(float) AS F, + groupArrayMovingAvg(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐ +│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │ +└───────────┴─────────────────────────────────────┴───────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingAvg(2)(int) AS I, + groupArrayMovingAvg(2)(float) AS F, + groupArrayMovingAvg(2)(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐ +│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │ +└───────────┴──────────────────────────────────┴───────────────────────┘ +``` + +## groupUniqArray (x), groupUniqArray (max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex} + +Farklı bağımsız değişken değerlerinden bir dizi oluşturur. Bellek tüketimi için aynıdır `uniqExact` işlev. + +İkinci versiyonu (ile `max_size` parametre), elde edilen dizinin boyutunu sınırlar `max_size` öğeler. +Mesela, `groupUniqArray(1)(x)` eşdeğ toer equivalentdir `[any(x)]`. + +## quantile {#quantile} + +Yaklaşık hesaplar [quantile](https://en.wikipedia.org/wiki/Quantile) sayısal veri dizisinin. 
+
+Bu işlev, 8192'ye kadar bir rezervuar boyutu ve örnekleme için rastgele sayı üreteci ile [rezervuar örneklemesi](https://en.wikipedia.org/wiki/Reservoir_sampling) uygular. Sonuç deterministik değildir. Tam bir miktar elde etmek için [quantileExact](#quantileexact) işlevini kullanın.
+
+Bir sorguda farklı düzeylerde birden çok `quantile*` işlevi kullanıldığında, iç durumları birleştirilmez (yani sorgu, olabileceğinden daha az verimli çalışır). Bu durumda [quantiles](#quantiles) işlevini kullanın.
+
+**Sözdizimi**
+
+``` sql
+quantile(level)(expr)
+```
+
+Takma ad: `median`.
+
+**Parametre**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. `level` için `[0.01, 0.99]` aralığında bir değer kullanmanızı öneririz. Varsayılan değer: 0.5. `level=0.5` için fonksiyon [medyan](https://en.wikipedia.org/wiki/Median) hesaplar.
+- `expr` — Expression over the column values resulting in numeric [veri türleri](../../sql_reference/data_types/index.md#data_types), [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md).
+
+**Döndürülen değer**
+
+- Belirtilen seviyenin yaklaşık miktarı.
+
+Tür:
+
+- [Float64](../../sql_reference/data_types/float.md), sayısal veri türü girişi için.
+- [Tarihli](../../sql_reference/data_types/date.md), giriş değerleri `Date` türündeyse.
+- [DateTime](../../sql_reference/data_types/datetime.md), giriş değerleri `DateTime` türündeyse.
+
+**Örnek**
+
+Giriş tablosu:
+
+``` text
+┌─val─┐
+│   1 │
+│   1 │
+│   2 │
+│   3 │
+└─────┘
+```
+
+Sorgu:
+
+``` sql
+SELECT quantile(val) FROM t
+```
+
+Sonuç:
+
+``` text
+┌─quantile(val)─┐
+│           1.5 │
+└───────────────┘
+```
+
+**Ayrıca Bakınız**
+
+- [medyan](#median)
+- [quantiles](#quantiles)
+
+## quantileDeterministic {#quantiledeterministic}
+
+Sayısal bir veri dizisinin yaklaşık [quantile](https://en.wikipedia.org/wiki/Quantile) değerini hesaplar.
+
+Bu işlev, 8192'ye kadar bir rezervuar boyutu ve deterministik bir örnekleme algoritması ile [rezervuar örneklemesi](https://en.wikipedia.org/wiki/Reservoir_sampling) uygular. Sonuç deterministiktir. Tam bir miktar elde etmek için [quantileExact](#quantileexact) işlevini kullanın.
+
+Bir sorguda farklı düzeylerde birden çok `quantile*` işlevi kullanıldığında, iç durumları birleştirilmez (yani sorgu, olabileceğinden daha az verimli çalışır). Bu durumda [quantiles](#quantiles) işlevini kullanın.
+
+**Sözdizimi**
+
+``` sql
+quantileDeterministic(level)(expr, determinator)
+```
+
+Takma ad: `medianDeterministic`.
+
+**Parametre**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. `level` için `[0.01, 0.99]` aralığında bir değer kullanmanızı öneririz. Varsayılan değer: 0.5. `level=0.5` için fonksiyon [medyan](https://en.wikipedia.org/wiki/Median) hesaplar.
+- `expr` — Expression over the column values resulting in numeric [veri türleri](../../sql_reference/data_types/index.md#data_types), [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md).
+- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occurs too often, the function works incorrectly.
+
+**Döndürülen değer**
+
+- Belirtilen seviyenin yaklaşık miktarı.
+ +Tür: + +- [Float64](../../sql_reference/data_types/float.md) sayısal veri türü girişi için. +- [Tarihli](../../sql_reference/data_types/date.md) giriş değerleri varsa `Date` tür. +- [DateTime](../../sql_reference/data_types/datetime.md) giriş değerleri varsa `DateTime` tür. + +**Örnek** + +Giriş tablosu: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +Sorgu: + +``` sql +SELECT quantileDeterministic(val, 1) FROM t +``` + +Sonuç: + +``` text +┌─quantileDeterministic(val, 1)─┐ +│ 1.5 │ +└───────────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [medyan](#median) +- [quantiles](#quantiles) + +## quantileExact {#quantileexact} + +Tam olarak hesaplar [quantile](https://en.wikipedia.org/wiki/Quantile) sayısal veri dizisinin. + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` bellek, nerede `n` geçirilen değerler say .ısıdır. Bununla birlikte, az sayıda değer için, işlev çok etkilidir. + +Çoklu kullanırken `quantile*` bir sorguda farklı düzeylerde işlevler, iç durumları birleştirilmez (diğer bir deyişle, sorgu olabilir daha az verimli çalışır). Bu durumda, kullan [quantiles](#quantiles) işlev. + +**Sözdizimi** + +``` sql +quantileExact(level)(expr) +``` + +Takma ad: `medianExact`. + +**Parametre** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` aralığında değer `[0.01, 0.99]`. Varsayılan değer: 0.5. Yanında `level=0.5` fonksiyon hesaplar [medyan](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [veri türleri](../../sql_reference/data_types/index.md#data_types), [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md). + +**Döndürülen değer** + +- Belirtilen seviyenin miktarı. + +Tür: + +- [Float64](../../sql_reference/data_types/float.md) sayısal veri türü girişi için. +- [Tarihli](../../sql_reference/data_types/date.md) giriş değerleri varsa `Date` tür. +- [DateTime](../../sql_reference/data_types/datetime.md) giriş değerleri varsa `DateTime` tür. + +**Örnek** + +Sorgu: + +``` sql +SELECT quantileExact(number) FROM numbers(10) +``` + +Sonuç: + +``` text +┌─quantileExact(number)─┐ +│ 5 │ +└───────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [medyan](#median) +- [quantiles](#quantiles) + +## quantilexactweighted {#quantileexactweighted} + +Tam olarak hesaplar [quantile](https://en.wikipedia.org/wiki/Quantile) her elemanın ağırlığını dikkate alarak sayısal bir veri dizisinin. + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values ​​are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). Bunun yerine bu işlevi kullanabilirsiniz `quantileExact` ve 1 ağırlığını belirtin. + +Çoklu kullanırken `quantile*` bir sorguda farklı düzeylerde işlevler, iç durumları birleştirilmez (diğer bir deyişle, sorgu olabilir daha az verimli çalışır). Bu durumda, kullan [quantiles](#quantiles) işlev. + +**Sözdizimi** + +``` sql +quantileExactWeighted(level)(expr, weight) +``` + +Takma ad: `medianExactWeighted`. + +**Parametre** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. 
`level` için `[0.01, 0.99]` aralığında bir değer kullanmanızı öneririz. Varsayılan değer: 0.5. `level=0.5` için fonksiyon [medyan](https://en.wikipedia.org/wiki/Median) hesaplar.
+- `expr` — Sonucu sayısal [veri türleri](../../sql_reference/data_types/index.md#data_types), [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md) olan, sütun değerleri üzerinde bir ifade.
+- `weight` — Dizi üyelerinin ağırlıklarını içeren sütun. Ağırlık, bir değerin kaç kez geçtiğini belirtir.
+
+**Döndürülen değer**
+
+- Belirtilen seviyenin kantili.
+
+Tür:
+
+- [Float64](../../sql_reference/data_types/float.md) sayısal veri türü girişi için.
+- [Tarihli](../../sql_reference/data_types/date.md) giriş değerleri `Date` türündeyse.
+- [DateTime](../../sql_reference/data_types/datetime.md) giriş değerleri `DateTime` türündeyse.
+
+**Örnek**
+
+Giriş tablosu:
+
+``` text
+┌─n─┬─val─┐
+│ 0 │ 3 │
+│ 1 │ 2 │
+│ 2 │ 1 │
+│ 5 │ 4 │
+└───┴─────┘
+```
+
+Sorgu:
+
+``` sql
+SELECT quantileExactWeighted(n, val) FROM t
+```
+
+Sonuç:
+
+``` text
+┌─quantileExactWeighted(n, val)─┐
+│ 1 │
+└───────────────────────────────┘
+```
+
+**Ayrıca Bakınız**
+
+- [medyan](#median)
+- [quantiles](#quantiles)
+
+## quantileTiming {#quantiletiming}
+
+Sayısal bir veri dizisinin [quantile](https://en.wikipedia.org/wiki/Quantile) değerini belirlenen hassasiyetle hesaplar.
+
+Sonuç deterministiktir (sorgu işleme sırasına bağlı değildir). Fonksiyon, web sayfası yükleme süreleri veya arka uç yanıt süreleri gibi dağılımları tanımlayan dizilerle çalışmak için optimize edilmiştir.
+
+Bir sorguda farklı düzeylere sahip birden çok `quantile*` işlevi kullanıldığında iç durumları birleştirilmez (yani sorgu, olabileceğinden daha az verimli çalışır). Bu durumda [quantiles](#quantiles) işlevini kullanın.
+
+**Sözdizimi**
+
+``` sql
+quantileTiming(level)(expr)
+```
+
+Takma ad: `medianTiming`.
+
+**Parametre**
+
+- `level` — Kantil seviyesi. İsteğe bağlı parametre. 0 ile 1 arasında sabit bir kayan noktalı sayı. `level` için `[0.01, 0.99]` aralığında bir değer kullanmanızı öneririz. Varsayılan değer: 0.5. `level=0.5` için fonksiyon [medyan](https://en.wikipedia.org/wiki/Median) hesaplar.
+
+- `expr` — Sütun değerleri üzerinde, [Float\*](../../sql_reference/data_types/float.md) türünde bir sayı döndüren bir [İfade](../syntax.md#syntax-expressions).
+
+    - İşleve negatif değerler geçirilirse davranış tanımsızdır.
+    - Değer 30.000'den büyükse (30 saniyeden uzun bir sayfa yükleme süresi), 30.000 olarak kabul edilir.
+
+**Doğruluk**
+
+Hesaplama şu durumlarda doğrudur:
+
+- Toplam değer sayısı 5670'i geçmiyorsa.
+- Toplam değer sayısı 5670'i aşıyor ancak sayfa yükleme süresi 1024 ms'den kısaysa.
+
+Aksi takdirde hesaplamanın sonucu, 16 ms'nin en yakın katına yuvarlanır.
+
+!!! note "Not"
+    Sayfa yükleme süresi kantillerini hesaplamak için bu işlev, [quantile](#quantile) işlevinden daha verimli ve daha doğrudur.
+
+**Döndürülen değer**
+
+- Belirtilen seviyenin kantili.
+
+Tür: `Float32`.
+
+!!! note "Not"
+    İşleve hiçbir değer geçirilmemişse (örneğin `quantileTimingIf` kullanılırken), [NaN](../../sql_reference/data_types/float.md#data_type-float-nan-inf) döndürülür. Bunun amacı, bu durumları sonucu sıfır olan durumlardan ayırmaktır. `NaN` değerlerinin sıralanmasıyla ilgili notlar için bkz. [ORDER BY yan tümcesi](../statements/select.md#select-order-by).
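+
+Örneğin, aşağıdaki küçük taslak sorguda (varsayımsal bir örnek) koşul hiçbir satır için sağlanmadığından işleve hiç değer geçirilmez; yukarıdaki nota göre sonucun `nan` olması beklenir:
+
+``` sql
+SELECT quantileTimingIf(number, number > 100) FROM numbers(10)
+```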
+
+**Örnek**
+
+Giriş tablosu:
+
+``` text
+┌─response_time─┐
+│ 72 │
+│ 112 │
+│ 126 │
+│ 145 │
+│ 104 │
+│ 242 │
+│ 313 │
+│ 168 │
+│ 108 │
+└───────────────┘
+```
+
+Sorgu:
+
+``` sql
+SELECT quantileTiming(response_time) FROM t
+```
+
+Sonuç:
+
+``` text
+┌─quantileTiming(response_time)─┐
+│ 126 │
+└───────────────────────────────┘
+```
+
+**Ayrıca Bakınız**
+
+- [medyan](#median)
+- [quantiles](#quantiles)
+
+## quantileTimingWeighted {#quantiletimingweighted}
+
+Sayısal bir veri dizisinin [quantile](https://en.wikipedia.org/wiki/Quantile) değerini, her dizi üyesinin ağırlığını dikkate alarak belirlenen hassasiyetle hesaplar.
+
+Sonuç deterministiktir (sorgu işleme sırasına bağlı değildir). Fonksiyon, web sayfası yükleme süreleri veya arka uç yanıt süreleri gibi dağılımları tanımlayan dizilerle çalışmak için optimize edilmiştir.
+
+Bir sorguda farklı düzeylere sahip birden çok `quantile*` işlevi kullanıldığında iç durumları birleştirilmez (yani sorgu, olabileceğinden daha az verimli çalışır). Bu durumda [quantiles](#quantiles) işlevini kullanın.
+
+**Sözdizimi**
+
+``` sql
+quantileTimingWeighted(level)(expr, weight)
+```
+
+Takma ad: `medianTimingWeighted`.
+
+**Parametre**
+
+- `level` — Kantil seviyesi. İsteğe bağlı parametre. 0 ile 1 arasında sabit bir kayan noktalı sayı. `level` için `[0.01, 0.99]` aralığında bir değer kullanmanızı öneririz. Varsayılan değer: 0.5. `level=0.5` için fonksiyon [medyan](https://en.wikipedia.org/wiki/Median) hesaplar.
+
+- `expr` — Sütun değerleri üzerinde, [Float\*](../../sql_reference/data_types/float.md) türünde bir sayı döndüren bir [İfade](../syntax.md#syntax-expressions).
+
+    - İşleve negatif değerler geçirilirse davranış tanımsızdır.
+    - Değer 30.000'den büyükse (30 saniyeden uzun bir sayfa yükleme süresi), 30.000 olarak kabul edilir.
+
+- `weight` — Dizi öğelerinin ağırlıklarını içeren sütun. Ağırlık, bir değerin kaç kez geçtiğini belirtir.
+
+**Doğruluk**
+
+Hesaplama şu durumlarda doğrudur:
+
+- Toplam değer sayısı 5670'i geçmiyorsa.
+- Toplam değer sayısı 5670'i aşıyor ancak sayfa yükleme süresi 1024 ms'den kısaysa.
+
+Aksi takdirde hesaplamanın sonucu, 16 ms'nin en yakın katına yuvarlanır.
+
+!!! note "Not"
+    Sayfa yükleme süresi kantillerini hesaplamak için bu işlev, [quantile](#quantile) işlevinden daha verimli ve daha doğrudur.
+
+**Döndürülen değer**
+
+- Belirtilen seviyenin kantili.
+
+Tür: `Float32`.
+
+!!! note "Not"
+    İşleve hiçbir değer geçirilmemişse (örneğin `quantileTimingIf` kullanılırken), [NaN](../../sql_reference/data_types/float.md#data_type-float-nan-inf) döndürülür. Bunun amacı, bu durumları sonucu sıfır olan durumlardan ayırmaktır. `NaN` değerlerinin sıralanmasıyla ilgili notlar için bkz. [ORDER BY yan tümcesi](../statements/select.md#select-order-by).
+
+**Örnek**
+
+Giriş tablosu:
+
+``` text
+┌─response_time─┬─weight─┐
+│ 68 │ 1 │
+│ 104 │ 2 │
+│ 112 │ 3 │
+│ 126 │ 2 │
+│ 138 │ 1 │
+│ 162 │ 1 │
+└───────────────┴────────┘
+```
+
+Sorgu:
+
+``` sql
+SELECT quantileTimingWeighted(response_time, weight) FROM t
+```
+
+Sonuç:
+
+``` text
+┌─quantileTimingWeighted(response_time, weight)─┐
+│ 112 │
+└───────────────────────────────────────────────┘
+```
+
+**Ayrıca Bakınız**
+
+- [medyan](#median)
+- [quantiles](#quantiles)
+
+## quantileTDigest {#quantiletdigest}
+
+Sayısal bir veri dizisinin [quantile](https://en.wikipedia.org/wiki/Quantile) değerini [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algoritmasını kullanarak yaklaşık olarak hesaplar.
+
+Maksimum hata %1'dir. Bellek tüketimi `log(n)` ile orantılıdır; burada `n`, değer sayısıdır. 
Sonuç, sorguyu çalıştırma sırasına bağlıdır ve nondeterministic. + +Fonksiyonun performansı, performanstan daha düşüktür [quantile](#quantile) veya [quantileTiming](#quantiletiming). Durum boyutunun hassasiyete oranı açısından, bu işlev çok daha iyidir `quantile`. + +Çoklu kullanırken `quantile*` bir sorguda farklı düzeylerde işlevler, iç durumları birleştirilmez (diğer bir deyişle, sorgu olabilir daha az verimli çalışır). Bu durumda, kullan [quantiles](#quantiles) işlev. + +**Sözdizimi** + +``` sql +quantileTDigest(level)(expr) +``` + +Takma ad: `medianTDigest`. + +**Parametre** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` aralığında değer `[0.01, 0.99]`. Varsayılan değer: 0.5. Yanında `level=0.5` fonksiyon hesaplar [medyan](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [veri türleri](../../sql_reference/data_types/index.md#data_types), [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md). + +**Döndürülen değer** + +- Belirtilen seviyenin yaklaşık miktarı. + +Tür: + +- [Float64](../../sql_reference/data_types/float.md) sayısal veri türü girişi için. +- [Tarihli](../../sql_reference/data_types/date.md) giriş değerleri varsa `Date` tür. +- [DateTime](../../sql_reference/data_types/datetime.md) giriş değerleri varsa `DateTime` tür. + +**Örnek** + +Sorgu: + +``` sql +SELECT quantileTDigest(number) FROM numbers(10) +``` + +Sonuç: + +``` text +┌─quantileTDigest(number)─┐ +│ 4.5 │ +└─────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [medyan](#median) +- [quantiles](#quantiles) + +## quantileTDigestWeighted {#quantiletdigestweighted} + +Yaklaşık hesaplar [quantile](https://en.wikipedia.org/wiki/Quantile) kullanarak sayısal veri diz ofisinin [t-dig -est](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algoritma. İşlev, her sıra üyesinin ağırlığını dikkate alır. Maksimum hata %1'dir. Bellek tüketimi `log(n)`, nere `n` değer say isısıdır. + +Fonksiyonun performansı, performanstan daha düşüktür [quantile](#quantile) veya [quantileTiming](#quantiletiming). Durum boyutunun hassasiyete oranı açısından, bu işlev çok daha iyidir `quantile`. + +Sonuç, sorguyu çalıştırma sırasına bağlıdır ve nondeterministic. + +Çoklu kullanırken `quantile*` bir sorguda farklı düzeylerde işlevler, iç durumları birleştirilmez (diğer bir deyişle, sorgu olabilir daha az verimli çalışır). Bu durumda, kullan [quantiles](#quantiles) işlev. + +**Sözdizimi** + +``` sql +quantileTDigest(level)(expr) +``` + +Takma ad: `medianTDigest`. + +**Parametre** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` aralığında değer `[0.01, 0.99]`. Varsayılan değer: 0.5. Yanında `level=0.5` fonksiyon hesaplar [medyan](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [veri türleri](../../sql_reference/data_types/index.md#data_types), [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md). +- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. + +**Döndürülen değer** + +- Belirtilen seviyenin yaklaşık miktarı. + +Tür: + +- [Float64](../../sql_reference/data_types/float.md) sayısal veri türü girişi için. +- [Tarihli](../../sql_reference/data_types/date.md) giriş değerleri varsa `Date` tür. 
+- [DateTime](../../sql_reference/data_types/datetime.md) giriş değerleri varsa `DateTime` tür. + +**Örnek** + +Sorgu: + +``` sql +SELECT quantileTDigestWeighted(number, 1) FROM numbers(10) +``` + +Sonuç: + +``` text +┌─quantileTDigestWeighted(number, 1)─┐ +│ 4.5 │ +└────────────────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [medyan](#median) +- [quantiles](#quantiles) + +## medyan {#median} + +Bu `median*` fonksiyonlar karşılık gelen takma adlardır `quantile*` işlevler. Sayısal bir veri örneğinin medyanını hesaplarlar. + +İşlevler: + +- `median` — Alias for [quantile](#quantile). +- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic). +- `medianExact` — Alias for [quantileExact](#quantileexact). +- `medianExactWeighted` — Alias for [quantilexactweighted](#quantileexactweighted). +- `medianTiming` — Alias for [quantileTiming](#quantiletiming). +- `medianTimingWeighted` — Alias for [niceletimingweighted](#quantiletimingweighted). +- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest). +- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted). + +**Örnek** + +Giriş tablosu: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +Sorgu: + +``` sql +SELECT medianDeterministic(val, 1) FROM t +``` + +Sonuç: + +``` text +┌─medianDeterministic(val, 1)─┐ +│ 1.5 │ +└─────────────────────────────┘ +``` + +## quantiles(level1, level2, …)(x) {#quantiles} + +Tüm quantile fonksiyonları da karşılık gelen quantile fonksiyonlarına sahiptir: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. Bu işlevler, listelenen seviyelerin tüm nicelerini tek geçişte hesaplar ve elde edilen değerlerin bir dizisini döndürür. + +## varSamp (x) {#varsampx} + +Miktarı hesaplar `Σ((x - x̅)^2) / (n - 1)`, nere `n` örneklem büyüklüğü ve `x̅`ortalama değer isidir `x`. + +Bir rassal değişkenin varyansının tarafsız bir tahminini temsil eder, eğer geçirilen değerler numunesini oluşturursa. + +Dönüşler `Float64`. Ne zaman `n <= 1`, dönüşler `+∞`. + +## varPop (x) {#varpopx} + +Miktarı hesaplar `Σ((x - x̅)^2) / n`, nere `n` örneklem büyüklüğü ve `x̅`ortalama değer isidir `x`. + +Başka bir deyişle, bir dizi değer için dağılım. Dönüşler `Float64`. + +## stddevSamp(x) {#stddevsampx} + +Sonuç kareköküne eşittir `varSamp(x)`. + +## stddevPop(x) {#stddevpopx} + +Sonuç kareköküne eşittir `varPop(x)`. + +## topK (N) (x) {#topknx} + +Belirtilen sütundaki yaklaşık en sık değerleri bir dizi döndürür. Elde edilen dizi, değerlerin yaklaşık frekansının azalan sırasına göre sıralanır (değerlerin kendileri tarafından değil). + +Uygular [Filtrelenmiş Yerden Tasarruf](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) TopK analiz etmek için algoritma, azaltmak ve birleştirmek algoritması dayalı [Paralel Alan Tasarrufu](https://arxiv.org/pdf/1401.0702.pdf). + +``` sql +topK(N)(column) +``` + +Bu işlev garantili bir sonuç sağlamaz. Bazı durumlarda, hatalar oluşabilir ve en sık kullanılan değerler olmayan sık değerler döndürebilir. + +Biz kullanmanızı öneririz `N < 10` değer; performans büyük ile azalır `N` değerler. Maksimum değeri `N = 65536`. + +**Parametre** + +- ‘N’ dönmek için Öğe sayısıdır. + +Parametre atlanırsa, varsayılan değer 10 kullanılır. + +**Değişkenler** + +- ' x ' – The value to calculate frequency. 
+ +**Örnek** + +Tak thee the [OnTime](../../getting_started/example_datasets/ontime.md) veri kümesi ve üç en sık oluşan değerleri seçin `AirlineID` sütun. + +``` sql +SELECT topK(3)(AirlineID) AS res +FROM ontime +``` + +``` text +┌─res─────────────────┐ +│ [19393,19790,19805] │ +└─────────────────────┘ +``` + +## topKWeighted {#topkweighted} + +Benzer `topK` ancak tamsayı türünde bir ek argüman alır - `weight`. Her değer muhasebeleştirilir `weight` frekans hesaplaması için zamanlar. + +**Sözdizimi** + +``` sql +topKWeighted(N)(x, weight) +``` + +**Parametre** + +- `N` — The number of elements to return. + +**Değişkenler** + +- `x` – The value. +- `weight` — The weight. [Uİnt8](../../sql_reference/data_types/int_uint.md). + +**Döndürülen değer** + +Maksimum yaklaşık ağırlık toplamına sahip değerlerin bir dizisini döndürür. + +**Örnek** + +Sorgu: + +``` sql +SELECT topKWeighted(10)(number, number) FROM numbers(1000) +``` + +Sonuç: + +``` text +┌─topKWeighted(10)(number, number)──────────┐ +│ [999,998,997,996,995,994,993,992,991,990] │ +└───────────────────────────────────────────┘ +``` + +## covarSamp(x, y) {#covarsampx-y} + +Değerini hesaplar `Σ((x - x̅)(y - y̅)) / (n - 1)`. + +Float64 Döndürür. Ne zaman `n <= 1`, returns +∞. + +## covarPop (x, y) {#covarpopx-y} + +Değerini hesaplar `Σ((x - x̅)(y - y̅)) / n`. + +## corr(x, y) {#corrx-y} + +Pearson korelasyon katsayısını hesaplar: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`. + +## categoricalınformationvalue {#categoricalinformationvalue} + +Değerini hesaplar `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` her kategori için. + +``` sql +categoricalInformationValue(category1, category2, ..., tag) +``` + +Sonuç, ayrık (kategorik) bir özelliğin nasıl olduğunu gösterir `[category1, category2, ...]` değerini öngör aen bir öğrenme modeline katkıda `tag`. + +## simpleLinearRegression {#simplelinearregression} + +Basit (tek boyutlu) doğrusal regresyon gerçekleştirir. + +``` sql +simpleLinearRegression(x, y) +``` + +Parametre: + +- `x` — Column with dependent variable values. +- `y` — Column with explanatory variable values. + +Döndürülen değerler: + +Devamlılar `(a, b)` ortaya çıkan hat linetın `y = a*x + b`. + +**Örnekler** + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐ +│ (1,0) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐ +│ (1,3) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## stokastiklinearregression {#agg_functions-stochasticlinearregression} + +Bu fonksiyon stokastik doğrusal regresyon uygular. Öğrenme oranı, L2 regularization katsayısı, mini-batch boyutu için özel parametreleri destekler ve ağırlıkları güncellemek için birkaç yöntem vardır ([Adem](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (varsayılan olarak kullanılır), [basit SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [İvme](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). + +### Parametre {#agg_functions-stochasticlinearregression-parameters} + +4 özelleştirilebilir parametre vardır. 
Bunlar işleve sırayla geçirilir; dördünün de geçirilmesi zorunlu değildir, geçirilmeyenler için varsayılan değerler kullanılır. Ancak iyi bir model elde etmek için bazı parametrelerin ayarlanması gerekir.
+
+``` text
+stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. `learning rate`, gradyan iniş adımı gerçekleştirilirken adım uzunluğunun katsayısıdır. Çok büyük bir öğrenme oranı, modelin ağırlıklarının sonsuza gitmesine neden olabilir. Varsayılan değer: `0.00001`.
+2. `l2 regularization coefficient`, aşırı öğrenmeyi (overfitting) önlemeye yardımcı olabilir. Varsayılan değer: `0.1`.
+3. `mini-batch size`, gradyanların hesaplanıp gradyan inişinin tek bir adımını gerçekleştirmek üzere toplanacağı öğe sayısını belirler. Saf stokastik iniş tek bir öğe kullanır; ancak küçük partiler (yaklaşık 10 öğe) kullanmak gradyan adımlarını daha kararlı hale getirir. Varsayılan değer: `15`.
+4. `method for updating weights` için seçenekler: `Adam` (varsayılan), `SGD`, `Momentum`, `Nesterov`. `Momentum` ve `Nesterov` biraz daha fazla hesaplama ve bellek gerektirir; buna karşılık stokastik gradyan yöntemlerinin yakınsama hızı ve kararlılığı açısından yararlıdır.
+
+### Kullanma {#agg_functions-stochasticlinearregression-usage}
+
+`stochasticLinearRegression` iki adımda kullanılır: modelin uydurulması (eğitilmesi) ve yeni veriler üzerinde tahmin yapılması. Modeli uydurmak ve durumunu (model ağırlıkları vb.) daha sonra kullanmak üzere kaydetmek için, durumu kaydeden `-State` birleştiricisini kullanırız.
+Tahmin için, bir durumu ve tahminde kullanılacak özellikleri argüman olarak alan [evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod) fonksiyonunu kullanırız.
+
+**1.** Uydurma
+
+Şöyle bir sorgu kullanılabilir:
+
+``` sql
+CREATE TABLE IF NOT EXISTS train_data
+(
+    param1 Float64,
+    param2 Float64,
+    target Float64
+) ENGINE = Memory;
+
+CREATE TABLE your_model ENGINE = Memory AS SELECT
+stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+AS state FROM train_data;
+```
+
+Ayrıca `train_data` tablosuna veri eklememiz gerekir. Parametre sayısı sabit değildir; yalnızca `linearRegressionState` fonksiyonuna geçirilen argüman sayısına bağlıdır. Tüm değerler sayısal olmalıdır.
+Hedef değeri içeren sütunun (tahmin etmeyi öğrenmek istediğimiz değer) ilk argüman olarak verildiğini unutmayın.
+
+**2.** Tahmin
+
+Bir durumu tabloya kaydettikten sonra, onu tahmin için birden çok kez kullanabilir, hatta başka durumlarla birleştirerek yeni ve daha iyi modeller oluşturabiliriz.
+
+``` sql
+WITH (SELECT state FROM your_model) AS model SELECT
+evalMLMethod(model, param1, param2) FROM test_data
+```
+
+Sorgu, tahmin edilen değerlerden oluşan bir sütun döndürür. `evalMLMethod` fonksiyonunun ilk argümanının bir `AggregateFunctionState` nesnesi, sonrakilerin ise özellik sütunları olduğuna dikkat edin.
+
+`test_data`, `train_data` gibi bir tablodur ancak hedef değeri içermeyebilir.
+
+### Not {#agg_functions-stochasticlinearregression-notes}
+
+1. İki modeli birleştirmek için kullanıcı şöyle bir sorgu oluşturabilir (bu listeden sonraki taslağa da bakın):
+   `sql SELECT state1 + state2 FROM your_models`
+   Burada `your_models` tablosu her iki modeli de içerir. Bu sorgu yeni bir `AggregateFunctionState` nesnesi döndürür.
+
+2. `-State` birleştiricisi kullanılmadığında kullanıcı, modeli kaydetmeden, uydurulan modelin ağırlıklarını kendi amaçları için alabilir.
+   `sql SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`
+   Bu sorgu modeli uydurur ve ağırlıklarını döndürür: önce modelin parametrelerine karşılık gelen ağırlıklar, en sonda da yanlılık (bias) gelir. Yani yukarıdaki örnekte sorgu, 3 değer içeren bir sütun döndürür.
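+
+Aşağıda, 1 numaralı nottaki birleştirme yaklaşımını gösteren küçük bir taslak verilmiştir. `train_data_part1`, `train_data_part2` ve `your_models` tabloları varsayımsaldır; ilk ikisinin yukarıdaki `train_data` ile aynı şemaya sahip olduğu varsayılmaktadır:
+
+``` sql
+-- İki ayrı veri parçası üzerinde uydurulan model durumlarını tek bir tabloda saklayın.
+CREATE TABLE your_models ENGINE = Memory AS
+SELECT
+    (SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+     FROM train_data_part1) AS state1,
+    (SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+     FROM train_data_part2) AS state2;
+
+-- İki durumun toplanması yeni bir AggregateFunctionState nesnesi döndürür.
+SELECT state1 + state2 FROM your_models;
+```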
+
+**Ayrıca Bakınız**
+
+- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
+- [Doğrusal ve lojistik regresyonlar arasındaki fark](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
+
+Bu işlev stokastik lojistik regresyon uygular. İkili sınıflandırma problemleri için kullanılabilir; `stochasticLinearRegression` ile aynı özel parametreleri destekler ve aynı şekilde çalışır.
+
+### Parametre {#agg_functions-stochasticlogisticregression-parameters}
+
+Parametreler `stochasticLinearRegression` ile tamamen aynıdır:
+`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
+Daha fazla bilgi için bkz. [parametre](#agg_functions-stochasticlinearregression-parameters).
+
+``` text
+stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. Uydurma
+
+    Uydurma için [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) açıklamasındaki `Uydurma` bölümüne bakın.
+
+    Tahmin edilen etiketler \[-1, 1\] aralığında olmalıdır.
+
+1. Tahmin
+
+    Kaydedilmiş durumu kullanarak, bir nesnenin `1` etiketine sahip olma olasılığını tahmin edebiliriz.
+
+    ``` sql
+    WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) FROM test_data
+    ```
+
+    Sorgu, olasılıklardan oluşan bir sütun döndürür. `evalMLMethod` fonksiyonunun ilk argümanının bir `AggregateFunctionState` nesnesi, sonrakilerin ise özellik sütunları olduğuna dikkat edin.
+
+    Ayrıca, öğeleri farklı etiketlere atayan bir olasılık sınırı da belirleyebiliriz.
+
+    ``` sql
+    SELECT ans < 1.1 AND ans > 0.5 FROM
+    (WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) AS ans FROM test_data)
+    ```
+
+    Bu durumda sonuç, etiketler olur.
+
+    `test_data`, `train_data` gibi bir tablodur ancak hedef değeri içermeyebilir.
+
+**Ayrıca Bakınız**
+
+- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
+- [Doğrusal ve lojistik regresyonlar arasındaki fark](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## groupBitmapAnd {#groupbitmapand}
+
+Bir bitmap sütununun VE (AND) birleşimini hesaplar ve `UInt64` türünde kardinalite döndürür; `-State` soneki eklenirse bir [bitmap nesnesi](../../sql_reference/functions/bitmap_functions.md) döndürür.
+
+``` sql
+groupBitmapAnd(expr)
+```
+
+**Parametre**
+
+`expr` – `AggregateFunction(groupBitmap, UInt*)` türünde sonuç veren bir ifade.
+
+**Dönüş değeri**
+
+`UInt64` türünde değer.
+
+**Örnek**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapAnd(z)─┐
+│ 3 │
+└───────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
+│ [6,8,10] │
+└──────────────────────────────────────────────────┘
+```
+
+## groupBitmapOr {#groupbitmapor}
+
+Bir bitmap sütununun VEYA (OR) birleşimini hesaplar ve `UInt64` türünde kardinalite döndürür; `-State` soneki eklenirse bir [bitmap nesnesi](../../sql_reference/functions/bitmap_functions.md) döndürür. Bu, `groupBitmapMerge` ile eşdeğerdir.
+
+``` sql
+groupBitmapOr(expr)
+```
+
+**Parametre**
+
+`expr` – `AggregateFunction(groupBitmap, UInt*)` türünde sonuç veren bir ifade.
+
+**Dönüş değeri**
+
+`UInt64` türünde değer.
+
+**Örnek**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapOr(z)─┐
+│ 15 │
+└──────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
+│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] │
+└─────────────────────────────────────────────────┘
+```
+
+## groupBitmapXor {#groupbitmapxor}
+
+Bir bitmap sütununun XOR birleşimini hesaplar ve `UInt64` türünde kardinalite döndürür; `-State` soneki eklenirse bir [bitmap nesnesi](../../sql_reference/functions/bitmap_functions.md) döndürür.
+
+``` sql
+groupBitmapXor(expr)
+```
+
+**Parametre**
+
+`expr` – `AggregateFunction(groupBitmap, UInt*)` türünde sonuç veren bir ifade.
+
+**Dönüş değeri**
+
+`UInt64` türünde değer.
+ +**Örnek** + +``` sql +DROP TABLE IF EXISTS bitmap_column_expr_test2; +CREATE TABLE bitmap_column_expr_test2 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +ORDER BY tag_id; + +INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32)))); + +SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─groupBitmapXor(z)─┐ +│ 10 │ +└───────────────────┘ + +SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐ +│ [1,3,5,6,8,10,11,13,14,15] │ +└──────────────────────────────────────────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/) diff --git a/docs/tr/sql_reference/data_types/aggregatefunction.md b/docs/tr/sql_reference/data_types/aggregatefunction.md new file mode 100644 index 00000000000..622e02d9674 --- /dev/null +++ b/docs/tr/sql_reference/data_types/aggregatefunction.md @@ -0,0 +1,70 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 52 +toc_title: AggregateFunction (ad, types_of_arguments...) +--- + +# AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction} + +Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [materyalize bir görünüm](../../sql_reference/statements/select.md#create-view). Bir toplama işlevi durumu üretmek için ortak yolu ile toplama işlevi çağırarak olduğunu `-State` sonek. Gelecekte toplanmanın nihai sonucunu elde etmek için, aynı toplama işlevini `-Merge`sonek. + +`AggregateFunction` — parametric data type. + +**Parametre** + +- Toplama işlevinin adı. + + If the function is parametric, specify its parameters too. + +- Toplama işlevi bağımsız değişkenleri türleri. + +**Örnek** + +``` sql +CREATE TABLE t +( + column1 AggregateFunction(uniq, UInt64), + column2 AggregateFunction(anyIf, String, UInt8), + column3 AggregateFunction(quantiles(0.5, 0.9), UInt64) +) ENGINE = ... +``` + +[uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq), anyİf ([herhangi](../../sql_reference/aggregate_functions/reference.md#agg_function-any)+[Eğer](../../sql_reference/aggregate_functions/combinators.md#agg-functions-combinator-if)) ve [quantiles](../../sql_reference/aggregate_functions/reference.md) ClickHouse desteklenen toplam işlevleri vardır. + +## Kullanma {#usage} + +### Veri Ekleme {#data-insertion} + +Veri eklemek için şunları kullanın `INSERT SELECT` agr aggregateega ile `-State`- işlevler. + +**Fonksiyon örnekleri** + +``` sql +uniqState(UserID) +quantilesState(0.5, 0.9)(SendTiming) +``` + +Karşılık gelen fonksiyonların aksine `uniq` ve `quantiles`, `-State`- fonksiyonlar son değer yerine durumu döndürür. Başka bir deyişle, bir değer döndürür `AggregateFunction` tür. + +Sonuç inlarında `SELECT` sorgu, değerleri `AggregateFunction` türü, Tüm ClickHouse çıktı biçimleri için uygulamaya özgü ikili gösterime sahiptir. 
Örneğin, veri dökümü, `TabSeparated` ile format `SELECT` sorgu, daha sonra bu dökümü kullanarak geri yüklenebilir `INSERT` sorgu. + +### Veri Seçimi {#data-selection} + +Veri seçerken `AggregatingMergeTree` tablo kullanın `GROUP BY` yan tümce ve veri eklerken aynı toplama işlevleri, ancak kullanarak `-Merge`sonek. + +Bir toplama fonksiyonu ile `-Merge` sonek, bir dizi durum alır, bunları birleştirir ve tam veri toplama sonucunu döndürür. + +Örneğin, aşağıdaki iki sorgu aynı sonucu döndürür: + +``` sql +SELECT uniq(UserID) FROM table + +SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP BY RegionID) +``` + +## Kullanım Örneği {#usage-example} + +Görmek [AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) motor açıklaması. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/tr/sql_reference/data_types/array.md b/docs/tr/sql_reference/data_types/array.md new file mode 100644 index 00000000000..1db16ebd1fc --- /dev/null +++ b/docs/tr/sql_reference/data_types/array.md @@ -0,0 +1,77 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 51 +toc_title: Dizi(T) +--- + +# Dizi(t) {#data-type-array} + +Bir dizi `T`- tip öğeleri. `T` herhangi bir veri türü, bir dizi dahil edilebilir. + +## Bir dizi oluşturma {#creating-an-array} + +Bir dizi oluşturmak için bir işlev kullanabilirsiniz: + +``` sql +array(T) +``` + +Köşeli parantez de kullanabilirsiniz. + +``` sql +[] +``` + +Bir dizi oluşturma örneği: + +``` sql +SELECT array(1, 2) AS x, toTypeName(x) +``` + +``` text +┌─x─────┬─toTypeName(array(1, 2))─┐ +│ [1,2] │ Array(UInt8) │ +└───────┴─────────────────────────┘ +``` + +``` sql +SELECT [1, 2] AS x, toTypeName(x) +``` + +``` text +┌─x─────┬─toTypeName([1, 2])─┐ +│ [1,2] │ Array(UInt8) │ +└───────┴────────────────────┘ +``` + +## Veri Türleri İle Çalışma {#working-with-data-types} + +Anında bir dizi oluştururken, ClickHouse bağımsız değişken türünü otomatik olarak listelenen tüm bağımsız değişkenleri depolayabilen en dar veri türü olarak tanımlar. Eğer herhangi bir [Nullable](nullable.md#data_type-nullable) veya edebi [NULL](../../sql_reference/syntax.md#null-literal) değerler, bir dizi öğesinin türü de olur [Nullable](nullable.md). + +ClickHouse veri türünü belirleyemedi, bir özel durum oluşturur. Örneğin, aynı anda dizeler ve sayılarla bir dizi oluşturmaya çalışırken bu olur (`SELECT array(1, 'a')`). + +Otomatik veri türü algılama örnekleri: + +``` sql +SELECT array(1, 2, NULL) AS x, toTypeName(x) +``` + +``` text +┌─x──────────┬─toTypeName(array(1, 2, NULL))─┐ +│ [1,2,NULL] │ Array(Nullable(UInt8)) │ +└────────────┴───────────────────────────────┘ +``` + +Uyumsuz veri türleri dizisi oluşturmaya çalışırsanız, ClickHouse bir özel durum atar: + +``` sql +SELECT array(1, 'a') +``` + +``` text +Received exception from server (version 1.1.54388): +Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. 
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/data_types/array/)
diff --git a/docs/tr/sql_reference/data_types/boolean.md b/docs/tr/sql_reference/data_types/boolean.md
new file mode 100644
index 00000000000..e0641847b98
--- /dev/null
+++ b/docs/tr/sql_reference/data_types/boolean.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 43
+toc_title: Boole
+---
+
+# Boole Değerleri {#boolean-values}
+
+Boole değerleri için ayrı bir tür yoktur. 0 veya 1 değerleriyle sınırlı `UInt8` türünü kullanın.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/data_types/boolean/)
diff --git a/docs/tr/sql_reference/data_types/date.md b/docs/tr/sql_reference/data_types/date.md
new file mode 100644
index 00000000000..fd47a49313c
--- /dev/null
+++ b/docs/tr/sql_reference/data_types/date.md
@@ -0,0 +1,15 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 47
+toc_title: Tarihli
+---
+
+# Tarihli {#date}
+
+Tarih. 1970-01-01'den bu yana geçen gün sayısı olarak (işaretsiz) iki bayt halinde saklanır. Unix döneminin başlangıcından hemen sonrasından, derleme aşamasında bir sabitle tanımlanan üst eşiğe kadar olan değerlerin saklanmasına izin verir (şu anda bu eşik 2106 yılına kadardır; tam olarak desteklenen son yıl ise 2105'tir).
+Minimum değer 0000-00-00 olarak yazdırılır.
+
+Tarih değeri saat dilimi olmadan saklanır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/data_types/date/)
diff --git a/docs/tr/sql_reference/data_types/datetime.md b/docs/tr/sql_reference/data_types/datetime.md
new file mode 100644
index 00000000000..bc5feeed3b5
--- /dev/null
+++ b/docs/tr/sql_reference/data_types/datetime.md
@@ -0,0 +1,129 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 48
+toc_title: DateTime
+---
+
+# Datetime {#data_type-datetime}
+
+Zaman içindeki bir anı, bir takvim tarihi ve günün saati olarak ifade edilebilecek şekilde saklamaya izin verir.
+
+Sözdizimi:
+
+``` sql
+DateTime([timezone])
+```
+
+Desteklenen değerler aralığı: \[1970-01-01 00:00:00, 2105-12-31 23:59:59\].
+
+Çözünürlük: 1 saniye.
+
+## Kullanım Açıklamaları {#usage-remarks}
+
+Zaman içindeki nokta, saat diliminden veya gün ışığından yararlanma saatinden bağımsız olarak bir [Unix zaman damgası](https://en.wikipedia.org/wiki/Unix_time) olarak saklanır. Ayrıca `DateTime` türü, sütunun tamamı için geçerli olan saat dilimini saklayabilir; bu saat dilimi, `DateTime` değerlerinin metin biçiminde nasıl görüntüleneceğini ve dize olarak belirtilen değerlerin ('2020-01-01 05:00:01') nasıl ayrıştırılacağını etkiler. Saat dilimi, tablonun satırlarında (veya sonuç kümesinde) değil, sütunun meta verilerinde saklanır.
+Desteklenen saat dilimlerinin listesi [IANA Saat Dilimi veritabanında](https://www.iana.org/time-zones) bulunabilir.
+[IANA Saat Dilimi veritabanını](https://www.iana.org/time-zones) içeren `tzdata` paketi sistemde kurulu olmalıdır. Yerel sistemin bildiği saat dilimlerini listelemek için `timedatectl list-timezones` komutunu kullanın.
+
+Bir tablo oluştururken `DateTime` türündeki sütunlar için saat dilimini açıkça ayarlayabilirsiniz. Saat dilimi ayarlanmamışsa ClickHouse, sunucu ayarlarındaki [saat dilimi](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) parametresinin değerini veya ClickHouse sunucusunun başlatıldığı andaki işletim sistemi ayarlarını kullanır.
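+
+Örneğin, sütun düzeyinde saat dilimi belirtimi şöyle görünür (tablo adı varsayımsal küçük bir taslaktır; tam örnekler aşağıdaki "Örnekler" bölümündedir):
+
+``` sql
+CREATE TABLE tz_example
+(
+    ts DateTime('Europe/Istanbul') -- saat dilimi sütun tanımında açıkça belirtiliyor
+)
+ENGINE = TinyLog;
+```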
+ +Bu [clickhouse-müşteri](../../interfaces/cli.md) veri türünü başlatırken bir saat dilimi açıkça ayarlanmamışsa, sunucu saat dilimini varsayılan olarak uygular. İstemci saat dilimini kullanmak için `clickhouse-client` ile... `--use_client_time_zone` parametre. + +ClickHouse çıkış değerleri `YYYY-MM-DD hh:mm:ss` varsayılan olarak metin biçimi. Çıkış ile değiştirebilirsiniz [formatDateTime](../../sql_reference/functions/date_time_functions.md#formatdatetime) işlev. + +Clickhouse'a veri eklerken, Tarih ve saat dizelerinin farklı biçimlerini kullanabilirsiniz. [date\_time\_input\_format](../../operations/settings/settings.md#settings-date_time_input_format) ayar. + +## Örnekler {#examples} + +**1.** Bir tablo ile bir tablo oluşturma `DateTime`- sütun yazın ve içine veri ekleme: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime('Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog; +``` + +``` sql +INSERT INTO dt Values (1546300800, 1), ('2019-01-01 00:00:00', 2); +``` + +``` sql +SELECT * FROM dt; +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00 │ 1 │ +│ 2019-01-01 00:00:00 │ 2 │ +└─────────────────────┴──────────┘ +``` + +- Bir tamsayı olarak datetime eklerken, Unıx Zaman Damgası (UTC) olarak kabul edilir. `1546300800` temsil etmek `'2019-01-01 00:00:00'` UTC. Ancak, `timestamp` sütun vardır `Europe/Moscow` (UTC+3) belirtilen zaman dilimi, dize olarak çıkış yaparken değer olarak gösterilecektir `'2019-01-01 03:00:00'` +- Dize değerini datetime olarak eklerken, sütun saat diliminde olduğu kabul edilir. `'2019-01-01 00:00:00'` will gibi muamele `Europe/Moscow` saat dilimi ve farklı kaydedildi `1546290000`. + +**2.** Üzerinde filtreleme `DateTime` değerler + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime('2019-01-01 00:00:00', 'Europe/Moscow') +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00 │ 2 │ +└─────────────────────┴──────────┘ +``` + +`DateTime` sütun değerleri, bir dize değeri kullanılarak filtrelenebilir `WHERE` yüklem. 
Dize otomatik olarak `DateTime` türüne dönüştürülür:
+
+``` sql
+SELECT * FROM dt WHERE timestamp = '2019-01-01 00:00:00'
+```
+
+``` text
+┌───────────timestamp─┬─event_id─┐
+│ 2019-01-01 03:00:00 │ 1 │
+└─────────────────────┴──────────┘
+```
+
+**3.** `DateTime` türündeki bir sütunun saat dilimini almak:
+
+``` sql
+SELECT toDateTime(now(), 'Europe/Moscow') AS column, toTypeName(column) AS x
+```
+
+``` text
+┌──────────────column─┬─x─────────────────────────┐
+│ 2019-10-16 04:12:04 │ DateTime('Europe/Moscow') │
+└─────────────────────┴───────────────────────────┘
+```
+
+**4.** Saat dilimi dönüştürme
+
+``` sql
+SELECT
+toDateTime(timestamp, 'Europe/London') as lon_time,
+toDateTime(timestamp, 'Europe/Moscow') as mos_time
+FROM dt
+```
+
+``` text
+┌───────────lon_time──┬────────────mos_time─┐
+│ 2019-01-01 00:00:00 │ 2019-01-01 03:00:00 │
+│ 2018-12-31 21:00:00 │ 2019-01-01 00:00:00 │
+└─────────────────────┴─────────────────────┘
+```
+
+## Ayrıca Bakınız {#see-also}
+
+- [Tip dönüştürme fonksiyonları](../../sql_reference/functions/type_conversion_functions.md)
+- [Tarih ve saatlerle çalışmak için işlevler](../../sql_reference/functions/date_time_functions.md)
+- [Dizilerle çalışmak için işlevler](../../sql_reference/functions/array_functions.md)
+- [`date_time_input_format` ayarı](../../operations/settings/settings.md#settings-date_time_input_format)
+- [`timezone` sunucu yapılandırma parametresi](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone)
+- [Tarih ve saatlerle çalışmak için operatörler](../../sql_reference/operators.md#operators-datetime)
+- [`Date` veri türü](date.md)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/data_types/datetime/)
diff --git a/docs/tr/sql_reference/data_types/datetime64.md b/docs/tr/sql_reference/data_types/datetime64.md
new file mode 100644
index 00000000000..82839e174da
--- /dev/null
+++ b/docs/tr/sql_reference/data_types/datetime64.md
@@ -0,0 +1,104 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 49
+toc_title: DateTime64
+---
+
+# Datetime64 {#data_type-datetime64}
+
+Zaman içindeki bir anı, tanımlı saniye altı hassasiyetle, bir takvim tarihi ve günün saati olarak ifade edilebilecek şekilde saklamaya izin verir.
+
+Adım (tick) boyutu (hassasiyet): 10^(-hassasiyet) saniye.
+
+Sözdizimi:
+
+``` sql
+DateTime64(precision, [timezone])
+```
+
+Dahili olarak veriler, dönem (epoch) başlangıcından (1970-01-01 00:00:00 UTC) bu yana geçen 'tick' sayısı olarak `Int64` türünde saklanır. Tick çözünürlüğü, hassasiyet (precision) parametresiyle belirlenir. Ayrıca `DateTime64` türü, sütunun tamamı için geçerli olan saat dilimini saklayabilir; bu saat dilimi, `DateTime64` değerlerinin metin biçiminde nasıl görüntüleneceğini ve dize olarak belirtilen değerlerin ('2020-01-01 05:00:01.000') nasıl ayrıştırılacağını etkiler. Saat dilimi, tablonun satırlarında (veya sonuç kümesinde) değil, sütunun meta verilerinde saklanır. Ayrıntılar için bkz. [DateTime](datetime.md).
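+
+Hassasiyet parametresinin etkisini gösteren küçük bir taslak (değerler varsayımsaldır):
+
+``` sql
+-- precision = 3 → tick boyutu 10^-3 saniye; kesir kısmı milisaniye olarak görüntülenir.
+-- precision = 6 → tick boyutu 10^-6 saniye; kesir kısmı mikrosaniye olarak görüntülenir.
+SELECT
+    toDateTime64('2019-01-01 00:00:00.123', 3, 'UTC') AS ms_value,
+    toDateTime64('2019-01-01 00:00:00.123456', 6, 'UTC') AS us_value;
+```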
+ +## Örnekler {#examples} + +**1.** İle bir tablo oluşturma `DateTime64`- sütun yazın ve içine veri ekleme: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime64(3, 'Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +``` + +``` sql +SELECT * FROM dt +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00.000 │ 1 │ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +- Bir tamsayı olarak datetime eklerken, uygun şekilde ölçeklendirilmiş bir Unıx Zaman Damgası (UTC) olarak kabul edilir. `1546300800000` (hassas 3 ile) temsil eder `'2019-01-01 00:00:00'` UTC. Ancak, `timestamp` sütun vardır `Europe/Moscow` (UTC+3) belirtilen zaman dilimi, bir dize olarak çıkış yaparken değer olarak gösterilir `'2019-01-01 03:00:00'` +- Dize değerini datetime olarak eklerken, sütun saat diliminde olduğu kabul edilir. `'2019-01-01 00:00:00'` will gibi muamele `Europe/Moscow` saat dilimi ve olarak saklanır `1546290000000`. + +**2.** Üzerinde filtreleme `DateTime64` değerler + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +Aksine `DateTime`, `DateTime64` değerler dönüştürülmez `String` otomatik olarak + +**3.** Bir saat dilimi almak `DateTime64`- tip değeri: + +``` sql +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────────column─┬─x──────────────────────────────┐ +│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ +└─────────────────────────┴────────────────────────────────┘ +``` + +**4.** Zaman dilimi dönüştürme + +``` sql +SELECT +toDateTime64(timestamp, 3, 'Europe/London') as lon_time, +toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────────lon_time──┬────────────────mos_time─┐ +│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ +│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ +└─────────────────────────┴─────────────────────────┘ +``` + +## Ayrıca Bakınız {#see-also} + +- [Tip dönüştürme fonksiyonları](../../sql_reference/functions/type_conversion_functions.md) +- [Tarih ve saatlerle çalışmak için işlevler](../../sql_reference/functions/date_time_functions.md) +- [Dizilerle çalışmak için işlevler](../../sql_reference/functions/array_functions.md) +- [Bu `date_time_input_format` ayar](../../operations/settings/settings.md#settings-date_time_input_format) +- [Bu `timezone` sunucu yapılandırma parametresi](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [Tarih ve saatlerle çalışmak için operatörler](../../sql_reference/operators.md#operators-datetime) +- [`Date` veri türü](date.md) +- [`DateTime` veri türü](datetime.md) diff --git a/docs/tr/sql_reference/data_types/decimal.md b/docs/tr/sql_reference/data_types/decimal.md new file mode 100644 index 00000000000..6ec15a52e12 --- /dev/null +++ b/docs/tr/sql_reference/data_types/decimal.md @@ -0,0 +1,109 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 42 +toc_title: "Ondal\u0131k" +--- + +# Ondalık(P, S), Decimal32 (S), Decimal64( S), Decimal128 (S) {#decimalp-s-decimal32s-decimal64s-decimal128s} + +Ekleme, çıkarma ve çarpma işlemleri sırasında hassasiyeti koruyan imzalı sabit noktalı 
sayılar. Bölünme için en az önemli basamak atılır (yuvarlatılmamış). + +## Parametre {#parameters} + +- P-hassas. Geçerli Aralık: \[1: 38 \]. Kaç ondalık basamak sayısı (kesir dahil) olabilir belirler. +- S-scale. Geçerli Aralık: \[0: P\]. Kaç ondalık basamak kesir olabilir belirler. + +P parametre değerine bağlı olarak ondalık (P, S) bir eşanlamlıdır: +- P \[ 1 : 9\] - Decimal32(S) için) +- P \[ 10 : 18\] - Decimal64(ler) için) +- P \[ 19 : 38\] - Decimal128(ler) için) + +## Ondalık değer aralıkları {#decimal-value-ranges} + +- Decimal32(S) - ( -1 \* 10^(9 - S), 1 \* 10^(9-S) ) +- Decimal64(S) - ( -1 \* 10^(18 - S), 1 \* 10^(18-S) ) +- Decimal128(S) - ( -1 \* 10^(38 - S), 1 \* 10^(38-S) ) + +Örneğin, Decimal32 (4) -99999.9999 99999.9999 0.0001 adım ile sayılar içerebilir. + +## İç temsil {#internal-representation} + +Dahili veri, ilgili bit genişliğine sahip normal imzalı tamsayılar olarak temsil edilir. Bellekte saklanabilen gerçek değer aralıkları, yukarıda belirtilenden biraz daha büyüktür ve yalnızca bir dizeden dönüştürmede kontrol edilir. + +Modern CPU 128-bit tamsayıları doğal olarak desteklemediğinden, Decimal128 üzerindeki işlemler öykünülür. Bu Decimal128 nedeniyle Decimal32/Decimal64'ten önemli ölçüde daha yavaş çalışır. + +## İşlemler ve sonuç türü {#operations-and-result-type} + +Ondalık sonuçtaki ikili işlemler daha geniş sonuç türünde (herhangi bir bağımsız değişken sırası ile) sonuçlanır. + +- Decimal64(S1) Decimal32 (S2) - \> Decimal64 (S) +- Decimal128(S1) Decimal32 (S2) - \> Decimal128(S) +- Decimal128(S1) Decimal64 (S2) - \> Decimal128(S) + +Ölçek kuralları: + +- ekleme, çıkarma: s = max (S1, S2). +- multuply: S = S1 + S2. +- böl: S = S1. + +Ondalık ve tamsayılar arasındaki benzer işlemler için sonuç, bir bağımsız değişkenle aynı boyutta ondalık olur. + +Ondalık ve Float32 / Float64 arasındaki işlemler tanımlanmamıştır. Bunlara ihtiyacınız varsa, todecimal32, toDecimal64, toDecimal128 veya toFloat32, toFloat64 builtins kullanarak bağımsız değişkenlerden birini açıkça yayınlayabilirsiniz. Sonucun hassasiyeti kaybedeceğini ve tür dönüşümünün hesaplamalı olarak pahalı bir işlem olduğunu unutmayın. + +Float64 (örneğin, var veya stddev) ondalık dönüş sonucu bazı işlevler. Ara hesaplamalar hala Float64 ve aynı değerlere sahip ondalık girişler arasında farklı sonuçlara yol açabilecek ondalık olarak gerçekleştirilebilir. + +## Taşma kontrolleri {#overflow-checks} + +Ondalık hesaplamalar sırasında tamsayı taşmaları gerçekleşebilir. Bir kesirdeki aşırı rakamlar atılır (yuvarlatılmamış). Tamsayı bölümünde aşırı basamak bir istisna yol açacaktır. + +``` sql +SELECT toDecimal32(2, 4) AS x, x / 3 +``` + +``` text +┌──────x─┬─divide(toDecimal32(2, 4), 3)─┐ +│ 2.0000 │ 0.6666 │ +└────────┴──────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32(4.2, 8) AS x, x * x +``` + +``` text +DB::Exception: Scale is out of bounds. +``` + +``` sql +SELECT toDecimal32(4.2, 8) AS x, 6 * x +``` + +``` text +DB::Exception: Decimal math overflow. +``` + +Taşma kontrolleri operasyonların yavaşlamasına neden olur. Taşmaların mümkün olmadığı biliniyorsa, kontrolleri kullanarak devre dışı bırakmak mantıklıdır `decimal_check_overflow` ayar. 
Kontroller devre dışı bırakıldığında ve taşma gerçekleştiğinde, sonuç yanlış olacaktır: + +``` sql +SET decimal_check_overflow = 0; +SELECT toDecimal32(4.2, 8) AS x, 6 * x +``` + +``` text +┌──────────x─┬─multiply(6, toDecimal32(4.2, 8))─┐ +│ 4.20000000 │ -17.74967296 │ +└────────────┴──────────────────────────────────┘ +``` + +Taşma kontrolleri sadece aritmetik işlemlerde değil, değer karşılaştırmasında da gerçekleşir: + +``` sql +SELECT toDecimal32(1, 8) < 100 +``` + +``` text +DB::Exception: Can't compare. +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/decimal/) diff --git a/docs/tr/sql_reference/data_types/domains/index.md b/docs/tr/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..e2632761647 --- /dev/null +++ b/docs/tr/sql_reference/data_types/domains/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: Etkiler +toc_priority: 56 +--- + + diff --git a/docs/tr/sql_reference/data_types/domains/ipv4.md b/docs/tr/sql_reference/data_types/domains/ipv4.md new file mode 100644 index 00000000000..51611ad97de --- /dev/null +++ b/docs/tr/sql_reference/data_types/domains/ipv4.md @@ -0,0 +1,84 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 59 +toc_title: Ipv44 +--- + +## Ipv44 {#ipv4} + +`IPv4` dayalı bir doma aindir `UInt32` tip ve IPv4 değerlerini depolamak için yazılan bir yedek olarak hizmet eder. İnsan dostu giriş-çıkış biçimi ve muayene ile ilgili sütun tipi bilgileri ile kompakt depolama sağlar. + +### Temel Kullanım {#basic-usage} + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + +``` text +┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ +│ url │ String │ │ │ │ │ +│ from │ IPv4 │ │ │ │ │ +└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ +``` + +Veya IPv4 etki alanını anahtar olarak kullanabilirsiniz: + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; +``` + +`IPv4` etki alanı IPv4 dizeleri olarak özel giriş biçimini destekler: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); + +SELECT * FROM hits; +``` + +``` text +┌─url────────────────────────────────┬───────────from─┐ +│ https://clickhouse.tech/docs/en/ │ 116.106.34.242 │ +│ https://wikipedia.org │ 116.253.40.133 │ +│ https://clickhouse.tech │ 183.247.232.58 │ +└────────────────────────────────────┴────────────────┘ +``` + +Değerler kompakt ikili formda saklanır: + +``` sql +SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(from)─┬─hex(from)─┐ +│ IPv4 │ B7F7E83A │ +└──────────────────┴───────────┘ +``` + +Etki alanı değerleri örtülü olarak dışındaki türlere dönüştürülemez `UInt32`. 
+Dönüştürmek istiyorsanız `IPv4` bir dizeye değer, bunu açıkça yapmak zorundasınız `IPv4NumToString()` işlev: + +``` sql +SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; +``` + + ┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ + │ String │ 183.247.232.58 │ + └───────────────────────────────────┴────────────────┘ + +Ya da bir döküm `UInt32` değer: + +``` sql +SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ +│ UInt32 │ 3086477370 │ +└──────────────────────────────────┴────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/domains/ipv4) diff --git a/docs/tr/sql_reference/data_types/domains/ipv6.md b/docs/tr/sql_reference/data_types/domains/ipv6.md new file mode 100644 index 00000000000..71c9db90f9f --- /dev/null +++ b/docs/tr/sql_reference/data_types/domains/ipv6.md @@ -0,0 +1,86 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 60 +toc_title: IPv6 +--- + +## IPv6 {#ipv6} + +`IPv6` dayalı bir doma aindir `FixedString(16)` tip ve IPv6 değerlerini depolamak için yazılan bir yedek olarak hizmet eder. İnsan dostu giriş-çıkış biçimi ve muayene ile ilgili sütun tipi bilgileri ile kompakt depolama sağlar. + +### Temel Kullanım {#basic-usage} + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + +``` text +┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ +│ url │ String │ │ │ │ │ +│ from │ IPv6 │ │ │ │ │ +└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ +``` + +Veya kullanabilirsiniz `IPv6` anahtar olarak etki alanı: + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; +``` + +`IPv6` etki alanı IPv6 dizeleri olarak özel girişi destekler: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); + +SELECT * FROM hits; +``` + +``` text +┌─url────────────────────────────────┬─from──────────────────────────┐ +│ https://clickhouse.tech │ 2001:44c8:129:2632:33:0:252:2 │ +│ https://clickhouse.tech/docs/en/ │ 2a02:e980:1e::1 │ +│ https://wikipedia.org │ 2a02:aa08:e000:3100::2 │ +└────────────────────────────────────┴───────────────────────────────┘ +``` + +Değerler kompakt ikili formda saklanır: + +``` sql +SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(from)─┬─hex(from)────────────────────────┐ +│ IPv6 │ 200144C8012926320033000002520002 │ +└──────────────────┴──────────────────────────────────┘ +``` + +Etki alanı değerleri örtülü olarak dışındaki türlere dönüştürülemez `FixedString(16)`. 
+Dönüştürmek istiyorsanız `IPv6` bir dizeye değer, bunu açıkça yapmak zorundasınız `IPv6NumToString()` işlev: + +``` sql +SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ +│ String │ 2001:44c8:129:2632:33:0:252:2 │ +└───────────────────────────────────┴───────────────────────────────┘ +``` + +Ya da bir döküm `FixedString(16)` değer: + +``` sql +SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ +│ FixedString(16) │ ��� │ +└───────────────────────────────────────────┴─────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/domains/ipv6) diff --git a/docs/tr/sql_reference/data_types/domains/overview.md b/docs/tr/sql_reference/data_types/domains/overview.md new file mode 100644 index 00000000000..97e3ef5018a --- /dev/null +++ b/docs/tr/sql_reference/data_types/domains/overview.md @@ -0,0 +1,32 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 58 +toc_title: "Genel bak\u0131\u015F" +--- + +# Etkiler {#domains} + +Etki alanları, varolan temel türün üstüne bazı ek özellikler ekleyen, ancak temel veri türünün kablolu ve disk üstü biçimini sağlam bırakan özel amaçlı türlerdir. Şu anda, ClickHouse kullanıcı tanımlı etki alanlarını desteklemiyor. + +Örneğin, ilgili taban türünün kullanılabileceği her yerde etki alanlarını kullanabilirsiniz: + +- Etki alanı türünde bir sütun oluşturma +- Alan sütunundan/alanına değerleri okuma / yazma +- Bir temel türü bir dizin olarak kullanılabilir, bir dizin olarak kullanın +- Etki alanı sütun değerleri ile çağrı fonksiyonları + +### Alanların ekstra özellikleri {#extra-features-of-domains} + +- Açık sütun türü adı `SHOW CREATE TABLE` veya `DESCRIBE TABLE` +- İle insan dostu format inputtan giriş `INSERT INTO domain_table(domain_column) VALUES(...)` +- İçin insan dostu forma outputta çıktı `SELECT domain_column FROM domain_table` +- Harici bir kaynaktan insan dostu biçimde veri yükleme: `INSERT INTO domain_table FORMAT CSV ...` + +### Sınırlamalar {#limitations} + +- Temel türün dizin sütununu etki alanı türüne dönüştürülemiyor `ALTER TABLE`. +- Başka bir sütun veya tablodan veri eklerken dize değerlerini dolaylı olarak etki alanı değerlerine dönüştüremez. +- Etki alanı, depolanan değerler üzerinde hiçbir kısıtlama ekler. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/tr/sql_reference/data_types/enum.md b/docs/tr/sql_reference/data_types/enum.md new file mode 100644 index 00000000000..65c687835dc --- /dev/null +++ b/docs/tr/sql_reference/data_types/enum.md @@ -0,0 +1,132 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 50 +toc_title: Enum +--- + +# Enum {#enum} + +Adlandırılmış değerlerden oluşan numaralandırılmış tür. + +Adlandırılmış değerler olarak bildirilmelidir `'string' = integer` çiftliler. ClickHouse yalnızca sayıları saklar, ancak adları aracılığıyla değerlerle işlemleri destekler. + +ClickHouse destekler: + +- 8-bit `Enum`. En fazla 256 değerleri numaralandırılmış içerebilir `[-128, 127]` Aralık. +- 16-bit `Enum`. En fazla 65536 değerleri numaralandırılmış içerebilir `[-32768, 32767]` Aralık. + +ClickHouse otomatik olarak türünü seçer `Enum` veri eklendiğinde. 
Ayrıca kullanabilirsiniz `Enum8` veya `Enum16` türleri depolama boyutunda emin olmak için. + +## Kullanım Örnekleri {#usage-examples} + +Burada bir tablo oluşturuyoruz `Enum8('hello' = 1, 'world' = 2)` type Col columnum columnn: + +``` sql +CREATE TABLE t_enum +( + x Enum('hello' = 1, 'world' = 2) +) +ENGINE = TinyLog +``` + +Sütun `x` yalnızca tür tanımında listelenen değerleri depolayabilir: `'hello'` veya `'world'`. Başka bir değer kaydetmeye çalışırsanız, ClickHouse bir özel durum yükseltir. Bunun için 8-bit boyutu `Enum` otomatik olarak seçilir. + +``` sql +INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello') +``` + +``` text +Ok. +``` + +``` sql +INSERT INTO t_enum values('a') +``` + +``` text +Exception on client: +Code: 49. DB::Exception: Unknown element 'a' for type Enum('hello' = 1, 'world' = 2) +``` + +Tablodan veri sorguladığınızda, ClickHouse dize değerleri `Enum`. + +``` sql +SELECT * FROM t_enum +``` + +``` text +┌─x─────┐ +│ hello │ +│ world │ +│ hello │ +└───────┘ +``` + +Satırların sayısal eşdeğerlerini görmeniz gerekiyorsa, `Enum` tamsayı türüne değer. + +``` sql +SELECT CAST(x, 'Int8') FROM t_enum +``` + +``` text +┌─CAST(x, 'Int8')─┐ +│ 1 │ +│ 2 │ +│ 1 │ +└─────────────────┘ +``` + +Bir sorguda bir Enum değeri oluşturmak için, ayrıca kullanmanız gerekir `CAST`. + +``` sql +SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) +``` + +``` text +┌─toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)'))─┐ +│ Enum8('a' = 1, 'b' = 2) │ +└─────────────────────────────────────────────────────┘ +``` + +## Genel Kurallar ve kullanım {#general-rules-and-usage} + +Değerlerin her birine aralıkta bir sayı atanır `-128 ... 127` için `Enum8` veya aralık inta `-32768 ... 32767` için `Enum16`. Tüm dizeler ve sayılar farklı olmalıdır. Boş bir dize izin verilir. Bu tür belirtilmişse (bir tablo tanımında), sayılar rasgele bir sırada olabilir. Ancak, sipariş önemli değil. + +Ne dize ne de sayısal değer bir `Enum` olabilir [NULL](../../sql_reference/syntax.md). + +Bir `Enum` içerdiği olabilir [Nullable](nullable.md) tür. Yani sorguyu kullanarak bir tablo oluşturursanız + +``` sql +CREATE TABLE t_enum_nullable +( + x Nullable( Enum8('hello' = 1, 'world' = 2) ) +) +ENGINE = TinyLog +``` + +bu mağaza değil sadece `'hello'` ve `'world'`, ama `NULL`, yanında. + +``` sql +INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) +``` + +RAM, bir `Enum` sütun aynı şekilde saklanır `Int8` veya `Int16` karşılık gelen sayısal değerlerin. + +Metin formunda okurken, ClickHouse değeri bir dize olarak ayrıştırır ve karşılık gelen dizeyi Enum değerleri kümesinden arar. Bulunmazsa, bir istisna atılır. Metin biçiminde okurken, dize okunur ve karşılık gelen sayısal değer aranır. Bulunmazsa bir istisna atılır. +Metin formunda yazarken, değeri karşılık gelen dize olarak yazar. Sütun verileri çöp içeriyorsa (geçerli kümeden olmayan sayılar), bir özel durum atılır. İkili formda okurken ve yazarken, Int8 ve Int16 veri türleri ile aynı şekilde çalışır. +Örtülü varsayılan değer, en düşük sayıya sahip değerdir. + +Sırasında `ORDER BY`, `GROUP BY`, `IN`, `DISTINCT` ve böylece, Enumlar karşılık gelen sayılarla aynı şekilde davranır. Örneğin, sipariş onları sayısal olarak sıralar. Eşitlik ve karşılaştırma işleçleri, alttaki sayısal değerler üzerinde yaptıkları gibi Enumlarda aynı şekilde çalışır. + +Enum değerleri sayılarla karşılaştırılamaz. Enums sabit bir dize ile karşılaştırılabilir. Karşılaştırılan dize Enum için geçerli bir değer değilse, bir özel durum atılır. 
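+
+Örneğin, yukarıda oluşturulan `t_enum` tablosu üzerinde küçük bir taslak:
+
+``` sql
+SELECT * FROM t_enum WHERE x = 'hello'; -- geçerli bir Enum değeri, eşleşen satırları döndürür
+SELECT * FROM t_enum WHERE x = 'xyz';   -- 'xyz' geçerli bir Enum değeri olmadığından özel durum atılır
+```
+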
IN operatörü, sol taraftaki Enum ve sağ taraftaki bir dizi dizeyle desteklenir. Dizeler, karşılık gelen Enumun değerleridir. + +Most numeric and string operations are not defined for Enum values, e.g. adding a number to an Enum or concatenating a string to an Enum. +Ancak, Enum doğal bir `toString` dize değerini döndüren işlev. + +Enum değerleri de kullanarak sayısal türlere dönüştürülebilir `toT` fonksiyon, burada t sayısal bir türdür. T enum'un temel sayısal türüne karşılık geldiğinde, bu dönüşüm sıfır maliyetlidir. +Enum türü, yalnızca değer kümesi değiştirilirse, alter kullanılarak maliyet olmadan değiştirilebilir. Her iki ekleme ve Alter kullanarak Enum üyeleri kaldırmak mümkündür (kaldırma yalnızca kaldırılan değer tabloda hiç kullanılmadıysa güvenlidir). Bir koruma olarak, önceden tanımlanmış bir Enum üyesinin sayısal değerini değiştirmek bir istisna atar. + +ALTER kullanarak, bir Enum8 için bir Enum16 veya tam tersi, Int8 için Int16 değiştirme gibi değiştirmek mümkündür. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/enum/) diff --git a/docs/tr/sql_reference/data_types/fixedstring.md b/docs/tr/sql_reference/data_types/fixedstring.md new file mode 100644 index 00000000000..bf716f4d591 --- /dev/null +++ b/docs/tr/sql_reference/data_types/fixedstring.md @@ -0,0 +1,63 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 45 +toc_title: FixedString(N) +--- + +# Fixedstring {#fixedstring} + +Sabit uzunlukta bir dize `N` bayt (ne karakter ne de kod noktaları). + +Bir sütun bildirmek için `FixedString` yazın, aşağıdaki sözdizimini kullanın: + +``` sql + FixedString(N) +``` + +Nerede `N` doğal bir sayıdır. + +Bu `FixedString` veri tam olarak uzunluğa sahip olduğunda tür etkilidir `N` baytlar. Diğer tüm durumlarda, verimliliği düşürmesi muhtemeldir. + +Verimli bir şekilde depolan theabilen değerlere örnekler `FixedString`- yazılan sütunlar: + +- IP adreslerinin ikili gösterimi (`FixedString(16)` IPv6 için). +- Language codes (ru\_RU, en\_US … ). +- Currency codes (USD, RUB … ). +- Karma ikili gösterimi (`FixedString(16)` MD5 için, `FixedString(32)` SHA256 için). + +UUID değerlerini depolamak için [UUID](uuid.md) veri türü. + +Verileri eklerken, ClickHouse: + +- Dize daha az içeriyorsa, boş bayt ile bir dize tamamlar `N` baytlar. +- Atar `Too large value for FixedString(N)` dize birden fazla içeriyorsa, özel durum `N` baytlar. + +Verileri seçerken, ClickHouse dize sonunda boş bayt kaldırmaz. Eğer kullanıyorsanız `WHERE` yan tümcesi, null bayt el ile eşleştirmek için eklemelisiniz `FixedString` değer. Kullanımı için aşağıdaki örnek, nasıl gösterir `WHERE` fık withra ile `FixedString`. + +Aşağıdaki tabloyu tek ile düşünelim `FixedString(2)` sütun: + +``` text +┌─name──┐ +│ b │ +└───────┘ +``` + +Sorgu `SELECT * FROM FixedStringTable WHERE a = 'b'` sonuç olarak herhangi bir veri döndürmez. Filtre desenini boş baytlarla tamamlamalıyız. + +``` sql +SELECT * FROM FixedStringTable +WHERE a = 'b\0' +``` + +``` text +┌─a─┐ +│ b │ +└───┘ +``` + +Bu davranış için MySQL farklıdır `CHAR` tür (burada dizeler boşluklarla doldurulur ve boşluklar çıktı için kaldırılır). + +Not uzunluğu `FixedString(N)` değer sabittir. Bu [uzunluk](../../sql_reference/functions/array_functions.md#array_functions-length) fonksiyon döndürür `N` hatta eğer `FixedString(N)` değer yalnızca boş baytlarla doldurulur, ancak [boş](../../sql_reference/functions/string_functions.md#empty) fonksiyon döndürür `1` bu durumda. 
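+
+Bu davranışı gösteren küçük bir taslak (`toFixedString` değeri boş baytlarla doldurur):
+
+``` sql
+SELECT
+    length(toFixedString('', 2)) AS len,    -- 2: uzunluk her zaman N'dir
+    empty(toFixedString('', 2)) AS is_empty -- 1: değer yalnızca boş bayt içerir
+```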
+ +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/tr/sql_reference/data_types/float.md b/docs/tr/sql_reference/data_types/float.md new file mode 100644 index 00000000000..0b5bd76be86 --- /dev/null +++ b/docs/tr/sql_reference/data_types/float.md @@ -0,0 +1,87 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 41 +toc_title: Float32, Float64 +--- + +# Float32, Float64 {#float32-float64} + +[Kayan nokta numaraları](https://en.wikipedia.org/wiki/IEEE_754). + +Türleri C türlerine eşdeğerdir: + +- `Float32` - `float` +- `Float64` - `double` + +Verileri mümkün olduğunda tamsayı biçiminde saklamanızı öneririz. Örneğin, sabit hassas sayıları parasal tutarlar veya sayfa yükleme süreleri gibi milisaniye cinsinden tamsayı değerlerine dönüştürün. + +## Kayan noktalı sayıları kullanma {#using-floating-point-numbers} + +- Kayan noktalı sayılarla yapılan hesaplamalar yuvarlama hatası oluşturabilir. + + + +``` sql +SELECT 1 - 0.9 +``` + +``` text +┌───────minus(1, 0.9)─┐ +│ 0.09999999999999998 │ +└─────────────────────┘ +``` + +- Hesaplamanın sonucu hesaplama yöntemine (bilgisayar sisteminin işlemci tipi ve mimarisi) bağlıdır. +- Kayan nokta hesaplamaları, sonsuzluk gibi sayılarla sonuçlanabilir (`Inf`) ve “not-a-number” (`NaN`). Hesaplamaların sonuçlarını işlerken bu dikkate alınmalıdır. +- Kayan noktalı sayıları metinden ayrıştırırken, sonuç en yakın makine tarafından temsil edilebilir sayı olmayabilir. + +## N andan ve In andf {#data_type-float-nan-inf} + +Standart SQL aksine, ClickHouse kayan noktalı sayılar aşağıdaki kategorileri destekler: + +- `Inf` – Infinity. + + + +``` sql +SELECT 0.5 / 0 +``` + +``` text +┌─divide(0.5, 0)─┐ +│ inf │ +└────────────────┘ +``` + +- `-Inf` – Negative infinity. + + + +``` sql +SELECT -0.5 / 0 +``` + +``` text +┌─divide(-0.5, 0)─┐ +│ -inf │ +└─────────────────┘ +``` + +- `NaN` – Not a number. + + + +``` sql +SELECT 0 / 0 +``` + +``` text +┌─divide(0, 0)─┐ +│ nan │ +└──────────────┘ +``` + + See the rules for `NaN` sorting in the section [ORDER BY clause](../sql_reference/statements/select.md). + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/float/) diff --git a/docs/tr/sql_reference/data_types/index.md b/docs/tr/sql_reference/data_types/index.md new file mode 100644 index 00000000000..c2fdda0c8e8 --- /dev/null +++ b/docs/tr/sql_reference/data_types/index.md @@ -0,0 +1,15 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: "Veri T\xFCrleri" +toc_priority: 37 +toc_title: "Giri\u015F" +--- + +# Veri Türleri {#data_types} + +ClickHouse tablo hücrelerinde veri çeşitli saklayabilirsiniz. + +Bu bölümde desteklenen veri türleri ve varsa bunları kullanmak ve/veya uygulamak için özel hususlar açıklanmaktadır. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/) diff --git a/docs/tr/sql_reference/data_types/int_uint.md b/docs/tr/sql_reference/data_types/int_uint.md new file mode 100644 index 00000000000..2c7b47eb4c6 --- /dev/null +++ b/docs/tr/sql_reference/data_types/int_uint.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 40 +toc_title: "U\u0130nt8, U\u0130nt16, U\u0130nt32, Uint64, Int8, Int16, Int32, Int64" +--- + +# Uİnt8, Uİnt16, Uİnt32, Uint64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} + +Sabit uzunlukta tamsayılar, veya bir işareti olmadan. 
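+
+Örneğin, ClickHouse bir hazır bilgi (literal) için değeri saklayabilen en küçük türü seçer; küçük bir taslak:
+
+``` sql
+SELECT toTypeName(0), toTypeName(-1), toTypeName(256)
+-- UInt8, Int8, UInt16
+```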
+ +## İnt Aralıkları {#int-ranges} + +- Int8- \[-128: 127\] +- Int16 - \[-32768: 32767\] +- Int32 - \[-2147483648: 2147483647\] +- Int64 - \[-9223372036854775808: 9223372036854775807\] + +## Uint Aralıkları {#uint-ranges} + +- Uİnt8- \[0: 255\] +- Uİnt16- \[0: 65535\] +- Uİnt32- \[0: 4294967295\] +- Uİnt64 - \[0: 18446744073709551615\] + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/int_uint/) diff --git a/docs/tr/sql_reference/data_types/nested_data_structures/index.md b/docs/tr/sql_reference/data_types/nested_data_structures/index.md new file mode 100644 index 00000000000..53a998fa8a8 --- /dev/null +++ b/docs/tr/sql_reference/data_types/nested_data_structures/index.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: "\u0130\xE7 \u0130\xE7e Veri Yap\u0131lar\u0131" +toc_hidden: true +toc_priority: 54 +toc_title: "gizlenmi\u015F" +--- + +# İç İçe Veri Yapıları {#nested-data-structures} + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/nested_data_structures/) diff --git a/docs/tr/sql_reference/data_types/nested_data_structures/nested.md b/docs/tr/sql_reference/data_types/nested_data_structures/nested.md new file mode 100644 index 00000000000..8cd2dc3a558 --- /dev/null +++ b/docs/tr/sql_reference/data_types/nested_data_structures/nested.md @@ -0,0 +1,106 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 57 +toc_title: "\u0130\xE7 \u0130\xE7e (Name1 Type1, Name2 Type2,...)" +--- + +# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} + +A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql_reference/statements/create.md) sorgu. Her tablo satırı, iç içe geçmiş veri yapısındaki herhangi bir sayıda satıra karşılık gelebilir. + +Örnek: + +``` sql +CREATE TABLE test.visits +( + CounterID UInt32, + StartDate Date, + Sign Int8, + IsNew UInt8, + VisitID UInt64, + UserID UInt64, + ... + Goals Nested + ( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32 + ), + ... +) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign) +``` + +Bu örnek bildirir `Goals` dönüşümlerle ilgili verileri içeren iç içe veri yapısı (ulaşılan hedefler). Her satır içinde ‘visits’ tablo sıfır veya dönüşüm herhangi bir sayıda karşılık gelebilir. + +Sadece tek bir yuvalama seviyesi desteklenir. Diziler içeren iç içe geçmiş yapıların sütunları çok boyutlu dizilere eşdeğerdir, bu nedenle sınırlı desteğe sahiptirler (bu sütunları MergeTree altyapısı ile tablolarda depolamak için destek yoktur). + +Çoğu durumda, iç içe geçmiş bir veri yapısıyla çalışırken, sütunları bir nokta ile ayrılmış sütun adlarıyla belirtilir. Bu sütunlar eşleşen türleri bir dizi oluşturur. Tek bir iç içe geçmiş veri yapısının tüm sütun dizileri aynı uzunluğa sahiptir. 
+ +Örnek: + +``` sql +SELECT + Goals.ID, + Goals.EventTime +FROM test.visits +WHERE CounterID = 101500 AND length(Goals.ID) < 5 +LIMIT 10 +``` + +``` text +┌─Goals.ID───────────────────────┬─Goals.EventTime───────────────────────────────────────────────────────────────────────────┐ +│ [1073752,591325,591325] │ ['2014-03-17 16:38:10','2014-03-17 16:38:48','2014-03-17 16:42:27'] │ +│ [1073752] │ ['2014-03-17 00:28:25'] │ +│ [1073752] │ ['2014-03-17 10:46:20'] │ +│ [1073752,591325,591325,591325] │ ['2014-03-17 13:59:20','2014-03-17 22:17:55','2014-03-17 22:18:07','2014-03-17 22:18:51'] │ +│ [] │ [] │ +│ [1073752,591325,591325] │ ['2014-03-17 11:37:06','2014-03-17 14:07:47','2014-03-17 14:36:21'] │ +│ [] │ [] │ +│ [] │ [] │ +│ [591325,1073752] │ ['2014-03-17 00:46:05','2014-03-17 00:46:05'] │ +│ [1073752,591325,591325,591325] │ ['2014-03-17 13:28:33','2014-03-17 13:30:26','2014-03-17 18:51:21','2014-03-17 18:51:45'] │ +└────────────────────────────────┴───────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +İç içe geçmiş bir veri yapısını aynı uzunlukta birden çok sütun dizisi kümesi olarak düşünmek en kolay yoldur. + +Bir SELECT sorgusunun tek tek sütunlar yerine tüm iç içe geçmiş veri yapısının adını belirtebileceği tek yer array JOIN yan tümcesi. Daha fazla bilgi için, bkz. “ARRAY JOIN clause”. Örnek: + +``` sql +SELECT + Goal.ID, + Goal.EventTime +FROM test.visits +ARRAY JOIN Goals AS Goal +WHERE CounterID = 101500 AND length(Goals.ID) < 5 +LIMIT 10 +``` + +``` text +┌─Goal.ID─┬──────Goal.EventTime─┐ +│ 1073752 │ 2014-03-17 16:38:10 │ +│ 591325 │ 2014-03-17 16:38:48 │ +│ 591325 │ 2014-03-17 16:42:27 │ +│ 1073752 │ 2014-03-17 00:28:25 │ +│ 1073752 │ 2014-03-17 10:46:20 │ +│ 1073752 │ 2014-03-17 13:59:20 │ +│ 591325 │ 2014-03-17 22:17:55 │ +│ 591325 │ 2014-03-17 22:18:07 │ +│ 591325 │ 2014-03-17 22:18:51 │ +│ 1073752 │ 2014-03-17 11:37:06 │ +└─────────┴─────────────────────┘ +``` + +İç içe geçmiş veri yapısının tamamı için SELECT gerçekleştiremezsiniz. Yalnızca bir parçası olan tek tek sütunları açıkça listeleyebilirsiniz. + +Bir INSERT sorgusu için, iç içe geçmiş bir veri yapısının tüm bileşen sütun dizilerini ayrı ayrı (tek tek sütun dizileri gibi) iletmelisiniz. Ekleme sırasında, sistem aynı uzunluğa sahip olduklarını kontrol eder. + +Bir tanımlama sorgusu için, iç içe geçmiş bir veri yapısındaki sütunlar aynı şekilde ayrı olarak listelenir. + +İç içe geçmiş bir veri yapısındaki öğeler için ALTER sorgusu sınırlamaları vardır. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/nested_data_structures/nested/) diff --git a/docs/tr/sql_reference/data_types/nullable.md b/docs/tr/sql_reference/data_types/nullable.md new file mode 100644 index 00000000000..1950e2b1b77 --- /dev/null +++ b/docs/tr/sql_reference/data_types/nullable.md @@ -0,0 +1,46 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 54 +toc_title: Nullable +--- + +# Nullable (typename) {#data_type-nullable} + +Özel işaretleyici saklamak için izin verir ([NULL](../../sql_reference/syntax.md)) bu ifade eder “missing value” tarafından izin verilen normal değerlerin yanında `TypeName`. Örneğin, bir `Nullable(Int8)` tipi sütun saklayabilirsiniz `Int8` değerleri yazın ve değeri olmayan satırlar depolayacaktır `NULL`. + +İçin... `TypeName`, bileşik veri türlerini kullanamazsınız [Dizi](array.md) ve [Demet](tuple.md). Bileşik veri türleri şunları içerebilir `Nullable` gibi tür değerleri `Array(Nullable(Int8))`. 
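+
+Örneğin, `NULL` içeren bir dizi hazır bilgisinin eleman türü otomatik olarak `Nullable` olur:
+
+``` sql
+SELECT toTypeName([1, NULL, 2])
+-- Array(Nullable(UInt8))
+```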
+ +A `Nullable` tür alanı tablo dizinlerine dahil edilemez. + +`NULL` herhangi biri için varsayılan değer mi `Nullable` ClickHouse sunucu yapılandırmasında aksi belirtilmediği sürece yazın. + +## Depolama Özellikleri {#storage-features} + +İçermek `Nullable` bir tablo sütunundaki değerleri yazın, ClickHouse ile ayrı bir dosya kullanır `NULL` değerleri ile normal dosyaya ek olarak Maskeler. Maskeli girişleri ClickHouse ayırt izin dosyası `NULL` ve her tablo satırı için karşılık gelen veri türünün varsayılan değeri. Ek bir dosya nedeniyle, `Nullable` sütun, benzer bir normal olana kıyasla ek depolama alanı tüketir. + +!!! info "Not" + Kullanım `Nullable` neredeyse her zaman performansı olumsuz etkiler, veritabanlarınızı tasarlarken bunu aklınızda bulundurun. + +## Kullanım Örneği {#usage-example} + +``` sql +CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog +``` + +``` sql +INSERT INTO t_null VALUES (1, NULL), (2, 3) +``` + +``` sql +SELECT x + y FROM t_null +``` + +``` text +┌─plus(x, y)─┐ +│ ᴺᵁᴸᴸ │ +│ 5 │ +└────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/nullable/) diff --git a/docs/tr/sql_reference/data_types/special_data_types/expression.md b/docs/tr/sql_reference/data_types/special_data_types/expression.md new file mode 100644 index 00000000000..a098b0c6365 --- /dev/null +++ b/docs/tr/sql_reference/data_types/special_data_types/expression.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 58 +toc_title: "\u0130fade" +--- + +# İfade {#expression} + +İfadeler, lambda'ları yüksek mertebeden işlevlerde temsil etmek için kullanılır. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/special_data_types/expression/) diff --git a/docs/tr/sql_reference/data_types/special_data_types/index.md b/docs/tr/sql_reference/data_types/special_data_types/index.md new file mode 100644 index 00000000000..700bf57784b --- /dev/null +++ b/docs/tr/sql_reference/data_types/special_data_types/index.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: "\xD6zel Veri T\xFCrleri" +toc_hidden: true +toc_priority: 55 +toc_title: "gizlenmi\u015F" +--- + +# Özel Veri Türleri {#special-data-types} + +Özel veri türü değerleri Tablo veya çıktı sorgu sonuçlarında kaydetmek için seri hale getirilemez, ancak sorgu yürütme sırasında bir ara sonuç olarak kullanılabilir. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/special_data_types/) diff --git a/docs/tr/sql_reference/data_types/special_data_types/interval.md b/docs/tr/sql_reference/data_types/special_data_types/interval.md new file mode 100644 index 00000000000..e95f5a98cfd --- /dev/null +++ b/docs/tr/sql_reference/data_types/special_data_types/interval.md @@ -0,0 +1,85 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 61 +toc_title: "Aral\u0131kl\u0131" +--- + +# Aralıklı {#data-type-interval} + +Zaman ve Tarih aralıklarını temsil eden veri türleri ailesi. Ortaya çıkan türleri [INTERVAL](../../../sql_reference/operators.md#operator-interval) operatör. + +!!! warning "Uyarıcı" + `Interval` veri türü değerleri tablolarda saklanamaz. + +Yapılı: + +- İmzasız bir tamsayı değeri olarak zaman aralığı. +- Bir aralık türü. 
+ +Desteklenen Aralık türleri: + +- `SECOND` +- `MINUTE` +- `HOUR` +- `DAY` +- `WEEK` +- `MONTH` +- `QUARTER` +- `YEAR` + +Her Aralık türü için ayrı bir veri türü vardır. Örneğin, `DAY` Aralık karşılık gelir `IntervalDay` veri türü: + +``` sql +SELECT toTypeName(INTERVAL 4 DAY) +``` + +``` text +┌─toTypeName(toIntervalDay(4))─┐ +│ IntervalDay │ +└──────────────────────────────┘ +``` + +## Kullanım Açıklamaları {#data-type-interval-usage-remarks} + +Kullanabilirsiniz `Interval`- aritmetik işlemlerde değerler yazın [Tarihli](../../../sql_reference/data_types/date.md) ve [DateTime](../../../sql_reference/data_types/datetime.md)- tip değerleri. Örneğin, geçerli saate 4 gün ekleyebilirsiniz: + +``` sql +SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY +``` + +``` text +┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐ +│ 2019-10-23 10:58:45 │ 2019-10-27 10:58:45 │ +└─────────────────────┴───────────────────────────────┘ +``` + +Farklı tiplere sahip aralıklar birleştirilemez. Gibi aralıklarla kullanamazsınız `4 DAY 1 HOUR`. Aralıkların, örneğin aralığın en küçük birimine eşit veya daha küçük olan birimlerdeki aralıkları belirtin `1 day and an hour` aralık olarak ifade edilebilir `25 HOUR` veya `90000 SECOND`. + +İle aritmetik işlemler yapamazsınız `Interval`- değerleri yazın, ancak farklı türde aralıklar ekleyebilirsiniz. `Date` veya `DateTime` veri türleri. Mesela: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +Aşağıdaki sorgu bir özel duruma neden olur: + +``` sql +select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) +``` + +``` text +Received exception from server (version 19.14.1): +Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. +``` + +## Ayrıca Bakınız {#see-also} + +- [INTERVAL](../../../sql_reference/operators.md#operator-interval) operatör +- [toİnterval](../../../sql_reference/functions/type_conversion_functions.md#function-tointerval) tip dönüştürme işlevleri diff --git a/docs/tr/sql_reference/data_types/special_data_types/nothing.md b/docs/tr/sql_reference/data_types/special_data_types/nothing.md new file mode 100644 index 00000000000..046c36ab047 --- /dev/null +++ b/docs/tr/sql_reference/data_types/special_data_types/nothing.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 60 +toc_title: "Hi\xE7bir \u015Fey" +--- + +# Hiçbir şey {#nothing} + +Bu veri türünün tek amacı, bir değerin beklenmediği durumları temsil etmektir. Yani bir oluşturamazsınız `Nothing` type value. + +Örneğin, literal [NULL](../../../sql_reference/syntax.md#null-literal) türü vardır `Nullable(Nothing)`. Daha fazla görmek [Nullable](../../../sql_reference/data_types/nullable.md). 
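+
+Bu, tür adı sorgulanarak doğrulanabilir:
+
+``` sql
+SELECT toTypeName(NULL)
+-- Nullable(Nothing)
+```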
+ +Bu `Nothing` tür boş dizileri belirtmek için de kullanılabilir: + +``` sql +SELECT toTypeName(array()) +``` + +``` text +┌─toTypeName(array())─┐ +│ Array(Nothing) │ +└─────────────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/special_data_types/nothing/) diff --git a/docs/tr/sql_reference/data_types/special_data_types/set.md b/docs/tr/sql_reference/data_types/special_data_types/set.md new file mode 100644 index 00000000000..f12fac0e56f --- /dev/null +++ b/docs/tr/sql_reference/data_types/special_data_types/set.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 59 +toc_title: Koymak +--- + +# Koymak {#set} + +Sağ yarısı için kullanılan bir [IN](../../../sql_reference/statements/select.md#select-in-operators) ifade. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) diff --git a/docs/tr/sql_reference/data_types/string.md b/docs/tr/sql_reference/data_types/string.md new file mode 100644 index 00000000000..0b7c5918dc7 --- /dev/null +++ b/docs/tr/sql_reference/data_types/string.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 44 +toc_title: Dize +--- + +# Dize {#string} + +Keyfi uzunlukta dizeler. Uzunluk sınırlı değildir. Değer, boş bayt da dahil olmak üzere rasgele bir bayt kümesi içerebilir. +Dize türü türleri değiştirir VARCHAR, BLOB, CLOB, ve diğerleri diğer DBMSs. + +## Kodlamalar {#encodings} + +ClickHouse kodlama kavramına sahip değildir. Dizeler, depolanan ve olduğu gibi çıkan rasgele bir bayt kümesi içerebilir. +Metinleri saklamanız gerekiyorsa, UTF-8 kodlamasını kullanmanızı öneririz. En azından, terminaliniz UTF-8 kullanıyorsa (önerildiği gibi), dönüşüm yapmadan değerlerinizi okuyabilir ve yazabilirsiniz. +Benzer şekilde, dizelerle çalışmak için belirli işlevler, dizenin UTF-8 kodlu bir metni temsil eden bir bayt kümesi içerdiği varsayımı altında çalışan ayrı varyasyonlara sahiptir. +Örneğin, ‘length’ işlev, bayt cinsinden dize uzunluğunu hesaplar; ‘lengthUTF8’ işlev, değerin UTF-8 kodlanmış olduğunu varsayarak Unicode kod noktalarındaki dize uzunluğunu hesaplar. + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/string/) diff --git a/docs/tr/sql_reference/data_types/tuple.md b/docs/tr/sql_reference/data_types/tuple.md new file mode 100644 index 00000000000..64f2b2b2aec --- /dev/null +++ b/docs/tr/sql_reference/data_types/tuple.md @@ -0,0 +1,52 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 53 +toc_title: Tuple (T1, T2, ...) +--- + +# Tuple(t1, T2, …) {#tuplet1-t2} + +Elemanlarının bir demet, her bir birey olması [tür](index.md#data_types). + +Tuples geçici sütun gruplama için kullanılır. Sütunlar, bir sorguda bir In ifadesi kullanıldığında ve lambda işlevlerinin belirli resmi parametrelerini belirtmek için gruplandırılabilir. Daha fazla bilgi için bölümlere bakın [Operatör İNLERDE](../../sql_reference/statements/select.md) ve [Yüksek mertebeden fonksiyonlar](../../sql_reference/functions/higher_order_functions.md). + +Tuples bir sorgunun sonucu olabilir. Bu durumda, json dışındaki metin formatları için değerler köşeli parantez içinde virgülle ayrılır. JSON formatlarında, tuples diziler olarak çıktılanır (köşeli parantez içinde). + +## Bir Tuple oluşturma {#creating-a-tuple} + +Bir tuple oluşturmak için bir işlev kullanabilirsiniz: + +``` sql +tuple(T1, T2, ...) 
+``` + +Bir tuple oluşturma örneği: + +``` sql +SELECT tuple(1,'a') AS x, toTypeName(x) +``` + +``` text +┌─x───────┬─toTypeName(tuple(1, 'a'))─┐ +│ (1,'a') │ Tuple(UInt8, String) │ +└─────────┴───────────────────────────┘ +``` + +## Veri Türleri İle Çalışma {#working-with-data-types} + +Anında bir tuple oluştururken, ClickHouse her bağımsız değişkenin türünü bağımsız değişken değerini depolayabilen türlerin en azı olarak otomatik olarak algılar. Argüman ise [NULL](../../sql_reference/syntax.md#null-literal), tuple elemanının türü [Nullable](nullable.md). + +Otomatik veri türü algılama örneği: + +``` sql +SELECT tuple(1, NULL) AS x, toTypeName(x) +``` + +``` text +┌─x────────┬─toTypeName(tuple(1, NULL))──────┐ +│ (1,NULL) │ Tuple(UInt8, Nullable(Nothing)) │ +└──────────┴─────────────────────────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/tuple/) diff --git a/docs/tr/sql_reference/data_types/uuid.md b/docs/tr/sql_reference/data_types/uuid.md new file mode 100644 index 00000000000..9fb502227be --- /dev/null +++ b/docs/tr/sql_reference/data_types/uuid.md @@ -0,0 +1,77 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 46 +toc_title: UUID +--- + +# UUID {#uuid-data-type} + +Evrensel olarak benzersiz bir tanımlayıcı (UUID), kayıtları tanımlamak için kullanılan 16 baytlık bir sayıdır. UUID hakkında ayrıntılı bilgi için bkz [Vikipedi](https://en.wikipedia.org/wiki/Universally_unique_identifier). + +UUID türü değeri örneği aşağıda temsil edilmektedir: + +``` text +61f0c404-5cb3-11e7-907b-a6006ad3dba0 +``` + +Yeni bir kayıt eklerken UUID sütun değerini belirtmezseniz, UUID değeri sıfır ile doldurulur: + +``` text +00000000-0000-0000-0000-000000000000 +``` + +## Nasıl Oluşturulur {#how-to-generate} + +UUID değerini oluşturmak için ClickHouse, [generateuuıdv4](../../sql_reference/functions/uuid_functions.md) işlev. + +## Kullanım Örneği {#usage-example} + +**Örnek 1** + +Bu örnek, UUID türü sütunuyla bir tablo oluşturma ve tabloya bir değer ekleme gösterir. + +``` sql +CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog +``` + +``` sql +INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +└──────────────────────────────────────┴───────────┘ +``` + +**Örnek 2** + +Bu örnekte, yeni bir kayıt eklerken UUID sütun değeri belirtilmedi. + +``` sql +INSERT INTO t_uuid (y) VALUES ('Example 2') +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ +└──────────────────────────────────────┴───────────┘ +``` + +## Kısıtlama {#restrictions} + +UUID veri türü sadece hangi fonksiyonları destekler [Dize](string.md) veri türü de destekler (örneğin, [dakika](../../sql_reference/aggregate_functions/reference.md#agg_function-min), [maksimum](../../sql_reference/aggregate_functions/reference.md#agg_function-max), ve [sayma](../../sql_reference/aggregate_functions/reference.md#agg_function-count)). 
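+
+Örneğin, yukarıda oluşturulan `t_uuid` tablosu üzerinde küçük bir taslak:
+
+``` sql
+SELECT min(x) FROM t_uuid -- String türünün desteklediği min işlevi UUID için de çalışır
+```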
+ +UUID veri türü aritmetik işlemler tarafından desteklenmez (örneğin, [abs](../../sql_reference/functions/arithmetic_functions.md#arithm_func-abs)) veya toplama fonksiyonları gibi [toplam](../../sql_reference/aggregate_functions/reference.md#agg_function-sum) ve [avg](../../sql_reference/aggregate_functions/reference.md#agg_function-avg). + +[Orijinal makale](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts.md new file mode 100644 index 00000000000..ac85ccd1724 --- /dev/null +++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts.md @@ -0,0 +1,56 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 39 +toc_title: "Genel A\xE7\u0131klama" +--- + +# Dış Söz Dictionarieslükler {#dicts-external-dicts} + +Çeşitli veri kaynaklarından kendi sözlükleri ekleyebilirsiniz. Bir sözlük için veri kaynağı, yerel bir metin veya yürütülebilir dosya, bir HTTP(s) kaynağı veya başka bir DBMS olabilir. Daha fazla bilgi için, bkz. “[Dış sözlükler için kaynaklar](external_dicts_dict_sources.md)”. + +ClickHouse: + +- Sözlükleri RAM'de tamamen veya kısmen saklar. +- Sözlükleri periyodik olarak günceller ve eksik değerleri dinamik olarak yükler. Başka bir deyişle, sözlükler dinamik olarak yüklenebilir. +- Xml dosyaları ile harici sözlükler oluşturmak için izin verir veya [DDL sorguları](../../statements/create.md#create-dictionary-query). + +Dış sözlüklerin yapılandırması bir veya daha fazla xml dosyasında bulunabilir. Yapılandırma yolu belirtilen [dictionaries\_config](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_config) parametre. + +Sözlükler sunucu başlangıçta veya ilk kullanımda, bağlı olarak yüklenebilir [dictionaries\_lazy\_load](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) ayar. + +Sözlük yapılandırma dosyası aşağıdaki biçime sahiptir: + +``` xml + + An optional element with any content. Ignored by the ClickHouse server. + + + /etc/metrika.xml + + + + + + + + +``` + +Yapabilirsin [yapılandırmak](external_dicts_dict.md) aynı dosyada sözlükler herhangi bir sayıda. + +[Sözlükler için DDL sorguları](../../statements/create.md#create-dictionary-query) sunucu yapılandırmasında herhangi bir ek kayıt gerektirmez. Tablolar veya görünümler gibi birinci sınıf varlıklar olarak sözlüklerle çalışmaya izin verirler. + +!!! attention "Dikkat" + Küçük bir sözlük için değerleri, bir `SELECT` sorgu (bkz. [dönüştürmek](../../../sql_reference/functions/other_functions.md) işlev). Bu işlevsellik harici sözlüklerle ilgili değildir. 
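+
+Notta değinilen yaklaşım için, varsayımsal değerlerle küçük bir taslak:
+
+``` sql
+SELECT transform(1, [0, 1], ['sıfır', 'bir'], 'diğer')
+-- 'bir'
+```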
+ +## Ayrıca Bakınız {#ext-dicts-see-also} + +- [Harici bir sözlük yapılandırma](external_dicts_dict.md) +- [Sözlükleri bellekte saklama](external_dicts_dict_layout.md) +- [Sözlük Güncellemeleri](external_dicts_dict_lifetime.md) +- [Dış Sözlüklerin kaynakları](external_dicts_dict_sources.md) +- [Sözlük anahtarı ve alanları](external_dicts_dict_structure.md) +- [Harici Sözlüklerle çalışmak için işlevler](../../../sql_reference/functions/ext_dict_functions.md) + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md new file mode 100644 index 00000000000..b931143903e --- /dev/null +++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -0,0 +1,53 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 40 +toc_title: "Harici bir s\xF6zl\xFCk yap\u0131land\u0131rma" +--- + +# Harici bir sözlük yapılandırma {#dicts-external-dicts-dict} + +Sözlük xml dosyası kullanılarak yapılandırılmışsa, sözlük yapılandırması aşağıdaki yapıya sahiptir: + +``` xml + + dict_name + + + + + + + + + + + + + + + + + +``` + +İlgili [DDL-sorgu](../../statements/create.md#create-dictionary-query) aşağıdaki yapıya sahiptir: + +``` sql +CREATE DICTIONARY dict_name +( + ... -- attributes +) +PRIMARY KEY ... -- complex or single key configuration +SOURCE(...) -- Source configuration +LAYOUT(...) -- Memory layout configuration +LIFETIME(...) -- Lifetime of dictionary in memory +``` + +- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`. +- [kaynaklı](external_dicts_dict_sources.md) — Source of the dictionary. +- [düzen](external_dicts_dict_layout.md) — Dictionary layout in memory. +- [yapılı](external_dicts_dict_structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. +- [ömür](external_dicts_dict_lifetime.md) — Frequency of dictionary updates. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md new file mode 100644 index 00000000000..6945173c529 --- /dev/null +++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -0,0 +1,70 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 45 +toc_title: "Hiyerar\u015Fik s\xF6zl\xFCkler" +--- + +# Hiyerarşik Sözlükler {#hierarchical-dictionaries} + +ClickHouse bir hiyerarşik sözlükler destekler [sayısal tuş](external_dicts_dict_structure.md#ext_dict-numeric-key). + +Aşağıdaki hiyerarşik yapıya bakın: + +``` text +0 (Common parent) +│ +├── 1 (Russia) +│ │ +│ └── 2 (Moscow) +│ │ +│ └── 3 (Center) +│ +└── 4 (Great Britain) + │ + └── 5 (London) +``` + +Bu hiyerarşi aşağıdaki sözlük tablosu olarak ifade edilebilir. + +| region\_id | parent\_region | region\_name | +|------------|----------------|--------------| +| 1 | 0 | Rusya | +| 2 | 1 | Moskova | +| 3 | 2 | Merkezli | +| 4 | 0 | İngiltere | +| 5 | 4 | Londra | + +Bu tablo bir sütun içerir `parent_region` bu öğe için en yakın ebeveynin anahtarını içerir. 
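+
+Örneğin, bu sözlük varsayımsal olarak `regions` adıyla yüklenmişse, bir öğenin üst zinciri aşağıda tanıtılan `dictGetHierarchy` işleviyle alınabilir; küçük bir taslak:
+
+``` sql
+SELECT dictGetHierarchy('regions', toUInt64(3))
+-- [3, 2, 1]: Center → Moscow → Russia
+```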
+ +ClickHouse destekler [hiyerarşik](external_dicts_dict_structure.md#hierarchical-dict-attr) için mülkiyet [dış sözlük](index.md) öznitelik. Bu özellik, yukarıda açıklanana benzer hiyerarşik sözlüğü yapılandırmanıza izin verir. + +Bu [dictGetHierarchy](../../../sql_reference/functions/ext_dict_functions.md#dictgethierarchy) fonksiyonu bir elemanın üst zincir almak için izin verir. + +Örneğimiz için, sözlüğün yapısı aşağıdaki gibi olabilir: + +``` xml + + + + region_id + + + + parent_region + UInt64 + 0 + true + + + + region_name + String + + + + + +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md new file mode 100644 index 00000000000..174d6830839 --- /dev/null +++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -0,0 +1,373 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 41 +toc_title: "S\xF6zl\xFCkleri bellekte saklama" +--- + +# Sözlükleri Bellekte Saklama {#dicts-external-dicts-dict-layout} + +Sözlükleri bellekte saklamanın çeşitli yolları vardır. + +Biz tavsiye [düzlük](#flat), [karıştırıyordu](#dicts-external_dicts_dict_layout-hashed) ve [complex\_key\_hashed](#complex-key-hashed). hangi optimum işleme hızı sağlamak. + +Önbelleğe alma, potansiyel olarak düşük performans ve en uygun parametreleri seçmede zorluklar nedeniyle önerilmez. Bölümünde devamını oku “[önbellek](#cache)”. + +Sözlük performansını artırmanın birkaç yolu vardır: + +- Sonra sözlük ile çalışmak için işlevi çağırın `GROUP BY`. +- Mark enjektif olarak ayıklamak için nitelikler. Farklı öznitelik değerleri farklı anahtarlara karşılık geliyorsa, bir öznitelik ınjective olarak adlandırılır. Yani ne zaman `GROUP BY` anahtar tarafından bir öznitelik değeri getiren bir işlev kullanır, bu işlev otomatik olarak dışarı alınır `GROUP BY`. + +ClickHouse, sözlüklerdeki hatalar için bir istisna oluşturur. Hata örnekleri: + +- Erişilen sözlük yüklenemedi. +- Bir sorgulama hatası `cached` sözlük. + +Sen dış sözlükler ve durumları listesini görüntüleyebilirsiniz `system.dictionaries` Tablo. + +Yapılandırma şöyle görünüyor: + +``` xml + + + ... + + + + + + ... + + +``` + +İlgili [DDL-sorgu](../../statements/create.md#create-dictionary-query): + +``` sql +CREATE DICTIONARY (...) +... +LAYOUT(LAYOUT_TYPE(param value)) -- layout settings +... +``` + +## Sözlükleri Bellekte Saklamanın Yolları {#ways-to-store-dictionaries-in-memory} + +- [düzlük](#flat) +- [karıştırıyordu](#dicts-external_dicts_dict_layout-hashed) +- [sparse\_hashed](#dicts-external_dicts_dict_layout-sparse_hashed) +- [önbellek](#cache) +- [range\_hashed](#range-hashed) +- [complex\_key\_hashed](#complex-key-hashed) +- [complex\_key\_cache](#complex-key-cache) +- [ıp\_trie](#ip-trie) + +### düzlük {#flat} + +Sözlük tamamen düz diziler şeklinde bellekte saklanır. Sözlük ne kadar bellek kullanıyor? Miktar, en büyük anahtarın boyutuyla orantılıdır (kullanılan alanda). + +Sözlük anahtarı vardır `UInt64` yazın ve değeri 500.000 ile sınırlıdır. Sözlük oluştururken daha büyük bir anahtar bulunursa, ClickHouse bir özel durum atar ve sözlüğü oluşturmaz. + +Her türlü kaynak desteklenmektedir. Güncellerken, veriler (bir dosyadan veya bir tablodan) bütünüyle okunur. 
+ +Bu yöntem, sözlüğü saklamak için mevcut tüm yöntemler arasında en iyi performansı sağlar. + +Yapılandırma örneği: + +``` xml + + + +``` + +veya + +``` sql +LAYOUT(FLAT()) +``` + +### karıştırıyordu {#dicts-external_dicts_dict_layout-hashed} + +Sözlük tamamen bir karma tablo şeklinde bellekte saklanır. Sözlük, uygulamada herhangi bir tanımlayıcıya sahip herhangi bir sayıda öğe içerebilir, anahtar sayısı on milyonlarca öğeye ulaşabilir. + +Her türlü kaynak desteklenmektedir. Güncellerken, veriler (bir dosyadan veya bir tablodan) bütünüyle okunur. + +Yapılandırma örneği: + +``` xml + + + +``` + +veya + +``` sql +LAYOUT(HASHED()) +``` + +### sparse\_hashed {#dicts-external_dicts_dict_layout-sparse_hashed} + +Benzer `hashed`, ancak daha fazla CPU kullanımı lehine daha az bellek kullanır. + +Yapılandırma örneği: + +``` xml + + + +``` + +``` sql +LAYOUT(SPARSE_HASHED()) +``` + +### complex\_key\_hashed {#complex-key-hashed} + +Bu tür depolama kompozit ile kullanım içindir [anahtarlar](external_dicts_dict_structure.md). Benzer `hashed`. + +Yapılandırma örneği: + +``` xml + + + +``` + +``` sql +LAYOUT(COMPLEX_KEY_HASHED()) +``` + +### range\_hashed {#range-hashed} + +Sözlük, sıralı bir aralık dizisi ve bunlara karşılık gelen değerleri olan bir karma tablo şeklinde bellekte saklanır. + +Bu depolama yöntemi, hashed ile aynı şekilde çalışır ve anahtara ek olarak tarih/saat (rasgele sayısal tür) aralıklarının kullanılmasına izin verir. + +Örnek: tablo, her reklamveren için biçimdeki indirimleri içerir: + +``` text ++---------|-------------|-------------|------+ +| advertiser id | discount start date | discount end date | amount | ++===============+=====================+===================+========+ +| 123 | 2015-01-01 | 2015-01-15 | 0.15 | ++---------|-------------|-------------|------+ +| 123 | 2015-01-16 | 2015-01-31 | 0.25 | ++---------|-------------|-------------|------+ +| 456 | 2015-01-01 | 2015-01-15 | 0.05 | ++---------|-------------|-------------|------+ +``` + +Tarih aralıkları için bir örnek kullanmak için, `range_min` ve `range_max` element inler [yapılı](external_dicts_dict_structure.md). Bu elemanlar elemanları içermelidir `name` ve`type` (eğer `type` belirtilmemişse, varsayılan tür kullanılır-Tarih). `type` herhangi bir sayısal tür olabilir (Date / DateTime / Uint64 / Int32 / others). + +Örnek: + +``` xml + + + Id + + + first + Date + + + last + Date + + ... +``` + +veya + +``` sql +CREATE DICTIONARY somedict ( + id UInt64, + first Date, + last Date +) +PRIMARY KEY id +LAYOUT(RANGE_HASHED()) +RANGE(MIN first MAX last) +``` + +Bu sözlüklerle çalışmak için, `dictGetT` bir aralığın seçildiği işlev: + +``` sql +dictGetT('dict_name', 'attr_name', id, date) +``` + +Bu işlev belirtilen değerin değerini döndürür `id`s ve geçirilen tarihi içeren tarih aralığı. + +Algoritmanın detayları: + +- Eğer... `id` not fo orund veya a range is not fo aund for the `id`, sözlük için varsayılan değeri döndürür. +- Çakışan aralıklar varsa, herhangi birini kullanabilirsiniz. +- Aralık sınırlayıcı ise `NULL` veya geçersiz bir tarih (örneğin 1900-01-01 veya 2039-01-01), Aralık açık bırakılır. Aralık her iki tarafta da açık olabilir. + +Yapılandırma örneği: + +``` xml + + + + ... 
+ + + + + + + + Abcdef + + + StartTimeStamp + UInt64 + + + EndTimeStamp + UInt64 + + + XXXType + String + + + + + + +``` + +veya + +``` sql +CREATE DICTIONARY somedict( + Abcdef UInt64, + StartTimeStamp UInt64, + EndTimeStamp UInt64, + XXXType String DEFAULT '' +) +PRIMARY KEY Abcdef +RANGE(MIN StartTimeStamp MAX EndTimeStamp) +``` + +### önbellek {#cache} + +Sözlük, sabit sayıda hücre içeren bir önbellekte saklanır. Bu hücreler sık kullanılan elementleri içerir. + +Bir sözlük ararken, önce önbellek aranır. Her veri bloğu için, önbellekte bulunmayan veya güncel olmayan tüm anahtarlar, kaynak kullanılarak istenir `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. Alınan veriler daha sonra önbelleğe yazılır. + +Önbellek sözlükleri için, sona erme [ömür](external_dicts_dict_lifetime.md) önbellekteki verilerin ayarlanabilir. Eğer daha fazla zaman `lifetime` bir hücrede veri yüklenmesinden bu yana geçti, hücrenin değeri kullanılmaz ve bir dahaki sefere kullanılması gerektiğinde yeniden istenir. +Bu, sözlükleri saklamanın tüm yollarından en az etkilidir. Önbelleğin hızı, doğru ayarlara ve kullanım senaryosuna bağlıdır. Bir önbellek türü sözlüğü, yalnızca isabet oranları yeterince yüksek olduğunda (önerilen %99 ve daha yüksek) iyi performans gösterir. Sen ortalama isabet oranı görebilirsiniz `system.dictionaries` Tablo. + +Önbellek performansını artırmak için bir alt sorgu ile kullanın `LIMIT`, ve harici sözlük ile işlevini çağırın. + +Destek [kaynaklılar](external_dicts_dict_sources.md): MySQL, ClickHouse, yürütülebilir, HTTP. + +Ayarlar örneği: + +``` xml + + + + 1000000000 + + +``` + +veya + +``` sql +LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) +``` + +Yeterince büyük bir önbellek boyutu ayarlayın. Sen hücre sayısını seçmek için deneme gerekir: + +1. Bazı değer ayarlayın. +2. Önbellek tamamen doluncaya kadar sorguları çalıştırın. +3. Kullanarak bellek tüketimini değerlendirmek `system.dictionaries` Tablo. +4. Gerekli bellek tüketimine ulaşılana kadar hücre sayısını artırın veya azaltın. + +!!! warning "Uyarıcı" + Rasgele okuma ile sorguları işlemek için yavaş olduğundan, ClickHouse kaynak olarak kullanmayın. + +### complex\_key\_cache {#complex-key-cache} + +Bu tür depolama kompozit ile kullanım içindir [anahtarlar](external_dicts_dict_structure.md). Benzer `cache`. + +### ıp\_trie {#ip-trie} + +Bu tür depolama, ağ öneklerini (IP adresleri) asn gibi meta verilere eşlemek içindir. + +Örnek: tablo, ağ önekleri ve bunlara karşılık gelen sayı ve ülke kodu içerir: + +``` text + +-----------|-----|------+ + | prefix | asn | cca2 | + +=================+=======+========+ + | 202.79.32.0/20 | 17501 | NP | + +-----------|-----|------+ + | 2620:0:870::/48 | 3856 | US | + +-----------|-----|------+ + | 2a02:6b8:1::/48 | 13238 | RU | + +-----------|-----|------+ + | 2001:db8::/32 | 65536 | ZZ | + +-----------|-----|------+ +``` + +Bu tür bir düzen kullanırken, yapının bileşik bir anahtarı olmalıdır. + +Örnek: + +``` xml + + + + prefix + String + + + + asn + UInt32 + + + + cca2 + String + ?? + + ... +``` + +veya + +``` sql +CREATE DICTIONARY somedict ( + prefix String, + asn UInt32, + cca2 String DEFAULT '??' +) +PRIMARY KEY prefix +``` + +Anahtarın izin verilen bir IP öneki içeren yalnızca bir dize türü özniteliği olması gerekir. Diğer türler henüz desteklenmiyor. 
+ +Sorgular için aynı işlevleri kullanmanız gerekir (`dictGetT` bir tuple ile) kompozit tuşları ile sözlükler gelince: + +``` sql +dictGetT('dict_name', 'attr_name', tuple(ip)) +``` + +İşlev ya alır `UInt32` IPv4 için veya `FixedString(16)` IPv6 için: + +``` sql +dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) +``` + +Diğer türler henüz desteklenmiyor. İşlev, bu IP adresine karşılık gelen önek için özniteliği döndürür. Örtüşen önekler varsa, en spesifik olanı döndürülür. + +Veri bir saklanan `trie`. Tamamen RAM'e uyması gerekir. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md new file mode 100644 index 00000000000..d630e9e0128 --- /dev/null +++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -0,0 +1,91 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 42 +toc_title: "S\xF6zl\xFCk G\xFCncellemeleri" +--- + +# Sözlük Güncellemeleri {#dictionary-updates} + +ClickHouse sözlükleri periyodik olarak günceller. Tam olarak karşıdan yüklenen sözlükler için Güncelleştirme aralığı ve önbelleğe alınmış sözlükler için geçersiz kılma aralığı `` saniyeler içinde etiketleyin. + +Sözlük güncelleştirmeleri (ilk kullanım için yükleme dışında) sorguları engellemez. Güncellemeler sırasında, bir sözlüğün eski sürümü kullanılır. Güncelleştirme sırasında bir hata oluşursa, hata sunucu günlüğüne yazılır ve sorgular sözlüklerin eski sürümünü kullanmaya devam eder. + +Ayarlar örneği: + +``` xml + + ... + 300 + ... + +``` + +``` sql +CREATE DICTIONARY (...) +... +LIFETIME(300) +... +``` + +Ayar `0` (`LIFETIME(0)`) söz dictionarieslük .lerin güncel updatinglenmesini engeller. + +Yükseltmeler için bir zaman aralığı ayarlayabilirsiniz ve ClickHouse bu aralıkta eşit rastgele bir zaman seçecektir. Bu, çok sayıda sunucuda yükseltme yaparken yükü sözlük kaynağına dağıtmak için gereklidir. + +Ayarlar örneği: + +``` xml + + ... + + 300 + 360 + + ... + +``` + +veya + +``` sql +LIFETIME(MIN 300 MAX 360) +``` + +Eğer `0` ve `0`, ClickHouse sözlüğü zaman aşımı ile yeniden yüklemez. +Bu durumda, Sözlük yapılandırma dosyası değiştirilmişse veya ClickHouse sözlüğü daha önce yeniden yükleyebilir. `SYSTEM RELOAD DICTIONARY` komut yürütüldü. + +Sözlükleri yükseltirken, ClickHouse sunucusu türüne bağlı olarak farklı mantık uygular [kaynaklı](external_dicts_dict_sources.md): + +Sözlükleri yükseltirken, ClickHouse sunucusu türüne bağlı olarak farklı mantık uygular [kaynaklı](external_dicts_dict_sources.md): + +- Bir metin dosyası için değişiklik zamanını kontrol eder. Zaman önceden kaydedilmiş zaman farklıysa, sözlük güncelleştirilir. +- Myısam tabloları için, değişiklik zamanı bir `SHOW TABLE STATUS` sorgu. +- Diğer kaynaklardan gelen sözlükler varsayılan olarak her zaman güncellenir. + +MySQL (InnoDB), ODBC ve ClickHouse kaynakları için, sözlükleri her seferinde değil, gerçekten değiştiyse güncelleyecek bir sorgu ayarlayabilirsiniz. Bunu yapmak için şu adımları izleyin: + +- Sözlük tablosu, kaynak verileri güncelleştirildiğinde her zaman değişen bir alana sahip olmalıdır. +- Kaynak ayarları, değişen alanı alan bir sorgu belirtmeniz gerekir. 
ClickHouse sunucu sorgu sonucu bir satır olarak yorumlar ve bu satır önceki durumuna göre değişmişse, sözlük güncelleştirilir. Sorguda belirtme `` için ayar fieldlardaki alan [kaynaklı](external_dicts_dict_sources.md). + +Ayarlar örneği: + +``` xml + + ... + + ... + SELECT update_time FROM dictionary_source where id = 1 + + ... + +``` + +veya + +``` sql +... +SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) +... +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md new file mode 100644 index 00000000000..d2ea01c54ce --- /dev/null +++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -0,0 +1,608 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 43 +toc_title: "D\u0131\u015F S\xF6zl\xFCklerin kaynaklar\u0131" +--- + +# Dış Sözlüklerin Kaynakları {#dicts-external-dicts-dict-sources} + +Harici bir sözlük birçok farklı kaynaktan bağlanabilir. + +Sözlük xml dosyası kullanılarak yapılandırılmışsa, yapılandırma şöyle görünür: + +``` xml + + + ... + + + + + + ... + + ... + +``` + +Durumunda [DDL-sorgu](../../statements/create.md#create-dictionary-query), eşit yapılandırma gibi görünüyor olacak: + +``` sql +CREATE DICTIONARY dict_name (...) +... +SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration +... +``` + +Kaynak yapılandırılmış `source` bölme. + +Kaynak türleri (`source_type`): + +- [Yerel dosya](#dicts-external_dicts_dict_sources-local_file) +- [Yürütülebilir dosya](#dicts-external_dicts_dict_sources-executable) +- [HTTP (s)](#dicts-external_dicts_dict_sources-http) +- DBMS + - [ODBC](#dicts-external_dicts_dict_sources-odbc) + - [MySQL](#dicts-external_dicts_dict_sources-mysql) + - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) + - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) + - [Redis](#dicts-external_dicts_dict_sources-redis) + +## Yerel Dosya {#dicts-external_dicts_dict_sources-local_file} + +Ayarlar örneği: + +``` xml + + + /opt/dictionaries/os.tsv + TabSeparated + + +``` + +veya + +``` sql +SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +Ayar alanları: + +- `path` – The absolute path to the file. +- `format` – The file format. All the formats described in “[Biçimliler](../../../interfaces/formats.md#formats)” desteklenir. + +## Yürütülebilir Dosya {#dicts-external_dicts_dict_sources-executable} + +Yürütülebilir dosyalarla çalışmak Aşağıdakilere bağlıdır [sözlük bellekte nasıl saklanır](external_dicts_dict_layout.md). Sözlük kullanılarak saklan theıyorsa `cache` ve `complex_key_cache` ClickHouse, yürütülebilir dosyanın STDIN'SİNE bir istek göndererek gerekli anahtarları ister. Aksi takdirde, clickhouse yürütülebilir dosyayı başlatır ve çıktısını sözlük verileri olarak değerlendirir. + +Ayarlar örneği: + +``` xml + + + cat /opt/dictionaries/os.tsv + TabSeparated + + +``` + +veya + +``` sql +SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +Ayar alanları: + +- `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). +- `format` – The file format. 
+
+## HTTP (s) {#dicts-external_dicts_dict_sources-http}
+
+Bir HTTP (s) sunucusuyla çalışma, [sözlüğün bellekte nasıl saklandığına](external_dicts_dict_layout.md) bağlıdır. Sözlük `cache` ve `complex_key_cache` kullanılarak saklanıyorsa, ClickHouse gerekli anahtarları `POST` yöntemiyle bir istek göndererek ister.
+
+Ayarlar örneği:
+
+``` xml
+<source>
+    <http>
+        <url>http://[::1]/os.tsv</url>
+        <format>TabSeparated</format>
+        <credentials>
+            <user>user</user>
+            <password>password</password>
+        </credentials>
+        <headers>
+            <header>
+                <name>API-KEY</name>
+                <value>key</value>
+            </header>
+        </headers>
+    </http>
+</source>
+```
+
+veya
+
+``` sql
+SOURCE(HTTP(
+    url 'http://[::1]/os.tsv'
+    format 'TabSeparated'
+    credentials(user 'user' password 'password')
+    headers(header(name 'API-KEY' value 'key'))
+))
+```
+
+Clickhouse'un bir HTTPS kaynağına erişebilmesi için sunucu yapılandırmasında [openssl'yi yapılandırmanız](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-openssl) gerekir.
+
+Ayar alanları:
+
+- `url` – The source URL.
+- `format` – The file format. “[Biçimler](../../../interfaces/formats.md#formats)” bölümünde açıklanan tüm biçimler desteklenir.
+- `credentials` – Basic HTTP authentication. Optional parameter.
+    - `user` – Username required for the authentication.
+    - `password` – Password required for the authentication.
+- `headers` – All custom HTTP headers entries used for the HTTP request. Optional parameter.
+    - `header` – Single HTTP header entry.
+    - `name` – Identifier name used for the header sent on the request.
+    - `value` – Value set for a specific identifier name.
+
+## ODBC {#dicts-external_dicts_dict_sources-odbc}
+
+ODBC sürücüsü olan herhangi bir veritabanını bağlamak için bu yöntemi kullanabilirsiniz.
+
+Ayarlar örneği:
+
+``` xml
+<source>
+    <odbc>
+        <db>DatabaseName</db>
+        <table>SchemaName.TableName</table>
+        <connection_string>DSN=some_parameters</connection_string>
+        <invalidate_query>SQL_QUERY</invalidate_query>
+    </odbc>
+</source>
+```
+
+veya
+
+``` sql
+SOURCE(ODBC(
+    db 'DatabaseName'
+    table 'SchemaName.TableName'
+    connection_string 'DSN=some_parameters'
+    invalidate_query 'SQL_QUERY'
+))
+```
+
+Ayar alanları:
+
+- `db` – Name of the database. Omit it if the database name is set in the `<connection_string>` parametresi.
+- `table` – Name of the table and schema if exists.
+- `connection_string` – Connection string.
+- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Sözlükleri güncelleme](external_dicts_dict_lifetime.md).
+
+ClickHouse, tırnaklama sembollerini ODBC sürücüsünden alır ve sorgulardaki tüm ayarları sürücüye iletir; bu nedenle tablo adını, veritabanındaki tablo adının büyük/küçük harf durumuna uygun olarak ayarlamak gerekir.
+
+Oracle kullanırken kodlamalarla ilgili bir sorununuz varsa, ilgili [SSS](../../../faq/general.md#oracle-odbc-encodings) makalesine bakın.
+
+### ODBC Sözlük işlevselliğinin bilinen güvenlik açığı {#known-vulnerability-of-the-odbc-dictionary-functionality}
+
+!!! attention "Dikkat"
+    ODBC sürücüsü üzerinden veritabanına bağlanırken `Servername` bağlantı parametresi ikame edilebilir. Bu durumda `odbc.ini` dosyasındaki `USERNAME` ve `PASSWORD` değerleri uzak sunucuya gönderilir ve tehlikeye girebilir.
+
+**Güvensiz kullanım örneği**
+
+PostgreSQL için unixODBC'yi yapılandıralım. `/etc/odbc.ini` içeriği:
+
+``` text
+[gregtest]
+Driver = /usr/lib/psqlodbca.so
+Servername = localhost
+PORT = 5432
+DATABASE = test_db
+#OPTION = 3
+USERNAME = test
+PASSWORD = test
+```
+
+Daha sonra aşağıdaki gibi bir sorgu yaparsanız
+
+``` sql
+SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db');
+```
+
+ODBC sürücüsü, `odbc.ini` dosyasındaki `USERNAME` ve `PASSWORD` değerlerini `some-server.com` adresine gönderir.
+
+### PostgreSQL Bağlanma Örneği {#example-of-connecting-postgresql}
+
+Ubuntu OS.
+
+PostgreSQL için unixODBC ve ODBC sürücüsünü yükleme:
+
+``` bash
+$ sudo apt-get install -y unixodbc odbcinst odbc-postgresql
+```
+
+`/etc/odbc.ini` (veya `~/.odbc.ini`) dosyasını yapılandırma:
+
+``` text
+[DEFAULT]
+Driver = myconnection
+
+[myconnection]
+Description = PostgreSQL connection to my_db
+Driver = PostgreSQL Unicode
+Database = my_db
+Servername = 127.0.0.1
+UserName = username
+Password = password
+Port = 5432
+Protocol = 9.3
+ReadOnly = No
+RowVersioning = No
+ShowSystemTables = No
+ConnSettings =
+```
+
+Clickhouse'da sözlük yapılandırması:
+
+``` xml
+<yandex>
+    <dictionary>
+        <name>table_name</name>
+        <source>
+            <odbc>
+                <connection_string>DSN=myconnection</connection_string>
+                <table>postgresql_table</table>
+            </odbc>
+        </source>
+        <lifetime>
+            <min>300</min>
+            <max>360</max>
+        </lifetime>
+        <layout>
+            <hashed/>
+        </layout>
+        <structure>
+            <id>
+                <name>id</name>
+            </id>
+            <attribute>
+                <name>some_column</name>
+                <type>UInt64</type>
+                <null_value>0</null_value>
+            </attribute>
+        </structure>
+    </dictionary>
+</yandex>
+```
+
+veya
+
+``` sql
+CREATE DICTIONARY table_name (
+    id UInt64,
+    some_column UInt64 DEFAULT 0
+)
+PRIMARY KEY id
+SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table'))
+LAYOUT(HASHED())
+LIFETIME(MIN 300 MAX 360)
+```
+
+Sürücü kitaplığının tam yolunu belirtmek için `odbc.ini` dosyasını düzenlemeniz gerekebilir: `DRIVER=/usr/local/lib/psqlodbcw.so`.
+
+### MS SQL Server bağlanma örneği {#example-of-connecting-ms-sql-server}
+
+Ubuntu OS.
+
+Sürücüyü yükleme:
+
+``` bash
+$ sudo apt-get install tdsodbc freetds-bin sqsh
+```
+
+Sürücüyü yapılandırma:
+
+``` bash
+$ cat /etc/freetds/freetds.conf
+...
+
+[MSSQL]
+host = 192.168.56.101
+port = 1433
+tds version = 7.0
+client charset = UTF-8
+
+$ cat /etc/odbcinst.ini
+...
+
+[FreeTDS]
+Description = FreeTDS
+Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
+Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so
+FileUsage = 1
+UsageCount = 5
+
+$ cat ~/.odbc.ini
+...
+
+[MSSQL]
+Description = FreeTDS
+Driver = FreeTDS
+Servername = MSSQL
+Database = test
+UID = test
+PWD = test
+Port = 1433
+```
+
+Clickhouse'da sözlüğü yapılandırma:
+
+``` xml
+<yandex>
+    <dictionary>
+        <name>test</name>
+        <source>
+            <odbc>
+                <table>dict</table>
+                <connection_string>DSN=MSSQL;UID=test;PWD=test</connection_string>
+            </odbc>
+        </source>
+
+        <lifetime>
+            <min>300</min>
+            <max>360</max>
+        </lifetime>
+
+        <layout>
+            <flat />
+        </layout>
+
+        <structure>
+            <id>
+                <name>k</name>
+            </id>
+            <attribute>
+                <name>s</name>
+                <type>String</type>
+                <null_value></null_value>
+            </attribute>
+        </structure>
+    </dictionary>
+</yandex>
+```
+
+veya
+
+``` sql
+CREATE DICTIONARY test (
+    k UInt64,
+    s String DEFAULT ''
+)
+PRIMARY KEY k
+SOURCE(ODBC(table 'dict' connection_string 'DSN=MSSQL;UID=test;PWD=test'))
+LAYOUT(FLAT())
+LIFETIME(MIN 300 MAX 360)
+```
+
+## DBMS {#dbms}
+
+### MySQL {#dicts-external_dicts_dict_sources-mysql}
+
+Ayarlar örneği:
+
+``` xml
+<source>
+  <mysql>
+      <port>3306</port>
+      <user>clickhouse</user>
+      <password>qwerty</password>
+      <replica>
+          <host>example01-1</host>
+          <priority>1</priority>
+      </replica>
+      <replica>
+          <host>example01-2</host>
+          <priority>1</priority>
+      </replica>
+      <db>db_name</db>
+      <table>table_name</table>
+      <where>id=10</where>
+      <invalidate_query>SQL_QUERY</invalidate_query>
+  </mysql>
+</source>
+```
+
+veya
+
+``` sql
+SOURCE(MYSQL(
+    port 3306
+    user 'clickhouse'
+    password 'qwerty'
+    replica(host 'example01-1' priority 1)
+    replica(host 'example01-2' priority 1)
+    db 'db_name'
+    table 'table_name'
+    where 'id=10'
+    invalidate_query 'SQL_QUERY'
+))
+```
+
+Ayar alanları:
+
+- `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside `<replica>`).
+
+- `user` – Name of the MySQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
+
+- `password` – Password of the MySQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
+
+- `replica` – Section of replica configurations. There can be multiple sections.
+
+    - `replica/host` – The MySQL host.
+    - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority.
+
+- `db` – Name of the database.
+
+- `table` – Name of the table.
+
+- `where` – The selection criteria. Koşulların sözdizimi, MySQL'deki `WHERE` yan tümcesiyle aynıdır; örneğin `id > 10 AND id < 20`. İsteğe bağlı parametre.
+
+- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Sözlükleri güncelleme](external_dicts_dict_lifetime.md).
+
+MySQL'e, yerel bir ana bilgisayarda yuva (socket) üzerinden bağlanılabilir. Bunu yapmak için `host` ve `socket` ayarlayın.
+
+Ayarlar örneği:
+
+``` xml
+<source>
+  <mysql>
+      <host>localhost</host>
+      <socket>/path/to/socket/file.sock</socket>
+      <user>clickhouse</user>
+      <password>qwerty</password>
+      <db>db_name</db>
+      <table>table_name</table>
+      <where>id=10</where>
+      <invalidate_query>SQL_QUERY</invalidate_query>
+  </mysql>
+</source>
+```
+
+veya
+
+``` sql
+SOURCE(MYSQL(
+    host 'localhost'
+    socket '/path/to/socket/file.sock'
+    user 'clickhouse'
+    password 'qwerty'
+    db 'db_name'
+    table 'table_name'
+    where 'id=10'
+    invalidate_query 'SQL_QUERY'
+))
+```
+
+### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse}
+
+Ayarlar örneği:
+
+``` xml
+<source>
+    <clickhouse>
+        <host>example01-01-1</host>
+        <port>9000</port>
+        <user>default</user>
+        <password></password>
+        <db>default</db>
+        <table>ids</table>
+        <where>id=10</where>
+    </clickhouse>
+</source>
+```
+
+veya
+
+``` sql
+SOURCE(CLICKHOUSE(
+    host 'example01-01-1'
+    port 9000
+    user 'default'
+    password ''
+    db 'default'
+    table 'ids'
+    where 'id=10'
+))
+```
+
+Ayar alanları:
+
+- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. Hata toleransını artırmak için bir [Dağıtılmış](../../../engines/table_engines/special/distributed.md) tablo oluşturabilir ve bunu sonraki yapılandırmalarda kullanabilirsiniz.
+- `port` – The port on the ClickHouse server.
+- `user` – Name of the ClickHouse user.
+- `password` – Password of the ClickHouse user.
+- `db` – Name of the database.
+- `table` – Name of the table.
+- `where` – The selection criteria. May be omitted.
+- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Sözlükleri güncelleme](external_dicts_dict_lifetime.md).
+
+### MongoDB {#dicts-external_dicts_dict_sources-mongodb}
+
+Ayarlar örneği:
+
+``` xml
+<source>
+    <mongodb>
+        <host>localhost</host>
+        <port>27017</port>
+        <user></user>
+        <password></password>
+        <db>test</db>
+        <collection>dictionary_source</collection>
+    </mongodb>
+</source>
+```
+
+veya
+
+``` sql
+SOURCE(MONGO(
+    host 'localhost'
+    port 27017
+    user ''
+    password ''
+    db 'test'
+    collection 'dictionary_source'
+))
+```
+
+Ayar alanları:
+
+- `host` – The MongoDB host.
+- `port` – The port on the MongoDB server.
+- `user` – Name of the MongoDB user.
+- `password` – Password of the MongoDB user.
+- `db` – Name of the database.
+- `collection` – Name of the collection.
+
+### Redis {#dicts-external_dicts_dict_sources-redis}
+
+Ayarlar örneği:
+
+``` xml
+<source>
+    <redis>
+        <host>localhost</host>
+        <port>6379</port>
+        <storage_type>simple</storage_type>
+        <db_index>0</db_index>
+    </redis>
+</source>
+```
+
+veya
+
+``` sql
+SOURCE(REDIS(
+    host 'localhost'
+    port 6379
+    storage_type 'simple'
+    db_index 0
+))
+```
+
+Ayar alanları:
+
+- `host` – The Redis host.
+- `port` – The port on the Redis server.
+- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` basit kaynaklar ve karma tek anahtarlı kaynaklar içindir, `hash_map` iki anahtarlı karma kaynaklar içindir. Aralıklı (ranged) kaynaklar ve karmaşık anahtarlı önbellek kaynakları desteklenmez. İhmal edilebilir; varsayılan değer `simple`.
+- `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/)
diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md
new file mode 100644
index 00000000000..bd404e3b822
--- /dev/null
+++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md
@@ -0,0 +1,175 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 44
+toc_title: "S\xF6zl\xFCk anahtar\u0131 ve alanlar\u0131"
+---
+
+# Sözlük anahtarı ve alanları {#dictionary-key-and-fields}
+
+`<structure>` yan tümcesi, sözlük anahtarını ve sorgular için kullanılabilir alanları açıklar.
+
+XML açıklaması:
+
+``` xml
+<dictionary>
+    <structure>
+        <id>
+            <name>Id</name>
+        </id>
+
+        <attribute>
+            <!-- Attribute parameters -->
+        </attribute>
+
+        ...
+
+    </structure>
+</dictionary>
+```
+
+Nitelikler aşağıdaki öğelerde açıklanır:
+
+- `<id>` — [Anahtar sütun](external_dicts_dict_structure.md#ext_dict_structure-key).
+- `<attribute>` — [Veri sütunu](external_dicts_dict_structure.md#ext_dict_structure-attributes). Birden fazla öznitelik olabilir.
+
+DDL sorgusu:
+
+``` sql
+CREATE DICTIONARY dict_name (
+    Id UInt64,
+    -- attributes
+)
+PRIMARY KEY Id
+...
+```
+
+Öznitelikler sorgu gövdesinde açıklanır:
+
+- `PRIMARY KEY` — [Anahtar sütun](external_dicts_dict_structure.md#ext_dict_structure-key)
+- `AttrName AttrType` — [Veri sütunu](external_dicts_dict_structure.md#ext_dict_structure-attributes). Birden fazla öznitelik olabilir.
+
+## Anahtar {#ext_dict_structure-key}
+
+ClickHouse aşağıdaki anahtar türlerini destekler:
+
+- Sayısal anahtar. `UInt64`. `<id>` etiketinde veya `PRIMARY KEY` anahtar kelimesi kullanılarak tanımlanır.
+- Kompozit anahtar. Farklı türde değerler kümesi. `<key>` etiketinde veya `PRIMARY KEY` anahtar kelimesi kullanılarak tanımlanır.
+
+Bir xml yapısı `<id>` veya `<key>` içerebilir. DDL sorgusu tek bir `PRIMARY KEY` içermelidir.
+
+!!! warning "Uyarı"
+    Anahtarı bir öznitelik olarak tanımlamamalısınız.
+
+### Sayısal Anahtar {#ext_dict-numeric-key}
+
+Tür: `UInt64`.
+
+Yapılandırma örneği:
+
+``` xml
+<id>
+    <name>Id</name>
+</id>
+```
+
+Yapılandırma alanları:
+
+- `name` – The name of the column with keys.
+
+DDL sorgusu için:
+
+``` sql
+CREATE DICTIONARY (
+    Id UInt64,
+    ...
+)
+PRIMARY KEY Id
+...
+```
+
+- `PRIMARY KEY` – The name of the column with keys.
+
+### Kompozit Anahtar {#composite-key}
+
+Anahtar, herhangi bir alan türünden oluşan bir `tuple` olabilir. Bu durumda [düzen](external_dicts_dict_layout.md) `complex_key_hashed` veya `complex_key_cache` olmalıdır.
+
+!!! tip "İpucu"
+    Bileşik bir anahtar tek bir öğeden oluşabilir. Bu, örneğin bir dizeyi anahtar olarak kullanmayı mümkün kılar.
+
+Anahtar yapısı `<key>` öğesinde ayarlanır. Anahtar alanlar, sözlük [öznitelikleriyle](external_dicts_dict_structure.md) aynı biçimde belirtilir. Örnek:
+
+``` xml
+<structure>
+    <key>
+        <attribute>
+            <name>field1</name>
+            <type>String</type>
+        </attribute>
+        <attribute>
+            <name>field2</name>
+            <type>UInt32</type>
+        </attribute>
+        ...
+    </key>
+...
+```
+
+veya
+
+``` sql
+CREATE DICTIONARY (
+    field1 String,
+    field2 String
+    ...
+)
+PRIMARY KEY field1, field2
+...
+```
+
+Bir `dictGet*` fonksiyonuna yapılan sorguda, anahtar olarak bir tuple geçirilir. Örnek: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`.
+
+## Öznitelik {#ext_dict_structure-attributes}
+
+Yapılandırma örneği:
+
+``` xml
+<structure>
+    ...
+    <attribute>
+        <name>Name</name>
+        <type>ClickHouseDataType</type>
+        <null_value></null_value>
+        <expression>rand64()</expression>
+        <hierarchical>true</hierarchical>
+        <injective>true</injective>
+        <is_object_id>true</is_object_id>
+    </attribute>
+</structure>
+```
+
+veya
+
+``` sql
+CREATE DICTIONARY somename (
+    Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID
+)
+```
+
+Yapılandırma alanları:
+
+| Etiket | Açıklama | Gerekli |
+|--------|----------|---------|
+| `name` | Sütun adı. | Evet |
+| `type` | ClickHouse veri türü.<br/>ClickHouse, sözlükten gelen değeri belirtilen veri türüne dönüştürmeye çalışır. Örneğin, alan MySQL kaynak tablosunda `TEXT`, `VARCHAR` veya `BLOB` olabilir, ancak Clickhouse'da `String` olarak yüklenebilir.<br/>[Nullable](../../../sql_reference/data_types/nullable.md) desteklenmiyor. | Evet |
+| `null_value` | Var olmayan bir öğe için varsayılan değer.<br/>Örnekte boş bir dizedir. Bu alanda `NULL` kullanamazsınız. | Evet |
+| `expression` | Clickhouse'un değer üzerinde yürüttüğü [ifade](../../syntax.md#syntax-expressions).<br/>İfade, uzak SQL veritabanında bir sütun adı olabilir. Bu nedenle, uzak sütun için bir takma ad oluşturmak amacıyla kullanılabilir.<br/><br/>Varsayılan değer: ifade yok. | Hayır |
+| `hierarchical` | `true` ise, öznitelik geçerli anahtar için bir üst anahtarın değerini içerir. Bkz. [Hiyerarşik Sözlükler](external_dicts_dict_hierarchical.md).<br/><br/>Varsayılan değer: `false`. | Hayır |
+| `injective` | `id -> attribute` eşlemesinin [enjektif](https://en.wikipedia.org/wiki/Injective_function) olup olmadığını gösteren bayrak.<br/>`true` ise, ClickHouse `GROUP BY` yan tümceli sorgularda sözlüklere yapılan istekleri otomatik olarak yeniden düzenleyebilir; bu genellikle bu tür isteklerin sayısını önemli ölçüde azaltır.<br/><br/>Varsayılan değer: `false`. | Hayır |
+| `is_object_id` | Sorgunun bir MongoDB belgesi için `ObjectID` üzerinden yürütülüp yürütülmediğini gösteren bayrak.<br/><br/>Varsayılan değer: `false`. | Hayır |
+
+## Ayrıca Bakınız {#see-also}
+
+- [Harici sözlüklerle çalışmak için işlevler](../../../sql_reference/functions/ext_dict_functions.md).
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/)
diff --git a/docs/tr/sql_reference/dictionaries/external_dictionaries/index.md b/docs/tr/sql_reference/dictionaries/external_dictionaries/index.md
new file mode 100644
index 00000000000..d5eaa1e55b6
--- /dev/null
+++ b/docs/tr/sql_reference/dictionaries/external_dictionaries/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "D\u0131\u015F S\xF6zl\xFCkler"
+toc_priority: 37
+---
+
+
diff --git a/docs/tr/sql_reference/dictionaries/index.md b/docs/tr/sql_reference/dictionaries/index.md
new file mode 100644
index 00000000000..a8a6c3605b4
--- /dev/null
+++ b/docs/tr/sql_reference/dictionaries/index.md
@@ -0,0 +1,22 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "S\xF6zl\xFCkler"
+toc_priority: 35
+toc_title: "Giri\u015F"
+---
+
+# Sözlükler {#dictionaries}
+
+Bir sözlük, çeşitli türlerde referans listeleri için uygun bir eşlemedir (`key -> attributes`).
+
+ClickHouse, sorgularda kullanılabilecek sözlüklerle çalışmak için özel işlevleri destekler. Sözlükleri işlevlerle kullanmak, referans tablolarıyla `JOIN` yapmaktan daha kolay ve daha verimlidir.
+
+[NULL](../syntax.md#null) değerler sözlükte saklanamaz.
+
+ClickHouse şunları destekler:
+
+- Özel bir [fonksiyon kümesine](../../sql_reference/functions/ym_dict_functions.md) sahip [dahili sözlükler](internal_dicts.md#internal_dicts).
+- Kendi [fonksiyon kümesine](../../sql_reference/functions/ext_dict_functions.md) sahip [eklenti (harici) sözlükler](external_dictionaries/external_dicts.md).
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/)
diff --git a/docs/tr/sql_reference/dictionaries/internal_dicts.md b/docs/tr/sql_reference/dictionaries/internal_dicts.md
new file mode 100644
index 00000000000..b7a1d46207c
--- /dev/null
+++ b/docs/tr/sql_reference/dictionaries/internal_dicts.md
@@ -0,0 +1,55 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 39
+toc_title: "\u0130\xE7 S\xF6zl\xFCkler"
+---
+
+# İç sözlükler {#internal_dicts}
+
+ClickHouse, bir geobase ile çalışmak için yerleşik bir özellik içerir.
+
+Bu size şunları sağlar:
+
+- Bölgenin adını istediğiniz dilde almak için bölge kimliğini kullanma.
+- Bir şehrin, bölgenin, federal bölgenin, ülkenin veya kıtanın kimliğini almak için bölge kimliğini kullanma.
+- Bir bölgenin başka bir bölgenin parçası olup olmadığını kontrol etme.
+- Üst bölgeler zincirini alma.
+
+Tüm fonksiyonlar “translocality”yi, yani bölge sahipliğine ilişkin farklı bakış açılarını aynı anda kullanma yeteneğini destekler. Daha fazla bilgi için “Functions for working with Yandex.Metrica dictionaries” bölümüne bakın.
+
+İç sözlükler varsayılan pakette devre dışıdır.
+Bunları etkinleştirmek için, sunucu yapılandırma dosyasında `path_to_regions_hierarchy_file` ve `path_to_regions_names_files` parametrelerinin açıklamasını kaldırın (uncomment).
+
+Geobase metin dosyalarından yüklenir.
+
+`regions_hierarchy*.txt` dosyalarını `path_to_regions_hierarchy_file` tarafından belirtilen dizine yerleştirin.
+Bu yapılandırma parametresi `regions_hierarchy.txt` dosyasının (varsayılan bölgesel hiyerarşi) yolunu içermelidir; diğer dosyalar (`regions_hierarchy_ua.txt`) aynı dizinde bulunmalıdır.
+
+`regions_names_*.txt` dosyalarını `path_to_regions_names_files` dizinine koyun.
+
+Bu dosyaları kendiniz de oluşturabilirsiniz. Dosya biçimi aşağıdaki gibidir:
+
+`regions_hierarchy*.txt`: TabSeparated (başlık yok), sütunlar:
+
+- bölge kimliği (`UInt32`)
+- üst bölge kimliği (`UInt32`)
+- bölge tipi (`UInt8`): 1 - kıta, 3 - ülke, 4 - federal bölge, 5 - bölge, 6 - şehir; diğer türlerin değeri yoktur
+- nüfus (`UInt32`) — optional column
+
+`regions_names_*.txt`: TabSeparated (başlık yok), sütunlar:
+
+- bölge kimliği (`UInt32`)
+- bölge adı (`String`) — Can't contain tabs or line feeds, even escaped ones.
+
+RAM'de depolamak için düz bir dizi kullanılır. Bu nedenle kimlikler bir milyondan fazla olmamalıdır.
+
+Sözlükler sunucuyu yeniden başlatmadan güncellenebilir. Ancak, kullanılabilir sözlükler kümesi güncellenmez.
+Güncellemeler için dosya değiştirme süreleri kontrol edilir. Bir dosya değiştiyse sözlük güncellenir.
+Değişikliklerin kontrol aralığı `builtin_dictionaries_reload_interval` parametresiyle yapılandırılır.
+Sözlük güncellemeleri (ilk kullanımda yükleme dışında) sorguları engellemez. Güncellemeler sırasında sorgular sözlüklerin eski sürümlerini kullanır. Güncelleme sırasında bir hata oluşursa, hata sunucu günlüğüne yazılır ve sorgular sözlüklerin eski sürümünü kullanmaya devam eder.
+
+Sözlükleri geobase ile periyodik olarak güncellemenizi öneririz. Bir güncelleme sırasında yeni dosyalar oluşturun ve bunları ayrı bir konuma yazın. Her şey hazır olduğunda, bunları sunucu tarafından kullanılan dosyaların adlarına yeniden adlandırın.
+
+OS tanımlayıcıları ve Yandex.Metrica arama motorlarıyla çalışmak için işlevler de vardır, ancak bunlar kullanılmamalıdır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/)
diff --git a/docs/tr/sql_reference/functions/arithmetic_functions.md b/docs/tr/sql_reference/functions/arithmetic_functions.md
new file mode 100644
index 00000000000..594e9bd4699
--- /dev/null
+++ b/docs/tr/sql_reference/functions/arithmetic_functions.md
@@ -0,0 +1,87 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 35
+toc_title: Aritmetik
+---
+
+# Aritmetik fonksiyonlar {#arithmetic-functions}
+
+Tüm aritmetik işlevler için sonuç türü, böyle bir tür varsa, sonucun sığdığı en küçük sayı türü olarak hesaplanır. Minimum; bit sayısına, işaretli olup olmadığına ve kayan nokta olup olmadığına göre aynı anda belirlenir. Yeterli bit yoksa, en yüksek bit sayılı tür alınır.
+
+Örnek:
+
+``` sql
+SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 + 0 + 0)
+```
+
+``` text
+┌─toTypeName(0)─┬─toTypeName(plus(0, 0))─┬─toTypeName(plus(plus(0, 0), 0))─┬─toTypeName(plus(plus(plus(0, 0), 0), 0))─┐
+│ UInt8         │ UInt16                 │ UInt32                          │ UInt64                                   │
+└───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘
+```
+
+Aritmetik fonksiyonlar, UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32 veya Float64 türlerinden herhangi bir çift için çalışır.
+
+Taşma, C++ ile aynı şekilde üretilir.
+
+## artı (a, b), A + B operatörü {#plusa-b-a-b-operator}
+
+Sayıların toplamını hesaplar.
+Ayrıca bir tarihe veya zamanlı tarihe tamsayılar ekleyebilirsiniz. Bir tarih için bir tamsayı eklemek, karşılık gelen gün sayısını eklemek anlamına gelir; zamanlı bir tarih için ise karşılık gelen saniye sayısını eklemek anlamına gelir.
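+
+Tarih aritmetiğini somutlaştırmak için küçük bir örnek (tarih değerleri yalnızca örnek amaçlıdır):
+
+``` sql
+SELECT
+    toDate('2020-03-01') + 5 AS bes_gun_sonra,                     -- gün ekler: 2020-03-06
+    toDateTime('2020-03-01 00:00:00') + 30 AS otuz_saniye_sonra    -- saniye ekler: 2020-03-01 00:00:30
+```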
+
+## eksi (a, b), A - B operatörü {#minusa-b-a-b-operator}
+
+Farkı hesaplar. Sonuç her zaman işaretlidir.
+
+You can also calculate integer numbers from a date or date with time. The idea is the same – see above for ‘plus’.
+
+## çarpma (a, b), A \* B operatörü {#multiplya-b-a-b-operator}
+
+Sayıların çarpımını hesaplar.
+
+## böl (a, b), A / B operatörü {#dividea-b-a-b-operator}
+
+Sayıların bölümünü hesaplar. Sonuç türü her zaman bir kayan nokta türüdür.
+Bu, tamsayı bölmesi değildir. Tamsayı bölmesi için ‘intDiv’ işlevini kullanın.
+Sıfıra bölündüğünde ‘inf’, ‘-inf’ veya ‘nan’ elde edersiniz.
+
+## intDiv(a, b) {#intdiva-b}
+
+Sayıların bölümünü hesaplar. Tamsayıya, aşağıya doğru (mutlak değere göre) yuvarlayarak böler.
+Sıfıra bölünürken veya en küçük negatif sayı eksi bire bölünürken bir istisna atılır.
+
+## intDivOrZero(a, b) {#intdivorzeroa-b}
+
+‘intDiv’ işlevinden farkı, sıfıra bölünürken veya en küçük negatif sayı eksi bire bölünürken sıfır döndürmesidir.
+
+## modulo (a, b), A % B operatörü {#moduloa-b-a-b-operator}
+
+Bölmeden sonra kalanı hesaplar.
+Bağımsız değişkenler kayan nokta sayılarıysa, ondalık kısımları atılarak önceden tamsayılara dönüştürülürler.
+Kalan, C++ ile aynı anlamda alınır. Negatif sayılar için kesik bölme (truncated division) kullanılır.
+Sıfıra bölünürken veya en küçük negatif sayı eksi bire bölünürken bir istisna atılır.
+
+## moduloOrZero(a, b) {#moduloorzeroa-b}
+
+‘modulo’ işlevinden farkı, bölen sıfır olduğunda sıfır döndürmesidir.
+
+## negate (a), -a operatörü {#negatea-a-operator}
+
+Bir sayıyı ters işaretle hesaplar. Sonuç her zaman işaretlidir.
+
+## abs (a) {#arithm_func-abs}
+
+a sayısının mutlak değerini hesaplar. Yani, a \< 0 ise -a döndürür. İşaretsiz türler için hiçbir şey yapmaz. İşaretli tamsayı türleri için işaretsiz bir sayı döndürür.
+
+## gcd (a, b) {#gcda-b}
+
+Sayıların en büyük ortak bölenini döndürür.
+Sıfıra bölünürken veya en küçük negatif sayı eksi bire bölünürken bir istisna atılır.
+
+## lcm(a, b) {#lcma-b}
+
+Sayıların en küçük ortak katını döndürür.
+Sıfıra bölünürken veya en küçük negatif sayı eksi bire bölünürken bir istisna atılır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/)
diff --git a/docs/tr/sql_reference/functions/array_functions.md b/docs/tr/sql_reference/functions/array_functions.md
new file mode 100644
index 00000000000..9e3221435ed
--- /dev/null
+++ b/docs/tr/sql_reference/functions/array_functions.md
@@ -0,0 +1,1061 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 46
+toc_title: "Dizilerle \xE7al\u0131\u015Fma"
+---
+
+# Dizilerle çalışmak için işlevler {#functions-for-working-with-arrays}
+
+## boş {#function-empty}
+
+Boş bir dizi için 1, boş olmayan bir dizi için 0 döndürür.
+Sonuç türü UInt8'dir.
+İşlev ayrıca dizeler için de çalışır.
+
+## notEmpty {#function-notempty}
+
+Boş bir dizi için 0, boş olmayan bir dizi için 1 döndürür.
+Sonuç türü UInt8'dir.
+İşlev ayrıca dizeler için de çalışır.
+
+## uzunluk {#array_functions-length}
+
+Dizideki öğe sayısını döndürür.
+Sonuç türü UInt64'tür.
+İşlev ayrıca dizeler için de çalışır.
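+
+Yukarıdaki üç işlevin davranışını gösteren küçük bir örnek:
+
+``` sql
+SELECT
+    empty([]) AS bos_mu,               -- 1 döndürür
+    notEmpty([1, 2]) AS dolu_mu,       -- 1 döndürür
+    length([1, 2, 3]) AS uzunluk,      -- 3 döndürür
+    length('merhaba') AS dize_uzunlugu -- 7 döndürür (bayt cinsinden)
+```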
+
+## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64}
+
+## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64}
+
+## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64}
+
+## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime}
+
+## emptyArrayString {#emptyarraystring}
+
+Sıfır bağımsız değişken kabul eder ve uygun türde boş bir dizi döndürür.
+
+## emptyArrayToSingle {#emptyarraytosingle}
+
+Boş bir dizi kabul eder ve varsayılan değere eşit tek öğeli bir dizi döndürür.
+
+## Aralık (bitiş), Aralık(başlangıç, bitiş \[, adım\]) {#rangeend-rangestart-end-step}
+
+Başlangıçtan bitişe kadar (bitiş hariç) 1 adımla sayıların bir dizisini döndürür.
+`start` argümanı belirtilmemişse, varsayılan değeri 0'dır.
+`step` argümanı belirtilmemişse, varsayılan değeri 1'dir.
+Neredeyse pythonic `range` gibi davranır. Ancak fark, tüm argümanların `UInt` sayı türünde olması gerektiğidir.
+Bir veri bloğunda toplam uzunluğu 100.000.000'den fazla öğe olan diziler oluşturulursa, bir istisna atılır.
+
+## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1}
+
+İşlev, bağımsız değişkenlerinden bir dizi oluşturur.
+Bağımsız değişkenler sabit olmalı ve en küçük ortak türe sahip türlerde olmalıdır. En az bir bağımsız değişken geçirilmelidir; aksi takdirde hangi türde dizi oluşturulacağı belirsizdir. Yani, boş bir dizi oluşturmak için bu işlevi kullanamazsınız (bunun için yukarıda açıklanan ‘emptyArray\*’ fonksiyonlarını kullanın).
+Geçirilen bağımsız değişkenlerin en küçük ortak türü ‘T’ olmak üzere, ‘Array(T)’ türünde bir sonuç döndürür.
+
+## arrayConcat {#arrayconcat}
+
+Argüman olarak geçirilen dizileri birleştirir.
+
+``` sql
+arrayConcat(arrays)
+```
+
+**Parametre**
+
+- `arrays` – Arbitrary number of arguments of [Dizi](../../sql_reference/data_types/array.md) tür.
+
+**Örnek**
+
+``` sql
+SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res
+```
+
+``` text
+┌─res───────────┐
+│ [1,2,3,4,5,6] │
+└───────────────┘
+```
+
+## arrayElement (arr, n), operatör arr\[n\] {#arrayelementarr-n-operator-arrn}
+
+`arr` dizisinden `n` indeksli öğeyi alır. `n` herhangi bir tamsayı türünde olmalıdır.
+Bir dizideki indeksler birden başlar.
+Negatif indeksler desteklenir. Bu durumda, sondan numaralandırılmış ilgili öğeyi seçer. Örneğin, `arr[-1]` dizideki son öğedir.
+
+İndeks bir dizinin sınırlarının dışına düşerse, varsayılan bir değer döndürür (sayılar için 0, dizeler için boş bir dize vb.); sabit olmayan bir dizi ile sabit indeksin 0 olduğu durum hariçtir (bu durumda `Array indices are 1-based` hatası oluşur).
+
+## has (arr, elem) {#hasarr-elem}
+
+‘arr’ dizisinin ‘elem’ öğesini içerip içermediğini denetler.
+Öğe dizide değilse 0, varsa 1 döndürür.
+
+`NULL` bir değer olarak işlenir.
+
+``` sql
+SELECT has([1, 2, NULL], NULL)
+```
+
+``` text
+┌─has([1, 2, NULL], NULL)─┐
+│                       1 │
+└─────────────────────────┘
+```
+
+## hasAll {#hasall}
+
+Bir dizinin başka bir dizinin alt kümesi olup olmadığını denetler.
+
+``` sql
+hasAll(set, subset)
+```
+
+**Parametre**
+
+- `set` – Array of any type with a set of elements.
+- `subset` – Array of any type with elements that should be tested to be a subset of `set`.
+
+**Dönüş değerleri**
+
+- `1`, `set` dizisi `subset` dizisinin tüm öğelerini içeriyorsa.
+- `0`, aksi halde.
+
+**Tuhaf özellikler**
+
+- Boş bir dizi, herhangi bir dizinin alt kümesidir.
+- `Null` bir değer olarak işlenir.
+- Her iki dizideki değerlerin sırası önemli değildir.
+
+**Örnekler**
+
+`SELECT hasAll([], [])` `1` döndürür.
+
+`SELECT hasAll([1, Null], [Null])` `1` döndürür.
+
+`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` `1` döndürür.
+
+`SELECT hasAll(['a', 'b'], ['a'])` `1` döndürür.
+
+`SELECT hasAll([1], ['a'])` `0` döndürür.
+
+`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` `0` döndürür.
+
+## hasAny {#hasany}
+
+İki dizinin ortak öğeleri olup olmadığını kontrol eder.
+
+``` sql
+hasAny(array1, array2)
+```
+
+**Parametre**
+
+- `array1` – Array of any type with a set of elements.
+- `array2` – Array of any type with a set of elements.
+
+**Dönüş değerleri**
+
+- `1`, `array1` ve `array2` en az bir ortak öğeye sahipse.
+- `0`, aksi halde.
+
+**Tuhaf özellikler**
+
+- `Null` bir değer olarak işlenir.
+- Her iki dizideki değerlerin sırası önemli değildir.
+
+**Örnekler**
+
+`SELECT hasAny([1], [])` `0` döndürür.
+
+`SELECT hasAny([Null], [Null, 1])` `1` döndürür.
+
+`SELECT hasAny([-128, 1., 512], [1])` `1` döndürür.
+
+`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` `0` döndürür.
+
+`SELECT hasAny([[1, 2], [3, 4]], [[1, 2], [1, 2]])` `1` döndürür.
+
+## indexOf(arr, x) {#indexofarr-x}
+
+Dizideki ilk ‘x’ öğesinin indeksini (1'den başlayarak) döndürür; öğe dizide yoksa 0 döndürür.
+
+Örnek:
+
+``` sql
+SELECT indexOf([1, 3, NULL, NULL], NULL)
+```
+
+``` text
+┌─indexOf([1, 3, NULL, NULL], NULL)─┐
+│                                 3 │
+└───────────────────────────────────┘
+```
+
+`NULL` olarak ayarlanan öğeler normal değerler olarak ele alınır.
+
+## countEqual(arr, x) {#countequalarr-x}
+
+Dizide x'e eşit öğelerin sayısını döndürür. `arrayCount(elem -> elem = x, arr)` ile eşdeğerdir.
+
+`NULL` öğeler ayrı değerler olarak işlenir.
+
+Örnek:
+
+``` sql
+SELECT countEqual([1, 2, NULL, NULL], NULL)
+```
+
+``` text
+┌─countEqual([1, 2, NULL, NULL], NULL)─┐
+│                                    2 │
+└──────────────────────────────────────┘
+```
+
+## arrayEnumerate(arr) {#array_functions-arrayenumerate}
+
+Returns the array \[1, 2, 3, …, length (arr) \]
+
+Bu işlev normalde ARRAY JOIN ile kullanılır. ARRAY JOIN uygulandıktan sonra her dizi için yalnızca bir kez bir şey saymaya izin verir. Örnek:
+
+``` sql
+SELECT
+    count() AS Reaches,
+    countIf(num = 1) AS Hits
+FROM test.hits
+ARRAY JOIN
+    GoalsReached,
+    arrayEnumerate(GoalsReached) AS num
+WHERE CounterID = 160656
+LIMIT 10
+```
+
+``` text
+┌─Reaches─┬──Hits─┐
+│   95606 │ 31406 │
+└─────────┴───────┘
+```
+
+Bu örnekte Reaches dönüşümlerin sayısı (ARRAY JOIN uygulandıktan sonra alınan satırlar), Hits ise sayfa görüntüleme sayısıdır (ARRAY JOIN öncesindeki satırlar). Bu özel durumda aynı sonucu daha kolay bir şekilde alabilirsiniz:
+
+``` sql
+SELECT
+    sum(length(GoalsReached)) AS Reaches,
+    count() AS Hits
+FROM test.hits
+WHERE (CounterID = 160656) AND notEmpty(GoalsReached)
+```
+
+``` text
+┌─Reaches─┬──Hits─┐
+│   95606 │ 31406 │
+└─────────┴───────┘
+```
+
+Bu fonksiyon aynı zamanda yüksek mertebeden fonksiyonlarda da kullanılabilir. Örneğin, bir koşulla eşleşen öğelerin dizi indekslerini almak için kullanabilirsiniz.
+
+## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr}
+
+Kaynak diziyle aynı boyutta bir dizi döndürür; her öğe için, aynı değere sahip öğeler arasında kaçıncı konumda olduğunu gösterir.
+Örneğin: arrayEnumerateUniq(\[10, 20, 10, 30\]) = \[1, 1, 2, 1\].
+
+Bu işlev, ARRAY JOIN ile dizi öğelerinin toplanması birlikte kullanılırken kullanışlıdır.
+Örnek: + +``` sql +SELECT + Goals.ID AS GoalID, + sum(Sign) AS Reaches, + sumIf(Sign, num = 1) AS Visits +FROM test.visits +ARRAY JOIN + Goals, + arrayEnumerateUniq(Goals.ID) AS num +WHERE CounterID = 160656 +GROUP BY GoalID +ORDER BY Reaches DESC +LIMIT 10 +``` + +``` text +┌──GoalID─┬─Reaches─┬─Visits─┐ +│ 53225 │ 3214 │ 1097 │ +│ 2825062 │ 3188 │ 1097 │ +│ 56600 │ 2803 │ 488 │ +│ 1989037 │ 2401 │ 365 │ +│ 2830064 │ 2396 │ 910 │ +│ 1113562 │ 2372 │ 373 │ +│ 3270895 │ 2262 │ 812 │ +│ 1084657 │ 2262 │ 345 │ +│ 56599 │ 2260 │ 799 │ +│ 3271094 │ 2256 │ 812 │ +└─────────┴─────────┴────────┘ +``` + +Bu örnekte, her hedef kimliğinin dönüşüm sayısı (hedefler iç içe geçmiş veri yapısındaki her öğe, bir dönüşüm olarak adlandırdığımız ulaşılan bir hedeftir) ve oturum sayısı Hesaplaması vardır. ARRAY JOİN olmadan, oturum sayısını sum(Sign) olarak sayardık. Ancak bu özel durumda, satırlar iç içe geçmiş hedefler yapısıyla çarpıldı, bu nedenle her oturumu bir kez saymak için arrayenumerateuniq değerine bir koşul uyguluyoruz(Goals.ID) fonksiyonu. + +Arrayenumerateuniq işlevi, bağımsız değişkenlerle aynı boyutta birden çok dizi alabilir. Bu durumda, tüm dizilerde aynı konumlardaki elemanların tuplesleri için benzersizlik düşünülür. + +``` sql +SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res +``` + +``` text +┌─res───────────┐ +│ [1,2,1,1,2,1] │ +└───────────────┘ +``` + +Bu, iç içe geçmiş bir veri yapısı ve bu yapıdaki birden çok öğe arasında daha fazla toplama ile dizi birleşimini kullanırken gereklidir. + +## arrayPopBack {#arraypopback} + +Son öğeyi diziden kaldırır. + +``` sql +arrayPopBack(array) +``` + +**Parametre** + +- `array` – Array. + +**Örnek** + +``` sql +SELECT arrayPopBack([1, 2, 3]) AS res +``` + +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` + +## arrayPopFront {#arraypopfront} + +İlk öğeyi diziden kaldırır. + +``` sql +arrayPopFront(array) +``` + +**Parametre** + +- `array` – Array. + +**Örnek** + +``` sql +SELECT arrayPopFront([1, 2, 3]) AS res +``` + +``` text +┌─res───┐ +│ [2,3] │ +└───────┘ +``` + +## arrayPushBack {#arraypushback} + +Dizinin sonuna bir öğe ekler. + +``` sql +arrayPushBack(array, single_value) +``` + +**Parametre** + +- `array` – Array. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` dizinin veri türü için yazın. Clickhouse'daki veri türleri hakkında daha fazla bilgi için bkz. “[Veri türleri](../../sql_reference/data_types/index.md#data_types)”. Olabilir `NULL`. Fonksiyon bir ekler `NULL` bir dizi için öğe ve dizi öğeleri türü dönüştürür `Nullable`. + +**Örnek** + +``` sql +SELECT arrayPushBack(['a'], 'b') AS res +``` + +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` + +## arrayPushFront {#arraypushfront} + +Dizinin başına bir öğe ekler. + +``` sql +arrayPushFront(array, single_value) +``` + +**Parametre** + +- `array` – Array. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` dizinin veri türü için yazın. Clickhouse'daki veri türleri hakkında daha fazla bilgi için bkz. “[Veri türleri](../../sql_reference/data_types/index.md#data_types)”. Olabilir `NULL`. Fonksiyon bir ekler `NULL` bir dizi için öğe ve dizi öğeleri türü dönüştürür `Nullable`. 
+ +**Örnek** + +``` sql +SELECT arrayPushFront(['b'], 'a') AS res +``` + +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` + +## arrayResize {#arrayresize} + +Dizinin uzunluğunu değiştirir. + +``` sql +arrayResize(array, size[, extender]) +``` + +**Parametre:** + +- `array` — Array. +- `size` — Required length of the array. + - Eğer `size` dizinin orijinal boyutundan daha az, dizi sağdan kesilir. +- Eğer `size` dizinin başlangıç boyutundan daha büyük, dizi sağa uzatılır `extender` dizi öğelerinin veri türü için değerler veya varsayılan değerler. +- `extender` — Value for extending an array. Can be `NULL`. + +**Döndürülen değer:** + +Bir dizi uzunluk `size`. + +**Arama örnekleri** + +``` sql +SELECT arrayResize([1], 3) +``` + +``` text +┌─arrayResize([1], 3)─┐ +│ [1,0,0] │ +└─────────────────────┘ +``` + +``` sql +SELECT arrayResize([1], 3, NULL) +``` + +``` text +┌─arrayResize([1], 3, NULL)─┐ +│ [1,NULL,NULL] │ +└───────────────────────────┘ +``` + +## arraySlice {#arrayslice} + +Dizinin bir dilimini döndürür. + +``` sql +arraySlice(array, offset[, length]) +``` + +**Parametre** + +- `array` – Array of data. +- `offset` – Indent from the edge of the array. A positive value indicates an offset on the left, and a negative value is an indent on the right. Numbering of the array items begins with 1. +- `length` - Gerekli dilimin uzunluğu. Negatif bir değer belirtirseniz, işlev açık bir dilim döndürür `[offset, array_length - length)`. Değeri atlarsanız, işlev dilimi döndürür `[offset, the_end_of_array]`. + +**Örnek** + +``` sql +SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res +``` + +``` text +┌─res────────┐ +│ [2,NULL,4] │ +└────────────┘ +``` + +Ar arrayray elem toents set to `NULL` normal değerler olarak ele alınır. + +## arraySort(\[func,\] arr, …) {#array_functions-sort} + +Elemanları sıralar `arr` artan düzende dizi. Eğer... `func` fonksiyonu belirtilir, sıralama düzeni sonucu belirlenir `func` fonksiyon dizinin elemanlarına uygulanır. Eğer `func` birden fazla argüman kabul eder, `arraySort` fonksiyon argümanları birkaç diziler geçirilir `func` karşılık gelir. Ayrıntılı örnekler sonunda gösterilir `arraySort` açıklama. + +Tamsayı değerleri sıralama örneği: + +``` sql +SELECT arraySort([1, 3, 3, 0]); +``` + +``` text +┌─arraySort([1, 3, 3, 0])─┐ +│ [0,1,3,3] │ +└─────────────────────────┘ +``` + +Dize değerleri sıralama örneği: + +``` sql +SELECT arraySort(['hello', 'world', '!']); +``` + +``` text +┌─arraySort(['hello', 'world', '!'])─┐ +│ ['!','hello','world'] │ +└────────────────────────────────────┘ +``` + +Aşağıdaki sıralama sırasını göz önünde bulundurun `NULL`, `NaN` ve `Inf` değerler: + +``` sql +SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); +``` + +``` text +┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ +│ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────────────────────────┘ +``` + +- `-Inf` değerler dizide ilk sırada yer alır. +- `NULL` değerler dizideki son değerlerdir. +- `NaN` değerler hemen önce `NULL`. +- `Inf` değerler hemen önce `NaN`. + +Not thate that `arraySort` is a [yüksek sipariş fonksiyonu](higher_order_functions.md). Bir lambda işlevini ilk argüman olarak iletebilirsiniz. Bu durumda, sıralama sırası, dizinin elemanlarına uygulanan lambda işlevinin sonucu ile belirlenir. 
+ +Aşağıdaki örneği ele alalım: + +``` sql +SELECT arraySort((x) -> -x, [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [3,2,1] │ +└─────────┘ +``` + +For each element of the source array, the lambda function returns the sorting key, that is, \[1 –\> -1, 2 –\> -2, 3 –\> -3\]. Since the `arraySort` fonksiyon tuşları artan sırayla sıralar, sonuç \[3, 2, 1\]. Böylece, `(x) –> -x` lambda fonksiyonu setleri [azalan düzen](#array_functions-reverse-sort) bir sıralama içinde. + +Lambda işlevi birden çok bağımsız değişken kabul edebilir. Bu durumda, geçmek gerekir `arraySort` işlev lambda işlevinin argümanlarının karşılık geleceği aynı uzunlukta birkaç dizi. Elde edilen dizi ilk giriş dizisinden elemanlardan oluşacaktır; bir sonraki giriş dizilerinden elemanlar sıralama anahtarlarını belirtir. Mesela: + +``` sql +SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` text +┌─res────────────────┐ +│ ['world', 'hello'] │ +└────────────────────┘ +``` + +Burada, ikinci dizide (\[2, 1\]) geçirilen öğeler, kaynak diziden karşılık gelen öğe için bir sıralama anahtarı tanımlar (\[‘hello’, ‘world’\]), bu, \[‘hello’ –\> 2, ‘world’ –\> 1\]. Since the lambda function doesn't use `x`, kaynak dizinin gerçek değerleri sonuçtaki sırayı etkilemez. Böyle, ‘hello’ sonuçtaki ikinci eleman olacak ve ‘world’ ilk olacak. + +Diğer örnekler aşağıda gösterilmiştir. + +``` sql +SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; +``` + +``` text +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +``` sql +SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +!!! note "Not" + Sıralama verimliliğini artırmak için, [Schwartzian dönüşümü](https://en.wikipedia.org/wiki/Schwartzian_transform) kullanılır. + +## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} + +Elemanları sıralar `arr` azalan sırayla dizi. Eğer... `func` fonksiyon belirtilir, `arr` sonucuna göre sıra islanır. `func` işlev dizinin öğelerine uygulanır ve sonra sıralanmış dizi tersine çevrilir. Eğer `func` birden fazla argüman kabul eder, `arrayReverseSort` fonksiyon argümanları birkaç diziler geçirilir `func` karşılık gelir. Ayrıntılı örnekler sonunda gösterilir `arrayReverseSort` açıklama. + +Tamsayı değerleri sıralama örneği: + +``` sql +SELECT arrayReverseSort([1, 3, 3, 0]); +``` + +``` text +┌─arrayReverseSort([1, 3, 3, 0])─┐ +│ [3,3,1,0] │ +└────────────────────────────────┘ +``` + +Dize değerleri sıralama örneği: + +``` sql +SELECT arrayReverseSort(['hello', 'world', '!']); +``` + +``` text +┌─arrayReverseSort(['hello', 'world', '!'])─┐ +│ ['world','hello','!'] │ +└───────────────────────────────────────────┘ +``` + +Aşağıdaki sıralama sırasını göz önünde bulundurun `NULL`, `NaN` ve `Inf` değerler: + +``` sql +SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; +``` + +``` text +┌─res───────────────────────────────────┐ +│ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────┘ +``` + +- `Inf` değerler dizide ilk sırada yer alır. +- `NULL` değerler dizideki son değerlerdir. +- `NaN` değerler hemen önce `NULL`. +- `-Inf` değerler hemen önce `NaN`. + +Not `arrayReverseSort` is a [yüksek sipariş fonksiyonu](higher_order_functions.md). Bir lambda işlevini ilk argüman olarak iletebilirsiniz. Örnek aşağıda gösterilmiştir. + +``` sql +SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [1,2,3] │ +└─────────┘ +``` + +Dizi aşağıdaki şekilde sıralanır: + +1. 
İlk başta, kaynak dizi (\[1, 2, 3\]), dizinin elemanlarına uygulanan lambda işlevinin sonucuna göre sıralanır. Sonuç bir dizidir \[3, 2, 1\]. +2. Önceki adımda elde edilen dizi tersine çevrilir. Yani, nihai sonuç \[1, 2, 3\]. + +Lambda işlevi birden çok bağımsız değişken kabul edebilir. Bu durumda, geçmek gerekir `arrayReverseSort` işlev lambda işlevinin argümanlarının karşılık geleceği aynı uzunlukta birkaç dizi. Elde edilen dizi ilk giriş dizisinden elemanlardan oluşacaktır; bir sonraki giriş dizilerinden elemanlar sıralama anahtarlarını belirtir. Mesela: + +``` sql +SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` text +┌─res───────────────┐ +│ ['hello','world'] │ +└───────────────────┘ +``` + +Bu örnekte, dizi aşağıdaki şekilde sıralanır: + +1. İlk başta, kaynak dizi (\[‘hello’, ‘world’\]) dizilerin elemanlarına uygulanan lambda işlevinin sonucuna göre sıralanır. İkinci dizide geçirilen öğeler (\[2, 1\]), kaynak diziden karşılık gelen öğeler için sıralama anahtarlarını tanımlar. Sonuç bir dizidir \[‘world’, ‘hello’\]. +2. Önceki adımda sıralanmış dizi tersine çevrilir. Yani, nihai sonuç \[‘hello’, ‘world’\]. + +Diğer örnekler aşağıda gösterilmiştir. + +``` sql +SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; +``` + +``` text +┌─res─────┐ +│ [5,3,4] │ +└─────────┘ +``` + +``` sql +SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; +``` + +``` text +┌─res─────┐ +│ [4,3,5] │ +└─────────┘ +``` + +## arrayUniq(arr, …) {#arrayuniqarr} + +Bir bağımsız değişken geçirilirse, dizideki farklı öğelerin sayısını sayar. +Birden çok bağımsız değişken geçirilirse, birden çok dizideki karşılık gelen konumlardaki farklı öğe kümelerinin sayısını sayar. + +Bir dizideki benzersiz öğelerin bir listesini almak istiyorsanız, arrayreduce kullanabilirsiniz(‘groupUniqArray’, arr). + +## arrayJoin(arr) {#array-functions-join} + +Özel bir işlev. Bölümüne bakınız [“ArrayJoin function”](array_join.md#functions_arrayjoin). + +## arrayDifference {#arraydifference} + +Bitişik dizi öğeleri arasındaki farkı hesaplar. İlk öğenin 0 olacağı bir dizi döndürür, ikincisi arasındaki farktır `a[1] - a[0]`, etc. The type of elements in the resulting array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`). + +**Sözdizimi** + +``` sql +arrayDifference(array) +``` + +**Parametre** + +- `array` – [Dizi](https://clickhouse.yandex/docs/en/data_types/array/). + +**Döndürülen değerler** + +Bitişik öğeler arasındaki farklar dizisini döndürür. + +Tür: [Uİnt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Tamsayı\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Yüzdürmek\*](https://clickhouse.yandex/docs/en/data_types/float/). + +**Örnek** + +Sorgu: + +``` sql +SELECT arrayDifference([1, 2, 3, 4]) +``` + +Sonuç: + +``` text +┌─arrayDifference([1, 2, 3, 4])─┐ +│ [0,1,1,1] │ +└───────────────────────────────┘ +``` + +Sonuç türü Int64 nedeniyle taşma örneği: + +Sorgu: + +``` sql +SELECT arrayDifference([0, 10000000000000000000]) +``` + +Sonuç: + +``` text +┌─arrayDifference([0, 10000000000000000000])─┐ +│ [0,-8446744073709551616] │ +└────────────────────────────────────────────┘ +``` + +## arrayDistinct {#arraydistinct} + +Bir dizi alır, yalnızca farklı öğeleri içeren bir dizi döndürür. + +**Sözdizimi** + +``` sql +arrayDistinct(array) +``` + +**Parametre** + +- `array` – [Dizi](https://clickhouse.yandex/docs/en/data_types/array/). 
+ +**Döndürülen değerler** + +Farklı öğeleri içeren bir dizi döndürür. + +**Örnek** + +Sorgu: + +``` sql +SELECT arrayDistinct([1, 2, 2, 3, 1]) +``` + +Sonuç: + +``` text +┌─arrayDistinct([1, 2, 2, 3, 1])─┐ +│ [1,2,3] │ +└────────────────────────────────┘ +``` + +## arrayEnumerateDense(arr) {#array_functions-arrayenumeratedense} + +Kaynak diziyle aynı boyutta bir dizi döndürür ve her öğenin kaynak dizide ilk olarak nerede göründüğünü gösterir. + +Örnek: + +``` sql +SELECT arrayEnumerateDense([10, 20, 10, 30]) +``` + +``` text +┌─arrayEnumerateDense([10, 20, 10, 30])─┐ +│ [1,2,1,3] │ +└───────────────────────────────────────┘ +``` + +## arrayıntersect(arr) {#array-functions-arrayintersect} + +Birden çok dizi alır, tüm kaynak dizilerde bulunan öğeleri içeren bir dizi döndürür. Elde edilen dizideki öğeler sırası ilk dizideki ile aynıdır. + +Örnek: + +``` sql +SELECT + arrayIntersect([1, 2], [1, 3], [2, 3]) AS no_intersect, + arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect +``` + +``` text +┌─no_intersect─┬─intersect─┐ +│ [] │ [1] │ +└──────────────┴───────────┘ +``` + +## arrayReduce {#arrayreduce} + +Dizi öğelerine bir toplama işlevi uygular ve sonucunu döndürür. Toplama işlevinin adı, tek tırnak içinde bir dize olarak geçirilir `'max'`, `'sum'`. Parametrik toplama işlevleri kullanıldığında, parametre parantez içinde işlev adından sonra gösterilir `'uniqUpTo(6)'`. + +**Sözdizimi** + +``` sql +arrayReduce(agg_func, arr1, arr2, ..., arrN) +``` + +**Parametre** + +- `agg_func` — The name of an aggregate function which should be a constant [dize](../../sql_reference/data_types/string.md). +- `arr` — Any number of [dizi](../../sql_reference/data_types/array.md) sütunları toplama işlevinin parametreleri olarak yazın. + +**Döndürülen değer** + +**Örnek** + +``` sql +SELECT arrayReduce('max', [1, 2, 3]) +``` + +``` text +┌─arrayReduce('max', [1, 2, 3])─┐ +│ 3 │ +└───────────────────────────────┘ +``` + +Bir toplama işlevi birden çok bağımsız değişken alırsa, bu işlev aynı boyuttaki birden çok diziye uygulanmalıdır. + +``` sql +SELECT arrayReduce('maxIf', [3, 5], [1, 0]) +``` + +``` text +┌─arrayReduce('maxIf', [3, 5], [1, 0])─┐ +│ 3 │ +└──────────────────────────────────────┘ +``` + +Parametrik toplama fonksiyonu ile örnek: + +``` sql +SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) +``` + +``` text +┌─arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])─┐ +│ 4 │ +└─────────────────────────────────────────────────────────────┘ +``` + +## arrayReduceİnRanges {#arrayreduceinranges} + +Belirli aralıklardaki dizi öğelerine bir toplama işlevi uygular ve her aralığa karşılık gelen sonucu içeren bir dizi döndürür. Fonksiyon aynı sonucu birden fazla olarak döndürür `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`. + +**Sözdizimi** + +``` sql +arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) +``` + +**Parametre** + +- `agg_func` — The name of an aggregate function which should be a constant [dize](../../sql_reference/data_types/string.md). +- `ranges` — The ranges to aggretate which should be an [dizi](../../sql_reference/data_types/array.md) -den [Demetler](../../sql_reference/data_types/tuple.md) indeks ve her aralığın uzunluğunu içeren. +- `arr` — Any number of [dizi](../../sql_reference/data_types/array.md) sütunları toplama işlevinin parametreleri olarak yazın. 
+ +**Döndürülen değer** + +**Örnek** + +``` sql +SELECT arrayReduceInRanges( + 'sum', + [(1, 5), (2, 3), (3, 4), (4, 4)], + [1000000, 200000, 30000, 4000, 500, 60, 7] +) AS res +``` + +``` text +┌─res─────────────────────────┐ +│ [1234500,234000,34560,4567] │ +└─────────────────────────────┘ +``` + +## arrayReverse(arr) {#arrayreverse} + +Öğeleri ters sırada içeren orijinal diziyle aynı boyutta bir dizi döndürür. + +Örnek: + +``` sql +SELECT arrayReverse([1, 2, 3]) +``` + +``` text +┌─arrayReverse([1, 2, 3])─┐ +│ [3,2,1] │ +└─────────────────────────┘ +``` + +## ters (arr) {#array-functions-reverse} + +Eşanlamlı [“arrayReverse”](#array_functions-arrayreverse) + +## arrayFlatten {#arrayflatten} + +Bir dizi diziyi düz bir diziye dönüştürür. + +İşlev: + +- İç içe geçmiş dizilerin herhangi bir derinliği için geçerlidir. +- Zaten düz olan dizileri değiştirmez. + +Düzleştirilmiş dizi, tüm kaynak dizilerdeki tüm öğeleri içerir. + +**Sözdizimi** + +``` sql +flatten(array_of_arrays) +``` + +Takma ad: `flatten`. + +**Parametre** + +- `array_of_arrays` — [Dizi](../../sql_reference/data_types/array.md) dizilerin. Mesela, `[[1,2,3], [4,5]]`. + +**Örnekler** + +``` sql +SELECT flatten([[[1]], [[2], [3]]]) +``` + +``` text +┌─flatten(array(array([1]), array([2], [3])))─┐ +│ [1,2,3] │ +└─────────────────────────────────────────────┘ +``` + +## arrayCompact {#arraycompact} + +Ardışık yinelenen öğeleri bir diziden kaldırır. Sonuç değerlerinin sırası, kaynak dizideki sıraya göre belirlenir. + +**Sözdizimi** + +``` sql +arrayCompact(arr) +``` + +**Parametre** + +`arr` — The [dizi](../../sql_reference/data_types/array.md) incelemek. + +**Döndürülen değer** + +Yinelenen olmadan dizi. + +Tür: `Array`. + +**Örnek** + +Sorgu: + +``` sql +SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]) +``` + +Sonuç: + +``` text +┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐ +│ [1,nan,nan,2,3] │ +└────────────────────────────────────────────┘ +``` + +## arrayZip {#arrayzip} + +Birden çok diziyi tek bir dizide birleştirir. Elde edilen dizi, listelenen bağımsız değişken sırasına göre gruplandırılmış kaynak dizilerin karşılık gelen öğelerini içerir. + +**Sözdizimi** + +``` sql +arrayZip(arr1, arr2, ..., arrN) +``` + +**Parametre** + +- `arrN` — [Dizi](../data_types/array.md). + +İşlev, farklı türde herhangi bir dizi alabilir. Tüm giriş dizileri eşit boyutta olmalıdır. + +**Döndürülen değer** + +- Gruplandırılmış kaynak dizilerden öğelerle dizi [Demetler](../data_types/tuple.md). Veri türleri tuple giriş dizileri türleri ile aynıdır ve diziler geçirilir aynı sırada. + +Tür: [Dizi](../data_types/array.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]) +``` + +Sonuç: + +``` text +┌─arrayZip(['a', 'b', 'c'], [5, 2, 1])─┐ +│ [('a',5),('b',2),('c',1)] │ +└──────────────────────────────────────┘ +``` + +## arrayAUC {#arrayauc} + +Auc'yi hesaplayın (makine öğreniminde bir kavram olan eğrinin altındaki alan, daha fazla ayrıntıya bakın: https://en.wikipedia.org/wiki/Receiver\_operating\_characteristic\#Area\_under\_the\_curve). + +**Sözdizimi** + +``` sql +arrayAUC(arr_scores, arr_labels) +``` + +**Parametre** +- `arr_scores` — scores prediction model gives. +- `arr_labels` — labels of samples, usually 1 for positive sample and 0 for negtive sample. + +**Döndürülen değer** +Float64 türü ile AUC değerini döndürür. 
+ +**Örnek** +Sorgu: + +``` sql +select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) +``` + +Sonuç: + +``` text +┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐ +│ 0.75 │ +└────────────────────────────────────────---──┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/tr/sql_reference/functions/array_join.md b/docs/tr/sql_reference/functions/array_join.md new file mode 100644 index 00000000000..f59ed95805b --- /dev/null +++ b/docs/tr/sql_reference/functions/array_join.md @@ -0,0 +1,37 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 61 +toc_title: arrayJoin +--- + +# arrayjoin işlevi {#functions_arrayjoin} + +Bu çok sıradışı bir işlevdir. + +Normal işlevler bir dizi satırı değiştirmez, ancak her satırdaki değerleri değiştirir (harita). +Toplama işlevleri bir dizi satırı sıkıştırır (katlayın veya azaltın). +Bu ‘arrayJoin’ işlev her satırı alır ve bir dizi satır oluşturur (açılır). + +Bu işlev bir diziyi bağımsız değişken olarak alır ve kaynak satırı dizideki öğe sayısı için birden çok satıra yayar. +Sütunlardaki tüm değerler, bu işlevin uygulandığı sütundaki değerler dışında kopyalanır; karşılık gelen dizi değeri ile değiştirilir. + +Bir sorgu birden çok kullanabilirsiniz `arrayJoin` işlevler. Bu durumda, dönüşüm birden çok kez gerçekleştirilir. + +Daha geniş olanaklar sağlayan SELECT sorgusunda dizi birleştirme sözdizimini not alın. + +Örnek: + +``` sql +SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src +``` + +``` text +┌─dst─┬─\'Hello\'─┬─src─────┐ +│ 1 │ Hello │ [1,2,3] │ +│ 2 │ Hello │ [1,2,3] │ +│ 3 │ Hello │ [1,2,3] │ +└─────┴───────────┴─────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/tr/sql_reference/functions/bit_functions.md b/docs/tr/sql_reference/functions/bit_functions.md new file mode 100644 index 00000000000..1e8d9761d5d --- /dev/null +++ b/docs/tr/sql_reference/functions/bit_functions.md @@ -0,0 +1,255 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 48 +toc_title: Bitlik +--- + +# Bit fonksiyonları {#bit-functions} + +Bit işlevleri, uint8, Uİnt16, Uİnt32, Uint64, Int8, Int16, Int32, Int64, Float32 veya Float64 türlerinden herhangi bir çift için çalışır. + +Sonuç türü, bağımsız değişkenlerinin maksimum bitlerine eşit bit içeren bir tamsayıdır. Bağımsız değişkenlerden en az biri imzalanırsa, sonuç imzalı bir sayıdır. Bir bağımsız değişken bir kayan noktalı sayı ise, Int64 için cast. + +## bıtor(a, b) {#bitanda-b} + +## bitOr(a, b) {#bitora-b} + +## bitXor(a, b) {#bitxora-b} + +## bitNot (a) {#bitnota} + +## bitShiftLeft(a, b) {#bitshiftlefta-b} + +## bitShiftRight(a, b) {#bitshiftrighta-b} + +## bitRotateLeft(a, b) {#bitrotatelefta-b} + +## bitRotateRight(a, b) {#bitrotaterighta-b} + +## bitTest {#bittest} + +Herhangi bir tamsayı alır ve dönüştürür [ikili form](https://en.wikipedia.org/wiki/Binary_number), belirtilen konumda bir bit değerini döndürür. Geri sayım sağdan sola 0 başlar. + +**Sözdizimi** + +``` sql +SELECT bitTest(number, index) +``` + +**Parametre** + +- `number` – integer number. +- `index` – position of bit. + +**Döndürülen değerler** + +Belirtilen konumda bit değeri döndürür. + +Tür: `UInt8`. + +**Örnek** + +Örneğin, taban-2 (ikili) sayı sistemindeki 43 sayısı 101011'dir. 
+ +Sorgu: + +``` sql +SELECT bitTest(43, 1) +``` + +Sonuç: + +``` text +┌─bitTest(43, 1)─┐ +│ 1 │ +└────────────────┘ +``` + +Başka bir örnek: + +Sorgu: + +``` sql +SELECT bitTest(43, 2) +``` + +Sonuç: + +``` text +┌─bitTest(43, 2)─┐ +│ 0 │ +└────────────────┘ +``` + +## bitTestAll {#bittestall} + +Sonucu döndürür [mantıksal conjuction](https://en.wikipedia.org/wiki/Logical_conjunction) Verilen pozisyonlarda tüm bitlerin (ve operatörü). Geri sayım sağdan sola 0 başlar. + +Bitsel işlemler için conjuction: + +0 AND 0 = 0 + +0 AND 1 = 0 + +1 AND 0 = 0 + +1 AND 1 = 1 + +**Sözdizimi** + +``` sql +SELECT bitTestAll(number, index1, index2, index3, index4, ...) +``` + +**Parametre** + +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) doğru ise ve sadece tüm pozisyon trueları doğru ise (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). + +**Döndürülen değerler** + +Mantıksal conjuction sonucunu döndürür. + +Tür: `UInt8`. + +**Örnek** + +Örneğin, taban-2 (ikili) sayı sistemindeki 43 sayısı 101011'dir. + +Sorgu: + +``` sql +SELECT bitTestAll(43, 0, 1, 3, 5) +``` + +Sonuç: + +``` text +┌─bitTestAll(43, 0, 1, 3, 5)─┐ +│ 1 │ +└────────────────────────────┘ +``` + +Başka bir örnek: + +Sorgu: + +``` sql +SELECT bitTestAll(43, 0, 1, 3, 5, 2) +``` + +Sonuç: + +``` text +┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐ +│ 0 │ +└───────────────────────────────┘ +``` + +## bitTestAny {#bittestany} + +Sonucu döndürür [mantıksal ayrılma](https://en.wikipedia.org/wiki/Logical_disjunction) Verilen konumlardaki tüm bitlerin (veya operatör). Geri sayım sağdan sola 0 başlar. + +Bitsel işlemler için ayrılma: + +0 OR 0 = 0 + +0 OR 1 = 1 + +1 OR 0 = 1 + +1 OR 1 = 1 + +**Sözdizimi** + +``` sql +SELECT bitTestAny(number, index1, index2, index3, index4, ...) +``` + +**Parametre** + +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. + +**Döndürülen değerler** + +Mantıksal disjuction sonucunu döndürür. + +Tür: `UInt8`. + +**Örnek** + +Örneğin, taban-2 (ikili) sayı sistemindeki 43 sayısı 101011'dir. + +Sorgu: + +``` sql +SELECT bitTestAny(43, 0, 2) +``` + +Sonuç: + +``` text +┌─bitTestAny(43, 0, 2)─┐ +│ 1 │ +└──────────────────────┘ +``` + +Başka bir örnek: + +Sorgu: + +``` sql +SELECT bitTestAny(43, 4, 2) +``` + +Sonuç: + +``` text +┌─bitTestAny(43, 4, 2)─┐ +│ 0 │ +└──────────────────────┘ +``` + +## bitCount {#bitcount} + +Bir sayının ikili gösteriminde birine ayarlanmış bit sayısını hesaplar. + +**Sözdizimi** + +``` sql +bitCount(x) +``` + +**Parametre** + +- `x` — [Tamsayı](../../sql_reference/data_types/int_uint.md) veya [kayan nokta](../../sql_reference/data_types/float.md) numara. İşlev, bellekteki değer gösterimini kullanır. Kayan noktalı sayıları desteklemeye izin verir. + +**Döndürülen değer** + +- Giriş numarasında birine ayarlanmış bit sayısı. + +İşlev, giriş değerini daha büyük bir türe dönüştürmez ([işaret uzantısı](https://en.wikipedia.org/wiki/Sign_extension)). Bu yüzden, örneğin , `bitCount(toUInt8(-1)) = 8`. + +Tür: `UInt8`. + +**Örnek** + +Örneğin 333 sayısını alın. İkili gösterimi: 0000000101001101. 
+ +Sorgu: + +``` sql +SELECT bitCount(333) +``` + +Sonuç: + +``` text +┌─bitCount(333)─┐ +│ 5 │ +└───────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/tr/sql_reference/functions/bitmap_functions.md b/docs/tr/sql_reference/functions/bitmap_functions.md new file mode 100644 index 00000000000..1b4fb53e0e3 --- /dev/null +++ b/docs/tr/sql_reference/functions/bitmap_functions.md @@ -0,0 +1,496 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 49 +toc_title: "E\u015Flem" +--- + +# Bitmap işlevleri {#bitmap-functions} + +Bitmap işlevleri iki bit eşlemler nesne değeri hesaplama için çalışmak, yeni bitmap veya kardinality formül hesaplama, and, or, xor ve not, vb gibi kullanırken döndürmektir. + +Bitmap nesnesi için 2 çeşit inşaat yöntemi vardır. Biri-State ile toplama işlevi groupBitmap tarafından inşa edilecek, diğeri Array nesnesi tarafından inşa edilecek. Ayrıca bitmap nesnesini dizi nesnesine dönüştürmektir. + +Roaringbitmap, Bitmap nesnelerinin gerçek depolanması sırasında bir veri yapısına sarılır. Önemlilik 32'den küçük veya eşit olduğunda, Set objet kullanır. Kardinality 32'den büyük olduğunda, roaringbitmap nesnesi kullanır. Bu nedenle düşük kardinalite kümesinin depolanması daha hızlıdır. + +RoaringBitmap hakkında daha fazla bilgi için bkz: [CRoaring](https://github.com/RoaringBitmap/CRoaring). + +## bitmapBuild {#bitmap_functions-bitmapbuild} + +İmzasız tamsayı dizisinden bir bit eşlem oluşturun. + +``` sql +bitmapBuild(array) +``` + +**Parametre** + +- `array` – unsigned integer array. + +**Örnek** + +``` sql +SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) +``` + +``` text +┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ +│  │ AggregateFunction(groupBitmap, UInt8) │ +└─────┴──────────────────────────────────────────────┘ +``` + +## bitmapToArray {#bitmaptoarray} + +Bitmap'i tamsayı dizisine dönüştürün. + +``` sql +bitmapToArray(bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + +``` text +┌─res─────────┐ +│ [1,2,3,4,5] │ +└─────────────┘ +``` + +## bitmapsubsetınrange {#bitmap-functions-bitmapsubsetinrange} + +Belirtilen aralıktaki alt kümesi döndürür (range\_end içermez). + +``` sql +bitmapSubsetInRange(bitmap, range_start, range_end) +``` + +**Parametre** + +- `bitmap` – [Bitmap nesnesi](#bitmap_functions-bitmapbuild). +- `range_start` – range start point. Type: [Uİnt32](../../sql_reference/data_types/int_uint.md). +- `range_end` – range end point(excluded). Type: [Uİnt32](../../sql_reference/data_types/int_uint.md). + +**Örnek** + +``` sql +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +``` text +┌─res───────────────┐ +│ [30,31,32,33,100] │ +└───────────────────┘ +``` + +## bitmapSubsetLimit {#bitmapsubsetlimit} + +Arasında alınan n öğeleri ile bitmap bir alt kümesi oluşturur `range_start` ve `cardinality_limit`. + +**Sözdizimi** + +``` sql +bitmapSubsetLimit(bitmap, range_start, cardinality_limit) +``` + +**Parametre** + +- `bitmap` – [Bitmap nesnesi](#bitmap_functions-bitmapbuild). +- `range_start` – The subset starting point. Type: [Uİnt32](../../sql_reference/data_types/int_uint.md). +- `cardinality_limit` – The subset cardinality upper limit. 
Type: [Uİnt32](../../sql_reference/data_types/int_uint.md). + +**Döndürülen değer** + +Alt. + +Tür: `Bitmap object`. + +**Örnek** + +Sorgu: + +``` sql +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +Sonuç: + +``` text +┌─res───────────────────────┐ +│ [30,31,32,33,100,200,500] │ +└───────────────────────────┘ +``` + +## bitmapContains {#bitmap_functions-bitmapcontains} + +Bit eşlem bir öğe içerip içermediğini denetler. + +``` sql +bitmapContains(haystack, needle) +``` + +**Parametre** + +- `haystack` – [Bitmap nesnesi](#bitmap_functions-bitmapbuild), fonksiyon arar nerede. +- `needle` – Value that the function searches. Type: [Uİnt32](../../sql_reference/data_types/int_uint.md). + +**Döndürülen değerler** + +- 0 — If `haystack` içermez `needle`. +- 1 — If `haystack` içeriyor `needle`. + +Tür: `UInt8`. + +**Örnek** + +``` sql +SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## bitmapHasAny {#bitmaphasany} + +İki bit eşlemin bazı öğelerle kesiştiği olup olmadığını kontrol eder. + +``` sql +bitmapHasAny(bitmap1, bitmap2) +``` + +Eğer eminseniz `bitmap2` kesinlikle bir öğe içerir, kullanmayı düşünün [bitmapContains](#bitmap_functions-bitmapcontains) işlev. Daha verimli çalışır. + +**Parametre** + +- `bitmap*` – bitmap object. + +**Dönüş değerleri** + +- `1`, eğer `bitmap1` ve `bitmap2` en azından benzer bir öğeye sahip olun. +- `0`, başka. + +**Örnek** + +``` sql +SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## bitmapHasAll {#bitmaphasall} + +Benzer `hasAll(array, array)` ilk bit eşlem, ikincisinin tüm öğelerini içeriyorsa, 1 değerini döndürür, aksi halde 0. +İkinci bağımsız değişken boş bir bit eşlem ise, 1 döndürür. + +``` sql +bitmapHasAll(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + +``` text +┌─res─┐ +│ 0 │ +└─────┘ +``` + +## bitmapCardinality {#bitmapcardinality} + +Retrun bit eşlem kardinalite türü Uİnt64. + +``` sql +bitmapCardinality(bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + +``` text +┌─res─┐ +│ 5 │ +└─────┘ +``` + +## bitmapMin {#bitmapmin} + +Kümedeki uint64 türünün en küçük değerini yeniden çalıştırın, küme boşsa UİNT32\_MAX. + + bitmapMin(bitmap) + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 1 │ + └─────┘ + +## bitmapMax {#bitmapmax} + +Küme boşsa, kümedeki uint64 türünün en büyük değerini 0 olarak yeniden çalıştırın. + + bitmapMax(bitmap) + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 5 │ + └─────┘ + +## bitmapTransform {#bitmaptransform} + +Bitmap'teki bir değer dizisini başka bir değer dizisine dönüştürün, sonuç yeni bir bitmap'tir. + + bitmapTransform(bitmap, from_array, to_array) + +**Parametre** + +- `bitmap` – bitmap object. +- `from_array` – UInt32 array. For idx in range \[0, from\_array.size()), if bitmap contains from\_array\[idx\], then replace it with to\_array\[idx\]. 
Note that the result depends on array ordering if there are common elements between from\_array and to\_array. +- `to_array` – UInt32 array, its size shall be the same to from\_array. + +**Örnek** + +``` sql +SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res +``` + + ┌─res───────────────────┐ + │ [1,3,4,6,7,8,9,10,20] │ + └───────────────────────┘ + +## bitmapAnd {#bitmapand} + +İki bitmap ve hesaplama, sonuç yeni bir bitmap'tir. + +``` sql +bitmapAnd(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res─┐ +│ [3] │ +└─────┘ +``` + +## bitmapOr {#bitmapor} + +İki bitmap veya hesaplama, sonuç yeni bir bitmap'tir. + +``` sql +bitmapOr(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res─────────┐ +│ [1,2,3,4,5] │ +└─────────────┘ +``` + +## bitmapXor {#bitmapxor} + +İki bitmap XOR hesaplama, sonuç yeni bir bitmap. + +``` sql +bitmapXor(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res───────┐ +│ [1,2,4,5] │ +└───────────┘ +``` + +## bitmapAndnot {#bitmapandnot} + +İki bit eşlem andnot hesaplama, sonuç yeni bir bit eşlem. + +``` sql +bitmapAndnot(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` + +## bitmapAndCardinality {#bitmapandcardinality} + +İki bitmap ve hesaplama, uint64 türünün kardinalliğini döndürür. + +``` sql +bitmapAndCardinality(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## bitmapOrCardinality {#bitmaporcardinality} + +İki bitmap veya hesaplama, uint64 türünün kardinalliğini döndürür. + +``` sql +bitmapOrCardinality(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 5 │ +└─────┘ +``` + +## bitmapXorCardinality {#bitmapxorcardinality} + +İki bitmap XOR hesaplama, uint64 türünün kardinalliğini döndürür. + +``` sql +bitmapXorCardinality(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. + +**Örnek** + +``` sql +SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 4 │ +└─────┘ +``` + +## bitmapAndnotCardinality {#bitmapandnotcardinality} + +İki bitmap andnot hesaplama, uint64 türünün kardinalliğini döndürür. + +``` sql +bitmapAndnotCardinality(bitmap,bitmap) +``` + +**Parametre** + +- `bitmap` – bitmap object. 
+
+**Örnek**
+
+``` sql
+SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res;
+```
+
+``` text
+┌─res─┐
+│   2 │
+└─────┘
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/)
diff --git a/docs/tr/sql_reference/functions/comparison_functions.md b/docs/tr/sql_reference/functions/comparison_functions.md
new file mode 100644
index 00000000000..d041459bb05
--- /dev/null
+++ b/docs/tr/sql_reference/functions/comparison_functions.md
@@ -0,0 +1,39 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 36
+toc_title: "Kar\u015F\u0131la\u015Ft\u0131rma"
+---
+
+# Karşılaştırma fonksiyonları {#comparison-functions}
+
+Karşılaştırma işlevleri her zaman 0 veya 1 (UInt8) döndürür.
+
+Aşağıdaki türler karşılaştırılabilir:
+
+- sayılar
+- dizeler ve sabit dizeler
+- tarihler
+- saatli tarihler
+
+Karşılaştırma her grubun kendi içinde yapılabilir, ancak farklı gruplar arasında yapılamaz.
+
+Örneğin, bir tarihi bir dizeyle karşılaştıramazsınız. Dizeyi tarihe (veya tam tersine) dönüştürmek için bir işlev kullanmanız gerekir.
+
+Dizeler bayt bayt karşılaştırılır. Daha kısa bir dize, onunla başlayan ve en az bir karakter daha içeren tüm dizelerden küçüktür.
+
+## equals, a = b ve a == b operatörü {#function-equals}
+
+## notEquals, a != b ve a \<\> b operatörü {#function-notequals}
+
+## less, \< operatörü {#function-less}
+
+## greater, \> operatörü {#function-greater}
+
+## lessOrEquals, \<= operatörü {#function-lessorequals}
+
+## greaterOrEquals, \>= operatörü {#function-greaterorequals}
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/)
diff --git a/docs/tr/sql_reference/functions/conditional_functions.md b/docs/tr/sql_reference/functions/conditional_functions.md
new file mode 100644
index 00000000000..b2e918666a0
--- /dev/null
+++ b/docs/tr/sql_reference/functions/conditional_functions.md
@@ -0,0 +1,215 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 43
+toc_title: "Ko\u015Fullu "
+---
+
+# Koşullu fonksiyonlar {#conditional-functions}
+
+## eğer {#if}
+
+Koşullu dallanmayı gerçekleştirir. Çoğu sistemin aksine ClickHouse, `then` ve `else` ifadelerinin her ikisini de her zaman değerlendirir.
+
+**Sözdizimi**
+
+``` sql
+SELECT if(cond, then, else)
+```
+
+`cond` koşulu sıfırdan farklı bir değer verirse `then` ifadesinin sonucu döndürülür; `else` ifadesi varsa, sonucu atlanır. `cond` sıfır veya `NULL` ise `then` ifadesinin sonucu atlanır ve varsa `else` ifadesinin sonucu döndürülür.
+
+**Parametre**
+
+- `cond` – Değerlendirilecek koşul; sıfır olabilir veya olmayabilir. Tür: UInt8, Nullable(UInt8) veya NULL.
+- `then` - Koşul karşılanırsa döndürülecek ifade.
+- `else` - Koşul karşılanmazsa döndürülecek ifade.
+
+**Döndürülen değerler**
+
+İşlev `then` ve `else` ifadelerini yürütür ve `cond` koşulunun sıfır olup olmadığına bağlı olarak ilgili sonucu döndürür.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT if(1, plus(2, 2), plus(2, 6))
+```
+
+Sonuç:
+
+``` text
+┌─plus(2, 2)─┐
+│          4 │
+└────────────┘
+```
+
+Sorgu:
+
+``` sql
+SELECT if(0, plus(2, 2), plus(2, 6))
+```
+
+Sonuç:
+
+``` text
+┌─plus(2, 6)─┐
+│          8 │
+└────────────┘
+```
+
+- `then` ve `else` en düşük ortak türe sahip olmalıdır.
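+
+Aşağıdaki örnekler `LEFT_RIGHT` adlı bir tabloyu kullanır. Tablonun tanımı belgede yer almadığından, örnekleri yeniden üretmek için kullanılabilecek küçük bir varsayımsal taslak:
+
+``` sql
+-- Varsayımsal taslak: örneklerdeki verilerle Nullable sütunlu bir tablo
+CREATE TABLE LEFT_RIGHT (`left` Nullable(Int8), `right` Nullable(Int8)) ENGINE = Memory;
+INSERT INTO LEFT_RIGHT VALUES (NULL, 4), (1, 3), (2, 2), (3, 1), (4, NULL);
+```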
+ +**Örnek:** + +Bunu al `LEFT_RIGHT` Tablo: + +``` sql +SELECT * +FROM LEFT_RIGHT + +┌─left─┬─right─┐ +│ ᴺᵁᴸᴸ │ 4 │ +│ 1 │ 3 │ +│ 2 │ 2 │ +│ 3 │ 1 │ +│ 4 │ ᴺᵁᴸᴸ │ +└──────┴───────┘ +``` + +Aşağıdaki sorgu karşılaştırır `left` ve `right` değerler: + +``` sql +SELECT + left, + right, + if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller +FROM LEFT_RIGHT +WHERE isNotNull(left) AND isNotNull(right) + +┌─left─┬─right─┬─is_smaller──────────────────────────┐ +│ 1 │ 3 │ left is smaller than right │ +│ 2 │ 2 │ right is greater or equal than left │ +│ 3 │ 1 │ right is greater or equal than left │ +└──────┴───────┴─────────────────────────────────────┘ +``` + +Not: `NULL` bu örnekte değerler kullanılmaz, kontrol edin [Koşullardaki boş değerler](#null-values-in-conditionals) bölme. + +## Üçlü Operatör {#ternary-operator} + +Aynı gibi çalışıyor. `if` işlev. + +Sözdizimi: `cond ? then : else` + +Dönüşler `then` eğer... `cond` true (sıfırdan büyük) olarak değerlendirir, aksi takdirde döndürür `else`. + +- `cond` türü olmalıdır `UInt8`, ve `then` ve `else` en düşük ortak türe sahip olmalıdır. + +- `then` ve `else` olabilir `NULL` + +**Ayrıca bakınız** + +- [ifNotFinite](other_functions.md#ifnotfinite). + +## multiİf {#multiif} + +Yaz allowsmanızı sağlar [CASE](../operators.md#operator_case) operatör sorguda daha kompakt. + +Sözdizimi: `multiIf(cond_1, then_1, cond_2, then_2, ..., else)` + +**Parametre:** + +- `cond_N` — The condition for the function to return `then_N`. +- `then_N` — The result of the function when executed. +- `else` — The result of the function if none of the conditions is met. + +İşlev kabul eder `2N+1` parametre. + +**Döndürülen değerler** + +İşlev, değerlerden birini döndürür `then_N` veya `else` bu koşullara bağlı olarak `cond_N`. + +**Örnek** + +Yine kullanarak `LEFT_RIGHT` Tablo. + +``` sql +SELECT + left, + right, + multiIf(left < right, 'left is smaller', left > right, 'left is greater', left = right, 'Both equal', 'Null value') AS result +FROM LEFT_RIGHT + +┌─left─┬─right─┬─result──────────┐ +│ ᴺᵁᴸᴸ │ 4 │ Null value │ +│ 1 │ 3 │ left is smaller │ +│ 2 │ 2 │ Both equal │ +│ 3 │ 1 │ left is greater │ +│ 4 │ ᴺᵁᴸᴸ │ Null value │ +└──────┴───────┴─────────────────┘ +``` + +## Koşullu sonuçları doğrudan kullanma {#using-conditional-results-directly} + +Koşullar her zaman sonuç `0`, `1` veya `NULL`. Böylece koşullu sonuçları doğrudan bu şekilde kullanabilirsiniz: + +``` sql +SELECT left < right AS is_small +FROM LEFT_RIGHT + +┌─is_small─┐ +│ ᴺᵁᴸᴸ │ +│ 1 │ +│ 0 │ +│ 0 │ +│ ᴺᵁᴸᴸ │ +└──────────┘ +``` + +## Koşullardaki boş değerler {#null-values-in-conditionals} + +Ne zaman `NULL` değerler koşullarla ilgilidir, sonuç da olacaktır `NULL`. + +``` sql +SELECT + NULL < 1, + 2 < NULL, + NULL < NULL, + NULL = NULL + +┌─less(NULL, 1)─┬─less(2, NULL)─┬─less(NULL, NULL)─┬─equals(NULL, NULL)─┐ +│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +└───────────────┴───────────────┴──────────────────┴────────────────────┘ +``` + +Bu nedenle, sorgularınızı türleri dikkatli bir şekilde oluşturmalısınız `Nullable`. + +Aşağıdaki örnek, eşittir koşulu eklemek başarısız tarafından bu gösterir `multiIf`. 
+ +``` sql +SELECT + left, + right, + multiIf(left < right, 'left is smaller', left > right, 'right is smaller', 'Both equal') AS faulty_result +FROM LEFT_RIGHT + +┌─left─┬─right─┬─faulty_result────┐ +│ ᴺᵁᴸᴸ │ 4 │ Both equal │ +│ 1 │ 3 │ left is smaller │ +│ 2 │ 2 │ Both equal │ +│ 3 │ 1 │ right is smaller │ +│ 4 │ ᴺᵁᴸᴸ │ Both equal │ +└──────┴───────┴──────────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/tr/sql_reference/functions/date_time_functions.md b/docs/tr/sql_reference/functions/date_time_functions.md new file mode 100644 index 00000000000..fdcbacc7749 --- /dev/null +++ b/docs/tr/sql_reference/functions/date_time_functions.md @@ -0,0 +1,450 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 39 +toc_title: "Tarih ve Saatlerle \xE7al\u0131\u015Fma" +--- + +# Tarih ve saatlerle çalışmak için işlevler {#functions-for-working-with-dates-and-times} + +Saat dilimleri için destek + +Saat dilimi için mantıksal kullanımı olan tarih ve Saat ile çalışmak için tüm işlevler, ikinci bir isteğe bağlı saat dilimi bağımsız değişkeni kabul edebilir. Örnek: Asya / Yekaterinburg. Bu durumda, yerel (varsayılan) yerine belirtilen saat dilimini kullanırlar. + +``` sql +SELECT + toDateTime('2016-06-15 23:00:00') AS time, + toDate(time) AS date_local, + toDate(time, 'Asia/Yekaterinburg') AS date_yekat, + toString(time, 'US/Samoa') AS time_samoa +``` + +``` text +┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐ +│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │ +└─────────────────────┴────────────┴────────────┴─────────────────────┘ +``` + +UTC'DEN saat sayısına göre farklı olan yalnızca saat dilimleri desteklenir. + +## toTimeZone {#totimezone} + +Saat veya tarih ve saati belirtilen saat dilimine dönüştürün. + +## toYear {#toyear} + +Bir tarihi veya tarihi zamanla yıl numarasını (AD) içeren bir Uınt16 numarasına dönüştürür. + +## toQuarter {#toquarter} + +Bir tarihi veya tarihi zaman ile çeyrek sayısını içeren bir Uİnt8 numarasına dönüştürür. + +## toMonth {#tomonth} + +Bir tarih veya tarih ile saati, ay numarasını (1-12) içeren bir Uİnt8 numarasına dönüştürür. + +## bugünyıl {#todayofyear} + +Bir tarih veya tarih ile saat, yılın gün sayısını (1-366) içeren bir Uınt16 numarasına dönüştürür. + +## bugünay {#todayofmonth} + +Bir tarih veya tarih ile saat, Ayın gün sayısını (1-31) içeren bir Uınt8 numarasına dönüştürür. + +## bugünhafta {#todayofweek} + +Bir tarih veya tarih ile saat, haftanın gününün sayısını içeren bir Uınt8 numarasına dönüştürür (Pazartesi 1 ve pazar 7'dir). + +## toHour {#tohour} + +Saatli bir tarihi, 24 saatlik süre (0-23) saat sayısını içeren bir Uınt8 numarasına dönüştürür. +This function assumes that if clocks are moved ahead, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always true – even in Moscow the clocks were twice changed at a different time). + +## toMinute {#tominute} + +Saatli bir tarihi, saatin dakika sayısını (0-59) içeren bir Uınt8 numarasına dönüştürür. + +## toSecond {#tosecond} + +Dakika (0-59) ikinci sayısını içeren bir uınt8 numarasına zaman ile bir tarih dönüştürür. +Sıçrama saniye hesaba değildir. + +## toUnixTimestamp {#to-unix-timestamp} + +DateTime argümanı için: değeri dahili sayısal gösterimine dönüştürür (Unıx Zaman Damgası). 
+String argümanı için: datetime'ı dizeden saat dilimine göre ayrıştırın (isteğe bağlı ikinci argüman, sunucu zaman dilimi varsayılan olarak kullanılır) ve karşılık gelen unıx zaman damgasını döndürür. +Tarih argümanı için: davranış belirtilmemiş. + +**Sözdizimi** + +``` sql +toUnixTimestamp(datetime) +toUnixTimestamp(str, [timezone]) +``` + +**Döndürülen değer** + +- Unix zaman damgasını döndürür. + +Tür: `UInt32`. + +**Örnek** + +Sorgu: + +``` sql +SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp +``` + +Sonuç: + +``` text +┌─unix_timestamp─┐ +│ 1509836867 │ +└────────────────┘ +``` + +## toStartOfYear {#tostartofyear} + +Yılın ilk gününe kadar bir tarih veya tarih aşağı yuvarlar. +Tarihi döndürür. + +## toStartOfİSOYear {#tostartofisoyear} + +ISO yılın ilk gününe kadar bir tarih veya tarih aşağı yuvarlar. +Tarihi döndürür. + +## toStartOfQuarter {#tostartofquarter} + +Çeyrek ilk güne kadar bir tarih veya tarih aşağı yuvarlar. +Çeyreğin ilk günü 1 Ocak, 1 Nisan, 1 Temmuz veya 1 ekim'dir. +Tarihi döndürür. + +## toStartOfMonth {#tostartofmonth} + +Ayın ilk gününe kadar bir tarih veya tarih aşağı yuvarlar. +Tarihi döndürür. + +!!! attention "Dikkat" + Yanlış tarihleri ayrıştırma davranışı uygulamaya özeldir. ClickHouse sıfır tarihi döndürebilir, bir istisna atabilir veya yapabilir “natural” taşmak. + +## toMonday {#tomonday} + +En yakın Pazartesi günü bir tarih veya tarih aşağı yuvarlar. +Tarihi döndürür. + +## toStartOfWeek(t \[, mod\]) {#tostartofweektmode} + +Modu ile en yakın pazar veya Pazartesi zaman bir tarih veya tarih aşağı yuvarlar. +Tarihi döndürür. +Mod bağımsız değişkeni, toWeek () için mod bağımsız değişkeni gibi çalışır. Tek bağımsız değişken sözdizimi için 0 mod değeri kullanılır. + +## toStartOfDay {#tostartofday} + +Günün başlangıcına kadar bir tarih aşağı yuvarlar. + +## toStartOfHour {#tostartofhour} + +Saat başlangıcına kadar bir tarih aşağı yuvarlar. + +## toStartOfMinute {#tostartofminute} + +Dakikanın başlangıcına kadar bir tarih aşağı yuvarlar. + +## toStartOfFiveMinute {#tostartoffiveminute} + +Beş dakikalık aralığın başlangıcına kadar bir tarih aşağı yuvarlar. + +## toStartOfTenMinutes {#tostartoftenminutes} + +On dakikalık aralığın başlangıcına kadar bir tarih aşağı yuvarlar. + +## toStartOfFifteenMinutes {#tostartoffifteenminutes} + +On beş dakikalık aralığın başlangıcına kadar tarih aşağı yuvarlar. + +## toStartOfİnterval (time\_or\_data, Aralık x birimi \[, time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} + +Bu, diğer işlevlerin bir genellemesidir `toStartOf*`. Mesela, +`toStartOfInterval(t, INTERVAL 1 year)` aynı döndürür `toStartOfYear(t)`, +`toStartOfInterval(t, INTERVAL 1 month)` aynı döndürür `toStartOfMonth(t)`, +`toStartOfInterval(t, INTERVAL 1 day)` aynı döndürür `toStartOfDay(t)`, +`toStartOfInterval(t, INTERVAL 15 minute)` aynı döndürür `toStartOfFifteenMinutes(t)` vb. + +## toTime {#totime} + +Belirli bir sabit tarihe zaman ile bir tarih dönüştürür, zaman korurken. + +## toRelativeYearNum {#torelativeyearnum} + +Geçmişte belirli bir sabit noktadan başlayarak, yıl sayısına saat veya tarih ile bir tarih dönüştürür. + +## toRelativeQuarterNum {#torelativequarternum} + +Geçmişte belirli bir sabit noktadan başlayarak, çeyrek sayısına saat veya tarih ile bir tarih dönüştürür. + +## toRelativeMonthNum {#torelativemonthnum} + +Geçmişte belirli bir sabit noktadan başlayarak, Ayın sayısına saat veya tarih ile bir tarih dönüştürür. 
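+
+Örneğin, `toRelative*` işlevleri iki tarih arasındaki farkı hesaplamak için kullanılabilir. Küçük bir örnek (sonuç, mutlak ay numaralarının farkıdır):
+
+``` sql
+SELECT toRelativeMonthNum(toDate('2019-03-15')) - toRelativeMonthNum(toDate('2018-12-31')) AS months
+```
+
+``` text
+┌─months─┐
+│      3 │
+└────────┘
+```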
+ +## toRelativeWeekNum {#torelativeweeknum} + +Geçmişte belirli bir sabit noktadan başlayarak, haftanın sayısına saat veya tarih ile bir tarih dönüştürür. + +## toRelativeDayNum {#torelativedaynum} + +Geçmişte belirli bir sabit noktadan başlayarak, günün sayısına saat veya tarih ile bir tarih dönüştürür. + +## toRelativeHourNum {#torelativehournum} + +Geçmişte belirli bir sabit noktadan başlayarak, saat veya tarih ile bir tarih saat sayısına dönüştürür. + +## toRelativeMinuteNum {#torelativeminutenum} + +Geçmişte belirli bir sabit noktadan başlayarak, dakika sayısına saat veya tarih ile bir tarih dönüştürür. + +## toRelativeSecondNum {#torelativesecondnum} + +Geçmişte belirli bir sabit noktadan başlayarak, ikinci sayısına saat veya tarih ile bir tarih dönüştürür. + +## toİSOYear {#toisoyear} + +ISO yıl numarasını içeren bir uınt16 numarasına bir tarih veya tarih zaman dönüştürür. + +## toİSOWeek {#toisoweek} + +ISO hafta numarasını içeren bir uınt8 numarasına bir tarih veya tarih zaman dönüştürür. + +## toWeek (tarih \[, mod\]) {#toweekdatemode} + +Bu işlev, date veya datetime için hafta numarasını döndürür. ToWeek () ' in iki bağımsız değişkenli formu, haftanın pazar veya Pazartesi günü başlayıp başlamadığını ve dönüş değerinin 0 ile 53 arasında mı yoksa 1 ile 53 arasında mı olması gerektiğini belirlemenizi sağlar. Mod bağımsız değişkeni atlanırsa, varsayılan mod 0'dır. +`toISOWeek()`eşdeğer bir uyumluluk işlevidir `toWeek(date,3)`. +Aşağıdaki tabloda mod bağımsız değişkeni nasıl çalıştığını açıklar. + +| Modu | Haftanın ilk günü | Aralık | Week 1 is the first week … | +|------|-------------------|--------|----------------------------------| +| 0 | Pazar | 0-53 | bu yıl bir pazar günü ile | +| 1 | Pazartesi | 0-53 | bu yıl 4 veya daha fazla gün ile | +| 2 | Pazar | 1-53 | bu yıl bir pazar günü ile | +| 3 | Pazartesi | 1-53 | bu yıl 4 veya daha fazla gün ile | +| 4 | Pazar | 0-53 | bu yıl 4 veya daha fazla gün ile | +| 5 | Pazartesi | 0-53 | bu yıl bir Pazartesi ile | +| 6 | Pazar | 1-53 | bu yıl 4 veya daha fazla gün ile | +| 7 | Pazartesi | 1-53 | bu yıl bir Pazartesi ile | +| 8 | Pazar | 1-53 | 1 Ocak içerir | +| 9 | Pazartesi | 1-53 | 1 Ocak içerir | + +Bir anlamı olan mod değerleri için “with 4 or more days this year,” haftalar ISO 8601: 1988'e göre numaralandırılmıştır: + +- 1 Ocak içeren haftanın yeni yılda 4 veya daha fazla günü varsa, 1. haftadır. + +- Aksi takdirde, bir önceki yılın son haftasıdır ve bir sonraki hafta 1. haftadır. + +Bir anlamı olan mod değerleri için “contains January 1”, 1 Ocak haftanın 1.haft .asıdır. Haftanın yeni yılda kaç gün içerdiği önemli değil, sadece bir gün içerse bile. + +``` sql +toWeek(date, [, mode][, Timezone]) +``` + +**Parametre** + +- `date` – Date or DateTime. +- `mode` – Optional parameter, Range of values is \[0,9\], default is 0. +- `Timezone` – Optional parameter, it behaves like any other conversion function. + +**Örnek** + +``` sql +SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS week1, toWeek(date,9) AS week9; +``` + +``` text +┌───────date─┬─week0─┬─week1─┬─week9─┐ +│ 2016-12-27 │ 52 │ 52 │ 1 │ +└────────────┴───────┴───────┴───────┘ +``` + +## toYearWeek (tarih \[, mod\]) {#toyearweekdatemode} + +Bir tarih için yıl ve hafta döndürür. Sonuçtaki yıl, yılın ilk ve son haftası için tarih argümanındaki yıldan farklı olabilir. + +Mod bağımsız değişkeni, toWeek () için mod bağımsız değişkeni gibi çalışır. Tek bağımsız değişken sözdizimi için 0 mod değeri kullanılır. 
+ +`toISOYear()`eşdeğer bir uyumluluk işlevidir `intDiv(toYearWeek(date,3),100)`. + +**Örnek** + +``` sql +SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9; +``` + +``` text +┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐ +│ 2016-12-27 │ 201652 │ 201652 │ 201701 │ +└────────────┴───────────┴───────────┴───────────┘ +``` + +## şimdi {#now} + +Sıfır bağımsız değişkeni kabul eder ve geçerli saati istek yürütme anlarından birinde döndürür. +Bu işlev, isteğin tamamlanması uzun zaman alsa bile bir sabit döndürür. + +## bugünkü {#today} + +Sıfır bağımsız değişkeni kabul eder ve geçerli tarihi, istek yürütme anlarından birinde döndürür. +Olarak aynı ‘toDate(now())’. + +## dün {#yesterday} + +Sıfır bağımsız değişkeni kabul eder ve istek yürütme anlarından birinde dünün tarihini döndürür. +Olarak aynı ‘today() - 1’. + +## zaman dilimi {#timeslot} + +Yarım saat için zaman yuvarlar. +Bu fonksiyon (kayıt olmak için özeldir.Metrica, yarım saat, bir izleme etiketi, tek bir kullanıcının ardışık sayfa görüntülemelerini, zaman içinde bu miktardan kesinlikle daha fazla farklılık gösteriyorsa, bir oturumu iki oturuma bölmek için minimum zaman miktarıdır. Bu, ilgili oturumda bulunan sayfa görüntülemelerini aramak için tuples (etiket kimliği, kullanıcı kimliği ve zaman dilimi) kullanılabileceği anlamına gelir. + +## toYYYYMM {#toyyyymm} + +Bir tarih veya tarih ile saat, yıl ve ay numarasını (YYYY \* 100 + MM) içeren bir Uınt32 numarasına dönüştürür. + +## toYYYYMMDD {#toyyyymmdd} + +Bir tarih veya tarih ile saat, yıl ve ay numarasını içeren bir Uınt32 numarasına dönüştürür (YYYY \* 10000 + MM \* 100 + DD). + +## toYYYYMMDDhhmmss {#toyyyymmddhhmmss} + +Bir tarihi veya tarihi, yıl ve ay numarasını içeren bir Uınt64 numarasına dönüştürür (YYYY \* 1000000 + MM \* 1000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss). + +## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} + +İşlev, bir tarih/DateTime aralığına bir tarih/DateTime ekler ve ardından Tarih/Datetime'ı döndürür. Mesela: + +``` sql +WITH + toDate('2018-01-01') AS date, + toDateTime('2018-01-01 00:00:00') AS date_time +SELECT + addYears(date, 1) AS add_years_with_date, + addYears(date_time, 1) AS add_years_with_date_time +``` + +``` text +┌─add_years_with_date─┬─add_years_with_date_time─┐ +│ 2019-01-01 │ 2019-01-01 00:00:00 │ +└─────────────────────┴──────────────────────────┘ +``` + +## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} + +Fonksiyon bir tarih/DateTime aralığını bir tarih/DateTime olarak çıkarır ve ardından Tarih/Datetime'ı döndürür. Mesela: + +``` sql +WITH + toDate('2019-01-01') AS date, + toDateTime('2019-01-01 00:00:00') AS date_time +SELECT + subtractYears(date, 1) AS subtract_years_with_date, + subtractYears(date_time, 1) AS subtract_years_with_date_time +``` + +``` text +┌─subtract_years_with_date─┬─subtract_years_with_date_time─┐ +│ 2018-01-01 │ 2018-01-01 00:00:00 │ +└──────────────────────────┴───────────────────────────────┘ +``` + +## dateDiff {#datediff} + +İki Date veya DateTime değerleri arasındaki farkı döndürür. 
+
+**Sözdizimi**
+
+``` sql
+dateDiff('unit', startdate, enddate, [timezone])
+```
+
+**Parametre**
+
+- `unit` — Döndürülen değerin ifade edildiği zaman birimi. [Dize](../syntax.md#syntax-string-literal).
+
+    Desteklenen değerler:
+
+    | unit   |
+    | ---- |
+    |second |
+    |minute |
+    |hour |
+    |day |
+    |week |
+    |month |
+    |quarter |
+    |year |
+
+- `startdate` — Karşılaştırılacak ilk zaman değeri. [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md).
+
+- `enddate` — Karşılaştırılacak ikinci zaman değeri. [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md).
+
+- `timezone` — İsteğe bağlı parametre. Belirtilirse hem `startdate` hem de `enddate` için uygulanır. Belirtilmezse, `startdate` ve `enddate` değerlerinin kendi saat dilimleri kullanılır; bunlar aynı değilse sonuç tanımsızdır.
+
+**Döndürülen değer**
+
+`startdate` ile `enddate` arasındaki, `unit` cinsinden ifade edilen fark.
+
+Tür: `int`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
+```
+
+Sonuç:
+
+``` text
+┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
+│                                                                                      25 │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## timeSlots(StartTime, Duration\[, Size\]) {#timeslotsstarttime-duration-size}
+
+‘StartTime’ zamanında başlayan ve ‘Duration’ saniye süren bir zaman aralığı için, bu aralıktaki ve ‘Size’ saniyeye aşağı yuvarlanmış noktalardan oluşan bir zaman noktaları dizisi döndürür. ‘Size’ isteğe bağlı bir parametredir: varsayılan olarak 1800 değerine ayarlanmış bir UInt32 sabitidir.
+Mesela, `timeSlots(toDateTime('2012-01-01 12:20:00'), 600) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`.
+Bu, ilgili oturumdaki sayfa görüntülemelerini aramak için kullanışlıdır.
+
+## formatDateTime(Time, Format\[, Timezone\]) {#formatdatetime}
+
+İşlev, verilen Format dizesine göre bir zamanı biçimlendirir. Not: Format sabit bir ifadedir; örneğin tek bir sonuç sütunu için birden çok biçim kullanamazsınız.
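+
+Küçük bir örnek (kullanılan `%F` ve `%T` değiştiricileri aşağıdaki tabloda açıklanmıştır):
+
+Sorgu:
+
+``` sql
+SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F %T') AS formatted
+```
+
+Sonuç:
+
+``` text
+┌─formatted───────────┐
+│ 2018-01-02 22:33:44 │
+└─────────────────────┘
+```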
+
+Biçim için desteklenen değiştiriciler:
+(“Örnek” sütunu, `2018-01-02 22:33:44` zamanı için biçimlendirme sonucunu gösterir)
+
+| Değiştirici | Açıklama                                              | Örnek      |
+|-------------|-------------------------------------------------------|------------|
+| %C          | yıl 100'e bölünür ve tamsayıya kesilir (00-99)        | 20         |
+| %d          | ayın günü, sıfırla doldurulmuş (01-31)                | 02         |
+| %D          | kısa MM/DD/YY tarihi, %m/%d/%y ile eşdeğer            | 01/02/18   |
+| %e          | ayın günü, boşlukla doldurulmuş (1-31)                | 2          |
+| %F          | kısa YYYY-AA-GG tarihi, %Y-%m-%d ile eşdeğer          | 2018-01-02 |
+| %H          | 24 saat biçiminde saat (00-23)                        | 22         |
+| %I          | 12 saat biçiminde saat (01-12)                        | 10         |
+| %j          | yılın günü (001-366)                                  | 002        |
+| %m          | ondalık sayı olarak ay (01-12)                        | 01         |
+| %M          | dakika (00-59)                                        | 33         |
+| %n          | yeni satır karakteri ('\n')                           |            |
+| %p          | AM veya PM göstergesi                                 | PM         |
+| %R          | 24 saatlik HH:MM zamanı, %H:%M ile eşdeğer            | 22:33      |
+| %S          | saniye (00-59)                                        | 44         |
+| %t          | yatay sekme karakteri ('\t')                          |            |
+| %T          | ISO 8601 saat biçimi (HH:MM:SS), %H:%M:%S ile eşdeğer | 22:33:44   |
+| %u          | ISO 8601 hafta günü numarası, Pazartesi = 1 (1-7)     | 2          |
+| %V          | ISO 8601 hafta numarası (01-53)                       | 01         |
+| %w          | haftanın günü, ondalık sayı olarak, Pazar = 0 (0-6)   | 2          |
+| %y          | yıl, son iki basamak (00-99)                          | 18         |
+| %Y          | yıl                                                   | 2018       |
+| %%          | % işareti                                             | %          |
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/)
diff --git a/docs/tr/sql_reference/functions/encoding_functions.md b/docs/tr/sql_reference/functions/encoding_functions.md
new file mode 100644
index 00000000000..49ad1467957
--- /dev/null
+++ b/docs/tr/sql_reference/functions/encoding_functions.md
@@ -0,0 +1,175 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 52
+toc_title: "Kodlam\u0131\u015F"
+---
+
+# Kodlama fonksiyonları {#encoding-functions}
+
+## char {#char}
+
+Geçirilen bağımsız değişken sayısı kadar uzunluğa sahip bir dize döndürür; her bayt, karşılık gelen bağımsız değişkenin değerini taşır. Sayısal türlerde birden çok bağımsız değişken kabul eder. Bağımsız değişkenin değeri UInt8 veri türü aralığının dışındaysa, olası yuvarlama ve taşma ile UInt8'e dönüştürülür.
+
+**Sözdizimi**
+
+``` sql
+char(number_1, [number_2, ..., number_n]);
+```
+
+**Parametre**
+
+- `number_1, number_2, ..., number_n` — Tamsayı olarak yorumlanan sayısal bağımsız değişkenler. Türler: [Tamsayı](../../sql_reference/data_types/int_uint.md), [Float](../../sql_reference/data_types/float.md).
+
+**Döndürülen değer**
+
+- Verilen baytlardan oluşan dize.
+
+Tür: `String`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello
+```
+
+Sonuç:
+
+``` text
+┌─hello─┐
+│ hello │
+└───────┘
+```
+
+Karşılık gelen baytları geçirerek istediğiniz kodlamada bir dize oluşturabilirsiniz. UTF-8 için örnek:
+
+Sorgu:
+
+``` sql
+SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x82) AS hello;
+```
+
+Sonuç:
+
+``` text
+┌─hello──┐
+│ привет │
+└────────┘
+```
+
+Sorgu:
+
+``` sql
+SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello;
+```
+
+Sonuç:
+
+``` text
+┌─hello─┐
+│ 你好  │
+└───────┘
+```
+
+## hex {#hex}
+
+Bağımsız değişkenin onaltılık gösterimini içeren bir dize döndürür.
+
+**Sözdizimi**
+
+``` sql
+hex(arg)
+```
+
+İşlev büyük harfleri (`A-F`) kullanır; `0x` gibi bir önek veya `h` gibi bir sonek kullanmaz.
+
+Tamsayı argümanları için, onaltılık basamakları (“nibble”) en önemliden en önemsizine doğru yazdırır (big endian veya “human readable” sıra).
En önemli sıfır olmayan baytla başlar (önde gelen sıfır bayt atlanır), ancak önde gelen basamak sıfır olsa bile her baytın her iki basamağını da yazdırır. + +Örnek: + +**Örnek** + +Sorgu: + +``` sql +SELECT hex(1); +``` + +Sonuç: + +``` text +01 +``` + +Tip değerleri `Date` ve `DateTime` karşılık gelen tamsayılar olarak biçimlendirilir (tarih için çağdan bu yana geçen gün sayısı ve datetime için Unix zaman damgasının değeri). + +İçin `String` ve `FixedString`, tüm bayt sadece iki onaltılık sayı olarak kodlanır. Sıfır bayt ihmal edilmez. + +Kayan nokta ve ondalık türlerinin değerleri, bellekteki gösterimi olarak kodlanır. Küçük endian mimarisini desteklediğimiz için, bunlar küçük endian'da kodlanmıştır. Sıfır önde gelen / sondaki bayt ihmal edilmez. + +**Parametre** + +- `arg` — A value to convert to hexadecimal. Types: [Dize](../../sql_reference/data_types/string.md), [Uİnt](../../sql_reference/data_types/int_uint.md), [Yüzdürmek](../../sql_reference/data_types/float.md), [Ondalık](../../sql_reference/data_types/decimal.md), [Tarihli](../../sql_reference/data_types/date.md) veya [DateTime](../../sql_reference/data_types/datetime.md). + +**Döndürülen değer** + +- Bağımsız değişken onaltılık gösterimi ile bir dize. + +Tür: `String`. + +**Örnek** + +Sorgu: + +``` sql +SELECT hex(toFloat32(number)) as hex_presentation FROM numbers(15, 2); +``` + +Sonuç: + +``` text +┌─hex_presentation─┐ +│ 00007041 │ +│ 00008041 │ +└──────────────────┘ +``` + +Sorgu: + +``` sql +SELECT hex(toFloat64(number)) as hex_presentation FROM numbers(15, 2); +``` + +Sonuç: + +``` text +┌─hex_presentation─┐ +│ 0000000000002E40 │ +│ 0000000000003040 │ +└──────────────────┘ +``` + +## unhex (str) {#unhexstr} + +Onaltılık basamak herhangi bir sayıda içeren bir dize kabul eder ve karşılık gelen bayt içeren bir dize döndürür. Hem büyük hem de küçük harfleri destekler a-F. onaltılık basamak sayısı bile olmak zorunda değildir. Tek ise, son rakam 00-0F baytın en az önemli yarısı olarak yorumlanır. Bağımsız değişken dizesi onaltılık basamaklardan başka bir şey içeriyorsa, uygulama tanımlı bazı sonuçlar döndürülür (bir özel durum atılmaz). +Sonucu bir sayıya dönüştürmek istiyorsanız, ‘reverse’ ve ‘reinterpretAsType’ işlevler. + +## UUİDStringToNum (str) {#uuidstringtonumstr} + +Biçiminde 36 karakter içeren bir dize kabul eder `123e4567-e89b-12d3-a456-426655440000` ve bir fixedstring(16) bayt kümesi olarak döndürür. + +## UUİDNumToString (str) {#uuidnumtostringstr} + +FixedString(16) değerini kabul eder. Metin biçiminde 36 karakter içeren bir dize döndürür. + +## bitmaskToList (num) {#bitmasktolistnum} + +Bir tamsayı kabul eder. Özetlendiğinde kaynak sayısını toplayan iki güç listesini içeren bir dize döndürür. Artan düzende metin biçiminde boşluk bırakmadan virgülle ayrılırlar. + +## bitmaskToArray (num) {#bitmasktoarraynum} + +Bir tamsayı kabul eder. Özetlendiğinde kaynak sayısını toplayan iki güç listesini içeren bir uint64 sayı dizisi döndürür. Dizideki sayılar artan düzendedir. 
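+
+**Örnek**
+
+Örneğin 50 = 2 + 16 + 32 olduğundan, tanımdan doğrudan türetilen küçük bir örnek:
+
+``` sql
+SELECT bitmaskToList(50) AS list, bitmaskToArray(50) AS arr
+```
+
+``` text
+┌─list────┬─arr───────┐
+│ 2,16,32 │ [2,16,32] │
+└─────────┴───────────┘
+```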
+ +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) diff --git a/docs/tr/sql_reference/functions/ext_dict_functions.md b/docs/tr/sql_reference/functions/ext_dict_functions.md new file mode 100644 index 00000000000..564ec9d5df0 --- /dev/null +++ b/docs/tr/sql_reference/functions/ext_dict_functions.md @@ -0,0 +1,205 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 58 +toc_title: "Harici S\xF6zl\xFCklerle \xE7al\u0131\u015Fma" +--- + +# Harici Sözlüklerle Çalışmak İçin İşlevler {#ext_dict_functions} + +Dış sözlükleri bağlama ve yapılandırma hakkında bilgi için bkz. [Dış söz dictionarieslükler](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). + +## dictGet {#dictget} + +Harici bir sözlükten bir değer alır. + +``` sql +dictGet('dict_name', 'attr_name', id_expr) +dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +``` + +**Parametre** + +- `dict_name` — Name of the dictionary. [String lit literal](../syntax.md#syntax-string-literal). +- `attr_name` — Name of the column of the dictionary. [String lit literal](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [İfade](../syntax.md#syntax-expressions) dönen bir [Uİnt64](../../sql_reference/data_types/int_uint.md) veya [Demet](../../sql_reference/data_types/tuple.md)- sözlük yapılandırmasına bağlı olarak değer yazın. +- `default_value_expr` — Value returned if the dictionary doesn't contain a row with the `id_expr` anahtar. [İfade](../syntax.md#syntax-expressions) veri türü için yapılandırılmış değeri döndürme `attr_name` öznitelik. + +**Döndürülen değer** + +- ClickHouse özniteliği başarıyla ayrıştırırsa [öznitelik veri türü](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), fonksiyonlar karşılık gelen sözlük özniteliğinin değerini döndürür `id_expr`. + +- Anahtar yoksa, karşılık gelen `id_expr`, söz thelükte, sonra: + + - `dictGet` returns the content of the `` element specified for the attribute in the dictionary configuration. + - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter. + +Clickhouse, özniteliğin değerini ayrıştıramazsa veya değer öznitelik veri türüyle eşleşmiyorsa bir özel durum atar. + +**Örnek** + +Metin dosyası oluşturma `ext-dict-text.csv` aşağıdakileri içeren: + +``` text +1,1 +2,2 +``` + +İlk sütun `id` ikinci sütun `c1`. + +Dış sözlüğü yapılandırma: + +``` xml + + + ext-dict-test + + + /path-to/ext-dict-test.csv + CSV + + + + + + + + id + + + c1 + UInt32 + + + + 0 + + +``` + +Sorguyu gerçekleştir: + +``` sql +SELECT + dictGetOrDefault('ext-dict-test', 'c1', number + 1, toUInt32(number * 10)) AS val, + toTypeName(val) AS type +FROM system.numbers +LIMIT 3 +``` + +``` text +┌─val─┬─type───┐ +│ 1 │ UInt32 │ +│ 2 │ UInt32 │ +│ 20 │ UInt32 │ +└─────┴────────┘ +``` + +**Ayrıca Bakınız** + +- [Dış Söz Dictionarieslükler](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) + +## dictHas {#dicthas} + +Bir anahtar sözlükte mevcut olup olmadığını denetler. + +``` sql +dictHas('dict_name', id_expr) +``` + +**Parametre** + +- `dict_name` — Name of the dictionary. [String lit literal](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [İfade](../syntax.md#syntax-expressions) dönen bir [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip değeri. + +**Döndürülen değer** + +- 0, anahtar yoksa. +- 1, bir anahtar varsa. + +Tür: `UInt8`. 
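+
+**Örnek**
+
+Yukarıda yapılandırılan `ext-dict-test` sözlüğüyle küçük bir örnek (sözlüğün yüklü olduğu varsayılmıştır):
+
+``` sql
+SELECT
+    dictHas('ext-dict-test', toUInt64(1)) AS key_exists,
+    dictHas('ext-dict-test', toUInt64(100)) AS key_missing
+```
+
+``` text
+┌─key_exists─┬─key_missing─┐
+│          1 │           0 │
+└────────────┴─────────────┘
+```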
+ +## dictGetHierarchy {#dictgethierarchy} + +Bir anahtarın tüm ebeveynlerini içeren bir dizi oluşturur. [hiyerarş dictionaryik sözlük](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md). + +**Sözdizimi** + +``` sql +dictGetHierarchy('dict_name', key) +``` + +**Parametre** + +- `dict_name` — Name of the dictionary. [String lit literal](../syntax.md#syntax-string-literal). +- `key` — Key value. [İfade](../syntax.md#syntax-expressions) dönen bir [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip değeri. + +**Döndürülen değer** + +- Anahtar için ebeveynler. + +Tür: [Dizi (Uİnt64)](../../sql_reference/data_types/array.md). + +## dictİsİn {#dictisin} + +Sözlükteki tüm hiyerarşik zincir boyunca bir anahtarın atasını kontrol eder. + +``` sql +dictIsIn('dict_name', child_id_expr, ancestor_id_expr) +``` + +**Parametre** + +- `dict_name` — Name of the dictionary. [String lit literal](../syntax.md#syntax-string-literal). +- `child_id_expr` — Key to be checked. [İfade](../syntax.md#syntax-expressions) dönen bir [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip değeri. +- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` anahtar. [İfade](../syntax.md#syntax-expressions) dönen bir [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip değeri. + +**Döndürülen değer** + +- 0, eğer `child_id_expr` bir çocuk değil mi `ancestor_id_expr`. +- 1, Eğer `child_id_expr` bir çocuk `ancestor_id_expr` veya eğer `child_id_expr` is an `ancestor_id_expr`. + +Tür: `UInt8`. + +## Diğer Fonksiyonlar {#ext_dict_functions-other} + +ClickHouse sözlük yapılandırma ne olursa olsun belirli bir veri türü için sözlük öznitelik değerlerini dönüştürmek özel işlevleri destekler. + +İşlevler: + +- `dictGetInt8`, `dictGetInt16`, `dictGetInt32`, `dictGetInt64` +- `dictGetUInt8`, `dictGetUInt16`, `dictGetUInt32`, `dictGetUInt64` +- `dictGetFloat32`, `dictGetFloat64` +- `dictGetDate` +- `dictGetDateTime` +- `dictGetUUID` +- `dictGetString` + +Tüm bu işlevler `OrDefault` değişiklik. Mesela, `dictGetDateOrDefault`. + +Sözdizimi: + +``` sql +dictGet[Type]('dict_name', 'attr_name', id_expr) +dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +``` + +**Parametre** + +- `dict_name` — Name of the dictionary. [String lit literal](../syntax.md#syntax-string-literal). +- `attr_name` — Name of the column of the dictionary. [String lit literal](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [İfade](../syntax.md#syntax-expressions) dönen bir [Uİnt64](../../sql_reference/data_types/int_uint.md)- tip değeri. +- `default_value_expr` — Value which is returned if the dictionary doesn't contain a row with the `id_expr` anahtar. [İfade](../syntax.md#syntax-expressions) veri türü için yapılandırılmış bir değer döndürme `attr_name` öznitelik. + +**Döndürülen değer** + +- ClickHouse özniteliği başarıyla ayrıştırırsa [öznitelik veri türü](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), fonksiyonlar karşılık gelen sözlük özniteliğinin değerini döndürür `id_expr`. + +- İsten nomiyorsa `id_expr` söz thelükte o zaman: + + - `dictGet[Type]` returns the content of the `` element specified for the attribute in the dictionary configuration. + - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter. + +Clickhouse, özniteliğin değerini ayrıştıramazsa veya değer öznitelik veri türüyle eşleşmiyorsa bir özel durum atar. 
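+
+**Örnek**
+
+Yine yukarıdaki `ext-dict-test` sözlüğüyle türlenmiş varyantlara dair küçük bir örnek (sözlük yapılandırmasının yukarıdaki gibi olduğu varsayılmıştır):
+
+``` sql
+SELECT
+    dictGetUInt32('ext-dict-test', 'c1', toUInt64(2)) AS val,
+    dictGetUInt32OrDefault('ext-dict-test', 'c1', toUInt64(100), toUInt32(0)) AS val_or_default
+```
+
+``` text
+┌─val─┬─val_or_default─┐
+│   2 │              0 │
+└─────┴────────────────┘
+```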
+ +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/tr/sql_reference/functions/functions_for_nulls.md b/docs/tr/sql_reference/functions/functions_for_nulls.md new file mode 100644 index 00000000000..59567067c42 --- /dev/null +++ b/docs/tr/sql_reference/functions/functions_for_nulls.md @@ -0,0 +1,312 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 63 +toc_title: "Null arg\xFCmanlarla \xE7al\u0131\u015Fma" +--- + +# Null agregalarla çalışmak için işlevler {#functions-for-working-with-nullable-aggregates} + +## isNull {#isnull} + +Bağımsız değişken olup olmadığını denetler [NULL](../syntax.md#null). + +``` sql +isNull(x) +``` + +**Parametre** + +- `x` — A value with a non-compound data type. + +**Döndürülen değer** + +- `1` eğer `x` oluyor `NULL`. +- `0` eğer `x` değildir `NULL`. + +**Örnek** + +Giriş tablosu + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +Sorgu + +``` sql +SELECT x FROM t_null WHERE isNull(y) +``` + +``` text +┌─x─┐ +│ 1 │ +└───┘ +``` + +## isNotNull {#isnotnull} + +Bağımsız değişken olup olmadığını denetler [NULL](../syntax.md#null). + +``` sql +isNotNull(x) +``` + +**Parametre:** + +- `x` — A value with a non-compound data type. + +**Döndürülen değer** + +- `0` eğer `x` oluyor `NULL`. +- `1` eğer `x` değildir `NULL`. + +**Örnek** + +Giriş tablosu + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +Sorgu + +``` sql +SELECT x FROM t_null WHERE isNotNull(y) +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## birleşmek {#coalesce} + +Olup olmadığını soldan sağa denetler `NULL` argümanlar geçti ve ilk olmayan döndürür-`NULL` değişken. + +``` sql +coalesce(x,...) +``` + +**Parametre:** + +- Bileşik olmayan tipte herhangi bir sayıda parametre. Tüm parametreler veri türüne göre uyumlu olmalıdır. + +**Döndürülen değerler** + +- İlk sigara-`NULL` değişken. +- `NULL`, eğer tüm argümanlar `NULL`. + +**Örnek** + +Bir müşteriyle iletişim kurmak için birden çok yol belirtebilecek kişilerin listesini düşünün. + +``` text +┌─name─────┬─mail─┬─phone─────┬──icq─┐ +│ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │ 123 │ +│ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +└──────────┴──────┴───────────┴──────┘ +``` + +Bu `mail` ve `phone` alanlar String tip ofindedir, ancak `icq` Fi fieldeld is `UInt32`, bu yüzden dönüştürülmesi gerekiyor `String`. + +Müşteri için ilk kullanılabilir iletişim yöntemini kişi listesinden alın: + +``` sql +SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook +``` + +``` text +┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐ +│ client 1 │ 123-45-67 │ +│ client 2 │ ᴺᵁᴸᴸ │ +└──────────┴──────────────────────────────────────────────────────┘ +``` + +## ifNull {#ifnull} + +Ana bağımsız değişken ise alternatif bir değer döndürür `NULL`. + +``` sql +ifNull(x,alt) +``` + +**Parametre:** + +- `x` — The value to check for `NULL`. +- `alt` — The value that the function returns if `x` oluyor `NULL`. + +**Döndürülen değerler** + +- Değer `x`, eğer `x` değildir `NULL`. +- Değer `alt`, eğer `x` oluyor `NULL`. + +**Örnek** + +``` sql +SELECT ifNull('a', 'b') +``` + +``` text +┌─ifNull('a', 'b')─┐ +│ a │ +└──────────────────┘ +``` + +``` sql +SELECT ifNull(NULL, 'b') +``` + +``` text +┌─ifNull(NULL, 'b')─┐ +│ b │ +└───────────────────┘ +``` + +## nullİf {#nullif} + +Dönüşler `NULL` argümanlar eşitse. + +``` sql +nullIf(x, y) +``` + +**Parametre:** + +`x`, `y` — Values for comparison. 
They must be compatible types, or ClickHouse will generate an exception. + +**Döndürülen değerler** + +- `NULL`, argümanlar eşitse. +- Bu `x` bağımsız değişkenler eşit değilse, değer. + +**Örnek** + +``` sql +SELECT nullIf(1, 1) +``` + +``` text +┌─nullIf(1, 1)─┐ +│ ᴺᵁᴸᴸ │ +└──────────────┘ +``` + +``` sql +SELECT nullIf(1, 2) +``` + +``` text +┌─nullIf(1, 2)─┐ +│ 1 │ +└──────────────┘ +``` + +## assumeNotNull {#assumenotnull} + +Bir tür değeri ile sonuçlanır [Nullable](../../sql_reference/data_types/nullable.md) bir sigara için- `Nullable` eğer değer değil `NULL`. + +``` sql +assumeNotNull(x) +``` + +**Parametre:** + +- `x` — The original value. + +**Döndürülen değerler** + +- Olmayan orijinal değeri-`Nullable` tipi, değilse `NULL`. +- Olmayan için varsayılan değer-`Nullable` özgün değer ise yazın `NULL`. + +**Örnek** + +Düşünün `t_null` Tablo. + +``` sql +SHOW CREATE TABLE t_null +``` + +``` text +┌─statement─────────────────────────────────────────────────────────────────┐ +│ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog │ +└───────────────────────────────────────────────────────────────────────────┘ +``` + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +Uygula `assumeNotNull` fonksiyonu için `y` sütun. + +``` sql +SELECT assumeNotNull(y) FROM t_null +``` + +``` text +┌─assumeNotNull(y)─┐ +│ 0 │ +│ 3 │ +└──────────────────┘ +``` + +``` sql +SELECT toTypeName(assumeNotNull(y)) FROM t_null +``` + +``` text +┌─toTypeName(assumeNotNull(y))─┐ +│ Int8 │ +│ Int8 │ +└──────────────────────────────┘ +``` + +## toNullable {#tonullable} + +Bağımsız değişken türünü dönüştürür `Nullable`. + +``` sql +toNullable(x) +``` + +**Parametre:** + +- `x` — The value of any non-compound type. + +**Döndürülen değer** + +- Bir ile giriş değeri `Nullable` tür. + +**Örnek** + +``` sql +SELECT toTypeName(10) +``` + +``` text +┌─toTypeName(10)─┐ +│ UInt8 │ +└────────────────┘ +``` + +``` sql +SELECT toTypeName(toNullable(10)) +``` + +``` text +┌─toTypeName(toNullable(10))─┐ +│ Nullable(UInt8) │ +└────────────────────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) diff --git a/docs/tr/sql_reference/functions/geo.md b/docs/tr/sql_reference/functions/geo.md new file mode 100644 index 00000000000..353418c7b97 --- /dev/null +++ b/docs/tr/sql_reference/functions/geo.md @@ -0,0 +1,510 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 62 +toc_title: "Co\u011Frafi koordinatlar ile \xE7al\u0131\u015Fma" +--- + +# Coğrafi Koordinatlarla çalışmak için fonksiyonlar {#functions-for-working-with-geographical-coordinates} + +## greatCircleDistance {#greatcircledistance} + +Dünya yüzeyindeki iki nokta arasındaki mesafeyi kullanarak hesaplayın [büyük daire formülü](https://en.wikipedia.org/wiki/Great-circle_distance). + +``` sql +greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg) +``` + +**Giriş parametreleri** + +- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`. +- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`. +- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`. +- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`. + +Pozitif değerler Kuzey enlemi ve Doğu boylamına karşılık gelir ve negatif değerler Güney enlemi ve Batı boylamına karşılık gelir. + +**Döndürülen değer** + +Dünya yüzeyindeki iki nokta arasındaki mesafe, metre cinsinden. 
+ +Girdi parametre değerleri aralığın dışına düştüğünde bir özel durum oluşturur. + +**Örnek** + +``` sql +SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) +``` + +``` text +┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐ +│ 14132374.194975413 │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## pointİnEllipses {#pointinellipses} + +Noktanın elipslerden en az birine ait olup olmadığını kontrol eder. +Koordinatlar kartezyen koordinat sisteminde geometriktir. + +``` sql +pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ) +``` + +**Giriş parametreleri** + +- `x, y` — Coordinates of a point on the plane. +- `xᵢ, yᵢ` — Coordinates of the center of the `i`-inci üç nokta. +- `aᵢ, bᵢ` — Axes of the `i`- x, y koordinatları birimlerinde üç nokta. + +Giriş parametreleri olmalıdır `2+4⋅n`, nere `n` elips sayısıdır. + +**Döndürülen değerler** + +`1` nokta elipslerden en az birinin içindeyse; `0`hayır değil. + +**Örnek** + +``` sql +SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999) +``` + +``` text +┌─pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)─┐ +│ 1 │ +└─────────────────────────────────────────────────┘ +``` + +## pointİnPolygon {#pointinpolygon} + +Noktanın düzlemdeki poligona ait olup olmadığını kontrol eder. + +``` sql +pointInPolygon((x, y), [(a, b), (c, d) ...], ...) +``` + +**Giriş değerleri** + +- `(x, y)` — Coordinates of a point on the plane. Data type — [Demet](../../sql_reference/data_types/tuple.md) — A tuple of two numbers. +- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Dizi](../../sql_reference/data_types/array.md). Her köşe bir çift koordinat ile temsil edilir `(a, b)`. Köşeler saat yönünde veya saat yönünün tersine sırayla belirtilmelidir. Minimum köşe sayısı 3'tür. Çokgen sabit olmalıdır. +- Fonksiyon ayrıca delikli çokgenleri de destekler (bölümleri keser). Bu durumda, işlevin ek argümanlarını kullanarak kesilen bölümleri tanımlayan çokgenler ekleyin. İşlev, basit olmayan bağlı çokgenleri desteklemez. + +**Döndürülen değerler** + +`1` nokta çokgenin içinde ise, `0` hayır değil. +Nokta çokgen sınırında ise, işlev 0 veya 1 döndürebilir. + +**Örnek** + +``` sql +SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## geohashEncode {#geohashencode} + +Enlem ve boylamı bir geohash-string olarak kodlar, Lütfen bakınız (http://geohash.org/, https://en.wikipedia.org/wiki/Geohash). + +``` sql +geohashEncode(longitude, latitude, [precision]) +``` + +**Giriş değerleri** + +- boylam-kodlamak istediğiniz koordinatın boylam kısmı. Aralık floatingta yüz floatingen`[-180°, 180°]` +- latitude-kodlamak istediğiniz koordinatın enlem kısmı. Aralık floatingta yüz floatingen `[-90°, 90°]` +- hassas-isteğe bağlı, elde edilen kodlanmış dizenin uzunluğu, varsayılan olarak `12`. Aralıktaki tamsayı `[1, 12]`. Herhangi bir değer daha az `1` veya daha büyük `12` sessizce dönüştürülür `12`. + +**Döndürülen değerler** + +- alfanümerik `String` kodlanmış koordinat (base32-kodlama alfabesinin değiştirilmiş versiyonu kullanılır). + +**Örnek** + +``` sql +SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res +``` + +``` text +┌─res──────────┐ +│ ezs42d000000 │ +└──────────────┘ +``` + +## geohashDecode {#geohashdecode} + +Herhangi bir geohash kodlu dizeyi boylam ve enlem olarak çözer. + +**Giriş değerleri** + +- kodlanmış dize-geohash kodlanmış dize. 
+
+**Döndürülen değerler**
+
+- (boylam, enlem) — boylam ve enlem değerlerinden oluşan 2 öğeli bir `Float64` demeti.
+
+**Örnek**
+
+``` sql
+SELECT geohashDecode('ezs42') AS res
+```
+
+``` text
+┌─res─────────────────────────────┐
+│ (-5.60302734375,42.60498046875) │
+└─────────────────────────────────┘
+```
+
+## geoToH3 {#geotoh3}
+
+Belirtilen çözünürlükte `(lon, lat)` noktasının [H3](https://uber.github.io/h3/#/documentation/overview/introduction) dizinini döndürür.
+
+[H3](https://uber.github.io/h3/#/documentation/overview/introduction), Dünya yüzeyinin altıgen karolara eşit biçimde bölündüğü coğrafi bir indeksleme sistemidir. Bu sistem hiyerarşiktir; örneğin üst seviyedeki her altıgen, yedi adet daha küçük altıgene bölünebilir.
+
+Bu dizin öncelikle konumları kovalara ayırmak (bucketing) ve diğer coğrafi işlemler için kullanılır.
+
+**Sözdizimi**
+
+``` sql
+geoToH3(lon, lat, resolution)
+```
+
+**Parametre**
+
+- `lon` — Longitude. Type: [Float64](../../sql_reference/data_types/float.md).
+- `lat` — Latitude. Type: [Float64](../../sql_reference/data_types/float.md).
+- `resolution` — Index resolution. Range: `[0, 15]`. Tür: [UInt8](../../sql_reference/data_types/int_uint.md).
+
+**Döndürülen değerler**
+
+- Altıgen dizin numarası.
+- Hata durumunda 0.
+
+Tür: `UInt64`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index
+```
+
+Sonuç:
+
+``` text
+┌────────────h3Index─┐
+│ 644325524701193974 │
+└────────────────────┘
+```
+
+## geohashesInBox {#geohashesinbox}
+
+Verilen kutunun içine giren veya kutunun sınırlarıyla kesişen geohash kodlu dizelerden oluşan bir dizi döndürür; bu, temel olarak diziye düzleştirilmiş bir 2B ızgaradır.
+
+**Giriş değerleri**
+
+- longitude\_min — minimum boylam, `[-180°, 180°]` aralığında kayan noktalı değer
+- latitude\_min — minimum enlem, `[-90°, 90°]` aralığında kayan noktalı değer
+- longitude\_max — maksimum boylam, `[-180°, 180°]` aralığında kayan noktalı değer
+- latitude\_max — maksimum enlem, `[-90°, 90°]` aralığında kayan noktalı değer
+- precision — geohash hassasiyeti, `[1, 12]` aralığında `UInt8`
+
+Lütfen tüm koordinat parametrelerinin aynı tipte olması gerektiğini unutmayın: `Float32` veya `Float64`.
+
+**Döndürülen değerler**
+
+- verilen alanı kapsayan, hassasiyet uzunluğunda geohash kutusu dizelerinden oluşan dizi; öğelerin sırasına güvenmemelisiniz.
+- \[\] — *enlem* ve *boylamın* *minimum* değerleri karşılık gelen *maksimum* değerlerden küçük değilse boş dizi.
+
+Ortaya çıkan dizi 10'000'000 öğeden uzunsa işlevin bir istisna fırlatacağını lütfen unutmayın.
+
+**Örnek**
+
+``` sql
+SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos
+```
+
+``` text
+┌─thasos──────────────────────────────────────┐
+│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │
+└─────────────────────────────────────────────┘
+```
+
+## h3GetBaseCell {#h3getbasecell}
+
+Dizinin temel hücre numarasını döndürür.
+
+**Sözdizimi**
+
+``` sql
+h3GetBaseCell(index)
+```
+
+**Parametre**
+
+- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md).
+
+**Döndürülen değerler**
+
+- Altıgen temel hücre numarası. Tür: [UInt8](../../sql_reference/data_types/int_uint.md).
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT h3GetBaseCell(612916788725809151) as basecell
+```
+
+Sonuç:
+
+``` text
+┌─basecell─┐
+│       12 │
+└──────────┘
+```
+
+## h3HexAreaM2 {#h3hexaream2}
+
+Verilen çözünürlükte metrekare cinsinden ortalama altıgen alanı.
+
+**Sözdizimi**
+
+``` sql
+h3HexAreaM2(resolution)
+```
+
+**Parametre**
+
+- `resolution` — Index resolution. Range: `[0, 15]`. Tür: [UInt8](../../sql_reference/data_types/int_uint.md).
+ +**Döndürülen değerler** + +- Area in m². Type: [Float64](../../sql_reference/data_types/float.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT h3HexAreaM2(13) as area +``` + +Sonuç: + +``` text +┌─area─┐ +│ 43.9 │ +└──────┘ +``` + +## h3İndexesAreNeighbors {#h3indexesareneighbors} + +Sağlanan H3indexlerin komşu olup olmadığını döndürür. + +**Sözdizimi** + +``` sql +h3IndexesAreNeighbors(index1, index2) +``` + +**Parametre** + +- `index1` — Hexagon index number. Type: [Uİnt64](../../sql_reference/data_types/int_uint.md). +- `index2` — Hexagon index number. Type: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Döndürülen değerler** + +- Dönüşler `1` dizinler komşu ise, `0` başka. Tür: [Uİnt8](../../sql_reference/data_types/int_uint.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n +``` + +Sonuç: + +``` text +┌─n─┐ +│ 1 │ +└───┘ +``` + +## h3ToChildren {#h3tochildren} + +Verilen dizinin alt dizinlerini içeren bir dizi döndürür. + +**Sözdizimi** + +``` sql +h3ToChildren(index, resolution) +``` + +**Parametre** + +- `index` — Hexagon index number. Type: [Uİnt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Tür: [Uİnt8](../../sql_reference/data_types/int_uint.md). + +**Döndürülen değerler** + +- Alt H3 dizinleri ile dizi. Dizi türü: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT h3ToChildren(599405990164561919, 6) AS children +``` + +Sonuç: + +``` text +┌─children───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ [603909588852408319,603909588986626047,603909589120843775,603909589255061503,603909589389279231,603909589523496959,603909589657714687] │ +└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +## h3ToParent {#h3toparent} + +Verilen dizini içeren üst (kaba) dizini döndürür. + +**Sözdizimi** + +``` sql +h3ToParent(index, resolution) +``` + +**Parametre** + +- `index` — Hexagon index number. Type: [Uİnt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Tür: [Uİnt8](../../sql_reference/data_types/int_uint.md). + +**Döndürülen değerler** + +- Ana H3 Endeksi. Tür: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT h3ToParent(599405990164561919, 3) as parent +``` + +Sonuç: + +``` text +┌─────────────parent─┐ +│ 590398848891879423 │ +└────────────────────┘ +``` + +## h3ToString {#h3tostring} + +Dizinin H3ındex gösterimini dize gösterimine dönüştürür. + +``` sql +h3ToString(index) +``` + +**Parametre** + +- `index` — Hexagon index number. Type: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Döndürülen değerler** + +- H3 dizininin dize gösterimi. Tür: [Dize](../../sql_reference/data_types/string.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT h3ToString(617420388352917503) as h3_string +``` + +Sonuç: + +``` text +┌─h3_string───────┐ +│ 89184926cdbffff │ +└─────────────────┘ +``` + +## stringToH3 {#stringtoh3} + +Dize gösterimini H3ındex (Uİnt64) gösterimine dönüştürür. + +``` sql +stringToH3(index_str) +``` + +**Parametre** + +- `index_str` — String representation of the H3 index. Type: [Dize](../../sql_reference/data_types/string.md). + +**Döndürülen değerler** + +- Altıgen dizin numarası. Hata 0 döndürür. 
Tür: [UInt64](../../sql_reference/data_types/int_uint.md).
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT stringToH3('89184926cc3ffff') as index
+```
+
+Sonuç:
+
+``` text
+┌──────────────index─┐
+│ 617420388351344639 │
+└────────────────────┘
+```
+
+## h3GetResolution {#h3getresolution}
+
+Dizinin çözünürlüğünü döndürür.
+
+**Sözdizimi**
+
+``` sql
+h3GetResolution(index)
+```
+
+**Parametre**
+
+- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md).
+
+**Döndürülen değerler**
+
+- Dizin çözünürlüğü. Aralık: `[0, 15]`. Tür: [UInt8](../../sql_reference/data_types/int_uint.md).
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT h3GetResolution(617420388352917503) as res
+```
+
+Sonuç:
+
+``` text
+┌─res─┐
+│   9 │
+└─────┘
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/geo/)
diff --git a/docs/tr/sql_reference/functions/hash_functions.md b/docs/tr/sql_reference/functions/hash_functions.md
new file mode 100644
index 00000000000..fbb21597943
--- /dev/null
+++ b/docs/tr/sql_reference/functions/hash_functions.md
@@ -0,0 +1,446 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 50
+toc_title: Karma
+---
+
+# Karma fonksiyonlar {#hash-functions}
+
+Karma (hash) fonksiyonları, öğelerin deterministik ve sözde rastgele biçimde karıştırılması için kullanılabilir.
+
+## halfMD5 {#hash-functions-halfmd5}
+
+Tüm giriş parametrelerini dize olarak [yorumlar](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) ve her biri için [MD5](https://en.wikipedia.org/wiki/MD5) karma değerini hesaplar. Sonra karmaları birleştirir, elde edilen dizenin karmasının ilk 8 baytını alır ve bunları büyük endian bayt sırasına göre `UInt64` olarak yorumlar.
+
+``` sql
+halfMD5(par1, ...)
+```
+
+İşlev nispeten yavaştır (işlemci çekirdeği başına saniyede 5 milyon kısa dizge).
+Bunun yerine [sipHash64](#hash_functions-siphash64) işlevini kullanmayı düşünün.
+
+**Parametre**
+
+Fonksiyon, değişken sayıda giriş parametresi alır. Parametreler [desteklenen veri türlerinden](../../sql_reference/data_types/index.md) herhangi biri olabilir.
+
+**Döndürülen Değer**
+
+[UInt64](../../sql_reference/data_types/int_uint.md) veri türünde bir karma değeri.
+
+**Örnek**
+
+``` sql
+SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type
+```
+
+``` text
+┌────────halfMD5hash─┬─type───┐
+│ 186182704141653334 │ UInt64 │
+└────────────────────┴────────┘
+```
+
+## MD5 {#hash_functions-md5}
+
+Bir dizeden MD5 hesaplar ve elde edilen bayt kümesini FixedString(16) olarak döndürür.
+Özellikle MD5'e ihtiyacınız yoksa ancak düzgün bir kriptografik 128 bit karmaya ihtiyacınız varsa, bunun yerine ‘sipHash128’ işlevini kullanın.
+md5sum yardımcı programının çıktısıyla aynı sonucu elde etmek istiyorsanız, lower(hex(MD5(s))) kullanın.
+
+## sipHash64 {#hash_functions-siphash64}
+
+64 bitlik bir [SipHash](https://131002.net/siphash/) karma değeri üretir.
+
+``` sql
+sipHash64(par1,...)
+```
+
+Bu bir kriptografik karma işlevidir. [MD5](#hash_functions-md5) işlevinden en az üç kat daha hızlı çalışır.
+
+İşlev, tüm giriş parametrelerini dize olarak [yorumlar](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) ve her biri için karma değerini hesaplar. Sonra karmaları aşağıdaki algoritmayla birleştirir:
+
+1. Tüm giriş parametrelerini karma yaptıktan sonra, işlev karma dizisini elde eder.
+2.
Fonksiyon birinci ve ikinci öğeleri alır ve bunların dizisi için bir karma hesaplar. +3. Daha sonra işlev, önceki adımda hesaplanan karma değeri ve ilk karma dizinin üçüncü öğesini alır ve bunların dizisi için bir karma hesaplar. +4. Önceki adım, ilk karma dizinin kalan tüm öğeleri için tekrarlanır. + +**Parametre** + +Fonksiyon, değişken sayıda giriş parametresi alır. Parametreler herhangi biri olabilir [desteklenen veri türleri](../../sql_reference/data_types/index.md). + +**Döndürülen Değer** + +A [Uİnt64](../../sql_reference/data_types/int_uint.md) veri türü karma değeri. + +**Örnek** + +``` sql +SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type +``` + +``` text +┌──────────────SipHash─┬─type───┐ +│ 13726873534472839665 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## sifash128 {#hash_functions-siphash128} + +Bir dizeden Sifash hesaplar. +Bir dize türü bağımsız değişkeni kabul eder. Fixedstring(16) Döndürür. +Sifash64'ten farklıdır, çünkü son xor katlama durumu sadece 128 bit'e kadar yapılır. + +## cityHash64 {#cityhash64} + +64-bit üretir [CityHash](https://github.com/google/cityhash) karma değeri. + +``` sql +cityHash64(par1,...) +``` + +Bu hızlı olmayan şifreleme karma işlevidir. Dize parametreleri için CityHash algoritmasını ve diğer veri türleriyle parametreler için uygulamaya özgü hızlı kriptografik olmayan karma işlevini kullanır. İşlev, nihai sonuçları almak için CityHash birleştiricisini kullanır. + +**Parametre** + +Fonksiyon, değişken sayıda giriş parametresi alır. Parametreler herhangi biri olabilir [desteklenen veri türleri](../../sql_reference/data_types/index.md). + +**Döndürülen Değer** + +A [Uİnt64](../../sql_reference/data_types/int_uint.md) veri türü karma değeri. + +**Örnekler** + +Çağrı örneği: + +``` sql +SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type +``` + +``` text +┌─────────────CityHash─┬─type───┐ +│ 12072650598913549138 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +Aşağıdaki örnek, tüm tablonun sağlama toplamının satır sırasına kadar doğrulukla nasıl hesaplanacağını gösterir: + +``` sql +SELECT groupBitXor(cityHash64(*)) FROM table +``` + +## intHash32 {#inthash32} + +Herhangi bir tamsayı türünden 32 bit karma kodu hesaplar. +Bu, sayılar için ortalama kalitenin nispeten hızlı bir kriptografik olmayan karma işlevidir. + +## intHash64 {#inthash64} + +Herhangi bir tamsayı türünden 64 bit karma kodu hesaplar. +Inthash32'den daha hızlı çalışır. Ortalama kalite. + +## SHA1 {#sha1} + +## SHA224 {#sha224} + +## SHA256 {#sha256} + +Bir dizeden SHA-1, SHA-224 veya SHA-256 hesaplar ve elde edilen bayt kümesini FixedString(20), FixedString(28) veya FixedString(32) olarak döndürür. +İşlev oldukça yavaş çalışır (SHA-1, işlemci çekirdeği başına saniyede yaklaşık 5 milyon kısa dizgiyi işler, SHA-224 ve SHA-256 ise yaklaşık 2.2 milyon işlem yapar). +Bu işlevi yalnızca belirli bir karma işleve ihtiyacınız olduğunda ve bunu seçemediğinizde kullanmanızı öneririz. +Bu gibi durumlarda bile, SELECTS'TE uygulamak yerine, tabloya eklerken işlev çevrimdışı ve ön hesaplama değerlerini uygulamanızı öneririz. + +## URLHash(url \[, N\]) {#urlhashurl-n} + +Bir tür normalleştirme kullanarak bir URL'den elde edilen bir dize için hızlı, iyi kalitede olmayan şifreleme karma işlevi. +`URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` veya `#` sonunda, varsa. 
+`URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` veya `#` sonunda, varsa. +Düzeyleri URLHierarchy aynıdır. Bu fonksiyon (kayıt olmak için özeldir.Metrica. + +## farmHash64 {#farmhash64} + +64-bit üretir [FarmHash](https://github.com/google/farmhash) karma değeri. + +``` sql +farmHash64(par1, ...) +``` + +Fonksiyonu kullanır `Hash64` tüm yöntem [mevcut yöntemler](https://github.com/google/farmhash/blob/master/src/farmhash.h). + +**Parametre** + +Fonksiyon, değişken sayıda giriş parametresi alır. Parametreler herhangi biri olabilir [desteklenen veri türleri](../../sql_reference/data_types/index.md). + +**Döndürülen Değer** + +A [Uİnt64](../../sql_reference/data_types/int_uint.md) veri türü karma değeri. + +**Örnek** + +``` sql +SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type +``` + +``` text +┌─────────────FarmHash─┬─type───┐ +│ 17790458267262532859 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## javaHash {#hash_functions-javahash} + +Hesaplıyor [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) bir ipten. Bu karma işlevi ne hızlı ne de iyi bir kaliteye sahip değildir. Bunu kullanmanın tek nedeni, bu algoritmanın zaten başka bir sistemde kullanılmasıdır ve tam olarak aynı sonucu hesaplamanız gerekir. + +**Sözdizimi** + +``` sql +SELECT javaHash(''); +``` + +**Döndürülen değer** + +A `Int32` veri türü karma değeri. + +**Örnek** + +Sorgu: + +``` sql +SELECT javaHash('Hello, world!'); +``` + +Sonuç: + +``` text +┌─javaHash('Hello, world!')─┐ +│ -1880044555 │ +└───────────────────────────┘ +``` + +## javaHashUTF16LE {#javahashutf16le} + +Hesaplıyor [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) bir dizeden, UTF-16LE kodlamasında bir dizeyi temsil eden bayt içerdiğini varsayarak. + +**Sözdizimi** + +``` sql +javaHashUTF16LE(stringUtf16le) +``` + +**Parametre** + +- `stringUtf16le` — a string in UTF-16LE encoding. + +**Döndürülen değer** + +A `Int32` veri türü karma değeri. + +**Örnek** + +UTF-16LE kodlanmış dize ile doğru sorgu. + +Sorgu: + +``` sql +SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) +``` + +Sonuç: + +``` text +┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ +│ 3556498 │ +└──────────────────────────────────────────────────────────────┘ +``` + +## hiveHash {#hash-functions-hivehash} + +Hesaplıyor `HiveHash` bir ipten. + +``` sql +SELECT hiveHash(''); +``` + +Bu sadece [JavaHash](#hash_functions-javahash) sıfırlanmış işaret biti ile. Bu işlev kullanılır [Apache Kov Hanı](https://en.wikipedia.org/wiki/Apache_Hive) 3.0 öncesi sürümler için. Bu karma işlevi ne hızlı ne de iyi bir kaliteye sahip değildir. Bunu kullanmanın tek nedeni, bu algoritmanın zaten başka bir sistemde kullanılmasıdır ve tam olarak aynı sonucu hesaplamanız gerekir. + +**Döndürülen değer** + +A `Int32` veri türü karma değeri. + +Tür: `hiveHash`. + +**Örnek** + +Sorgu: + +``` sql +SELECT hiveHash('Hello, world!'); +``` + +Sonuç: + +``` text +┌─hiveHash('Hello, world!')─┐ +│ 267439093 │ +└───────────────────────────┘ +``` + +## metroHash64 {#metrohash64} + +64-bit üretir [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/) karma değeri. + +``` sql +metroHash64(par1, ...) +``` + +**Parametre** + +Fonksiyon, değişken sayıda giriş parametresi alır. 
Parametreler herhangi biri olabilir [desteklenen veri türleri](../../sql_reference/data_types/index.md). + +**Döndürülen Değer** + +A [Uİnt64](../../sql_reference/data_types/int_uint.md) veri türü karma değeri. + +**Örnek** + +``` sql +SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type +``` + +``` text +┌────────────MetroHash─┬─type───┐ +│ 14235658766382344533 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## jumpConsistentHash {#jumpconsistenthash} + +Bir Uint64 Formu jumpconsistenthash hesaplar. +İki bağımsız değişkeni kabul eder: bir uint64 tipi anahtar ve kova sayısı. Int32 Döndürür. +Daha fazla bilgi için bağlantıya bakın: [JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf) + +## murmurHash2\_32, murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} + +Üreten bir [MurmurHash2](https://github.com/aappleby/smhasher) karma değeri. + +``` sql +murmurHash2_32(par1, ...) +murmurHash2_64(par1, ...) +``` + +**Parametre** + +Her iki işlev de değişken sayıda giriş parametresi alır. Parametreler herhangi biri olabilir [desteklenen veri türleri](../../sql_reference/data_types/index.md). + +**Döndürülen Değer** + +- Bu `murmurHash2_32` fonksiyon hash değerini döndürür [Uİnt32](../../sql_reference/data_types/int_uint.md) veri türü. +- Bu `murmurHash2_64` fonksiyon hash değerini döndürür [Uİnt64](../../sql_reference/data_types/int_uint.md) veri türü. + +**Örnek** + +``` sql +SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type +``` + +``` text +┌──────────MurmurHash2─┬─type───┐ +│ 11832096901709403633 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## murmurHash3\_32, murmurHash3\_64 {#murmurhash3-32-murmurhash3-64} + +Üreten bir [MurmurHash3](https://github.com/aappleby/smhasher) karma değeri. + +``` sql +murmurHash3_32(par1, ...) +murmurHash3_64(par1, ...) +``` + +**Parametre** + +Her iki işlev de değişken sayıda giriş parametresi alır. Parametreler herhangi biri olabilir [desteklenen veri türleri](../../sql_reference/data_types/index.md). + +**Döndürülen Değer** + +- Bu `murmurHash3_32` fonksiyon bir [Uİnt32](../../sql_reference/data_types/int_uint.md) veri türü karma değeri. +- Bu `murmurHash3_64` fonksiyon bir [Uİnt64](../../sql_reference/data_types/int_uint.md) veri türü karma değeri. + +**Örnek** + +``` sql +SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type +``` + +``` text +┌─MurmurHash3─┬─type───┐ +│ 2152717 │ UInt32 │ +└─────────────┴────────┘ +``` + +## murmurHash3\_128 {#murmurhash3-128} + +128-bit üretir [MurmurHash3](https://github.com/aappleby/smhasher) karma değeri. + +``` sql +murmurHash3_128( expr ) +``` + +**Parametre** + +- `expr` — [İfadeler](../syntax.md#syntax-expressions) dönen bir [Dize](../../sql_reference/data_types/string.md)- tip değeri. + +**Döndürülen Değer** + +A [FixedString (16)](../../sql_reference/data_types/fixedstring.md) veri türü karma değeri. + +**Örnek** + +``` sql +SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type +``` + +``` text +┌─MurmurHash3──────┬─type────────────┐ +│ 6�1�4"S5KT�~~q │ FixedString(16) │ +└──────────────────┴─────────────────┘ +``` + +## xxHash32, xxHash64 {#hash-functions-xxhash32} + +Hesaplıyor `xxHash` bir ipten. İki tat, 32 ve 64 bit olarak önerilmiştir. 
+ +``` sql +SELECT xxHash32(''); + +OR + +SELECT xxHash64(''); +``` + +**Döndürülen değer** + +A `Uint32` veya `Uint64` veri türü karma değeri. + +Tür: `xxHash`. + +**Örnek** + +Sorgu: + +``` sql +SELECT xxHash32('Hello, world!'); +``` + +Sonuç: + +``` text +┌─xxHash32('Hello, world!')─┐ +│ 834093149 │ +└───────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [xxHash](http://cyan4973.github.io/xxHash/). + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/tr/sql_reference/functions/higher_order_functions.md b/docs/tr/sql_reference/functions/higher_order_functions.md new file mode 100644 index 00000000000..e5faadc689a --- /dev/null +++ b/docs/tr/sql_reference/functions/higher_order_functions.md @@ -0,0 +1,264 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 57 +toc_title: "Y\xFCksek Sipari\u015F" +--- + +# Yüksek mertebeden fonksiyonlar {#higher-order-functions} + +## `->` operatör, lambda (params, expr) fonksiyonu {#operator-lambdaparams-expr-function} + +Allows describing a lambda function for passing to a higher-order function. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters – any IDs in a tuple. The right side of the arrow has an expression that can use these formal parameters, as well as any table columns. + +Örnekler: `x -> 2 * x, str -> str != Referer.` + +Daha yüksek mertebeden işlevler yalnızca Lambda işlevlerini işlevsel argümanları olarak kabul edebilir. + +Birden çok bağımsız değişkeni kabul eden bir lambda işlevi, daha yüksek mertebeden bir işleve geçirilebilir. Bu durumda, yüksek mertebeden işlev, bu bağımsız değişkenlerin karşılık geleceği aynı uzunlukta birkaç diziden geçirilir. + +Gibi bazı işlevler için [arrayCount](#higher_order_functions-array-count) veya [arraySum](#higher_order_functions-array-count), ilk argüman (lambda işlevi) ihmal edilebilir. Bu durumda, aynı eşleme varsayılır. + +Aşağıdaki işlevler için bir lambda işlevi ihmal edilemez: + +- [arrayMap](#higher_order_functions-array-map) +- [arrayFilter](#higher_order_functions-array-filter) +- [arrayFill](#higher_order_functions-array-fill) +- [arrayReverseFill](#higher_order_functions-array-reverse-fill) +- [arraySplit](#higher_order_functions-array-split) +- [arrayReverseSplit](#higher_order_functions-array-reverse-split) +- [arrayFirst](#higher_order_functions-array-first) +- [arrayFirstİndex](#higher_order_functions-array-first-index) + +### arrayMap(func, arr1, …) {#higher_order_functions-array-map} + +Özgün uygulamadan elde edilen bir dizi döndürür `func` fonksiyon inunda her ele elementmana `arr` dizi. + +Örnekler: + +``` sql +SELECT arrayMap(x -> (x + 2), [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [3,4,5] │ +└─────────┘ +``` + +Aşağıdaki örnek, farklı dizilerden bir öğe kümesinin nasıl oluşturulacağını gösterir: + +``` sql +SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res +``` + +``` text +┌─res─────────────────┐ +│ [(1,4),(2,5),(3,6)] │ +└─────────────────────┘ +``` + +İlk argümanın (lambda işlevi) atlanamayacağını unutmayın. `arrayMap` işlev. + +### arrayFilter(func, arr1, …) {#higher_order_functions-array-filter} + +Yalnızca öğeleri içeren bir dizi döndürür `arr1` hangi için `func` 0'dan başka bir şey döndürür. 
+
+Örnekler:
+
+``` sql
+SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res
+```
+
+``` text
+┌─res───────────┐
+│ ['abc World'] │
+└───────────────┘
+```
+
+``` sql
+SELECT
+    arrayFilter(
+        (i, x) -> x LIKE '%World%',
+        arrayEnumerate(arr),
+        ['Hello', 'abc World'] AS arr)
+    AS res
+```
+
+``` text
+┌─res─┐
+│ [2] │
+└─────┘
+```
+
+İlk argümanın (lambda işlevi) `arrayFilter` işlevinde atlanamayacağını unutmayın.
+
+### arrayFill(func, arr1, …) {#higher_order_functions-array-fill}
+
+`arr1` dizisini ilk öğeden son öğeye doğru tarar ve `func` 0 döndürürse `arr1[i]` öğesini `arr1[i - 1]` ile değiştirir. `arr1` dizisinin ilk öğesi değiştirilmez.
+
+Örnekler:
+
+``` sql
+SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res
+```
+
+``` text
+┌─res──────────────────────────────┐
+│ [1,1,3,11,12,12,12,5,6,14,14,14] │
+└──────────────────────────────────┘
+```
+
+İlk argümanın (lambda işlevi) `arrayFill` işlevinde atlanamayacağını unutmayın.
+
+### arrayReverseFill(func, arr1, …) {#higher_order_functions-array-reverse-fill}
+
+`arr1` dizisini son öğeden ilk öğeye doğru tarar ve `func` 0 döndürürse `arr1[i]` öğesini `arr1[i + 1]` ile değiştirir. `arr1` dizisinin son öğesi değiştirilmez.
+
+Örnekler:
+
+``` sql
+SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res
+```
+
+``` text
+┌─res────────────────────────────────┐
+│ [1,3,3,11,12,5,5,5,6,14,NULL,NULL] │
+└────────────────────────────────────┘
+```
+
+İlk argümanın (lambda işlevi) `arrayReverseFill` işlevinde atlanamayacağını unutmayın.
+
+### arraySplit(func, arr1, …) {#higher_order_functions-array-split}
+
+`arr1` dizisini birden fazla diziye böler. `func` 0'dan başka bir şey döndürdüğünde, dizi öğenin sol tarafından bölünür. Dizi, ilk öğeden önce bölünmez.
+
+Örnekler:
+
+``` sql
+SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res
+```
+
+``` text
+┌─res─────────────┐
+│ [[1,2,3],[4,5]] │
+└─────────────────┘
+```
+
+İlk argümanın (lambda işlevi) `arraySplit` işlevinde atlanamayacağını unutmayın.
+
+### arrayReverseSplit(func, arr1, …) {#higher_order_functions-array-reverse-split}
+
+`arr1` dizisini birden fazla diziye böler. `func` 0'dan başka bir şey döndürdüğünde, dizi öğenin sağ tarafından bölünür. Dizi, son öğeden sonra bölünmez.
+
+Örnekler:
+
+``` sql
+SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res
+```
+
+``` text
+┌─res───────────────┐
+│ [[1],[2,3,4],[5]] │
+└───────────────────┘
+```
+
+İlk argümanın (lambda işlevi) `arrayReverseSplit` işlevinde atlanamayacağını unutmayın.
+
+### arrayCount(\[func,\] arr1, …) {#higher_order_functions-array-count}
+
+‘func’ işlevinin 0'dan başka bir şey döndürdüğü ‘arr’ dizisindeki öğelerin sayısını döndürür. ‘func’ belirtilmemişse, dizideki sıfır olmayan öğelerin sayısını döndürür.
+
+### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1}
+
+‘arr’ içinde, ‘func’ işlevinin 0'dan başka bir şey döndürdüğü en az bir öğe varsa 1 döndürür. Aksi takdirde 0 döndürür.
+
+### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1}
+
+‘func’, ‘arr’ içindeki tüm öğeler için 0'dan başka bir şey döndürüyorsa 1 döndürür. Aksi takdirde 0 döndürür.
+
+### arraySum(\[func,\] arr1, …) {#higher-order-functions-array-sum}
+
+‘func’ değerlerinin toplamını döndürür. İşlev atlanırsa, yalnızca dizi öğelerinin toplamını döndürür.
+
+### arrayFirst(func, arr1, …) {#higher_order_functions-array-first}
+
+‘arr1’ dizisinde ‘func’ işlevinin 0'dan başka bir şey döndürdüğü ilk öğeyi döndürür.
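+
+Aşağıdaki asgari örnek taslağı tanımı göstermektedir (sonuç satırı, yukarıdaki açıklamaya dayanan bir varsayımdır):
+
+``` sql
+SELECT arrayFirst(x -> x > 2, [1, 2, 3, 4]) AS res
+```
+
+``` text
+┌─res─┐
+│   3 │
+└─────┘
+```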
+
+İlk argümanın (lambda işlevi) `arrayFirst` işlevinde atlanamayacağını unutmayın.
+
+### arrayFirstIndex(func, arr1, …) {#higher_order_functions-array-first-index}
+
+‘arr1’ dizisinde ‘func’ işlevinin 0'dan başka bir şey döndürdüğü ilk öğenin dizinini döndürür.
+
+İlk argümanın (lambda işlevi) `arrayFirstIndex` işlevinde atlanamayacağını unutmayın.
+
+### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1}
+
+Kaynak dizideki öğelerin kısmi toplamlarının bir dizisini döndürür (kümülatif toplam). `func` işlevi belirtilirse, dizi öğelerinin değerleri toplanmadan önce bu işlevle dönüştürülür.
+
+Örnek:
+
+``` sql
+SELECT arrayCumSum([1, 1, 1, 1]) AS res
+```
+
+``` text
+┌─res──────────┐
+│ [1, 2, 3, 4] │
+└──────────────┘
+```
+
+### arrayCumSumNonNegative(arr) {#arraycumsumnonnegativearr}
+
+`arrayCumSum` gibi, kaynak dizideki öğelerin kısmi toplamlarının bir dizisini döndürür (kümülatif toplam). `arrayCumSum`'dan farklı olarak, döndürülecek değer sıfırdan küçük olduğunda bu değer sıfırla değiştirilir ve sonraki hesaplama sıfır parametresiyle gerçekleştirilir. Örneğin:
+
+``` sql
+SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res
+```
+
+``` text
+┌─res───────┐
+│ [1,2,0,1] │
+└───────────┘
+```
+
+### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1}
+
+`arr1` öğelerinin artan düzende sıralanması sonucu elde edilen bir dizi döndürür. `func` fonksiyonu belirtilirse, sıralama düzeni, `func` fonksiyonunun dizi (diziler) öğelerine uygulanmasının sonucuyla belirlenir.
+
+Sıralama verimliliğini artırmak için [Schwartzian dönüşümü](https://en.wikipedia.org/wiki/Schwartzian_transform) kullanılır.
+
+Örnek:
+
+``` sql
+SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res;
+```
+
+``` text
+┌─res────────────────┐
+│ ['world', 'hello'] │
+└────────────────────┘
+```
+
+`arraySort` yöntemi hakkında daha fazla bilgi için [Dizilerle çalışmak için işlevler](array_functions.md#array_functions-sort) bölümüne bakın.
+
+### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1}
+
+`arr1` öğelerinin azalan düzende sıralanması sonucu elde edilen bir dizi döndürür. `func` fonksiyonu belirtilirse, sıralama düzeni, `func` fonksiyonunun dizi (diziler) öğelerine uygulanmasının sonucuyla belirlenir.
+
+Örnek:
+
+``` sql
+SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res;
+```
+
+``` text
+┌─res───────────────┐
+│ ['hello','world'] │
+└───────────────────┘
+```
+
+`arrayReverseSort` yöntemi hakkında daha fazla bilgi için [Dizilerle çalışmak için işlevler](array_functions.md#array_functions-reverse-sort) bölümüne bakın.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/)
diff --git a/docs/tr/sql_reference/functions/in_functions.md b/docs/tr/sql_reference/functions/in_functions.md
new file mode 100644
index 00000000000..481c1b61802
--- /dev/null
+++ b/docs/tr/sql_reference/functions/in_functions.md
@@ -0,0 +1,26 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 60
+toc_title: "In operat\xF6r\xFCn\xFCn uygulanmas\u0131"
+---
+
+# In operatörünü uygulamak için işlevler {#functions-for-implementing-the-in-operator}
+
+## in, notIn, globalIn, globalNotIn {#in-functions}
+
+[IN operatörleri](../statements/select.md#select-in-operators) bölümüne bakınız.
+
+## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y}
+
+Birden çok sütunun gruplanmasını sağlayan bir işlev.
+For columns with the types T1, T2, …, it returns a Tuple(T1, T2, …) type tuple containing these columns. There is no cost to execute the function.
+Demetler (tuple) normalde IN operatörlerinin argümanları için ara değerler olarak veya lambda fonksiyonlarının biçimsel parametrelerinin bir listesini oluşturmak için kullanılır. Demetler bir tabloya yazılamaz.
+
+## tupleElement(tuple, n), operatör x.N {#tupleelementtuple-n-operator-x-n}
+
+Bir demetten (tuple) bir sütun alınmasını sağlayan bir işlev.
+‘N’, 1'den başlayan sütun dizinidir. ‘N’ bir sabit olmalı ve demet boyutundan büyük olmayan katı bir pozitif tamsayı olmalıdır.
+İşlevi yürütmenin hiçbir maliyeti yoktur.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/in_functions/)
diff --git a/docs/tr/sql_reference/functions/index.md b/docs/tr/sql_reference/functions/index.md
new file mode 100644
index 00000000000..01961c69526
--- /dev/null
+++ b/docs/tr/sql_reference/functions/index.md
@@ -0,0 +1,74 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "\u0130\u015Flevler"
+toc_priority: 32
+toc_title: "Giri\u015F"
+---
+
+# İşlevler {#functions}
+
+En az\* iki tür fonksiyon vardır: düzenli fonksiyonlar (kısaca “functions” olarak anılır) ve toplama fonksiyonları. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function doesn't depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows).
+
+Bu bölümde düzenli işlevleri ele alıyoruz. Toplama işlevleri için “Aggregate functions” bölümüne bakın.
+
+\* - ‘arrayJoin’ fonksiyonunun ait olduğu üçüncü bir işlev türü daha vardır; tablo fonksiyonlarından da ayrıca söz edilebilir.\*
+
+## Güçlü tipleme {#strong-typing}
+
+Standart SQL'in aksine, ClickHouse güçlü tiplemeye sahiptir. Başka bir deyişle, türler arasında örtük dönüşümler yapmaz. Her işlev belirli bir tür kümesi için çalışır. Bu, bazen tür dönüştürme işlevlerini kullanmanız gerektiği anlamına gelir.
+
+## Ortak alt ifade eliminasyonu {#common-subexpression-elimination}
+
+Bir sorguda aynı AST'ye (aynı yazıma veya sözdizimsel ayrıştırmanın aynı sonucuna) sahip tüm ifadeler aynı değerlere sahip kabul edilir. Bu tür ifadeler birleştirilir ve bir kez yürütülür. Aynı alt sorgular da bu şekilde elenir.
+
+## Sonuç türleri {#types-of-results}
+
+Tüm işlevler sonuç olarak tek bir değer döndürür (birkaç değer veya sıfır değer değil). Sonuç türü genellikle değerlerle değil, yalnızca bağımsız değişken türleriyle tanımlanır. Özel durumlar tupleElement işlevi (a.N işleci) ve toFixedString işlevidir.
+
+## Sabitler {#constants}
+
+Basitlik için, bazı işlevler bazı argümanlar için yalnızca sabitlerle çalışabilir. Örneğin, LIKE operatörünün sağ argümanı sabit olmalıdır.
+Hemen hemen tüm işlevler sabit argümanlar için bir sabit döndürür. İstisna, rasgele sayılar üreten işlevlerdir.
+‘now’ işlevi, farklı zamanlarda çalıştırılan sorgular için farklı değerler döndürür; ancak sabitlik yalnızca tek bir sorgu içinde önemli olduğundan sonuç sabit kabul edilir.
+Sabit bir ifade de sabit olarak kabul edilir (örneğin, LIKE operatörünün sağ yarısı birden fazla sabitten oluşturulabilir).
+
+Fonksiyonlar sabit ve sabit olmayan argümanlar için farklı şekillerde uygulanabilir (farklı kod yürütülür). Ancak, bir sabit için ve yalnızca aynı değeri içeren gerçek bir sütun için sonuçlar birbiriyle eşleşmelidir.
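+
+Bu davranışı göstermek için asgari bir taslak: ‘now’ tek bir sorgu içinde sabit kabul edildiğinden, aynı sorgudaki iki çağrı eşit sonuç verir (çıktıdaki sütun adı ve biçim varsayımsaldır):
+
+``` sql
+SELECT now() = now()
+```
+
+``` text
+┌─equals(now(), now())─┐
+│                    1 │
+└──────────────────────┘
+```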
+
+## NULL işleme {#null-processing}
+
+Fonksiyonlar aşağıdaki davranışlara sahiptir:
+
+- İşlevin argümanlarından en az biri `NULL` ise, fonksiyonun sonucu da `NULL` olur.
+- Özel davranış, her işlevin açıklamasında ayrı ayrı belirtilir. ClickHouse kaynak kodunda bu işlevler `UseDefaultImplementationForNulls=false` ile işaretlidir.
+
+## Değişmezlik {#constancy}
+
+Functions can't change the values of their arguments – any changes are returned as the result. Thus, the result of calculating separate functions does not depend on the order in which the functions are written in the query.
+
+## Hata işleme {#error-handling}
+
+Veriler geçersizse bazı işlevler bir istisna fırlatabilir. Bu durumda sorgu iptal edilir ve istemciye bir hata metni döndürülür. Dağıtılmış işlemede sunuculardan birinde bir istisna oluştuğunda, diğer sunucular da sorguyu iptal etmeye çalışır.
+
+## Argüman ifadelerinin değerlendirilmesi {#evaluation-of-argument-expressions}
+
+Hemen hemen tüm programlama dillerinde, belirli operatörler için argümanlardan biri değerlendirilmeyebilir. Bunlar genellikle `&&`, `||` ve `?:` operatörleridir.
+Ancak ClickHouse'da fonksiyonların (operatörlerin) argümanları her zaman değerlendirilir. Bunun nedeni, sütunların tüm bölümlerinin her satırı ayrı ayrı hesaplamak yerine bir kerede değerlendirilmesidir.
+
+## Dağıtılmış sorgu işleme için işlevlerin gerçekleştirilmesi {#performing-functions-for-distributed-query-processing}
+
+Dağıtılmış sorgu işlemede, sorgu işlemenin mümkün olduğunca çok aşaması uzak sunucularda gerçekleştirilir; aşamaların geri kalanı (ara sonuçların birleştirilmesi ve sonrası) istek sahibi sunucuda gerçekleştirilir.
+
+Bu, işlevlerin farklı sunucularda gerçekleştirilebileceği anlamına gelir.
+Örneğin, `SELECT f(sum(g(x))) FROM distributed_table GROUP BY h(y)` sorgusunda,
+
+- `distributed_table` en az iki parçaya (shard) sahipse, ‘g’ ve ‘h’ fonksiyonları uzak sunucularda, ‘f’ fonksiyonu ise istek sahibi sunucuda gerçekleştirilir.
+- `distributed_table` yalnızca bir parçaya sahipse, ‘f’, ‘g’ ve ‘h’ fonksiyonlarının tümü bu parçanın sunucusunda gerçekleştirilir.
+
+Bir işlevin sonucu genellikle hangi sunucuda gerçekleştirildiğine bağlı değildir. Ancak bazen bu önemlidir.
+Örneğin, sözlüklerle çalışan işlevler, üzerinde çalıştıkları sunucuda bulunan sözlüğü kullanır.
+Başka bir örnek, üzerinde çalıştığı sunucunun adını döndüren `hostName` işlevidir; bu işlev bir `SELECT` sorgusunda sunuculara göre `GROUP BY` yapmak için kullanılabilir.
+
+Sorgudaki bir işlev istek sahibi sunucuda gerçekleştiriliyorsa ancak bunu uzak sunucularda gerçekleştirmeniz gerekiyorsa, işlevi bir ‘any’ toplama işlevine sarabilir veya `GROUP BY` içindeki bir anahtara ekleyebilirsiniz.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/)
diff --git a/docs/tr/sql_reference/functions/introspection.md b/docs/tr/sql_reference/functions/introspection.md
new file mode 100644
index 00000000000..fdc68fe76fb
--- /dev/null
+++ b/docs/tr/sql_reference/functions/introspection.md
@@ -0,0 +1,310 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 65
+toc_title: "\u0130\xE7g\xF6zlem"
+---
+
+# İç Gözlem Fonksiyonları {#introspection-functions}
+
+Sorgu profillemesi amacıyla [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) ve [DWARF](https://en.wikipedia.org/wiki/DWARF) iç gözlemi için bu bölümde açıklanan işlevleri kullanabilirsiniz.
+
+!!! warning "Uyarı"
+    Bu işlevler yavaştır ve güvenlik kaygıları doğurabilir.
+ +İç gözlem fonksiyonlarının düzgün çalışması için: + +- Yüklemek `clickhouse-common-static-dbg` paket. + +- Ayarla... [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) ayar 1. + + For security reasons introspection functions are disabled by default. + +ClickHouse için profiler raporları kaydeder [trace\_log](../../operations/system_tables.md#system_tables-trace_log) sistem tablosu. Tablo ve profiler düzgün yapılandırıldığından emin olun. + +## addressToLine {#addresstoline} + +ClickHouse sunucu işleminin içindeki sanal bellek adresini dosya adına ve clickhouse kaynak kodundaki satır numarasına dönüştürür. + +Resmi ClickHouse paketleri kullanırsanız, yüklemeniz gerekir `clickhouse-common-static-dbg` paket. + +**Sözdizimi** + +``` sql +addressToLine(address_of_binary_instruction) +``` + +**Parametre** + +- `address_of_binary_instruction` ([Uİnt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. + +**Döndürülen değer** + +- Kaynak kodu dosya adı ve bu dosyadaki satır numarası iki nokta üst üste ile sınırlandırılmıştır. + + For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number. + +- Işlev hata ayıklama bilgilerini bulamadıysanız, bir ikili adı. + +- Adres geçerli değilse, boş dize. + +Tür: [Dize](../../sql_reference/data_types/string.md). + +**Örnek** + +İç gözlem işlevlerini etkinleştirme: + +``` sql +SET allow_introspection_functions=1 +``` + +İlk dizeyi seçme `trace_log` sistem tablosu: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-19 +event_time: 2019-11-19 18:57:23 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 421b6855-1858-45a5-8f37-f383409d6d72 +trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439] +``` + +Bu `trace` alan, örnekleme anında yığın izini içerir. + +Tek bir adres için kaynak kodu dosya adını ve satır numarasını alma: + +``` sql +SELECT addressToLine(94784076370703) \G +``` + +``` text +Row 1: +────── +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 +``` + +İşlevin tüm yığın izine uygulanması: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToLine(x), trace), '\n') AS trace_source_code_lines +FROM system.trace_log +LIMIT 1 +\G +``` + +Bu [arrayMap](higher_order_functions.md#higher_order_functions-array-map) işlev, her bir elemanın işlenmesini sağlar `trace` ar arrayray by the `addressToLine` işlev. Gördüğünüz bu işlemin sonucu `trace_source_code_lines` çıktı sütunu. + +``` text +Row 1: +────── +trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so +/usr/lib/debug/usr/bin/clickhouse +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.h:155 +/usr/include/c++/9/bits/atomic_base.h:551 +/usr/lib/debug/usr/bin/clickhouse +/lib/x86_64-linux-gnu/libpthread-2.27.so +/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97 +``` + +## addressToSymbol {#addresstosymbol} + +Clickhouse sunucu işlemi içindeki sanal bellek adresini ClickHouse nesne dosyalarından gelen simgeye dönüştürür. + +**Sözdizimi** + +``` sql +addressToSymbol(address_of_binary_instruction) +``` + +**Parametre** + +- `address_of_binary_instruction` ([Uİnt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. 
+ +**Döndürülen değer** + +- ClickHouse nesne dosyalarından sembol. +- Adres geçerli değilse, boş dize. + +Tür: [Dize](../../sql_reference/data_types/string.md). + +**Örnek** + +İç gözlem işlevlerini etkinleştirme: + +``` sql +SET allow_introspection_functions=1 +``` + +İlk dizeyi seçme `trace_log` sistem tablosu: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +Bu `trace` alan, örnekleme anında yığın izini içerir. + +Tek bir adres için sembol alma: + +``` sql +SELECT addressToSymbol(94138803686098) \G +``` + +``` text +Row 1: +────── +addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +``` + +İşlevin tüm yığın izine uygulanması: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToSymbol(x), trace), '\n') AS trace_symbols +FROM system.trace_log +LIMIT 1 +\G +``` + +Bu [arrayMap](higher_order_functions.md#higher_order_functions-array-map) işlev, her bir elemanın işlenmesini sağlar `trace` ar arrayray by the `addressToSymbols` işlev. Gördüğünüz bu işlemin sonucu `trace_symbols` çıktı sütunu. + +``` text +Row 1: +────── +trace_symbols: _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +_ZNK2DB10Aggregator21executeWithoutKeyImplERPcmPNS0_28AggregateFunctionInstructionEPNS_5ArenaE +_ZN2DB10Aggregator14executeOnBlockESt6vectorIN3COWINS_7IColumnEE13immutable_ptrIS3_EESaIS6_EEmRNS_22AggregatedDataVariantsERS1_IPKS3_SaISC_EERS1_ISE_SaISE_EERb +_ZN2DB10Aggregator14executeOnBlockERKNS_5BlockERNS_22AggregatedDataVariantsERSt6vectorIPKNS_7IColumnESaIS9_EERS6_ISB_SaISB_EERb +_ZN2DB10Aggregator7executeERKSt10shared_ptrINS_17IBlockInputStreamEERNS_22AggregatedDataVariantsE +_ZN2DB27AggregatingBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB28AsynchronousBlockInputStream9calculateEv +_ZNSt17_Function_handlerIFvvEZN2DB28AsynchronousBlockInputStream4nextEvEUlvE_E9_M_invokeERKSt9_Any_data +_ZN14ThreadPoolImplI20ThreadFromGlobalPoolE6workerESt14_List_iteratorIS0_E +_ZZN20ThreadFromGlobalPoolC4IZN14ThreadPoolImplIS_E12scheduleImplIvEET_St8functionIFvvEEiSt8optionalImEEUlvE1_JEEEOS4_DpOT0_ENKUlvE_clEv +_ZN14ThreadPoolImplISt6threadE6workerESt14_List_iteratorIS0_E +execute_native_thread_routine +start_thread +clone +``` + +## demangle {#demangle} + +Kullanarak alabileceğiniz bir sembolü dönüştürür [addressToSymbol](#addresstosymbol) C++ işlev adına işlev. + +**Sözdizimi** + +``` sql +demangle(symbol) +``` + +**Parametre** + +- `symbol` ([Dize](../../sql_reference/data_types/string.md)) — Symbol from an object file. + +**Döndürülen değer** + +- C++ işlevinin adı. +- Bir sembol geçerli değilse boş dize. + +Tür: [Dize](../../sql_reference/data_types/string.md). 
+ +**Örnek** + +İç gözlem işlevlerini etkinleştirme: + +``` sql +SET allow_introspection_functions=1 +``` + +İlk dizeyi seçme `trace_log` sistem tablosu: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +Bu `trace` alan, örnekleme anında yığın izini içerir. + +Tek bir adres için bir işlev adı alma: + +``` sql +SELECT demangle(addressToSymbol(94138803686098)) \G +``` + +``` text +Row 1: +────── +demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +``` + +İşlevin tüm yığın izine uygulanması: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_functions +FROM system.trace_log +LIMIT 1 +\G +``` + +Bu [arrayMap](higher_order_functions.md#higher_order_functions-array-map) işlev, her bir elemanın işlenmesini sağlar `trace` ar arrayray by the `demangle` işlev. Gördüğünüz bu işlemin sonucu `trace_functions` çıktı sütunu. + +``` text +Row 1: +────── +trace_functions: DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) const +DB::Aggregator::executeOnBlock(std::vector::immutable_ptr, std::allocator::immutable_ptr > >, unsigned long, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::executeOnBlock(DB::Block const&, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::execute(std::shared_ptr const&, DB::AggregatedDataVariants&) +DB::AggregatingBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::AsynchronousBlockInputStream::calculate() +std::_Function_handler::_M_invoke(std::_Any_data const&) +ThreadPoolImpl::worker(std::_List_iterator) +ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const +ThreadPoolImpl::worker(std::_List_iterator) +execute_native_thread_routine +start_thread +clone +``` diff --git a/docs/tr/sql_reference/functions/ip_address_functions.md b/docs/tr/sql_reference/functions/ip_address_functions.md new file mode 100644 index 00000000000..2caa7c49b2a --- /dev/null +++ b/docs/tr/sql_reference/functions/ip_address_functions.md @@ -0,0 +1,248 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 55 +toc_title: "IP adresleriyle \xE7al\u0131\u015Fma" +--- + +# IP adresleriyle çalışmak için işlevler {#functions-for-working-with-ip-addresses} + +## Ipv4numtostring (num) {#ipv4numtostringnum} + +Bir Uınt32 numarası alır. Big endian'da bir IPv4 adresi olarak yorumlar. 
Karşılık gelen IPv4 adresini a. B. C. d biçiminde içeren bir dize döndürür (ondalık formda nokta ile ayrılmış sayılar). + +## Ipv4stringtonum (s) {#ipv4stringtonums} + +IPv4NumToString ters işlevi. IPv4 adresi geçersiz bir biçime sahipse, 0 döndürür. + +## Ipv4numtostringclassc (num) {#ipv4numtostringclasscnum} + +Ipv4numtostring'e benzer, ancak son sekizli yerine xxx kullanıyor. + +Örnek: + +``` sql +SELECT + IPv4NumToStringClassC(ClientIP) AS k, + count() AS c +FROM test.hits +GROUP BY k +ORDER BY c DESC +LIMIT 10 +``` + +``` text +┌─k──────────────┬─────c─┐ +│ 83.149.9.xxx │ 26238 │ +│ 217.118.81.xxx │ 26074 │ +│ 213.87.129.xxx │ 25481 │ +│ 83.149.8.xxx │ 24984 │ +│ 217.118.83.xxx │ 22797 │ +│ 78.25.120.xxx │ 22354 │ +│ 213.87.131.xxx │ 21285 │ +│ 78.25.121.xxx │ 20887 │ +│ 188.162.65.xxx │ 19694 │ +│ 83.149.48.xxx │ 17406 │ +└────────────────┴───────┘ +``` + +Kullanıl sincedığından beri ‘xxx’ son derece sıradışı, bu gelecekte değiştirilebilir. Bu parçanın tam biçimine güvenmemenizi öneririz. + +### Ipv6numtostring (x) {#ipv6numtostringx} + +IPv6 adresini ikili biçimde içeren bir FixedString(16) değerini kabul eder. Bu adresi metin biçiminde içeren bir dize döndürür. +IPv6 eşlemeli IPv4 adresleri ::ffff:111.222.33.44 biçiminde çıktıdır. Örnekler: + +``` sql +SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr +``` + +``` text +┌─addr─────────┐ +│ 2a02:6b8::11 │ +└──────────────┘ +``` + +``` sql +SELECT + IPv6NumToString(ClientIP6 AS k), + count() AS c +FROM hits_all +WHERE EventDate = today() AND substring(ClientIP6, 1, 12) != unhex('00000000000000000000FFFF') +GROUP BY k +ORDER BY c DESC +LIMIT 10 +``` + +``` text +┌─IPv6NumToString(ClientIP6)──────────────┬─────c─┐ +│ 2a02:2168:aaa:bbbb::2 │ 24695 │ +│ 2a02:2698:abcd:abcd:abcd:abcd:8888:5555 │ 22408 │ +│ 2a02:6b8:0:fff::ff │ 16389 │ +│ 2a01:4f8:111:6666::2 │ 16016 │ +│ 2a02:2168:888:222::1 │ 15896 │ +│ 2a01:7e00::ffff:ffff:ffff:222 │ 14774 │ +│ 2a02:8109:eee:ee:eeee:eeee:eeee:eeee │ 14443 │ +│ 2a02:810b:8888:888:8888:8888:8888:8888 │ 14345 │ +│ 2a02:6b8:0:444:4444:4444:4444:4444 │ 14279 │ +│ 2a01:7e00::ffff:ffff:ffff:ffff │ 13880 │ +└─────────────────────────────────────────┴───────┘ +``` + +``` sql +SELECT + IPv6NumToString(ClientIP6 AS k), + count() AS c +FROM hits_all +WHERE EventDate = today() +GROUP BY k +ORDER BY c DESC +LIMIT 10 +``` + +``` text +┌─IPv6NumToString(ClientIP6)─┬──────c─┐ +│ ::ffff:94.26.111.111 │ 747440 │ +│ ::ffff:37.143.222.4 │ 529483 │ +│ ::ffff:5.166.111.99 │ 317707 │ +│ ::ffff:46.38.11.77 │ 263086 │ +│ ::ffff:79.105.111.111 │ 186611 │ +│ ::ffff:93.92.111.88 │ 176773 │ +│ ::ffff:84.53.111.33 │ 158709 │ +│ ::ffff:217.118.11.22 │ 154004 │ +│ ::ffff:217.118.11.33 │ 148449 │ +│ ::ffff:217.118.11.44 │ 148243 │ +└────────────────────────────┴────────┘ +``` + +## Ipv6stringtonum (s) {#ipv6stringtonums} + +IPv6NumToString ters işlevi. IPv6 adresi geçersiz bir biçime sahipse, bir boş bayt dizesi döndürür. +HEX büyük veya küçük harf olabilir. + +## Ipv4toıpv6 (x) {#ipv4toipv6x} + +Alır bir `UInt32` numara. Bir IPv4 adresi olarak yorumlar [büyük endian](https://en.wikipedia.org/wiki/Endianness). Ret aur ANS a `FixedString(16)` IPv6 adresini ikili biçimde içeren değer. 
Örnekler: + +``` sql +SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr +``` + +``` text +┌─addr───────────────┐ +│ ::ffff:192.168.0.1 │ +└────────────────────┘ +``` + +## cutİPv6 (x, bytesToCutForİPv6, bytesToCutForİPv4) {#cutipv6x-bytestocutforipv6-bytestocutforipv4} + +IPv6 adresini ikili biçimde içeren bir FixedString(16) değerini kabul eder. Metin biçiminde kaldırılan belirtilen bayt sayısının adresini içeren bir dize döndürür. Mesela: + +``` sql +WITH + IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D') AS ipv6, + IPv4ToIPv6(IPv4StringToNum('192.168.0.1')) AS ipv4 +SELECT + cutIPv6(ipv6, 2, 0), + cutIPv6(ipv4, 0, 2) +``` + +``` text +┌─cutIPv6(ipv6, 2, 0)─────────────────┬─cutIPv6(ipv4, 0, 2)─┐ +│ 2001:db8:ac10:fe01:feed:babe:cafe:0 │ ::ffff:192.168.0.0 │ +└─────────────────────────────────────┴─────────────────────┘ +``` + +## Ipv4cidrtorange(ıpv4, cıdr), {#ipv4cidrtorangeipv4-cidr} + +İçeren bir IPv4 ve bir Uint8 değerini kabul eder [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Alt ağın alt aralığını ve daha yüksek aralığını içeren iki IPv4 içeren bir tuple döndürür. + +``` sql +SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) +``` + +``` text +┌─IPv4CIDRToRange(toIPv4('192.168.5.2'), 16)─┐ +│ ('192.168.0.0','192.168.255.255') │ +└────────────────────────────────────────────┘ +``` + +## Ipv6cidrtorange(ıpv6, cıdr), {#ipv6cidrtorangeipv6-cidr} + +CIDR'Yİ içeren bir IPv6 ve bir Uİnt8 değerini kabul eder. Alt ağın alt aralığını ve daha yüksek aralığını içeren iki IPv6 içeren bir tuple döndürür. + +``` sql +SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); +``` + +``` text +┌─IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32)─┐ +│ ('2001:db8::','2001:db8:ffff:ffff:ffff:ffff:ffff:ffff') │ +└────────────────────────────────────────────────────────────────────────┘ +``` + +## toıpv4 (dize) {#toipv4string} + +İçin bir takma ad `IPv4StringToNum()` bu, IPv4 adresinin bir dize formunu alır ve değerini döndürür [Ipv44](../../sql_reference/data_types/domains/ipv4.md) tarafından döndürülen değere eşit ikili olan tür `IPv4StringToNum()`. + +``` sql +WITH + '171.225.130.45' as IPv4_string +SELECT + toTypeName(IPv4StringToNum(IPv4_string)), + toTypeName(toIPv4(IPv4_string)) +``` + +``` text +┌─toTypeName(IPv4StringToNum(IPv4_string))─┬─toTypeName(toIPv4(IPv4_string))─┐ +│ UInt32 │ IPv4 │ +└──────────────────────────────────────────┴─────────────────────────────────┘ +``` + +``` sql +WITH + '171.225.130.45' as IPv4_string +SELECT + hex(IPv4StringToNum(IPv4_string)), + hex(toIPv4(IPv4_string)) +``` + +``` text +┌─hex(IPv4StringToNum(IPv4_string))─┬─hex(toIPv4(IPv4_string))─┐ +│ ABE1822D │ ABE1822D │ +└───────────────────────────────────┴──────────────────────────┘ +``` + +## toıpv6 (dize) {#toipv6string} + +İçin bir takma ad `IPv6StringToNum()` bu, IPv6 adresinin bir dize formunu alır ve değerini döndürür [IPv6](../../sql_reference/data_types/domains/ipv6.md) tarafından döndürülen değere eşit ikili olan tür `IPv6StringToNum()`. 
+ +``` sql +WITH + '2001:438:ffff::407d:1bc1' as IPv6_string +SELECT + toTypeName(IPv6StringToNum(IPv6_string)), + toTypeName(toIPv6(IPv6_string)) +``` + +``` text +┌─toTypeName(IPv6StringToNum(IPv6_string))─┬─toTypeName(toIPv6(IPv6_string))─┐ +│ FixedString(16) │ IPv6 │ +└──────────────────────────────────────────┴─────────────────────────────────┘ +``` + +``` sql +WITH + '2001:438:ffff::407d:1bc1' as IPv6_string +SELECT + hex(IPv6StringToNum(IPv6_string)), + hex(toIPv6(IPv6_string)) +``` + +``` text +┌─hex(IPv6StringToNum(IPv6_string))─┬─hex(toIPv6(IPv6_string))─────────┐ +│ 20010438FFFF000000000000407D1BC1 │ 20010438FFFF000000000000407D1BC1 │ +└───────────────────────────────────┴──────────────────────────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/tr/sql_reference/functions/json_functions.md b/docs/tr/sql_reference/functions/json_functions.md new file mode 100644 index 00000000000..95aaa1768f2 --- /dev/null +++ b/docs/tr/sql_reference/functions/json_functions.md @@ -0,0 +1,231 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 56 +toc_title: "JSON ile \xE7al\u0131\u015Fmak." +--- + +# Json ile çalışmak için fonksiyonlar {#functions-for-working-with-json} + +Üye Olarak.Metrica, JSON kullanıcılar tarafından oturum parametreleri olarak iletilir. Bu JSON ile çalışmak için bazı özel fonksiyonlar var. (Çoğu durumda, JSONs ek olarak önceden işlenir ve elde edilen değerler işlenmiş biçimlerinde ayrı sütunlara konur .) Tüm bu işlevler, JSON'UN ne olabileceğine dair güçlü varsayımlara dayanır, ancak işi yapmak için mümkün olduğunca az şey yapmaya çalışırlar. + +Aşağıdaki varsayımlar yapılır: + +1. Alan adı (işlev bağımsız değişkeni) sabit olmalıdır. +2. Alan adı bir şekilde json'da kanonik olarak kodlanmıştır. Mesela: `visitParamHas('{"abc":"def"}', 'abc') = 1`, ama `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` +3. Alanlar, herhangi bir yuvalama düzeyinde, ayrım gözetmeksizin aranır. Birden çok eşleşen alan varsa, ilk olay kullanılır. +4. JSON, dize değişmezleri dışında boşluk karakterlerine sahip değildir. + +## visitParamHas (params, isim) {#visitparamhasparams-name} + +İle bir alan olup olmadığını denetler ‘name’ ad. + +## visitParamExtractUİnt (params, isim) {#visitparamextractuintparams-name} + +Uint64 adlı alanın değerinden ayrıştırır ‘name’. Bu bir dize alanı ise, dizenin başlangıcından itibaren bir sayıyı ayrıştırmaya çalışır. Alan yoksa veya varsa ancak bir sayı içermiyorsa, 0 döndürür. + +## visitParamExtractİnt (params, isim) {#visitparamextractintparams-name} + +Int64 için olduğu gibi. + +## visitParamExtractFloat (params, isim) {#visitparamextractfloatparams-name} + +Float64 için olduğu gibi. + +## visitParamExtractBool (params, isim) {#visitparamextractboolparams-name} + +True/false değerini ayrıştırır. Sonuç Uİnt8. + +## visitParamExtractRaw (params, isim) {#visitparamextractrawparams-name} + +Ayırıcılar da dahil olmak üzere bir alanın değerini döndürür. + +Örnekler: + +``` sql +visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' +visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' +``` + +## visitParamExtractString (params, isim) {#visitparamextractstringparams-name} + +Dizeyi çift tırnak içinde ayrıştırır. Değeri unescaped. Unescaping başarısız olursa, boş bir dize döndürür. 
+
+Örnekler:
+
+``` sql
+visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'
+visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'
+visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''
+visitParamExtractString('{"abc":"hello}', 'abc') = ''
+```
+
+Temel Çok Dilli Düzlemde olmayan `\uXXXX\uYYYY` biçimindeki kod noktaları için şu anda destek yoktur (bunlar UTF-8 yerine CESU-8'e dönüştürülür).
+
+Aşağıdaki işlevler [simdjson](https://github.com/lemire/simdjson) kütüphanesine dayanır ve daha karmaşık JSON ayrıştırma gereksinimleri için tasarlanmıştır. Yukarıda belirtilen 2. varsayım hâlâ geçerlidir.
+
+## isValidJSON(json) {#isvalidjsonjson}
+
+Geçirilen dizenin geçerli bir JSON olup olmadığını kontrol eder.
+
+Örnekler:
+
+``` sql
+SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}') = 1
+SELECT isValidJSON('not a json') = 0
+```
+
+## JSONHas(json\[, indices\_or\_keys\]…) {#jsonhasjson-indices-or-keys}
+
+Değer JSON belgesinde varsa, `1` döndürülür.
+
+Değer yoksa, `0` döndürülür.
+
+Örnekler:
+
+``` sql
+SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 1
+SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0
+```
+
+`indices_or_keys`, her biri dize veya tamsayı olabilen sıfır veya daha fazla argümandan oluşan bir listedir.
+
+- Dize = nesne üyesine anahtarla erişim.
+- Pozitif tamsayı = baştan n'inci üyeye/anahtara erişim.
+- Negatif tamsayı = sondan n'inci üyeye/anahtara erişim.
+
+Elemanın minimum endeksi 1'dir; dolayısıyla 0 numaralı öğe yoktur.
+
+Hem JSON dizilerine hem de JSON nesnelerine erişmek için tamsayılar kullanabilirsiniz.
+
+Örneğin:
+
+``` sql
+SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'a'
+SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2) = 'b'
+SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1) = 'b'
+SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a'
+SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello'
+```
+
+## JSONLength(json\[, indices\_or\_keys\]…) {#jsonlengthjson-indices-or-keys}
+
+Bir JSON dizisinin veya bir JSON nesnesinin uzunluğunu döndürür.
+
+Değer yoksa veya yanlış bir türe sahipse, `0` döndürülür.
+
+Örnekler:
+
+``` sql
+SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3
+SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2
+```
+
+## JSONType(json\[, indices\_or\_keys\]…) {#jsontypejson-indices-or-keys}
+
+Bir JSON değerinin türünü döndürür.
+
+Değer yoksa, `Null` döndürülür.
+
+Örnekler:
+
+``` sql
+SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}') = 'Object'
+SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String'
+SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array'
+```
+
+## JSONExtractUInt(json\[, indices\_or\_keys\]…) {#jsonextractuintjson-indices-or-keys}
+
+## JSONExtractInt(json\[, indices\_or\_keys\]…) {#jsonextractintjson-indices-or-keys}
+
+## JSONExtractFloat(json\[, indices\_or\_keys\]…) {#jsonextractfloatjson-indices-or-keys}
+
+## JSONExtractBool(json\[, indices\_or\_keys\]…) {#jsonextractbooljson-indices-or-keys}
+
+Bir JSON'u ayrıştırır ve bir değer ayıklar. Bu işlevler `visitParam` işlevlerine benzer.
+
+Değer yoksa veya yanlış bir türe sahipse, `0` döndürülür.
+ +Örnekler: + +``` sql +SELECT JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1) = -100 +SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200.0 +SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300 +``` + +## JSONExtractString(json\[, indices\_or\_keys\]…) {#jsonextractstringjson-indices-or-keys} + +Bir json ayrıştırır ve bir dize ayıklayın. Bu işlev benzer `visitParamExtractString` işlevler. + +Değer yoksa veya yanlış bir tür varsa, boş bir dize döndürülür. + +Değeri unescaped. Unescaping başarısız olursa, boş bir dize döndürür. + +Örnekler: + +``` sql +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'hello' +SELECT JSONExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' +SELECT JSONExtractString('{"abc":"\\u263a"}', 'abc') = '☺' +SELECT JSONExtractString('{"abc":"\\u263"}', 'abc') = '' +SELECT JSONExtractString('{"abc":"hello}', 'abc') = '' +``` + +## JSONExtract(json\[, indices\_or\_keys…\], return\_type) {#jsonextractjson-indices-or-keys-return-type} + +Bir Json ayrıştırır ve verilen ClickHouse veri türünün bir değerini çıkarır. + +Bu, önceki bir genellemedir `JSONExtract` işlevler. +Bu demektir +`JSONExtract(..., 'String')` tam olarak aynı döndürür `JSONExtractString()`, +`JSONExtract(..., 'Float64')` tam olarak aynı döndürür `JSONExtractFloat()`. + +Örnekler: + +``` sql +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))') = ('hello',[-100,200,300]) +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)') = ([-100,200,300],'hello') +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))') = [-100, NULL, NULL] +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)') = NULL +SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8') = 1 +SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Thursday' +SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday' +``` + +## JSONExtractKeysAndValues(json\[, indices\_or\_keys…\], value\_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type} + +Değerlerin verilen ClickHouse veri türünde olduğu bir json'dan anahtar değer çiftlerini ayrıştırın. + +Örnek: + +``` sql +SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; +``` + +## JSONExtractRaw(json\[, indices\_or\_keys\]…) {#jsonextractrawjson-indices-or-keys} + +Json'un bir bölümünü döndürür. + +Bölüm yoksa veya yanlış bir türe sahipse, boş bir dize döndürülür. + +Örnek: + +``` sql +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' +``` + +## JSONExtractArrayRaw(json\[, indices\_or\_keys\]…) {#jsonextractarrayrawjson-indices-or-keys} + +Her biri ayrıştırılmamış dize olarak temsil edilen json dizisinin öğeleriyle bir dizi döndürür. + +Bölüm yoksa veya dizi değilse, boş bir dizi döndürülür. 
+
+Örnek:
+
+``` sql
+SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/json_functions/)
diff --git a/docs/tr/sql_reference/functions/logical_functions.md b/docs/tr/sql_reference/functions/logical_functions.md
new file mode 100644
index 00000000000..543226319da
--- /dev/null
+++ b/docs/tr/sql_reference/functions/logical_functions.md
@@ -0,0 +1,22 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 37
+toc_title: "Mant\u0131kl\u0131"
+---
+
+# Mantıksal fonksiyonlar {#logical-functions}
+
+Mantıksal işlevler herhangi bir sayısal türü kabul eder, ancak 0 veya 1'e eşit bir UInt8 sayısı döndürür.
+
+Argüman olarak sıfır “false”, sıfır olmayan herhangi bir değer “true” olarak kabul edilir.
+
+## and, AND operatörü {#and-and-operator}
+
+## or, OR operatörü {#or-or-operator}
+
+## not, NOT operatörü {#not-not-operator}
+
+## xor {#xor}
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/)
diff --git a/docs/tr/sql_reference/functions/machine_learning_functions.md b/docs/tr/sql_reference/functions/machine_learning_functions.md
new file mode 100644
index 00000000000..a1d2857ed82
--- /dev/null
+++ b/docs/tr/sql_reference/functions/machine_learning_functions.md
@@ -0,0 +1,20 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 64
+toc_title: "Makine \xD6\u011Frenme Fonksiyonlar\u0131"
+---
+
+# Makine Öğrenme Fonksiyonları {#machine-learning-functions}
+
+## evalMLMethod (tahmin) {#machine_learning_methods-evalmlmethod}
+
+Uydurulmuş (fitted) regresyon modelleriyle tahmin için `evalMLMethod` işlevi kullanılır. `linearRegression` bölümündeki bağlantıya bakınız.
+
+### Stokastik Doğrusal Regresyon {#stochastic-linear-regression}
+
+[stochasticLinearRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlinearregression) toplama fonksiyonu, doğrusal model ve MSE kayıp fonksiyonunu kullanarak stokastik gradyan inişi yöntemini uygular. Yeni veriler üzerinde tahmin yapmak için `evalMLMethod` kullanın.
+
+### Stokastik Lojistik Regresyon {#stochastic-logistic-regression}
+
+[stochasticLogisticRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlogisticregression) toplama işlevi, ikili sınıflandırma problemi için stokastik gradyan inişi yöntemini uygular. Yeni veriler üzerinde tahmin yapmak için `evalMLMethod` kullanın.
diff --git a/docs/tr/sql_reference/functions/math_functions.md b/docs/tr/sql_reference/functions/math_functions.md
new file mode 100644
index 00000000000..dee89681e73
--- /dev/null
+++ b/docs/tr/sql_reference/functions/math_functions.md
@@ -0,0 +1,128 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 44
+toc_title: Matematiksel
+---
+
+# Matematiksel fonksiyonlar {#mathematical-functions}
+
+Tüm işlevler bir Float64 sayısı döndürür. Sonucun doğruluğu mümkün olan en yüksek hassasiyete yakındır, ancak sonuç, ilgili gerçek sayıya en yakın makinede temsil edilebilir sayıyla çakışmayabilir.
+
+## e() {#e}
+
+e sayısına yakın bir Float64 sayısı döndürür.
+
+## pi() {#pi}
+
+π sayısına yakın bir Float64 sayısı döndürür.
+
+## exp(x) {#expx}
+
+Sayısal bir bağımsız değişken kabul eder ve bağımsız değişkenin üstel değerine yakın bir Float64 sayısı döndürür.
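+
+Örnek (açıklayıcı bir gösterim; çıktının tablo biçimi sunucu ayarlarına göre değişebilir):
+
+``` sql
+SELECT exp(1) AS e_value -- exp(1), e sayısına yakın bir değer döndürür
+```
+
+``` text
+┌───────────e_value─┐
+│ 2.718281828459045 │
+└───────────────────┘
+```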
+
+## log(x), ln(x) {#logx-lnx}
+
+Sayısal bir bağımsız değişken kabul eder ve bağımsız değişkenin doğal logaritmasına yakın bir Float64 sayısı döndürür.
+
+## exp2(x) {#exp2x}
+
+Sayısal bir bağımsız değişken kabul eder ve 2'nin x'inci kuvvetine yakın bir Float64 sayısı döndürür.
+
+## log2(x) {#log2x}
+
+Sayısal bir bağımsız değişken kabul eder ve bağımsız değişkenin ikili logaritmasına yakın bir Float64 sayısı döndürür.
+
+## exp10(x) {#exp10x}
+
+Sayısal bir bağımsız değişken kabul eder ve 10'un x'inci kuvvetine yakın bir Float64 sayısı döndürür.
+
+## log10(x) {#log10x}
+
+Sayısal bir bağımsız değişken kabul eder ve bağımsız değişkenin ondalık logaritmasına yakın bir Float64 sayısı döndürür.
+
+## sqrt(x) {#sqrtx}
+
+Sayısal bir bağımsız değişken kabul eder ve bağımsız değişkenin kareköküne yakın bir Float64 sayısı döndürür.
+
+## cbrt(x) {#cbrtx}
+
+Sayısal bir bağımsız değişken kabul eder ve bağımsız değişkenin küp köküne yakın bir Float64 sayısı döndürür.
+
+## erf(x) {#erfx}
+
+Eğer ‘x’ negatif değilse, erf(x / σ√2), standart sapması ‘σ’ olan normal dağılıma sahip bir rasgele değişkenin, beklenen değerden ‘x’ten fazla sapan bir değer alma olasılığıdır.
+
+Örnek (üç sigma kuralı):
+
+``` sql
+SELECT erf(3 / sqrt(2))
+```
+
+``` text
+┌─erf(divide(3, sqrt(2)))─┐
+│      0.9973002039367398 │
+└─────────────────────────┘
+```
+
+## erfc(x) {#erfcx}
+
+Sayısal bir bağımsız değişken kabul eder ve büyük ‘x’ değerleri için hassasiyet kaybı olmadan 1 - erf(x) değerine yakın bir Float64 sayısı döndürür.
+
+## lgamma(x) {#lgammax}
+
+Gama fonksiyonunun logaritması.
+
+## tgamma(x) {#tgammax}
+
+Gama fonksiyonu.
+
+## sin(x) {#sinx}
+
+Sinüs.
+
+## cos(x) {#cosx}
+
+Kosinüs.
+
+## tan(x) {#tanx}
+
+Teğet.
+
+## asin(x) {#asinx}
+
+Ark sinüsü.
+
+## acos(x) {#acosx}
+
+Ark kosinüsü.
+
+## atan(x) {#atanx}
+
+Ark teğeti.
+
+## pow(x, y), power(x, y) {#powx-y-powerx-y}
+
+İki sayısal bağımsız değişken x ve y alır; x'in y'inci kuvvetine yakın bir Float64 sayısı döndürür.
+
+## intExp2 {#intexp2}
+
+Sayısal bir bağımsız değişken kabul eder ve 2'nin x'inci kuvvetine yakın bir UInt64 sayısı döndürür.
+
+## intExp10 {#intexp10}
+
+Sayısal bir bağımsız değişken kabul eder ve 10'un x'inci kuvvetine yakın bir UInt64 sayısı döndürür.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/math_functions/)
diff --git a/docs/tr/sql_reference/functions/other_functions.md b/docs/tr/sql_reference/functions/other_functions.md
new file mode 100644
index 00000000000..052f289c64f
--- /dev/null
+++ b/docs/tr/sql_reference/functions/other_functions.md
@@ -0,0 +1,1079 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 66
+toc_title: "Di\u011Fer"
+---
+
+# Diğer fonksiyonlar {#other-functions}
+
+## hostName() {#hostname}
+
+Bu işlevin üzerinde çalıştığı ana bilgisayarın adını içeren bir dize döndürür. Dağıtılmış işlemde, işlev uzak bir sunucuda çalıştırılıyorsa bu, uzak sunucunun ana bilgisayar adıdır.
+
+## FQDN {#fqdn}
+
+Tam nitelikli etki alanı adını döndürür.
+
+**Sözdizimi**
+
+``` sql
+fqdn();
+```
+
+Bu işlev büyük/küçük harf duyarsızdır.
+
+**Döndürülen değer**
+
+- Tam nitelikli etki alanı adını içeren dize.
+
+Tür: `String`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT FQDN();
+```
+
+Sonuç:
+
+``` text
+┌─FQDN()──────────────────────────┐
+│ clickhouse.ru-central1.internal │
+└─────────────────────────────────┘
+```
+
+## basename {#basename}
+
+Son eğik çizgi veya ters eğik çizgiden sonra bir dizenin sondaki kısmını ayıklar.
Bu işlev, genellikle bir yoldan dosya adını ayıklamak için kullanılır.
+
+``` sql
+basename( expr )
+```
+
+**Parametre**
+
+- `expr` — [Dize](../../sql_reference/data_types/string.md) türünde bir değerle sonuçlanan ifade. Ortaya çıkan değerde tüm ters eğik çizgilerin kaçışlanmış olması gerekir.
+
+**Döndürülen Değer**
+
+Şunu içeren bir dize:
+
+- Son eğik çizgi veya ters eğik çizgiden sonra dizenin sondaki kısmı.
+
+    Giriş dizesi eğik çizgi veya ters eğik çizgiyle biten bir yol içeriyorsa (örneğin `/` veya `c:\`), işlev boş bir dize döndürür.
+
+- Eğik çizgi veya ters eğik çizgi yoksa orijinal dize.
+
+**Örnek**
+
+``` sql
+SELECT 'some/long/path/to/file' AS a, basename(a)
+```
+
+``` text
+┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐
+│ some\long\path\to\file │ file                                   │
+└────────────────────────┴────────────────────────────────────────┘
+```
+
+``` sql
+SELECT 'some\\long\\path\\to\\file' AS a, basename(a)
+```
+
+``` text
+┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐
+│ some\long\path\to\file │ file                                   │
+└────────────────────────┴────────────────────────────────────────┘
+```
+
+``` sql
+SELECT 'some-file-name' AS a, basename(a)
+```
+
+``` text
+┌─a──────────────┬─basename('some-file-name')─┐
+│ some-file-name │ some-file-name             │
+└────────────────┴────────────────────────────┘
+```
+
+## visibleWidth(x) {#visiblewidthx}
+
+Değerleri konsola metin biçiminde (sekmeyle ayrılmış) çıkarırken yaklaşık genişliği hesaplar.
+Bu işlev, sistem tarafından Pretty biçimlerinin uygulanmasında kullanılır.
+
+`NULL`, `Pretty` biçimlerinde `NULL` metnine karşılık gelen bir dize olarak temsil edilir.
+
+``` sql
+SELECT visibleWidth(NULL)
+```
+
+``` text
+┌─visibleWidth(NULL)─┐
+│                  4 │
+└────────────────────┘
+```
+
+## toTypeName(x) {#totypenamex}
+
+Geçirilen bağımsız değişkenin tür adını içeren bir dize döndürür.
+
+Fonksiyona girdi olarak `NULL` geçirilirse, ClickHouse'daki iç `NULL` temsiline karşılık gelen `Nullable(Nothing)` türü döndürülür.
+
+## blockSize() {#function-blocksize}
+
+Bloğun boyutunu alır.
+ClickHouse'da sorgular her zaman bloklar (sütun parçaları kümeleri) üzerinde çalıştırılır. Bu işlev, çağrıldığı bloğun boyutunu almanızı sağlar.
+
+## materialize(x) {#materializex}
+
+Bir sabiti, yalnızca tek bir değer içeren tam bir sütuna dönüştürür.
+ClickHouse'da tam sütunlar ve sabitler bellekte farklı şekilde temsil edilir. İşlevler, sabit argümanlar ve normal argümanlar için farklı şekilde çalışır (farklı kod yürütülür), ancak sonuç hemen hemen her zaman aynıdır. Bu işlev, bu davranışın hata ayıklaması içindir.
+
+## ignore(…) {#ignore}
+
+`NULL` dahil olmak üzere herhangi bir argümanı kabul eder. Her zaman 0 döndürür.
+Ancak argüman yine de değerlendirilir. Bu, kıyaslamalar (benchmark) için kullanılabilir.
+
+## sleep(saniye) {#sleepseconds}
+
+Her veri bloğunda ‘seconds’ saniye uyur. Bir tamsayı veya kayan noktalı sayı belirtebilirsiniz.
+
+## sleepEachRow(saniye) {#sleepeachrowseconds}
+
+Her satırda ‘seconds’ saniye uyur. Bir tamsayı veya kayan noktalı sayı belirtebilirsiniz.
+
+## currentDatabase() {#currentdatabase}
+
+Geçerli veritabanının adını döndürür.
+Bu işlevi, veritabanını belirtmeniz gereken bir CREATE TABLE sorgusunda tablo motoru parametrelerinde kullanabilirsiniz.
+
+## currentUser() {#other-function-currentuser}
+
+Geçerli kullanıcının oturum açma adını döndürür. Dağıtılmış bir sorgu durumunda, sorguyu başlatan kullanıcının oturum açma adı döndürülür.
+
+``` sql
+SELECT currentUser();
+```
+
+Takma ad: `user()`, `USER()`.
+
+**Döndürülen değerler**
+
+- Geçerli kullanıcının oturum açma adı.
+- Dağıtılmış sorgu durumunda, sorguyu başlatan kullanıcının oturum açma adı.
+
+Tür: `String`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT currentUser();
+```
+
+Sonuç:
+
+``` text
+┌─currentUser()─┐
+│ default       │
+└───────────────┘
+```
+
+## isFinite(x) {#isfinitex}
+
+Float32 veya Float64 kabul eder; bağımsız değişken sonsuz değilse ve NaN değilse 1'e eşit bir UInt8, aksi halde 0 döndürür.
+
+## isInfinite(x) {#isinfinitex}
+
+Float32 veya Float64 kabul eder; bağımsız değişken sonsuz ise 1'e eşit bir UInt8, aksi takdirde 0 döndürür. NaN için 0 döndürüldüğünü unutmayın.
+
+## ifNotFinite {#ifnotfinite}
+
+Kayan nokta değerinin sonlu olup olmadığını kontrol eder.
+
+**Sözdizimi**
+
+    ifNotFinite(x,y)
+
+**Parametre**
+
+- `x` — Sonsuzluk için kontrol edilecek değer. Tür: [Float\*](../../sql_reference/data_types/float.md).
+- `y` — Yedek değer. Tür: [Float\*](../../sql_reference/data_types/float.md).
+
+**Döndürülen değer**
+
+- `x` sonluysa `x`.
+- `x` sonlu değilse `y`.
+
+**Örnek**
+
+Sorgu:
+
+    SELECT 1/0 as infimum, ifNotFinite(infimum,42)
+
+Sonuç:
+
+    ┌─infimum─┬─ifNotFinite(divide(1, 0), 42)─┐
+    │     inf │                            42 │
+    └─────────┴───────────────────────────────┘
+
+[Üçlü operatörü](conditional_functions.md#ternary-operator) kullanarak benzer bir sonuç alabilirsiniz: `isFinite(x) ? x : y`.
+
+## isNaN(x) {#isnanx}
+
+Float32 veya Float64 kabul eder; bağımsız değişken NaN ise 1'e eşit bir UInt8, aksi takdirde 0 döndürür.
+
+## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column}
+
+Sabit dizeler kabul eder: veritabanı adı, tablo adı ve sütun adı. Sütun varsa 1'e eşit bir UInt8 sabit ifadesi, aksi halde 0 döndürür. Hostname parametresi ayarlanmışsa, sınama uzak bir sunucuda çalışır.
+Tablo yoksa, işlev bir istisna atar.
+İç içe veri yapısındaki öğeler için işlev, bir sütunun varlığını denetler. İç içe veri yapısının kendisi için işlev 0 döndürür.
+
+## bar {#function-bar}
+
+Unicode-art diyagramı oluşturmaya izin verir.
+
+`bar(x, min, max, width)`, genişliği `(x - min)` ile orantılı olan ve `x = max` olduğunda `width` karaktere eşit olan bir bant çizer.
+
+Parametre:
+
+- `x` — Görüntülenecek boyut.
+- `min, max` — Tamsayı sabitleri. Değer `Int64` içine sığmalıdır.
+- `width` — Sabit, pozitif tamsayı; kesirli olabilir.
+
+Bant, bir sembolün sekizde birine doğrulukla çizilir.
+ +Örnek: + +``` sql +SELECT + toHour(EventTime) AS h, + count() AS c, + bar(c, 0, 600000, 20) AS bar +FROM test.hits +GROUP BY h +ORDER BY h ASC +``` + +``` text +┌──h─┬──────c─┬─bar────────────────┐ +│ 0 │ 292907 │ █████████▋ │ +│ 1 │ 180563 │ ██████ │ +│ 2 │ 114861 │ ███▋ │ +│ 3 │ 85069 │ ██▋ │ +│ 4 │ 68543 │ ██▎ │ +│ 5 │ 78116 │ ██▌ │ +│ 6 │ 113474 │ ███▋ │ +│ 7 │ 170678 │ █████▋ │ +│ 8 │ 278380 │ █████████▎ │ +│ 9 │ 391053 │ █████████████ │ +│ 10 │ 457681 │ ███████████████▎ │ +│ 11 │ 493667 │ ████████████████▍ │ +│ 12 │ 509641 │ ████████████████▊ │ +│ 13 │ 522947 │ █████████████████▍ │ +│ 14 │ 539954 │ █████████████████▊ │ +│ 15 │ 528460 │ █████████████████▌ │ +│ 16 │ 539201 │ █████████████████▊ │ +│ 17 │ 523539 │ █████████████████▍ │ +│ 18 │ 506467 │ ████████████████▊ │ +│ 19 │ 520915 │ █████████████████▎ │ +│ 20 │ 521665 │ █████████████████▍ │ +│ 21 │ 542078 │ ██████████████████ │ +│ 22 │ 493642 │ ████████████████▍ │ +│ 23 │ 400397 │ █████████████▎ │ +└────┴────────┴────────────────────┘ +``` + +## dönüştürmek {#transform} + +Bir değeri, bazı öğelerin açıkça tanımlanmış eşlemesine göre diğer öğelere dönüştürür. +Bu fonksiyonun iki varyasyonu vardır: + +### transform (x, array\_from, array\_to, varsayılan) {#transformx-array-from-array-to-default} + +`x` – What to transform. + +`array_from` – Constant array of values for converting. + +`array_to` – Constant array of values to convert the values in ‘from’ -e doğru. + +`default` – Which value to use if ‘x’ değer anylerden hiçbir equaline eşit değildir. ‘from’. + +`array_from` ve `array_to` – Arrays of the same size. + +Türler: + +`transform(T, Array(T), Array(U), U) -> U` + +`T` ve `U` sayısal, dize veya tarih veya DateTime türleri olabilir. +Aynı harfin belirtildiği (t veya U), sayısal türler için bunlar eşleşen türler değil, ortak bir türe sahip türler olabilir. +Örneğin, ilk bağımsız değişken Int64 türüne sahip olabilir, ikincisi ise Array(Uİnt16) türüne sahiptir. + +Eğer... ‘x’ değer, içindeki öğelerden birine eşittir. ‘array\_from’ array, varolan öğeyi döndürür (aynı numaralandırılır) ‘array\_to’ dizi. Aksi takdirde, döner ‘default’. İçinde birden fazla eşleşen öğe varsa ‘array\_from’, maçlardan birini döndürür. + +Örnek: + +``` sql +SELECT + transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title, + count() AS c +FROM test.hits +WHERE SearchEngineID != 0 +GROUP BY title +ORDER BY c DESC +``` + +``` text +┌─title─────┬──────c─┐ +│ Yandex │ 498635 │ +│ Google │ 229872 │ +│ Other │ 104472 │ +└───────────┴────────┘ +``` + +### transform (x, array\_from, array\_to) {#transformx-array-from-array-to} + +İlk vary thatasyon differsdan farklıdır. ‘default’ argüman atlandı. +Eğer... ‘x’ değer, içindeki öğelerden birine eşittir. ‘array\_from’ array, eşleşen öğeyi (aynı numaralandırılmış) döndürür ‘array\_to’ dizi. Aksi takdirde, döner ‘x’. + +Türler: + +`transform(T, Array(T), Array(T)) -> T` + +Örnek: + +``` sql +SELECT + transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, + count() AS c +FROM test.hits +GROUP BY domain(Referer) +ORDER BY count() DESC +LIMIT 10 +``` + +``` text +┌─s──────────────┬───────c─┐ +│ │ 2906259 │ +│ www.yandex │ 867767 │ +│ ███████.ru │ 313599 │ +│ mail.yandex.ru │ 107147 │ +│ ██████.ru │ 100355 │ +│ █████████.ru │ 65040 │ +│ news.yandex.ru │ 64515 │ +│ ██████.net │ 59141 │ +│ example.com │ 57316 │ +└────────────────┴─────────┘ +``` + +## formatReadableSize (x) {#formatreadablesizex} + +Boyutu (bayt sayısı) kabul eder. Bir sonek (KiB, MıB, vb.) 
ile yuvarlatılmış boyutu bir dize olarak döndürür.
+
+Örnek:
+
+``` sql
+SELECT
+    arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes,
+    formatReadableSize(filesize_bytes) AS filesize
+```
+
+``` text
+┌─filesize_bytes─┬─filesize───┐
+│              1 │ 1.00 B     │
+│           1024 │ 1.00 KiB   │
+│        1048576 │ 1.00 MiB   │
+│      192851925 │ 183.92 MiB │
+└────────────────┴────────────┘
+```
+
+## least(a, b) {#leasta-b}
+
+a ve b değerlerinden en küçüğünü döndürür.
+
+## greatest(a, b) {#greatesta-b}
+
+a ve b değerlerinden en büyüğünü döndürür.
+
+## uptime() {#uptime}
+
+Sunucunun çalışma süresini saniye cinsinden döndürür.
+
+## version() {#version}
+
+Sunucu sürümünü bir dize olarak döndürür.
+
+## timezone() {#timezone}
+
+Sunucunun saat dilimini döndürür.
+
+## blockNumber {#blocknumber}
+
+Satırın bulunduğu veri bloğunun sıra numarasını döndürür.
+
+## rowNumberInBlock {#function-rownumberinblock}
+
+Veri bloğundaki satırın sıra numarasını döndürür. Farklı veri blokları için her zaman yeniden hesaplanır.
+
+## rowNumberInAllBlocks() {#rownumberinallblocks}
+
+Veri bloğundaki satırın sıra numarasını döndürür. Bu işlev yalnızca etkilenen veri bloklarını dikkate alır.
+
+## neighbor {#neighbor}
+
+Belirli bir sütunun geçerli satırından önce veya sonra gelen, belirtilen ofsetteki bir satıra erişim sağlayan pencere işlevi.
+
+**Sözdizimi**
+
+``` sql
+neighbor(column, offset[, default_value])
+```
+
+İşlevin sonucu, etkilenen veri bloklarına ve bloktaki veri sırasına bağlıdır.
+ORDER BY içeren bir alt sorgu yapar ve işlevi alt sorgunun dışından çağırırsanız, beklenen sonucu alabilirsiniz.
+
+**Parametre**
+
+- `column` — Bir sütun adı veya skaler ifade.
+- `offset` — `column` sütununun geçerli satırından ileriye veya geriye doğru satır sayısı. [Int64](../../sql_reference/data_types/int_uint.md).
+- `default_value` — İsteğe bağlı. Ofset blok kapsamının dışına çıkarsa döndürülecek değer. Etkilenen veri bloklarının türünde.
+
+**Döndürülen değerler**
+
+- `offset` değeri blok sınırlarının dışına çıkmıyorsa, geçerli satırdan `offset` uzaklıktaki `column` değeri.
+- `offset` değeri blok sınırlarının dışındaysa, `column` için varsayılan değer. `default_value` verilmişse o kullanılır.
+
+Tür: etkilenen veri bloklarının türü veya varsayılan değerin türü.
+ +**Örnek** + +Sorgu: + +``` sql +SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; +``` + +Sonuç: + +``` text +┌─number─┬─neighbor(number, 2)─┐ +│ 0 │ 2 │ +│ 1 │ 3 │ +│ 2 │ 4 │ +│ 3 │ 5 │ +│ 4 │ 6 │ +│ 5 │ 7 │ +│ 6 │ 8 │ +│ 7 │ 9 │ +│ 8 │ 0 │ +│ 9 │ 0 │ +└────────┴─────────────────────┘ +``` + +Sorgu: + +``` sql +SELECT number, neighbor(number, 2, 999) FROM system.numbers LIMIT 10; +``` + +Sonuç: + +``` text +┌─number─┬─neighbor(number, 2, 999)─┐ +│ 0 │ 2 │ +│ 1 │ 3 │ +│ 2 │ 4 │ +│ 3 │ 5 │ +│ 4 │ 6 │ +│ 5 │ 7 │ +│ 6 │ 8 │ +│ 7 │ 9 │ +│ 8 │ 999 │ +│ 9 │ 999 │ +└────────┴──────────────────────────┘ +``` + +Bu işlev, yıldan yıla metrik değeri hesaplamak için kullanılabilir: + +Sorgu: + +``` sql +WITH toDate('2018-01-01') AS start_date +SELECT + toStartOfMonth(start_date + (number * 32)) AS month, + toInt32(month) % 100 AS money, + neighbor(money, -12) AS prev_year, + round(prev_year / money, 2) AS year_over_year +FROM numbers(16) +``` + +Sonuç: + +``` text +┌──────month─┬─money─┬─prev_year─┬─year_over_year─┐ +│ 2018-01-01 │ 32 │ 0 │ 0 │ +│ 2018-02-01 │ 63 │ 0 │ 0 │ +│ 2018-03-01 │ 91 │ 0 │ 0 │ +│ 2018-04-01 │ 22 │ 0 │ 0 │ +│ 2018-05-01 │ 52 │ 0 │ 0 │ +│ 2018-06-01 │ 83 │ 0 │ 0 │ +│ 2018-07-01 │ 13 │ 0 │ 0 │ +│ 2018-08-01 │ 44 │ 0 │ 0 │ +│ 2018-09-01 │ 75 │ 0 │ 0 │ +│ 2018-10-01 │ 5 │ 0 │ 0 │ +│ 2018-11-01 │ 36 │ 0 │ 0 │ +│ 2018-12-01 │ 66 │ 0 │ 0 │ +│ 2019-01-01 │ 97 │ 32 │ 0.33 │ +│ 2019-02-01 │ 28 │ 63 │ 2.25 │ +│ 2019-03-01 │ 56 │ 91 │ 1.62 │ +│ 2019-04-01 │ 87 │ 22 │ 0.25 │ +└────────────┴───────┴───────────┴────────────────┘ +``` + +## runningDifference (x) {#other_functions-runningdifference} + +Calculates the difference between successive row values ​​in the data block. +İlk satır için 0 ve sonraki her satır için önceki satırdan farkı döndürür. + +İşlevin sonucu, etkilenen veri bloklarına ve bloktaki veri sırasına bağlıdır. +ORDER BY ile bir alt sorgu yaparsanız ve alt sorgunun dışından işlevi çağırırsanız, beklenen sonucu alabilirsiniz. + +Örnek: + +``` sql +SELECT + EventID, + EventTime, + runningDifference(EventTime) AS delta +FROM +( + SELECT + EventID, + EventTime + FROM events + WHERE EventDate = '2016-11-24' + ORDER BY EventTime ASC + LIMIT 5 +) +``` + +``` text +┌─EventID─┬───────────EventTime─┬─delta─┐ +│ 1106 │ 2016-11-24 00:00:04 │ 0 │ +│ 1107 │ 2016-11-24 00:00:05 │ 1 │ +│ 1108 │ 2016-11-24 00:00:05 │ 0 │ +│ 1109 │ 2016-11-24 00:00:09 │ 4 │ +│ 1110 │ 2016-11-24 00:00:10 │ 1 │ +└─────────┴─────────────────────┴───────┘ +``` + +Lütfen dikkat - blok boyutu sonucu etkiler. Her yeni blok ile, `runningDifference` durum sıfırlandı. + +``` sql +SELECT + number, + runningDifference(number + 1) AS diff +FROM numbers(100000) +WHERE diff != 1 +``` + +``` text +┌─number─┬─diff─┐ +│ 0 │ 0 │ +└────────┴──────┘ +┌─number─┬─diff─┐ +│ 65536 │ 0 │ +└────────┴──────┘ +``` + +``` sql +set max_block_size=100000 -- default value is 65536! + +SELECT + number, + runningDifference(number + 1) AS diff +FROM numbers(100000) +WHERE diff != 1 +``` + +``` text +┌─number─┬─diff─┐ +│ 0 │ 0 │ +└────────┴──────┘ +``` + +## runningDifferenceStartingWithFirstvalue {#runningdifferencestartingwithfirstvalue} + +İçin aynı [runningDifference](./other_functions.md#other_functions-runningdifference), fark ilk satırın değeridir, ilk satırın değerini döndürdü ve sonraki her satır önceki satırdan farkı döndürür. + +## MACNumToString (num) {#macnumtostringnum} + +Bir uınt64 numarasını kabul eder. Big endian'da bir MAC adresi olarak yorumlar. 
AA:BB:CC:DD:EE:FF biçiminde karşılık gelen MAC adresini içeren bir dize döndürür (onaltılık formda iki nokta üst üste ayrılmış sayılar). + +## MACStringToNum (s) {#macstringtonums} + +MACNumToString ters işlevi. MAC adresi geçersiz bir biçime sahipse, 0 döndürür. + +## MACStringToOUİ (s) {#macstringtoouis} + +AA:BB:CC:DD:EE:FF (onaltılık formda iki nokta üst üste ayrılmış sayılar) biçiminde bir MAC adresi kabul eder. İlk üç sekizli uint64 numarası olarak döndürür. MAC adresi geçersiz bir biçime sahipse, 0 döndürür. + +## getSizeOfEnumType {#getsizeofenumtype} + +Alan sayısını döndürür [Enum](../../sql_reference/data_types/enum.md). + +``` sql +getSizeOfEnumType(value) +``` + +**Parametre:** + +- `value` — Value of type `Enum`. + +**Döndürülen değerler** + +- İle alan sayısı `Enum` giriş değerleri. +- Tür değilse bir istisna atılır `Enum`. + +**Örnek** + +``` sql +SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## blockSerializedSize {#blockserializedsize} + +Diskteki boyutu döndürür (sıkıştırmayı hesaba katmadan). + +``` sql +blockSerializedSize(value[, value[, ...]]) +``` + +**Parametre:** + +- `value` — Any value. + +**Döndürülen değerler** + +- (Sıkıştırma olmadan) değerler bloğu için diske yazılacak bayt sayısı. + +**Örnek** + +``` sql +SELECT blockSerializedSize(maxState(1)) as x +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## toColumnTypeName {#tocolumntypename} + +RAM'DEKİ sütunun veri türünü temsil eden sınıfın adını döndürür. + +``` sql +toColumnTypeName(value) +``` + +**Parametre:** + +- `value` — Any type of value. + +**Döndürülen değerler** + +- Temsil etmek için kullanılan sınıfın adını içeren bir dize `value` RAM veri türü. + +**Arasındaki fark örneği`toTypeName ' and ' toColumnTypeName`** + +``` sql +SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) +``` + +``` text +┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime │ +└─────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) +``` + +``` text +┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ Const(UInt32) │ +└───────────────────────────────────────────────────────────┘ +``` + +Örnek gösteriyor ki `DateTime` veri türü olarak bellekte saklanır `Const(UInt32)`. + +## dumpColumnStructure {#dumpcolumnstructure} + +Ram'deki veri yapılarının ayrıntılı bir açıklamasını verir + +``` sql +dumpColumnStructure(value) +``` + +**Parametre:** + +- `value` — Any type of value. + +**Döndürülen değerler** + +- Temsil etmek için kullanılan yapıyı açıklayan bir dize `value` RAM veri türü. + +**Örnek** + +``` sql +SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) +``` + +``` text +┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime, Const(size = 1, UInt32(size = 1)) │ +└──────────────────────────────────────────────────────────────┘ +``` + +## defaultValueOfArgumentType {#defaultvalueofargumenttype} + +Veri türü için varsayılan değeri verir. + +Kullanıcı tarafından ayarlanan özel sütunlar için varsayılan değerleri içermez. + +``` sql +defaultValueOfArgumentType(expression) +``` + +**Parametre:** + +- `expression` — Arbitrary type of value or an expression that results in a value of an arbitrary type. + +**Döndürülen değerler** + +- `0` sayılar için. +- Dizeler için boş dize. +- `ᴺᵁᴸᴸ` için [Nullable](../../sql_reference/data_types/nullable.md). 
+ +**Örnek** + +``` sql +SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) +``` + +``` text +┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ +│ 0 │ +└─────────────────────────────────────────────┘ +``` + +``` sql +SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) +``` + +``` text +┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ +│ ᴺᵁᴸᴸ │ +└───────────────────────────────────────────────────────┘ +``` + +## çoğaltmak {#other-functions-replicate} + +Tek bir değere sahip bir dizi oluşturur. + +İç uygulama için kullanılan [arrayJoin](array_join.md#functions_arrayjoin). + +``` sql +SELECT replicate(x, arr); +``` + +**Parametre:** + +- `arr` — Original array. ClickHouse creates a new array of the same length as the original and fills it with the value `x`. +- `x` — The value that the resulting array will be filled with. + +**Döndürülen değer** + +Değerle dolu bir dizi `x`. + +Tür: `Array`. + +**Örnek** + +Sorgu: + +``` sql +SELECT replicate(1, ['a', 'b', 'c']) +``` + +Sonuç: + +``` text +┌─replicate(1, ['a', 'b', 'c'])─┐ +│ [1,1,1] │ +└───────────────────────────────┘ +``` + +## filesystemAvailable {#filesystemavailable} + +Veritabanlarının dosyalarının bulunduğu dosya sisteminde kalan alan miktarını döndürür. Her zaman toplam boş alandan daha küçüktür ([filesystemFree](#filesystemfree)) çünkü OS için biraz alan ayrılmıştır. + +**Sözdizimi** + +``` sql +filesystemAvailable() +``` + +**Döndürülen değer** + +- Bayt olarak kullanılabilir kalan alan miktarı. + +Tür: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT formatReadableSize(filesystemAvailable()) AS "Available space", toTypeName(filesystemAvailable()) AS "Type"; +``` + +Sonuç: + +``` text +┌─Available space─┬─Type───┐ +│ 30.75 GiB │ UInt64 │ +└─────────────────┴────────┘ +``` + +## filesystemFree {#filesystemfree} + +Veritabanlarının dosyalarının bulunduğu dosya sistemindeki boş alanın toplam miktarını döndürür. Ayrıca bakınız `filesystemAvailable` + +**Sözdizimi** + +``` sql +filesystemFree() +``` + +**Döndürülen değer** + +- Bayt cinsinden boş alan miktarı. + +Tür: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT formatReadableSize(filesystemFree()) AS "Free space", toTypeName(filesystemFree()) AS "Type"; +``` + +Sonuç: + +``` text +┌─Free space─┬─Type───┐ +│ 32.39 GiB │ UInt64 │ +└────────────┴────────┘ +``` + +## filesystemCapacity {#filesystemcapacity} + +Dosya sisteminin kapasitesini bayt cinsinden döndürür. Değerlendirme için, [yol](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) veri dizinine yapılandırılmalıdır. + +**Sözdizimi** + +``` sql +filesystemCapacity() +``` + +**Döndürülen değer** + +- Dosya sisteminin bayt cinsinden kapasite bilgisi. + +Tür: [Uİnt64](../../sql_reference/data_types/int_uint.md). + +**Örnek** + +Sorgu: + +``` sql +SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesystemCapacity()) AS "Type" +``` + +Sonuç: + +``` text +┌─Capacity──┬─Type───┐ +│ 39.32 GiB │ UInt64 │ +└───────────┴────────┘ +``` + +## finalizeAggregation {#function-finalizeaggregation} + +Toplama işlevinin durumunu alır. Toplama sonucunu döndürür (kesinleşmiş durum). + +## runningAccumulate {#function-runningaccumulate} + +Toplama işlevinin durumlarını alır ve değerleri olan bir sütun döndürür, bu durumların bir dizi blok satırı için ilk satırdan geçerli satıra birikmesinin sonucudur. 
+Örneğin, toplama işlevinin durumunu alır (örnek runningAccumulate (uniqState (Userıd))) ve her blok satırı için, önceki tüm Satırların ve geçerli satırın durumlarının birleştirilmesinde toplama işlevinin sonucunu döndürür. +Bu nedenle, işlevin sonucu, verilerin bloklara bölünmesine ve blok içindeki verilerin sırasına bağlıdır. + +## joinGet {#joinget} + +İşlev, tablodan verileri bir tablodan aynı şekilde ayıklamanızı sağlar [sözlük](../../sql_reference/dictionaries/index.md). + +Veri alır [Katmak](../../engines/table_engines/special/join.md#creating-a-table) belirtilen birleştirme anahtarını kullanarak tablolar. + +Sadece ile oluşturulan tabloları destekler `ENGINE = Join(ANY, LEFT, )` deyim. + +**Sözdizimi** + +``` sql +joinGet(join_storage_table_name, `value_column`, join_keys) +``` + +**Parametre** + +- `join_storage_table_name` — an [tanıtıcı](../syntax.md#syntax-identifiers) aramanın nerede yapıldığını gösterir. Tanımlayıcı varsayılan veritabanında aranır (bkz. parametre `default_database` config dosyası). Varsayılan veritabanını geçersiz kılmak için `USE db_name` veya ayırıcı aracılığıyla veritabanını ve tabloyu belirtin `db_name.db_table` örnek bakın. +- `value_column` — name of the column of the table that contains required data. +- `join_keys` — list of keys. + +**Döndürülen değer** + +Anahtarların listesine karşılık gelen değerlerin listesini döndürür. + +Kaynak tabloda kesin yoksa o zaman `0` veya `null` esas alınarak iade edilecektir [join\_use\_nulls](../../operations/settings/settings.md#join_use_nulls) ayar. + +Hakkında daha fazla bilgi `join_use_nulls` içinde [Birleştirme işlemi](../../engines/table_engines/special/join.md). + +**Örnek** + +Giriş tablosu: + +``` sql +CREATE DATABASE db_test +CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 +INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) +``` + +``` text +┌─id─┬─val─┐ +│ 4 │ 13 │ +│ 2 │ 12 │ +│ 1 │ 11 │ +└────┴─────┘ +``` + +Sorgu: + +``` sql +SELECT joinGet(db_test.id_val,'val',toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 +``` + +Sonuç: + +``` text +┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐ +│ 0 │ +│ 11 │ +│ 12 │ +│ 0 │ +└──────────────────────────────────────────────────┘ +``` + +## modelEvaluate(model\_name, …) {#function-modelevaluate} + +Dış modeli değerlendirin. +Bir model adı ve model bağımsız değişkenleri kabul eder. Float64 Döndürür. + +## throwİf(x \[, custom\_message\]) {#throwifx-custom-message} + +Argüman sıfır değilse bir istisna atın. +custom\_message-isteğe bağlı bir parametredir: sabit bir dize, bir hata mesajı sağlar + +``` sql +SELECT throwIf(number = 3, 'Too many') FROM numbers(10); +``` + +``` text +↙ Progress: 0.00 rows, 0.00 B (0.00 rows/s., 0.00 B/s.) Received exception from server (version 19.14.1): +Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many. +``` + +## kimlik {#identity} + +Bağımsız değişkeni olarak kullanılan aynı değeri döndürür. Hata ayıklama ve test için kullanılan, dizin kullanarak iptal ve tam bir tarama sorgu performansını almak için izin verir. Olası dizin kullanımı için sorgu analiz edildiğinde, analizör içeriye bakmaz `identity` işlevler. 
+
+**Sözdizimi**
+
+``` sql
+identity(x)
+```
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT identity(42)
+```
+
+Sonuç:
+
+``` text
+┌─identity(42)─┐
+│           42 │
+└──────────────┘
+```
+
+## randomPrintableASCII {#randomascii}
+
+Rastgele [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) yazdırılabilir karakterlerden oluşan bir dize üretir.
+
+**Sözdizimi**
+
+``` sql
+randomPrintableASCII(length)
+```
+
+**Parametre**
+
+- `length` — Sonuç dizesinin uzunluğu. Pozitif tamsayı.
+
+    `length < 0` geçirilirse işlevin davranışı tanımsızdır.
+
+**Döndürülen değer**
+
+- Rastgele [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) yazdırılabilir karakterlerden oluşan bir dize.
+
+Tür: [Dize](../../sql_reference/data_types/string.md)
+
+**Örnek**
+
+``` sql
+SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers LIMIT 3
+```
+
+``` text
+┌─number─┬─str────────────────────────────┬─length(randomPrintableASCII(30))─┐
+│      0 │ SuiCOSTvC0csfABSw=UcSzp2.`rv8x │                               30 │
+│      1 │ 1Ag NlJ &RCN:*>HVPG;PE-nO"SUFD │                               30 │
+│      2 │ /"+<"wUTh:=LjJ Vm!c&hI*m#XTfzz │                               30 │
+└────────┴────────────────────────────────┴──────────────────────────────────┘
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/other_functions/)
diff --git a/docs/tr/sql_reference/functions/random_functions.md b/docs/tr/sql_reference/functions/random_functions.md
new file mode 100644
index 00000000000..e57106426f4
--- /dev/null
+++ b/docs/tr/sql_reference/functions/random_functions.md
@@ -0,0 +1,30 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 51
+toc_title: "S\xF6zde Rasgele Say\u0131lar Olu\u015Fturma"
+---
+
+# Sözde rasgele sayılar üretmek için fonksiyonlar {#functions-for-generating-pseudo-random-numbers}
+
+Sözde rasgele sayıların kriptografik olmayan üreteçleri kullanılır.
+
+Tüm işlevler sıfır veya bir bağımsız değişken kabul eder.
+Bir argüman geçirilirse, herhangi bir türde olabilir ve değeri hiçbir şey için kullanılmaz.
+Bu argümanın tek amacı, aynı işlevin iki farklı örneğinin farklı rasgele sayılar içeren farklı sütunlar döndürmesi için ortak alt ifade eliminasyonunu önlemektir.
+
+## rand {#rand}
+
+Tüm UInt32 tipi sayılar arasında eşit olarak dağılan sözde rasgele bir UInt32 sayısı döndürür.
+Doğrusal eşlenik (linear congruential) bir üreteç kullanır.
+
+## rand64 {#rand64}
+
+Tüm UInt64 tipi sayılar arasında eşit olarak dağılan sözde rasgele bir UInt64 sayısı döndürür.
+Doğrusal eşlenik (linear congruential) bir üreteç kullanır.
+
+## randConstant {#randconstant}
+
+Sözde rasgele bir UInt32 sayısı döndürür; değer farklı bloklar için aynıdır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/random_functions/)
diff --git a/docs/tr/sql_reference/functions/rounding_functions.md b/docs/tr/sql_reference/functions/rounding_functions.md
new file mode 100644
index 00000000000..6d47ae54dae
--- /dev/null
+++ b/docs/tr/sql_reference/functions/rounding_functions.md
@@ -0,0 +1,190 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 45
+toc_title: "Yuvarlama"
+---
+
+# Yuvarlama fonksiyonları {#rounding-functions}
+
+## floor(x\[, N\]) {#floorx-n}
+
+`x` değerinden küçük veya ona eşit olan en büyük yuvarlak sayıyı döndürür. Yuvarlak sayı, 1/10N'nin bir katıdır veya 1/10N tam değilse, uygun veri türünün en yakın sayısıdır.
+‘N’ isteğe bağlı bir tamsayı sabiti parametresidir.
Varsayılan olarak sıfırdır, bu da bir tam sayıya yuvarlamak anlamına gelir. +‘N’ negatif olabilir. + +Örnekler: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` + +`x` herhangi bir sayısal türüdür. Sonuç aynı türden bir sayıdır. +Tamsayı argümanları için, bir negatif ile yuvarlamak mantıklıdır `N` değer (negatif olmayan için `N`, işlev hiçbir şey yapmaz). +Yuvarlama taşmasına neden olursa (örneğin, floor (-128, -1)), uygulamaya özgü bir sonuç döndürülür. + +## tavan(x \[, N\]), tavan (x \[, N\]) {#ceilx-n-ceilingx-n} + +Büyük veya eşit olan en küçük yuvarlak sayıyı döndürür `x`. Diğer her şekilde, aynı `floor` (yukarıda) işlevi. + +## trunc(x \[, N\]), truncate(x \[, N\]) {#truncx-n-truncatex-n} + +Mutlak değeri küçük veya eşit olan en büyük mutlak değere sahip yuvarlak sayıyı döndürür `x`‘s. In every other way, it is the same as the ’floor’ (yukarıda) işlevi. + +## Yuvarlak(x \[, N\]) {#rounding_functions-round} + +Belirtilen sayıda ondalık basamak için bir değer yuvarlar. + +İşlev, belirtilen siparişin en yakın numarasını döndürür. Verilen sayı çevreleyen sayılara eşit mesafeye sahip olduğunda, işlev, float sayı türleri için bankacının yuvarlamasını kullanır ve diğer sayı türleri için sıfırdan uzaklaşır. + +``` sql +round(expression [, decimal_places]) +``` + +**Parametre:** + +- `expression` — A number to be rounded. Can be any [ifade](../syntax.md#syntax-expressions) sayısal dönen [veri türü](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — An integer value. + - Eğer `decimal-places > 0` sonra işlev değeri ondalık noktanın sağına yuvarlar. + - Eğer `decimal-places < 0` ardından işlev değeri ondalık noktanın soluna yuvarlar. + - Eğer `decimal-places = 0` sonra işlev değeri tamsayı olarak yuvarlar. Bu durumda argüman ihmal edilebilir. + +**Döndürülen değer:** + +Giriş numarası ile aynı türden yuvarlatılmış sayı. + +### Örnekler {#examples} + +**Kullanım örneği** + +``` sql +SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3 +``` + +``` text +┌───x─┬─round(divide(number, 2))─┐ +│ 0 │ 0 │ +│ 0.5 │ 0 │ +│ 1 │ 1 │ +└─────┴──────────────────────────┘ +``` + +**Yuvarlama örnekleri** + +En yakın numaraya yuvarlama. + +``` text +round(3.2, 0) = 3 +round(4.1267, 2) = 4.13 +round(22,-1) = 20 +round(467,-2) = 500 +round(-467,-2) = -500 +``` + +Bankacı yuvarlanıyor. + +``` text +round(3.5) = 4 +round(4.5) = 4 +round(3.55, 1) = 3.6 +round(3.65, 1) = 3.6 +``` + +**Ayrıca Bakınız** + +- [roundBankers](#roundbankers) + +## roundBankers {#roundbankers} + +Bir sayıyı belirtilen ondalık konuma yuvarlar. + +- Yuvarlama sayısı iki sayı arasında yarıya ise, işlev banker yuvarlama kullanır. + + Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2. + + It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`. + +- Diğer durumlarda, işlev sayıları en yakın tam sayıya yuvarlar. + +Banker yuvarlama kullanarak, yuvarlama numaraları toplama veya bu sayıları çıkarma sonuçları üzerindeki etkisini azaltabilir. 
+ +Örneğin, farklı yuvarlama ile 1.5, 2.5, 3.5, 4.5 sayılarını topla: + +- Yuvarlama yok: 1.5 + 2.5 + 3.5 + 4.5 = 12. +- Bankacı yuvarlama: 2 + 2 + 4 + 4 = 12. +- En yakın tam sayıya yuvarlama: 2 + 3 + 4 + 5 = 14. + +**Sözdizimi** + +``` sql +roundBankers(expression [, decimal_places]) +``` + +**Parametre** + +- `expression` — A number to be rounded. Can be any [ifade](../syntax.md#syntax-expressions) sayısal dönen [veri türü](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — Decimal places. An integer number. + - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`. + - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`. + - `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`. + +**Döndürülen değer** + +Banker yuvarlama yöntemi tarafından yuvarlanan bir değer. + +### Örnekler {#examples-1} + +**Kullanım örneği** + +Sorgu: + +``` sql + SELECT number / 2 AS x, roundBankers(x, 0) AS b fROM system.numbers limit 10 +``` + +Sonuç: + +``` text +┌───x─┬─b─┐ +│ 0 │ 0 │ +│ 0.5 │ 0 │ +│ 1 │ 1 │ +│ 1.5 │ 2 │ +│ 2 │ 2 │ +│ 2.5 │ 2 │ +│ 3 │ 3 │ +│ 3.5 │ 4 │ +│ 4 │ 4 │ +│ 4.5 │ 4 │ +└─────┴───┘ +``` + +**Bankacı yuvarlama örnekleri** + +``` text +roundBankers(0.4) = 0 +roundBankers(-3.5) = -4 +roundBankers(4.5) = 4 +roundBankers(3.55, 1) = 3.6 +roundBankers(3.65, 1) = 3.6 +roundBankers(10.35, 1) = 10.4 +roundBankers(10.755, 2) = 11,76 +``` + +**Ayrıca Bakınız** + +- [turlu](#rounding_functions-round) + +## roundToExp2 (num) {#roundtoexp2num} + +Bir sayı kabul eder. Sayı birden az ise, 0 döndürür. Aksi takdirde, sayıyı en yakın (negatif olmayan) iki dereceye yuvarlar. + +## roundDuration (num) {#rounddurationnum} + +Bir sayı kabul eder. Sayı birden az ise, 0 döndürür. Aksi takdirde, sayıyı kümeden sayılara yuvarlar: 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000. Bu fonksiyon (kayıt olmak için özeldir.Metrica ve oturum uzunluğu raporu uygulamak için kullanılır. + +## roundAge (num) {#roundagenum} + +Bir sayı kabul eder. Sayı 18'den küçükse, 0 döndürür. Aksi takdirde, sayıyı kümeden bir sayıya yuvarlar: 18, 25, 35, 45, 55. Bu fonksiyon (kayıt olmak için özeldir.Metrica ve kullanıcı yaş raporu uygulamak için kullanılır. + +## roundDown (num, arr) {#rounddownnum-arr} + +Bir sayıyı kabul eder ve belirtilen Dizideki bir öğeye yuvarlar. Değer en düşük sınırdan küçükse, en düşük sınır döndürülür. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/) diff --git a/docs/tr/sql_reference/functions/splitting_merging_functions.md b/docs/tr/sql_reference/functions/splitting_merging_functions.md new file mode 100644 index 00000000000..81269606d98 --- /dev/null +++ b/docs/tr/sql_reference/functions/splitting_merging_functions.md @@ -0,0 +1,116 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 47 +toc_title: "Dizeleri ve dizileri b\xF6lme ve birle\u015Ftirme" +--- + +# Dizeleri ve dizileri bölme ve birleştirme işlevleri {#functions-for-splitting-and-merging-strings-and-arrays} + +## splitByChar (ayırıcı, s) {#splitbycharseparator-s} + +Bir dizeyi belirtilen bir karakterle ayrılmış alt dizelere böler. Sabit bir dize kullanır `separator` tam olarak bir karakterden oluşan. 
+Seçili alt dizelerin bir dizisini döndürür. Ayırıcı dizenin başında veya sonunda oluşursa veya ardışık birden çok ayırıcı varsa, boş alt dizeler seçilebilir. + +**Sözdizimi** + +``` sql +splitByChar(, ) +``` + +**Parametre** + +- `separator` — The separator which should contain exactly one character. [Dize](../../sql_reference/data_types/string.md). +- `s` — The string to split. [Dize](../../sql_reference/data_types/string.md). + +**Döndürülen değer (ler)** + +Seçili alt dizelerin bir dizisini döndürür. Boş alt dizeler şu durumlarda seçilebilir: + +- Dizenin başında veya sonunda bir ayırıcı oluşur; +- Birden fazla ardışık ayırıcı vardır; +- Orijinal dize `s` boş. + +Tür: [Dizi](../../sql_reference/data_types/array.md) -den [Dize](../../sql_reference/data_types/string.md). + +**Örnek** + +``` sql +SELECT splitByChar(',', '1,2,3,abcde') +``` + +``` text +┌─splitByChar(',', '1,2,3,abcde')─┐ +│ ['1','2','3','abcde'] │ +└─────────────────────────────────┘ +``` + +## splitByString (ayırıcı, s) {#splitbystringseparator-s} + +Bir dizeyi bir dizeyle ayrılmış alt dizelere böler. Sabit bir dize kullanır `separator` ayırıcı olarak birden fazla karakter. Eğer dize `separator` boş olduğunu, bu bölünmüş dize `s` tek karakter dizisine. + +**Sözdizimi** + +``` sql +splitByString(, ) +``` + +**Parametre** + +- `separator` — The separator. [Dize](../../sql_reference/data_types/string.md). +- `s` — The string to split. [Dize](../../sql_reference/data_types/string.md). + +**Döndürülen değer (ler)** + +Seçili alt dizelerin bir dizisini döndürür. Boş alt dizeler şu durumlarda seçilebilir: + +Tür: [Dizi](../../sql_reference/data_types/array.md) -den [Dize](../../sql_reference/data_types/string.md). + +- Boş olmayan bir ayırıcı dizenin başında veya sonunda oluşur; +- Birden fazla ardışık boş olmayan ayırıcı vardır; +- Orijinal dize `s` ayırıcı boş değilken boş. + +**Örnek** + +``` sql +SELECT splitByString(', ', '1, 2 3, 4,5, abcde') +``` + +``` text +┌─splitByString(', ', '1, 2 3, 4,5, abcde')─┐ +│ ['1','2 3','4,5','abcde'] │ +└───────────────────────────────────────────┘ +``` + +``` sql +SELECT splitByString('', 'abcde') +``` + +``` text +┌─splitByString('', 'abcde')─┐ +│ ['a','b','c','d','e'] │ +└────────────────────────────┘ +``` + +## arrayStringConcat(arr \[, ayırıcı\]) {#arraystringconcatarr-separator} + +Dizide listelenen dizeleri ayırıcı ile birleştirir.'separator' isteğe bağlı bir parametredir: varsayılan olarak boş bir dizeye ayarlanmış sabit bir dize. +Dizeyi döndürür. + +## alphaTokens (s) {#alphatokenss} + +A-z ve A-Z aralıklarından ardışık baytların alt dizelerini seçer. + +**Örnek** + +``` sql +SELECT alphaTokens('abca1abc') +``` + +``` text +┌─alphaTokens('abca1abc')─┐ +│ ['abca','abc'] │ +└─────────────────────────┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) diff --git a/docs/tr/sql_reference/functions/string_functions.md b/docs/tr/sql_reference/functions/string_functions.md new file mode 100644 index 00000000000..012c8210537 --- /dev/null +++ b/docs/tr/sql_reference/functions/string_functions.md @@ -0,0 +1,489 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 40 +toc_title: "Dizeleri ile \xE7al\u0131\u015Fma" +--- + +# Dizelerle çalışmak için işlevler {#functions-for-working-with-strings} + +## boş {#empty} + +Boş bir dize için 1 veya boş olmayan bir dize için 0 döndürür. +Sonuç türü Uint8'dir. 
+Bir boşluk veya boş bayt bile olsa, en az bir bayt içeren bir dize boş olmayan olarak kabul edilir.
+İşlev ayrıca diziler için de çalışır.
+
+## notEmpty {#notempty}
+
+Boş bir dize için 0 veya boş olmayan bir dize için 1 döndürür.
+Sonuç türü UInt8'dir.
+İşlev ayrıca diziler için de çalışır.
+
+## length {#length}
+
+Bir dizenin uzunluğunu bayt cinsinden döndürür (karakter veya kod noktası cinsinden değil).
+Sonuç türü UInt64'tür.
+İşlev ayrıca diziler için de çalışır.
+
+## lengthUTF8 {#lengthutf8}
+
+Dizenin UTF-8 kodlanmış metni oluşturan bir bayt kümesi içerdiğini varsayarak, bir dizenin uzunluğunu Unicode kod noktası cinsinden (karakter cinsinden değil) döndürür. Bu varsayım karşılanmazsa, yine de bir sonuç döndürür (istisna atmaz).
+Sonuç türü UInt64'tür.
+
+## char\_length, CHAR\_LENGTH {#char-length}
+
+Dizenin UTF-8 kodlanmış metni oluşturan bir bayt kümesi içerdiğini varsayarak, bir dizenin uzunluğunu Unicode kod noktası cinsinden (karakter cinsinden değil) döndürür. Bu varsayım karşılanmazsa, yine de bir sonuç döndürür (istisna atmaz).
+Sonuç türü UInt64'tür.
+
+## character\_length, CHARACTER\_LENGTH {#character-length}
+
+Dizenin UTF-8 kodlanmış metni oluşturan bir bayt kümesi içerdiğini varsayarak, bir dizenin uzunluğunu Unicode kod noktası cinsinden (karakter cinsinden değil) döndürür. Bu varsayım karşılanmazsa, yine de bir sonuç döndürür (istisna atmaz).
+Sonuç türü UInt64'tür.
+
+## lower, lcase {#lower}
+
+Bir dizedeki ASCII Latin sembollerini küçük harfe dönüştürür.
+
+## upper, ucase {#upper}
+
+Bir dizedeki ASCII Latin sembollerini büyük harfe dönüştürür.
+
+## lowerUTF8 {#lowerutf8}
+
+Dizenin UTF-8 kodlu bir metni oluşturan bir bayt kümesi içerdiğini varsayarak bir dizeyi küçük harfe dönüştürür.
+Dili algılamaz; örneğin Türkçe için sonuç tam olarak doğru olmayabilir.
+Bir kod noktasının büyük ve küçük harf biçimleri için UTF-8 bayt dizisinin uzunluğu farklıysa, sonuç bu kod noktası için yanlış olabilir.
+Dize, UTF-8 olmayan bir bayt kümesi içeriyorsa, davranış tanımsızdır.
+
+## upperUTF8 {#upperutf8}
+
+Dizenin UTF-8 kodlanmış bir metni oluşturan bir bayt kümesi içerdiğini varsayarak bir dizeyi büyük harfe dönüştürür.
+Dili algılamaz; örneğin Türkçe için sonuç tam olarak doğru olmayabilir.
+Bir kod noktasının büyük ve küçük harf biçimleri için UTF-8 bayt dizisinin uzunluğu farklıysa, sonuç bu kod noktası için yanlış olabilir.
+Dize, UTF-8 olmayan bir bayt kümesi içeriyorsa, davranış tanımsızdır.
+
+## isValidUTF8 {#isvalidutf8}
+
+Bayt kümesi geçerli UTF-8 kodlu ise 1, aksi takdirde 0 döndürür.
+
+## toValidUTF8 {#tovalidutf8}
+
+Geçersiz UTF-8 karakterlerini `�` (U+FFFD) karakteriyle değiştirir. Art arda gelen tüm geçersiz karakterler tek bir yedek karaktere daraltılır.
+
+``` sql
+toValidUTF8( input_string )
+```
+
+Parametre:
+
+- input\_string — [Dize](../../sql_reference/data_types/string.md) veri türü nesnesi olarak temsil edilen herhangi bir bayt kümesi.
+
+Döndürülen değer: geçerli UTF-8 dizesi.
+
+**Örnek**
+
+``` sql
+SELECT toValidUTF8('\x61\xF0\x80\x80\x80b')
+```
+
+``` text
+┌─toValidUTF8('a����b')─┐
+│ a�b                   │
+└───────────────────────┘
+```
+
+## repeat {#repeat}
+
+Bir dizeyi belirtilen sayıda tekrarlar ve çoğaltılan değerleri tek bir dize olarak birleştirir.
+
+**Sözdizimi**
+
+``` sql
+repeat(s, n)
+```
+
+**Parametre**
+
+- `s` — Tekrarlanacak dize. [Dize](../../sql_reference/data_types/string.md).
+- `n` — Dizenin tekrarlanma sayısı. [UInt](../../sql_reference/data_types/int_uint.md).
+
+**Döndürülen değer**
+
+`s` dizesini `n` kez tekrarlanmış olarak içeren tek bir dize. `n` \< 1 ise, işlev boş dize döndürür.
+ +Tür: `String`. + +**Örnek** + +Sorgu: + +``` sql +SELECT repeat('abc', 10) +``` + +Sonuç: + +``` text +┌─repeat('abc', 10)──────────────┐ +│ abcabcabcabcabcabcabcabcabcabc │ +└────────────────────────────────┘ +``` + +## tersi {#reverse} + +Dizeyi tersine çevirir (bayt dizisi olarak). + +## reverseUTF8 {#reverseutf8} + +Dizenin UTF-8 metnini temsil eden bir bayt kümesi içerdiğini varsayarak bir Unicode kod noktası dizisini tersine çevirir. Aksi takdirde, başka bir şey yapar(bir istisna atmaz). + +## format(pattern, s0, s1, …) {#format} + +Bağımsız değişkenlerde listelenen dize ile sabit desen biçimlendirme. `pattern` basitleştirilmiş bir Python biçimi desenidir. Biçim dizesi içerir “replacement fields” kıvırcık parantez ile çevrili `{}`. Parantez içinde bulunmayan herhangi bir şey, çıktıya değişmeden kopyalanan hazır metin olarak kabul edilir. Literal metne bir ayraç karakteri eklemeniz gerekiyorsa, iki katına çıkararak kaçabilir: `{{ '{{' }}` ve `{{ '}}' }}`. Alan adları sayılar (sıfırdan başlayarak) veya boş olabilir (daha sonra sonuç numaraları olarak kabul edilir). + +``` sql +SELECT format('{1} {0} {1}', 'World', 'Hello') +``` + +``` text +┌─format('{1} {0} {1}', 'World', 'Hello')─┐ +│ Hello World Hello │ +└─────────────────────────────────────────┘ +``` + +``` sql +SELECT format('{} {}', 'Hello', 'World') +``` + +``` text +┌─format('{} {}', 'Hello', 'World')─┐ +│ Hello World │ +└───────────────────────────────────┘ +``` + +## concat {#concat} + +Bağımsız değişkenlerde listelenen dizeleri ayırıcı olmadan birleştirir. + +**Sözdizimi** + +``` sql +concat(s1, s2, ...) +``` + +**Parametre** + +String veya FixedString türünün değerleri. + +**Döndürülen değerler** + +Bağımsız değişkenlerin birleştirilmesinden kaynaklanan dizeyi döndürür. + +Argüman değerlerinden herhangi biri ise `NULL`, `concat` dönüşler `NULL`. + +**Örnek** + +Sorgu: + +``` sql +SELECT concat('Hello, ', 'World!') +``` + +Sonuç: + +``` text +┌─concat('Hello, ', 'World!')─┐ +│ Hello, World! │ +└─────────────────────────────┘ +``` + +## concatassumeınjective {#concatassumeinjective} + +Aynı olarak [concat](#concat) emin olun bu ihtiyaç fark var `concat(s1, s2, ...) → sn` enjekte edilir, grup tarafından optimizasyonu için kullanılacaktır. + +İşlev adlı “injective” bağımsız değişkenlerin farklı değerleri için her zaman farklı sonuç döndürürse. Başka bir deyişle: farklı argümanlar asla aynı sonucu vermez. + +**Sözdizimi** + +``` sql +concatAssumeInjective(s1, s2, ...) +``` + +**Parametre** + +String veya FixedString türünün değerleri. + +**Döndürülen değerler** + +Bağımsız değişkenlerin birleştirilmesinden kaynaklanan dizeyi döndürür. + +Argüman değerlerinden herhangi biri ise `NULL`, `concatAssumeInjective` dönüşler `NULL`. + +**Örnek** + +Giriş tablosu: + +``` sql +CREATE TABLE key_val(`key1` String, `key2` String, `value` UInt32) ENGINE = TinyLog; +INSERT INTO key_val VALUES ('Hello, ','World',1), ('Hello, ','World',2), ('Hello, ','World!',3), ('Hello',', World!',2); +SELECT * from key_val; +``` + +``` text +┌─key1────┬─key2─────┬─value─┐ +│ Hello, │ World │ 1 │ +│ Hello, │ World │ 2 │ +│ Hello, │ World! │ 3 │ +│ Hello │ , World! │ 2 │ +└─────────┴──────────┴───────┘ +``` + +Sorgu: + +``` sql +SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY concatAssumeInjective(key1, key2) +``` + +Sonuç: + +``` text +┌─concat(key1, key2)─┬─sum(value)─┐ +│ Hello, World! │ 3 │ +│ Hello, World! 
│          2 │
+│ Hello, World       │          3 │
+└────────────────────┴────────────┘
+```
+
+## alt dize (s, ofset, uzunluk), orta (s, ofset, uzunluk), substr (s, ofset, uzunluk) {#substring}
+
+‘offset’ indeksindeki bayttan başlayan ve ‘length’ bayt uzunluğunda olan bir alt dize döndürür. Karakter indekslemesi birden başlar (standart SQL'de olduğu gibi). ‘offset’ ve ‘length’ bağımsız değişkenleri sabit olmalıdır.
+
+## substringUTF8(s, ofset, uzunluk) {#substringutf8}
+
+‘substring’ ile aynıdır, ancak Unicode kod noktaları için çalışır. Dizenin UTF-8 kodlanmış bir metni temsil eden bir bayt kümesi içerdiği varsayımı altında çalışır. Bu varsayım karşılanmazsa, yine de bir sonuç döndürür (istisna atmaz).
+
+## appendTrailingCharIfAbsent (s, c) {#appendtrailingcharifabsent}
+
+‘s’ dizesi boş değilse ve sonunda ‘c’ karakteri yoksa, sonuna ‘c’ karakterini ekler.
+
+## convertCharset (s, from, to) {#convertcharset}
+
+‘from’ kodlamasından ‘to’ kodlamasına dönüştürülmüş ‘s’ dizesini döndürür.
+
+## base64Encode (s) {#base64encode}
+
+‘s’ dizesini Base64 olarak kodlar.
+
+## base64Decode (s) {#base64decode}
+
+Base64 kodlu ‘s’ dizesini orijinal dizeye çözer. Başarısızlık durumunda bir istisna yükseltir.
+
+## tryBase64Decode (s) {#trybase64decode}
+
+base64Decode'a benzer, ancak hata durumunda boş bir dize döndürülür.
+
+## endsWith (s, sonek) {#endswith}
+
+Dizenin belirtilen sonekle bitip bitmediğini döndürür. Dize belirtilen sonekle biterse 1, aksi takdirde 0 döndürür.
+
+## startsWith (str, önek) {#startswith}
+
+Dize belirtilen önekle başlıyorsa 1, aksi halde 0 döndürür.
+
+``` sql
+SELECT startsWith('Spider-Man', 'Spi');
+```
+
+**Döndürülen değerler**
+
+- 1, dize belirtilen önek ile başlarsa.
+- 0, dize belirtilen önek ile başlamazsa.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT startsWith('Hello, world!', 'He');
+```
+
+Sonuç:
+
+``` text
+┌─startsWith('Hello, world!', 'He')─┐
+│                                 1 │
+└───────────────────────────────────┘
+```
+
+## kırpmak {#trim}
+
+Belirtilen tüm karakterleri bir dizenin başından veya sonundan kaldırır.
+Varsayılan olarak, bir dizenin her iki ucundaki ortak boşlukların (ASCII karakteri 32) tüm ardışık tekrarlarını kaldırır.
+
+**Sözdizimi**
+
+``` sql
+trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string)
+```
+
+**Parametre**
+
+- `trim_character` — kırpılacak karakterler. [Dize](../../sql_reference/data_types/string.md).
+- `input_string` — kırpılacak dize. [Dize](../../sql_reference/data_types/string.md).
+
+**Döndürülen değer**
+
+Baştaki ve (veya) sondaki belirtilen karakterler kaldırılmış bir dize.
+
+Tür: `String`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT trim(BOTH ' ()' FROM '( Hello, world! )')
+```
+
+Sonuç:
+
+``` text
+┌─trim(BOTH ' ()' FROM '( Hello, world! )')─┐
+│ Hello, world!                             │
+└───────────────────────────────────────────┘
+```
+
+## trimLeft {#trimleft}
+
+Bir dizenin başındaki ortak boşlukların (ASCII karakteri 32) tüm ardışık tekrarlarını kaldırır. Diğer boşluk karakterlerini (sekme, bölünmez boşluk vb.) kaldırmaz.
+
+**Sözdizimi**
+
+``` sql
+trimLeft(input_string)
+```
+
+Takma ad: `ltrim(input_string)`.
+
+**Parametre**
+
+- `input_string` — kırpılacak dize. [Dize](../../sql_reference/data_types/string.md).
+
+**Döndürülen değer**
+
+Baştaki ortak boşluklar kaldırılmış bir dize.
+
+Tür: `String`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT trimLeft(' Hello, world! ')
+```
+
+Sonuç:
+
+``` text
+┌─trimLeft(' Hello, world! ')─┐
+│ Hello, world!               │
+└─────────────────────────────┘
+```
+
+## trimRight {#trimright}
+
+Bir dizenin sonundaki ortak boşlukların (ASCII karakteri 32) tüm ardışık tekrarlarını kaldırır. Diğer boşluk karakterlerini (sekme, bölünmez boşluk vb.) kaldırmaz.
+
+**Sözdizimi**
+
+``` sql
+trimRight(input_string)
+```
+
+Takma ad: `rtrim(input_string)`.
+
+**Parametre**
+
+- `input_string` — kırpılacak dize. [Dize](../../sql_reference/data_types/string.md).
+
+**Döndürülen değer**
+
+Sondaki ortak boşluklar kaldırılmış bir dize.
+
+Tür: `String`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT trimRight(' Hello, world! ')
+```
+
+Sonuç:
+
+``` text
+┌─trimRight(' Hello, world! ')─┐
+│  Hello, world!               │
+└──────────────────────────────┘
+```
+
+## trimBoth {#trimboth}
+
+Bir dizenin her iki ucundaki ortak boşlukların (ASCII karakteri 32) tüm ardışık tekrarlarını kaldırır. Diğer boşluk karakterlerini (sekme, bölünmez boşluk vb.) kaldırmaz.
+
+**Sözdizimi**
+
+``` sql
+trimBoth(input_string)
+```
+
+Takma ad: `trim(input_string)`.
+
+**Parametre**
+
+- `input_string` — kırpılacak dize. [Dize](../../sql_reference/data_types/string.md).
+
+**Döndürülen değer**
+
+Baştaki ve sondaki ortak boşluklar kaldırılmış bir dize.
+
+Tür: `String`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT trimBoth(' Hello, world! ')
+```
+
+Sonuç:
+
+``` text
+┌─trimBoth(' Hello, world! ')─┐
+│ Hello, world!               │
+└─────────────────────────────┘
+```
+
+## CRC32 (s) {#crc32}
+
+CRC-32-IEEE 802.3 polinomunu ve `0xffffffff` başlangıç değerini (zlib uygulaması) kullanarak bir dizenin CRC32 sağlama toplamını döndürür.
+
+Sonuç türü UInt32'dir.
+
+## CRC32IEEE (s) {#crc32ieee}
+
+CRC-32-IEEE 802.3 polinomunu kullanarak bir dizenin CRC32 sağlama toplamını döndürür.
+
+Sonuç türü UInt32'dir.
+
+## CRC64 (s) {#crc64}
+
+CRC-64-ECMA polinomunu kullanarak bir dizenin CRC64 sağlama toplamını döndürür.
+
+Sonuç türü UInt64'tür.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/string_functions/)
diff --git a/docs/tr/sql_reference/functions/string_replace_functions.md b/docs/tr/sql_reference/functions/string_replace_functions.md
new file mode 100644
index 00000000000..6a6c0e56aec
--- /dev/null
+++ b/docs/tr/sql_reference/functions/string_replace_functions.md
@@ -0,0 +1,94 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 42
+toc_title: "Dizelerde de\u011Fi\u015Ftirilmesi i\xE7in"
+---
+
+# Dizelerde arama ve değiştirme işlevleri {#functions-for-searching-and-replacing-in-strings}
+
+## replaceOne(Samanlık, desen, değiştirme) {#replaceonehaystack-pattern-replacement}
+
+Varsa, ‘haystack’ içindeki ‘pattern’ alt dizesinin ilk oluşumunu ‘replacement’ dizesiyle değiştirir.
+Burada ‘pattern’ ve ‘replacement’ sabit olmalıdır.
+
+## replaceAll (Samanlık, desen, değiştirme), değiştirin (Samanlık, desen, değiştirme) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement}
+
+‘haystack’ içindeki ‘pattern’ alt dizesinin tüm oluşumlarını ‘replacement’ dizesiyle değiştirir.
+
+## replaceRegexpOne(Samanlık, desen, değiştirme) {#replaceregexponehaystack-pattern-replacement}
+
+‘pattern’ düzenli ifadesini kullanarak değiştirme yapar. ‘pattern’ bir re2 düzenli ifadesidir.
+Varsa, yalnızca ilk oluşumu değiştirir.
+‘replacement’ bir desen olarak belirtilebilir. Bu desen `\0-\9` ikamelerini içerebilir.
+`\0` ikamesi tüm düzenli ifadeyi içerir. `\1-\9` ikameleri ilgili numaralı alt desenlere karşılık gelir. Bir şablonda `\` karakterini kullanmak için onu `\` ile kaçışlayın.
+Ayrıca, bir dize literalinin ekstra bir kaçış gerektirdiğini unutmayın. + +Örnek 1. Tarihi Amerikan format convertingına dönüştürme: + +``` sql +SELECT DISTINCT + EventDate, + replaceRegexpOne(toString(EventDate), '(\\d{4})-(\\d{2})-(\\d{2})', '\\2/\\3/\\1') AS res +FROM test.hits +LIMIT 7 +FORMAT TabSeparated +``` + +``` text +2014-03-17 03/17/2014 +2014-03-18 03/18/2014 +2014-03-19 03/19/2014 +2014-03-20 03/20/2014 +2014-03-21 03/21/2014 +2014-03-22 03/22/2014 +2014-03-23 03/23/2014 +``` + +Örnek 2. Bir dize on kez kopyalama: + +``` sql +SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') AS res +``` + +``` text +┌─res────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! │ +└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +## replaceRegexpAll(Samanlık, desen, değiştirme) {#replaceregexpallhaystack-pattern-replacement} + +Bu aynı şeyi yapar, ancak tüm oluşumların yerini alır. Örnek: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res +``` + +``` text +┌─res────────────────────────┐ +│ HHeelllloo,, WWoorrlldd!! │ +└────────────────────────────┘ +``` + +Normal bir ifade boş bir alt dize üzerinde çalıştıysa, bir istisna olarak, değiştirme birden çok kez yapılmaz. +Örnek: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res +``` + +``` text +┌─res─────────────────┐ +│ here: Hello, World! │ +└─────────────────────┘ +``` + +## regexpQuoteMeta (s) {#regexpquotemetas} + +İşlev, dizedeki bazı önceden tanımlanmış karakterlerden önce bir ters eğik çizgi ekler. +Önceden tanımlanmış karakterler: ‘0’, ‘\\’, ‘\|’, ‘(’, ‘)’, ‘^’, ‘$’, ‘.’, ‘\[’, '\]', ‘?’, '\*‘,’+‘,’{‘,’:‘,’-'. +Bu uygulama biraz re2::RE2::QuoteMeta farklıdır. Sıfır bayttan 00 yerine \\0 olarak çıkar ve yalnızca gerekli karakterlerden kaçar. +Daha fazla bilgi için bağlantıya bakın: [RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473) + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/) diff --git a/docs/tr/sql_reference/functions/string_search_functions.md b/docs/tr/sql_reference/functions/string_search_functions.md new file mode 100644 index 00000000000..bfa3d8d0bd7 --- /dev/null +++ b/docs/tr/sql_reference/functions/string_search_functions.md @@ -0,0 +1,379 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 41 +toc_title: "Arama Dizeleri \u0130\xE7in" +--- + +# Dizeleri aramak için işlevler {#functions-for-searching-strings} + +Arama, tüm bu işlevlerde varsayılan olarak büyük / küçük harf duyarlıdır. Büyük / küçük harf duyarlı arama için ayrı Varyantlar vardır. + +## pozisyon (Samanlık, iğne), bulun (Samanlık, iğne) {#position} + +1'den başlayarak dizedeki bulunan alt dizenin konumunu (bayt cinsinden) döndürür. + +Dize, tek baytlık kodlanmış bir metni temsil eden bir bayt kümesi içerdiği varsayımı altında çalışır. Bu varsayım karşılanmazsa ve bir karakter tek bir bayt kullanılarak temsil edilemezse, işlev bir istisna atmaz ve beklenmeyen bir sonuç döndürür. Karakter iki bayt kullanılarak temsil edilebilirse, iki bayt vb. kullanır. + +Büyük / küçük harf duyarsız arama için işlevi kullanın [positionCaseİnsensitive](#positioncaseinsensitive). 
+ +**Sözdizimi** + +``` sql +position(haystack, needle) +``` + +Takma ad: `locate(haystack, needle)`. + +**Parametre** + +- `haystack` — string, in which substring will to be searched. [Dize](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Dize](../syntax.md#syntax-string-literal). + +**Döndürülen değerler** + +- Alt dize bulunursa, bayt cinsinden başlangıç pozisyonu (1'den sayma). +- 0, alt dize bulunamadı. + +Tür: `Integer`. + +**Örnekler** + +İfade “Hello, world!” tek baytla kodlanmış bir metni temsil eden bir bayt kümesi içerir. İşlev beklenen bazı sonuçları döndürür: + +Sorgu: + +``` sql +SELECT position('Hello, world!', '!') +``` + +Sonuç: + +``` text +┌─position('Hello, world!', '!')─┐ +│ 13 │ +└────────────────────────────────┘ +``` + +Rusça'daki aynı ifade, tek bir bayt kullanılarak temsil edilemeyen karakterler içerir. İşlev beklenmedik bir sonuç verir (kullanım [positionUTF8](#positionutf8) çok bayt kodlu metin için işlev): + +Sorgu: + +``` sql +SELECT position('Привет, мир!', '!') +``` + +Sonuç: + +``` text +┌─position('Привет, мир!', '!')─┐ +│ 21 │ +└───────────────────────────────┘ +``` + +## positionCaseİnsensitive {#positioncaseinsensitive} + +Olarak aynı [konum](#position) 1'den başlayarak dizedeki bulunan alt dizenin konumunu (bayt cinsinden) döndürür. Büyük / küçük harf duyarlı bir arama için işlevi kullanın. + +Dize, tek baytlık kodlanmış bir metni temsil eden bir bayt kümesi içerdiği varsayımı altında çalışır. Bu varsayım karşılanmazsa ve bir karakter tek bir bayt kullanılarak temsil edilemezse, işlev bir istisna atmaz ve beklenmeyen bir sonuç döndürür. Karakter iki bayt kullanılarak temsil edilebilirse, iki bayt vb. kullanır. + +**Sözdizimi** + +``` sql +positionCaseInsensitive(haystack, needle) +``` + +**Parametre** + +- `haystack` — string, in which substring will to be searched. [Dize](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Dize](../syntax.md#syntax-string-literal). + +**Döndürülen değerler** + +- Alt dize bulunursa, bayt cinsinden başlangıç pozisyonu (1'den sayma). +- 0, alt dize bulunamadı. + +Tür: `Integer`. + +**Örnek** + +Sorgu: + +``` sql +SELECT positionCaseInsensitive('Hello, world!', 'hello') +``` + +Sonuç: + +``` text +┌─positionCaseInsensitive('Hello, world!', 'hello')─┐ +│ 1 │ +└───────────────────────────────────────────────────┘ +``` + +## positionUTF8 {#positionutf8} + +1'den başlayarak dizedeki bulunan alt dizenin konumunu (Unicode noktalarında) döndürür. + +Dizenin UTF-8 kodlanmış bir metni temsil eden bir bayt kümesi içerdiği varsayımı altında çalışır. Bu varsayım karşılanmazsa, işlev bir istisna atmaz ve beklenmeyen bir sonuç döndürür. Karakter iki Unicode noktası kullanılarak temsil edilebilirse, iki vb. kullanır. + +Büyük / küçük harf duyarsız arama için işlevi kullanın [positionCaseİnsensitiveUTF8](#positioncaseinsensitiveutf8). + +**Sözdizimi** + +``` sql +positionUTF8(haystack, needle) +``` + +**Parametre** + +- `haystack` — string, in which substring will to be searched. [Dize](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Dize](../syntax.md#syntax-string-literal). + +**Döndürülen değerler** + +- Unicode noktalarında başlangıç pozisyonu (1'den sayma), eğer alt dize bulundu. +- 0, alt dize bulunamadı. + +Tür: `Integer`. + +**Örnekler** + +İfade “Hello, world!” rusça'da, tek noktalı kodlanmış bir metni temsil eden bir dizi Unicode noktası bulunur. 
İşlev beklenen sonucu döndürür:
+
+Sorgu:
+
+``` sql
+SELECT positionUTF8('Привет, мир!', '!')
+```
+
+Sonuç:
+
+``` text
+┌─positionUTF8('Привет, мир!', '!')─┐
+│                                12 │
+└───────────────────────────────────┘
+```
+
+“Salut, étudiante!” ifadesinde `é` karakteri tek bir kod noktasıyla (`U+00E9`) veya iki kod noktasıyla (`U+0065U+0301`) temsil edilebilir; bu nedenle işlev beklenmedik sonuçlar döndürebilir:
+
+`é` harfinin tek Unicode noktası `U+00E9` ile temsil edildiği sorgu:
+
+``` sql
+SELECT positionUTF8('Salut, étudiante!', '!')
+```
+
+Sonuç:
+
+``` text
+┌─positionUTF8('Salut, étudiante!', '!')─┐
+│                                     17 │
+└────────────────────────────────────────┘
+```
+
+`é` harfinin iki Unicode noktası `U+0065U+0301` ile temsil edildiği sorgu:
+
+``` sql
+SELECT positionUTF8('Salut, étudiante!', '!')
+```
+
+Sonuç:
+
+``` text
+┌─positionUTF8('Salut, étudiante!', '!')─┐
+│                                     18 │
+└────────────────────────────────────────┘
+```
+
+## positionCaseInsensitiveUTF8 {#positioncaseinsensitiveutf8}
+
+[positionUTF8](#positionutf8) ile aynıdır, ancak büyük/küçük harfe duyarsızdır. Bulunan alt dizenin dizedeki konumunu (Unicode noktaları cinsinden), 1'den başlayarak döndürür.
+
+Dizenin UTF-8 kodlanmış bir metni temsil eden bir bayt kümesi içerdiği varsayımı altında çalışır. Bu varsayım karşılanmazsa, işlev bir istisna atmaz ve beklenmeyen bir sonuç döndürür. Bir karakter iki Unicode noktası kullanılarak temsil ediliyorsa, iki nokta (vb.) kullanır.
+
+**Sözdizimi**
+
+``` sql
+positionCaseInsensitiveUTF8(haystack, needle)
+```
+
+**Parametre**
+
+- `haystack` — içinde alt dizenin aranacağı dize. [Dize](../syntax.md#syntax-string-literal).
+- `needle` — aranacak alt dize. [Dize](../syntax.md#syntax-string-literal).
+
+**Döndürülen değer**
+
+- Alt dize bulunduysa, Unicode noktaları cinsinden başlangıç konumu (1'den sayma).
+- Alt dize bulunamadıysa 0.
+
+Tür: `Integer`.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')
+```
+
+Sonuç:
+
+``` text
+┌─positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')─┐
+│                                                  9 │
+└────────────────────────────────────────────────────┘
+```
+
+## multiSearchAllPositions {#multisearchallpositions}
+
+[konum](string_search_functions.md#position) ile aynıdır, ancak dizede bulunan karşılık gelen alt dizelerin konumlarını (bayt cinsinden) içeren bir `Array` döndürür. Konumlar 1'den başlayarak indekslenir.
+
+Arama, dize kodlaması ve harmanlama dikkate alınmadan bayt dizileri üzerinde gerçekleştirilir.
+
+- Büyük/küçük harfe duyarsız ASCII arama için `multiSearchAllPositionsCaseInsensitive` işlevini kullanın.
+- UTF-8'de arama için [multiSearchAllPositionsUTF8](#multiSearchAllPositionsUTF8) işlevini kullanın.
+- Büyük/küçük harfe duyarsız UTF-8 arama için `multiSearchAllPositionsCaseInsensitiveUTF8` işlevini kullanın.
+
+**Sözdizimi**
+
+``` sql
+multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen])
+```
+
+**Parametre**
+
+- `haystack` — içinde alt dizenin aranacağı dize. [Dize](../syntax.md#syntax-string-literal).
+- `needle` — aranacak alt dize. [Dize](../syntax.md#syntax-string-literal).
+
+**Döndürülen değerler**
+
+- Karşılık gelen alt dize bulunduysa bayt cinsinden başlangıç konumlarının (1'den sayma) dizisi; alt dize bulunamadıysa ilgili öğe 0 olur.
+
+**Örnek**
+
+Sorgu:
+
+``` sql
+SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])
+```
+
+Sonuç:
+
+``` text
+┌─multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])─┐
+│ [0,13,0]                                                          │
+└───────────────────────────────────────────────────────────────────┘
+```
+
+## multiSearchAllPositionsUTF8 {#multiSearchAllPositionsUTF8}
+
+Bkz. `multiSearchAllPositions`.
+
+## multiSearchFirstPosition (Samanlık, \[iğne1, iğne2, …, iğneN\]) {#multisearchfirstposition}
+
+`position` ile aynıdır, ancak `haystack` dizesinde iğnelerden herhangi biriyle eşleşen en soldaki ofseti döndürür.
+
+Büyük/küçük harfe duyarsız arama ve/veya UTF-8 biçimi için `multiSearchFirstPositionCaseInsensitive, multiSearchFirstPositionUTF8, multiSearchFirstPositionCaseInsensitiveUTF8` işlevlerini kullanın.
+
+## multiSearchFirstIndex (Samanlık, \[iğne1, iğne2, …, iğneN\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen}
+
+`haystack` dizesinde bulunan en soldaki iğnenin `i` dizinini (1'den başlayarak) döndürür; hiçbir iğne bulunamazsa 0 döndürür.
+
+Büyük/küçük harfe duyarsız arama ve/veya UTF-8 biçimi için `multiSearchFirstIndexCaseInsensitive, multiSearchFirstIndexUTF8, multiSearchFirstIndexCaseInsensitiveUTF8` işlevlerini kullanın.
+
+## multiSearchAny (Samanlık, \[iğne1, iğne2, …, iğneN\]) {#function-multisearchany}
+
+İğnelerden en az biri `haystack` dizesiyle eşleşirse 1, aksi takdirde 0 döndürür.
+
+Büyük/küçük harfe duyarsız arama ve/veya UTF-8 biçimi için `multiSearchAnyCaseInsensitive, multiSearchAnyUTF8, multiSearchAnyCaseInsensitiveUTF8` işlevlerini kullanın.
+
+!!! note "Not"
+    Uygulama gereği, tüm `multiSearch*` işlevlerinde iğne sayısı 2⁸'den az olmalıdır.
+
+## match (Samanlık, desen) {#matchhaystack-pattern}
+
+Dizenin `pattern` düzenli ifadesiyle eşleşip eşleşmediğini denetler. `pattern` bir `re2` düzenli ifadesidir. `re2` düzenli ifadelerinin [sözdizimi](https://github.com/google/re2/wiki/Syntax), Perl düzenli ifadelerinin sözdiziminden daha sınırlıdır.
+
+Eşleşmezse 0, eşleşirse 1 döndürür.
+
+Ters eğik çizgi sembolü (`\`) düzenli ifadede kaçış için kullanılır. Aynı sembol, dize değişmezlerinde de kaçış için kullanılır. Bu nedenle, düzenli ifadedeki bir sembolden kaçmak için dize değişmezinde iki ters eğik çizgi (\\) yazmanız gerekir.
+
+Düzenli ifade, dizeyle bir bayt kümesiymiş gibi çalışır. Düzenli ifade boş bayt içeremez.
+Bir dizedeki alt dizeleri arayan desenler için LIKE veya ‘position’ kullanın; çok daha hızlı çalışırlar.
+
+## multiMatchAny (Samanlık, \[desen1, desen2, …, desenN\]) {#multimatchanyhaystack-pattern1-pattern2-patternn}
+
+`match` ile aynıdır, ancak düzenli ifadelerin hiçbiri eşleşmezse 0, desenlerden herhangi biri eşleşirse 1 döndürür. [hyperscan](https://github.com/intel/hyperscan) kitaplığını kullanır. Bir dizede alt dize arayan desenler için `multiSearchAny` kullanmak daha iyidir; çok daha hızlı çalışır.
+
+!!! note "Not"
+    Herhangi bir `haystack` dizesinin uzunluğu 2³² bayttan az olmalıdır; aksi takdirde bir özel durum atılır. Bu kısıtlama hyperscan API'sinden kaynaklanır.
+
+## multiMatchAnyIndex (Samanlık, \[desen1, desen2, …, desenN\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn}
+
+`multiMatchAny` ile aynıdır, ancak samanlıkla eşleşen herhangi bir desenin dizinini döndürür.
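+
+Aşağıda davranışı gösteren, yalnızca örnek amaçlı küçük bir taslak verilmiştir (dize ve desenler varsayımsaldır; döndürülen dizin, desen listesindeki eşleşen desenin 1'den başlayan sırasına karşılık gelir):
+
+``` sql
+SELECT multiMatchAnyIndex('Hello, World!', ['house', 'World']) AS idx
+```
+
+``` text
+┌─idx─┐
+│   2 │
+└─────┘
+```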
+ +## multiMatchAllİndices (haystack, \[desen1, desen2, …, patternve\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn} + +Olarak aynı `multiMatchAny`, ancak herhangi bir sırada Samanlık eşleşen tüm indicies dizisini döndürür. + +## multiFuzzyMatchAny (Samanlık, mesafe, \[desen1, desen2, …, patternve\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} + +Olarak aynı `multiMatchAny`, ancak herhangi bir desen samanlıkta bir sabitle eşleşirse 1 döndürür [mesafeyi Düzenle](https://en.wikipedia.org/wiki/Edit_distance). Bu fonksiyon aynı zamanda deneysel bir moddadır ve son derece yavaş olabilir. Daha fazla bilgi için bkz. [hyperscan belgeleri](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching). + +## multifuzzymatchanyındex (Samanlık, mesafe, \[desen1, desen2, …, patternve\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} + +Olarak aynı `multiFuzzyMatchAny`, ancak sabit bir düzenleme mesafesi içinde Samanlık eşleşen herhangi bir dizin döndürür. + +## multiFuzzyMatchAllİndices (Samanlık, mesafe, \[desen1, desen2, …, patternve\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn} + +Olarak aynı `multiFuzzyMatchAny`, ancak sabit bir düzenleme mesafesi içinde saman yığını ile eşleşen herhangi bir sırada tüm dizinlerin dizisini döndürür. + +!!! note "Not" + `multiFuzzyMatch*` işlevler UTF-8 normal ifadeleri desteklemez ve bu tür ifadeler hyperscan kısıtlaması nedeniyle bayt olarak kabul edilir. + +!!! note "Not" + Hyperscan kullanan tüm işlevleri kapatmak için, ayarı kullanın `SET allow_hyperscan = 0;`. + +## özü (Samanlık, desen) {#extracthaystack-pattern} + +Normal ifade kullanarak bir dize parçasını ayıklar. Eğer ‘haystack’ eşleşmiyor ‘pattern’ regex, boş bir dize döndürülür. Regex alt desenler içermiyorsa, tüm regex ile eşleşen parçayı alır. Aksi takdirde, ilk alt desenle eşleşen parçayı alır. + +## extractAll(Samanlık, desen) {#extractallhaystack-pattern} + +Normal bir ifade kullanarak bir dizenin tüm parçalarını ayıklar. Eğer ‘haystack’ eşleşmiyor ‘pattern’ regex, boş bir dize döndürülür. Regex için tüm eşleşmelerden oluşan bir dizi dizeyi döndürür. Genel olarak, davranış ile aynıdır ‘extract’ işlev (bir alt desen yoksa ilk alt deseni veya tüm ifadeyi alır). + +## gibi (Samanlık, desen), Samanlık gibi desen operatörü {#function-like} + +Bir dizenin basit bir normal ifadeyle eşleşip eşleşmediğini denetler. +Normal ifade metasymbols içerebilir `%` ve `_`. + +`%` herhangi bir bayt miktarını (sıfır karakter dahil) gösterir. + +`_` herhangi bir bayt gösterir. + +Ters eğik çizgi kullanın (`\`) metasimbollerden kaçmak için. Açıklamasında kaçan nota bakın ‘match’ işlev. + +Gibi düzenli ifadeler için `%needle%`, kod daha optimal ve hızlı olarak çalışır `position` işlev. +Diğer normal ifadeler için kod, ‘match’ işlev. + +## notLike (Samanlık, desen), Samanlık desen operatörü gibi değil {#function-notlike} + +Aynı şey ‘like’ ama negatif. + +## ngramDistance(Samanlık, iğne) {#ngramdistancehaystack-needle} + +Arasındaki 4 gram distancelık mesaf theeyi hesaplar `haystack` ve `needle`: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns float number from 0 to 1 – the closer to zero, the more strings are similar to each other. If the constant `needle` veya `haystack` 32kb'den fazla, bir istisna atar. Sabit olmayan bazı `haystack` veya `needle` dizeler 32kb'den daha fazladır, mesafe her zaman birdir. 
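+
+Örneğin, özdeş dizeler için mesafenin 0 olması beklenir (yalnızca gösterim amaçlı bir taslak; birbirinden çok farklı dizeler için değer 1'e yaklaşır):
+
+``` sql
+SELECT ngramDistance('ClickHouse', 'ClickHouse') AS dist
+```
+
+``` text
+┌─dist─┐
+│    0 │
+└──────┘
+```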
+ +Büyük/küçük harf duyarsız arama veya / VE UTF-8 formatında kullanım işlevleri için `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8`. + +## ngramsearch(Samanlık, iğne) {#ngramsearchhaystack-needle} + +Aynı olarak `ngramDistance` ama arasındaki simetrik olmayan farkı hesaplar `needle` ve `haystack` – the number of n-grams from needle minus the common number of n-grams normalized by the number of `needle` n-büyükanne. Daha yakın, daha `needle` is in the `haystack`. Bulanık dize arama için yararlı olabilir. + +Büyük/küçük harf duyarsız arama veya / VE UTF-8 formatında kullanım işlevleri için `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8`. + +!!! note "Not" + For UTF-8 case we use 3-gram distance. All these are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With UTF-8 case-insensitive format we do not use fair `tolower` function – we zero the 5-th bit (starting from zero) of each codepoint byte and first bit of zeroth byte if bytes more than one – this works for Latin and mostly for all Cyrillic letters. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/tr/sql_reference/functions/type_conversion_functions.md b/docs/tr/sql_reference/functions/type_conversion_functions.md new file mode 100644 index 00000000000..643ddf9dbc0 --- /dev/null +++ b/docs/tr/sql_reference/functions/type_conversion_functions.md @@ -0,0 +1,534 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 38 +toc_title: "Tip D\xF6n\xFC\u015Ft\xFCrme" +--- + +# Tip Dönüştürme Fonksiyonları {#type-conversion-functions} + +## Sayısal dönüşümlerin ortak sorunları {#numeric-conversion-issues} + +Bir değeri birinden başka bir veri türüne dönüştürdüğünüzde, ortak durumda, veri kaybına neden olabilecek güvenli olmayan bir işlem olduğunu unutmamalısınız. Değeri daha büyük bir veri türünden daha küçük bir veri türüne sığdırmaya çalışırsanız veya değerleri farklı veri türleri arasında dönüştürürseniz, veri kaybı oluşabilir. + +ClickHouse vardır [C++ programları ile aynı davranış](https://en.cppreference.com/w/cpp/language/implicit_conversion). + +## toİnt(8/16/32/64) {#toint8163264} + +Bir giriş değeri dönüştürür [Tamsayı](../../sql_reference/data_types/int_uint.md) veri türü. Bu işlev ailesi şunları içerir: + +- `toInt8(expr)` — Results in the `Int8` veri türü. +- `toInt16(expr)` — Results in the `Int16` veri türü. +- `toInt32(expr)` — Results in the `Int32` veri türü. +- `toInt64(expr)` — Results in the `Int64` veri türü. + +**Parametre** + +- `expr` — [İfade](../syntax.md#syntax-expressions) bir sayının ondalık gösterimiyle bir sayı veya dize döndürülmesi. Sayıların ikili, sekizli ve onaltılık gösterimleri desteklenmez. Önde gelen sıfırlar soyulur. + +**Döndürülen değer** + +Tamsayı değeri `Int8`, `Int16`, `Int32`, veya `Int64` veri türü. + +Fonksiyonlar kullanımı [sıfıra doğru yuvarlama](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), yani sayıların kesirli rakamlarını keserler. + +Fonksiyon behaviorların davranışı [N andan ve In andf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) argümanlar tanımsızdır. Hakkında hatırla [sayısal convertions sorunları](#numeric-conversion-issues), fonksiyonları kullanırken. 
+ +**Örnek** + +``` sql +SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8) +``` + +``` text +┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐ +│ -9223372036854775808 │ 32 │ 16 │ 8 │ +└──────────────────────┴─────────────┴───────────────┴─────────────┘ +``` + +## toİnt (8/16/32/64)OrZero {#toint8163264orzero} + +String türünde bir argüman alır ve İnt içine ayrıştırmaya çalışır(8 \| 16 \| 32 \| 64). Başarısız olursa, 0 döndürür. + +**Örnek** + +``` sql +select toInt64OrZero('123123'), toInt8OrZero('123qwe123') +``` + +``` text +┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐ +│ 123123 │ 0 │ +└─────────────────────────┴───────────────────────────┘ +``` + +## toİnt(8/16/32/64) OrNull {#toint8163264ornull} + +String türünde bir argüman alır ve İnt içine ayrıştırmaya çalışır(8 \| 16 \| 32 \| 64). Başarısız olursa, NULL döndürür. + +**Örnek** + +``` sql +select toInt64OrNull('123123'), toInt8OrNull('123qwe123') +``` + +``` text +┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐ +│ 123123 │ ᴺᵁᴸᴸ │ +└─────────────────────────┴───────────────────────────┘ +``` + +## toUİnt(8/16/32/64) {#touint8163264} + +Bir giriş değeri dönüştürür [Uİnt](../../sql_reference/data_types/int_uint.md) veri türü. Bu işlev ailesi şunları içerir: + +- `toUInt8(expr)` — Results in the `UInt8` veri türü. +- `toUInt16(expr)` — Results in the `UInt16` veri türü. +- `toUInt32(expr)` — Results in the `UInt32` veri türü. +- `toUInt64(expr)` — Results in the `UInt64` veri türü. + +**Parametre** + +- `expr` — [İfade](../syntax.md#syntax-expressions) bir sayının ondalık gösterimiyle bir sayı veya dize döndürülmesi. Sayıların ikili, sekizli ve onaltılık gösterimleri desteklenmez. Önde gelen sıfırlar soyulur. + +**Döndürülen değer** + +Tamsayı değeri `UInt8`, `UInt16`, `UInt32`, veya `UInt64` veri türü. + +Fonksiyonlar kullanımı [sıfıra doğru yuvarlama](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), yani sayıların kesirli rakamlarını keserler. + +Olumsuz agruments için işlevlerin davranışı ve [N andan ve In andf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) argümanlar tanımsızdır. Örneğin, negatif bir sayı ile bir dize geçirirseniz `'-32'`, ClickHouse bir özel durum yükseltir. Hakkında hatırla [sayısal convertions sorunları](#numeric-conversion-issues), fonksiyonları kullanırken. + +**Örnek** + +``` sql +SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) +``` + +``` text +┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐ +│ 9223372036854775808 │ 4294967264 │ 16 │ 8 │ +└─────────────────────┴───────────────┴────────────────┴──────────────┘ +``` + +## toUİnt (8/16/32/64)OrZero {#touint8163264orzero} + +## toUİnt(8/16/32/64) OrNull {#touint8163264ornull} + +## toFloat(32/64) {#tofloat3264} + +## toFloat (32/64)OrZero {#tofloat3264orzero} + +## toFloat(32/64) OrNull {#tofloat3264ornull} + +## toDate {#todate} + +## toDateOrZero {#todateorzero} + +## toDateOrNull {#todateornull} + +## toDateTime {#todatetime} + +## toDateTimeOrZero {#todatetimeorzero} + +## toDateTimeOrNull {#todatetimeornull} + +## toDecimal(32/64/128) {#todecimal3264128} + +Dönüşüyo `value` to the [Ondalık](../../sql_reference/data_types/decimal.md) hassas veri türü `S`. Bu `value` bir sayı veya bir dize olabilir. Bu `S` (scale) parametresi ondalık basamak sayısını belirtir. 
+ +- `toDecimal32(value, S)` +- `toDecimal64(value, S)` +- `toDecimal128(value, S)` + +## toDecimal(32/64/128) OrNull {#todecimal3264128ornull} + +Bir giriş dizesini bir [Nullable (Ondalık (P, S))](../../sql_reference/data_types/decimal.md) veri türü değeri. Bu işlev ailesi şunları içerir: + +- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` veri türü. +- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` veri türü. +- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` veri türü. + +Bu işlevler yerine kullanılmalıdır `toDecimal*()` fonksiyonlar, eğer bir almak için tercih `NULL` bir giriş değeri ayrıştırma hatası durumunda bir özel durum yerine değer. + +**Parametre** + +- `expr` — [İfade](../syntax.md#syntax-expressions) bir değeri döndürür [Dize](../../sql_reference/data_types/string.md) veri türü. ClickHouse ondalık sayının metinsel temsilini bekler. Mesela, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. + +**Döndürülen değer** + +İçinde bir değer `Nullable(Decimal(P,S))` veri türü. Değeri içerir: + +- İle sayı `S` ondalık basamaklar, ClickHouse giriş dizesi bir sayı olarak yorumlar. +- `NULL`, ClickHouse giriş dizesini bir sayı olarak yorumlayamazsa veya giriş numarası birden fazla içeriyorsa `S` ondalık basamaklar. + +**Örnekler** + +``` sql +SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val) +``` + +``` text +┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐ +│ -1.11100 │ Nullable(Decimal(9, 5)) │ +└──────────┴────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) +``` + +``` text +┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐ +│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2)) │ +└──────┴────────────────────────────────────────────────────┘ +``` + +## toDecimal (32/64/128)OrZero {#todecimal3264128orzero} + +Bir giriş değeri dönüştürür [Ondalık(P, S)](../../sql_reference/data_types/decimal.md) veri türü. Bu işlev ailesi şunları içerir: + +- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` veri türü. +- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` veri türü. +- `toDecimal128OrZero( expr, S)` — Results in `Decimal128(S)` veri türü. + +Bu işlevler yerine kullanılmalıdır `toDecimal*()` fonksiyonlar, eğer bir almak için tercih `0` bir giriş değeri ayrıştırma hatası durumunda bir özel durum yerine değer. + +**Parametre** + +- `expr` — [İfade](../syntax.md#syntax-expressions) bir değeri döndürür [Dize](../../sql_reference/data_types/string.md) veri türü. ClickHouse ondalık sayının metinsel temsilini bekler. Mesela, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. + +**Döndürülen değer** + +İçinde bir değer `Nullable(Decimal(P,S))` veri türü. Değeri içerir: + +- İle sayı `S` ondalık basamaklar, ClickHouse giriş dizesi bir sayı olarak yorumlar. +- 0 ile `S` ondalık basamaklar, ClickHouse giriş dizesini bir sayı olarak yorumlayamazsa veya giriş numarası birden fazla içeriyorsa `S` ondalık basamaklar. 
+ +**Örnek** + +``` sql +SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val) +``` + +``` text +┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐ +│ -1.11100 │ Decimal(9, 5) │ +└──────────┴────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val) +``` + +``` text +┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐ +│ 0.00 │ Decimal(9, 2) │ +└──────┴────────────────────────────────────────────────────┘ +``` + +## toString {#tostring} + +Sayılar, dizeler (ancak sabit olmayan dizeler), tarihler ve tarihlerle saatler arasında dönüştürme işlevleri. +Tüm bu işlevler bir argümanı kabul eder. + +Bir dizeye veya dizeye dönüştürürken, değer, sekmeyle aynı kuralları kullanarak biçimlendirilir veya ayrıştırılır. ayrı biçim (ve hemen hemen tüm diğer metin biçimleri). Dize ayrıştırılamazsa, bir istisna atılır ve istek iptal edilir. + +Tarihleri sayılara dönüştürürken veya tam tersi, Tarih Unix döneminin başlangıcından bu yana geçen gün sayısına karşılık gelir. +Tarihleri zamanlarla sayılara dönüştürürken veya tam tersi olduğunda, zaman ile tarih, Unix döneminin başlangıcından bu yana geçen saniye sayısına karşılık gelir. + +ToDate / toDateTime işlevleri için tarih ve saatli tarih biçimleri aşağıdaki gibi tanımlanır: + +``` text +YYYY-MM-DD +YYYY-MM-DD hh:mm:ss +``` + +Özel durum olarak, uınt32, Int32, Uınt64 veya Int64 sayısal türlerinden bugüne dönüştürme ve sayı 65536'dan büyük veya eşitse, sayı Unıx zaman damgası (ve gün sayısı olarak değil) olarak yorumlanır ve tarihe yuvarlanır. Bu, yaygın yazı oluşumu için destek sağlar ‘toDate(unix\_timestamp)’, aksi takdirde bir hata olur ve daha hantal yazmayı gerektirir ‘toDate(toDateTime(unix\_timestamp))’. + +Bir tarih ve tarih ile saat arasında dönüştürme doğal bir şekilde gerçekleştirilir: boş bir zaman ekleyerek veya saati bırakarak. + +Sayısal türler arasındaki dönüştürme, C++ ' daki farklı sayısal türler arasındaki atamalarla aynı kuralları kullanır. + +Ayrıca, Tostring işlevi DateTime bağımsız değişkeni, saat dilimi adını içeren ikinci bir dize bağımsız değişkeni alabilir. Örnek: `Asia/Yekaterinburg` Bu durumda, saat belirtilen saat dilimine göre biçimlendirilir. + +``` sql +SELECT + now() AS now_local, + toString(now(), 'Asia/Yekaterinburg') AS now_yekat +``` + +``` text +┌───────────now_local─┬─now_yekat───────────┐ +│ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │ +└─────────────────────┴─────────────────────┘ +``` + +Ayrıca bakınız `toUnixTimestamp` işlev. + +## toFixedString(s, N) {#tofixedstrings-n} + +Bir dize türü bağımsız değişkeni dönüştürür bir FixedString(N) türü (sabit uzunlukta bir dize N). N sabit olmalıdır. +Dize n'den daha az bayt varsa, sağa boş bayt ile geçirilir. Dize n'den daha fazla bayt varsa, bir özel durum atılır. + +## tostringcuttozero (s) {#tostringcuttozeros} + +Bir dize veya fixedstring bağımsız değişkeni kabul eder. Bulunan ilk sıfır baytta kesilmiş içeriği olan dizeyi döndürür. 
+ +Örnek: + +``` sql +SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut +``` + +``` text +┌─s─────────────┬─s_cut─┐ +│ foo\0\0\0\0\0 │ foo │ +└───────────────┴───────┘ +``` + +``` sql +SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut +``` + +``` text +┌─s──────────┬─s_cut─┐ +│ foo\0bar\0 │ foo │ +└────────────┴───────┘ +``` + +## reinterpretAsUİnt(8/16/32/64) {#reinterpretasuint8163264} + +## reinterpretAsİnt(8/16/32/64) {#reinterpretasint8163264} + +## reinterpretAsFloat (32/64) {#reinterpretasfloat3264} + +## reinterpretAsDate {#reinterpretasdate} + +## reinterpretAsDateTime {#reinterpretasdatetime} + +Bu işlevler bir dizeyi kabul eder ve dizenin başına yerleştirilen baytları ana bilgisayar düzeninde (little endian) bir sayı olarak yorumlar. Dize yeterince uzun değilse, işlevler dize gerekli sayıda boş baytla doldurulmuş gibi çalışır. Dize gerekenden daha uzunsa, ek bayt yoksayılır. Bir tarih, Unix döneminin başlangıcından bu yana geçen gün sayısı olarak yorumlanır ve zamana sahip bir tarih, Unix döneminin başlangıcından bu yana geçen saniye sayısı olarak yorumlanır. + +## reinterpretAsString {#type_conversion_functions-reinterpretAsString} + +Bu işlev, bir sayı veya tarih veya tarih saat ile kabul eder ve ana bilgisayar düzeninde (little endian) karşılık gelen değeri temsil eden bayt içeren bir dize döndürür. Boş bayt sondan bırakılır. Örneğin, 255 uint32 türü değeri bir bayt uzunluğunda bir dizedir. + +## reinterpretAsFixedString {#reinterpretasfixedstring} + +Bu işlev, bir sayı veya tarih veya tarih saat ile kabul eder ve karşılık gelen değeri ana bilgisayar sırasına (little endian) temsil eden bayt içeren bir FixedString döndürür. Boş bayt sondan bırakılır. Örneğin, 255 uint32 türü değeri bir bayt uzunluğunda bir FixedString. + +## CAS (t(x, t) {#type_conversion_function-cast} + +Dönüşüyo ‘x’ to the ‘t’ veri türü. Sözdizimi CAST (x AS t) da desteklenmektedir. + +Örnek: + +``` sql +SELECT + '2016-06-15 23:00:00' AS timestamp, + CAST(timestamp AS DateTime) AS datetime, + CAST(timestamp AS Date) AS date, + CAST(timestamp, 'String') AS string, + CAST(timestamp, 'FixedString(22)') AS fixed_string +``` + +``` text +┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐ +│ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │ +└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘ +``` + +Fixedstring(N) ' ye dönüştürme yalnızca String veya FixedString(N) türünde argümanlar için çalışır. + +Type con conversionvers conversionion to [Nullable](../../sql_reference/data_types/nullable.md) ve geri desteklenmektedir. Örnek: + +``` sql +SELECT toTypeName(x) FROM t_null +``` + +``` text +┌─toTypeName(x)─┐ +│ Int8 │ +│ Int8 │ +└───────────────┘ +``` + +``` sql +SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null +``` + +``` text +┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐ +│ Nullable(UInt16) │ +│ Nullable(UInt16) │ +└─────────────────────────────────────────┘ +``` + +## toİnterval(yıl\|Çeyrek\|Ay\|hafta\|Gün\|Saat\|Dakika / Saniye) {#function-tointerval} + +Bir sayı türü argümanını bir [Aralıklı](../../sql_reference/data_types/special_data_types/interval.md) veri türü. 
+ +**Sözdizimi** + +``` sql +toIntervalSecond(number) +toIntervalMinute(number) +toIntervalHour(number) +toIntervalDay(number) +toIntervalWeek(number) +toIntervalMonth(number) +toIntervalQuarter(number) +toIntervalYear(number) +``` + +**Parametre** + +- `number` — Duration of interval. Positive integer number. + +**Döndürülen değerler** + +- Değeri `Interval` veri türü. + +**Örnek** + +``` sql +WITH + toDate('2019-01-01') AS date, + INTERVAL 1 WEEK AS interval_week, + toIntervalWeek(1) AS interval_to_week +SELECT + date + interval_week, + date + interval_to_week +``` + +``` text +┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐ +│ 2019-01-08 │ 2019-01-08 │ +└───────────────────────────┴──────────────────────────────┘ +``` + +## parseDateTimeBestEffort {#parsedatetimebesteffort} + +Bir tarih ve saati dönüştürür [Dize](../../sql_reference/data_types/string.md) temsil etmek [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) veri türü. + +İşlev ayrıştırır [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123-5.2.14 RFC-822 Tarih ve Saat özellikleri](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse ve diğer bazı tarih ve saat biçimleri. + +**Sözdizimi** + +``` sql +parseDateTimeBestEffort(time_string [, time_zone]); +``` + +**Parametre** + +- `time_string` — String containing a date and time to convert. [Dize](../../sql_reference/data_types/string.md). +- `time_zone` — Time zone. The function parses `time_string` saat dilimine göre. [Dize](../../sql_reference/data_types/string.md). + +**Desteklenen standart dışı formatlar** + +- 9 içeren bir dize..10 haneli [unix zaman damgası](https://en.wikipedia.org/wiki/Unix_time). +- Tarih ve saat bileşeni olan bir dize: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss` vb. +- Bir tarih, ancak hiçbir zaman bileşeni ile bir dize: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` vb. +- Bir gün ve Saat ile bir dize: `DD`, `DD hh`, `DD hh:mm`. Bu durumda `YYYY-MM` olarak ikame edilir `2000-01`. +- Tarih ve Saat Saat Dilimi uzaklık bilgileri ile birlikte içeren bir dize: `YYYY-MM-DD hh:mm:ss ±h:mm` vb. Mesela, `2020-12-12 17:36:00 -5:00`. + +Ayırıcılı tüm formatlar için işlev, tam adlarıyla veya bir ay adının ilk üç harfiyle ifade edilen ay adlarını ayrıştırır. Örnekler: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. + +**Döndürülen değer** + +- `time_string` dönüştürül thedü `DateTime` veri türü. 
+ +**Örnekler** + +Sorgu: + +``` sql +SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') +AS parseDateTimeBestEffort; +``` + +Sonuç: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2020-12-12 12:12:57 │ +└─────────────────────────┘ +``` + +Sorgu: + +``` sql +SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') +AS parseDateTimeBestEffort +``` + +Sonuç: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-08-18 10:22:16 │ +└─────────────────────────┘ +``` + +Sorgu: + +``` sql +SELECT parseDateTimeBestEffort('1284101485') +AS parseDateTimeBestEffort +``` + +Sonuç: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2015-07-07 12:04:41 │ +└─────────────────────────┘ +``` + +Sorgu: + +``` sql +SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') +AS parseDateTimeBestEffort +``` + +Sonuç: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-12-12 10:12:12 │ +└─────────────────────────┘ +``` + +Sorgu: + +``` sql +SELECT parseDateTimeBestEffort('10 20:19') +``` + +Sonuç: + +``` text +┌─parseDateTimeBestEffort('10 20:19')─┐ +│ 2000-01-10 20:19:00 │ +└─────────────────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- \[ISO 8601 announcement by @xkcd\](https://xkcd.com/1179/) +- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [toDate](#todate) +- [toDateTime](#todatetime) + +## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} + +İçin aynı [parseDateTimeBestEffort](#parsedatetimebesteffort) işlenemeyen bir tarih biçimiyle karşılaştığında null döndürmesi dışında. + +## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} + +İçin aynı [parseDateTimeBestEffort](#parsedatetimebesteffort) bunun dışında, işlenemeyen bir tarih biçimiyle karşılaştığında sıfır tarih veya sıfır tarih saati döndürür. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/tr/sql_reference/functions/url_functions.md b/docs/tr/sql_reference/functions/url_functions.md new file mode 100644 index 00000000000..47f127f1667 --- /dev/null +++ b/docs/tr/sql_reference/functions/url_functions.md @@ -0,0 +1,209 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 54 +toc_title: "URL'ler ile \xE7al\u0131\u015Fma" +--- + +# URL'ler ile çalışmak için işlevler {#functions-for-working-with-urls} + +Tüm bu işlevler RFC'Yİ takip etmez. Geliştirilmiş performans için maksimum derecede basitleştirilmişlerdir. + +## Bir URL'nin bölümlerini Ayıklayan işlevler {#functions-that-extract-parts-of-a-url} + +İlgili bölüm bir URL'de yoksa, boş bir dize döndürülür. + +### protokol {#protocol} + +Protokolü bir URL'den ayıklar. + +Examples of typical returned values: http, https, ftp, mailto, tel, magnet… + +### etki {#domain} + +Ana bilgisayar adını bir URL'den ayıklar. + +``` sql +domain(url) +``` + +**Parametre** + +- `url` — URL. Type: [Dize](../../sql_reference/data_types/string.md). + +URL, bir şema ile veya şema olmadan belirtilebilir. Örnekler: + +``` text +svn+ssh://some.svn-hosting.com:80/repo/trunk +some.svn-hosting.com:80/repo/trunk +https://yandex.com/time/ +``` + +Bu örnekler için, `domain` işlev aşağıdaki sonuçları döndürür: + +``` text +some.svn-hosting.com +some.svn-hosting.com +yandex.com +``` + +**Döndürülen değerler** + +- Adı ana. ClickHouse giriş dizesini bir URL olarak ayrıştırırsa. +- Boş dize. ClickHouse giriş dizesini bir URL olarak ayrıştıramazsa. + +Tür: `String`. 
+ +**Örnek** + +``` sql +SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk') +``` + +``` text +┌─domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')─┐ +│ some.svn-hosting.com │ +└────────────────────────────────────────────────────────┘ +``` + +### domainWithoutWWW {#domainwithoutwww} + +Etki alanını döndürür ve birden fazla kaldırır ‘www.’ başlangıcına, eğer var dan. + +### topLevelDomain {#topleveldomain} + +Üst düzey etki alanını bir URL'den ayıklar. + +``` sql +topLevelDomain(url) +``` + +**Parametre** + +- `url` — URL. Type: [Dize](../../sql_reference/data_types/string.md). + +URL, bir şema ile veya şema olmadan belirtilebilir. Örnekler: + +``` text +svn+ssh://some.svn-hosting.com:80/repo/trunk +some.svn-hosting.com:80/repo/trunk +https://yandex.com/time/ +``` + +**Döndürülen değerler** + +- Etki alanı adı. ClickHouse giriş dizesini bir URL olarak ayrıştırırsa. +- Boş dize. ClickHouse giriş dizesini bir URL olarak ayrıştıramazsa. + +Tür: `String`. + +**Örnek** + +``` sql +SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') +``` + +``` text +┌─topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')─┐ +│ com │ +└────────────────────────────────────────────────────────────────────┘ +``` + +### firstSignificantSubdomain {#firstsignificantsubdomain} + +Ret theur thens the “first significant subdomain”. Bu, Yandex'e özgü standart olmayan bir kavramdır.Metrica. İlk önemli alt etki alanı ise ikinci düzey bir etki alanıdır ‘com’, ‘net’, ‘org’, veya ‘co’. Aksi takdirde, üçüncü düzey bir alandır. Mesela, `firstSignificantSubdomain (‘https://news.yandex.ru/’) = ‘yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’) = ‘yandex’`. Listesi “insignificant” ikinci düzey etki alanları ve diğer uygulama ayrıntıları gelecekte değişebilir. + +### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} + +En üst düzey alt etki alanlarını içeren etki alanının bir bölümünü döndürür. “first significant subdomain” (yukarıdaki açıklamaya bakınız). + +Mesela, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`. + +### yol {#path} + +Yolu döndürür. Örnek: `/top/news.html` Yol sorgu dizesini içermez. + +### pathFull {#pathfull} + +Yukarıdaki ile aynı, ancak sorgu dizesi ve parça dahil. Örnek: / top / haberler.html?Sayfa = 2 \# yorumlar + +### queryString {#querystring} + +Sorgu dizesini döndürür. Örnek: Sayfa = 1 & lr = 213. sorgu dizesi, ilk soru işaretinin yanı sıra \# ve \# sonrası her şeyi içermez. + +### parça {#fragment} + +Parça tanımlayıcısını döndürür. fragment ilk karma sembolü içermez. + +### queryStringAndFragment {#querystringandfragment} + +Sorgu dizesini ve parça tanımlayıcısını döndürür. Örnek: Sayfa = 1\#29390. + +### extractURLParameter (URL, isim) {#extracturlparameterurl-name} + +Değerini döndürür ‘name’ varsa, URL'DEKİ parametre. Aksi takdirde, boş bir dize. Bu ada sahip birçok parametre varsa, ilk oluşumu döndürür. Bu işlev, parametre adının URL'de geçirilen bağımsız değişkenle aynı şekilde kodlandığı varsayımı altında çalışır. + +### extractURLParameters (URL) {#extracturlparametersurl} + +Bir dizi döndürür name = URL parametrelerine karşılık gelen değer dizeleri. Değerler hiçbir şekilde deşifre edilmez. + +### extractURLParameterNames(URL) {#extracturlparameternamesurl} + +URL parametrelerinin adlarına karşılık gelen bir dizi ad dizesi döndürür. Değerler hiçbir şekilde deşifre edilmez. + +### URLHierarchy(URL) {#urlhierarchyurl} + +Sonunda/,? 
simgeleriyle kesilen URL'yi içeren bir dizi döndürür yol ve sorgu dizesinde. Ardışık ayırıcı karakterler bir olarak sayılır. Kesim, tüm ardışık ayırıcı karakterlerden sonra pozisyonda yapılır. + +### URLPathHierarchy(URL) {#urlpathhierarchyurl} + +Yukarıdaki ile aynı, ancak sonuçta protokol ve ana bilgisayar olmadan. / Eleman (kök) dahil değildir. Örnek: işlev, yandex'te URL'yi ağaç raporları uygulamak için kullanılır. Ölçü. + +``` text +URLPathHierarchy('https://example.com/browse/CONV-6788') = +[ + '/browse/', + '/browse/CONV-6788' +] +``` + +### decodeURLComponent (URL) {#decodeurlcomponenturl} + +Çözülmüş URL'yi döndürür. +Örnek: + +``` sql +SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS DecodedURL; +``` + +``` text +┌─DecodedURL─────────────────────────────┐ +│ http://127.0.0.1:8123/?query=SELECT 1; │ +└────────────────────────────────────────┘ +``` + +## URL'nin bir bölümünü kaldıran işlevler {#functions-that-remove-part-of-a-url} + +URL'de benzer bir şey yoksa, URL değişmeden kalır. + +### cutWWW {#cutwww} + +Birden fazla kaldırır ‘www.’ varsa, URL'nin etki alanının başından itibaren. + +### cutQueryString {#cutquerystring} + +Sorgu dizesini kaldırır. Soru işareti de kaldırılır. + +### cutFragment {#cutfragment} + +Parça tanımlayıcısını kaldırır. Sayı işareti de kaldırılır. + +### cutQueryStringAndFragment {#cutquerystringandfragment} + +Sorgu dizesini ve parça tanımlayıcısını kaldırır. Soru işareti ve sayı işareti de kaldırılır. + +### cutURLParameter (URL, isim) {#cuturlparameterurl-name} + +Kaldırır ‘name’ Varsa URL parametresi. Bu işlev, parametre adının URL'de geçirilen bağımsız değişkenle aynı şekilde kodlandığı varsayımı altında çalışır. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/tr/sql_reference/functions/uuid_functions.md b/docs/tr/sql_reference/functions/uuid_functions.md new file mode 100644 index 00000000000..93969dd4640 --- /dev/null +++ b/docs/tr/sql_reference/functions/uuid_functions.md @@ -0,0 +1,122 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 53 +toc_title: "UUID ile \xE7al\u0131\u015Fma" +--- + +# UUID ile çalışmak için fonksiyonlar {#functions-for-working-with-uuid} + +UUID ile çalışmak için işlevler aşağıda listelenmiştir. + +## generateuuıdv4 {#uuid-function-generate} + +Üretir [UUID](../../sql_reference/data_types/uuid.md) -den [sürüm 4](https://tools.ietf.org/html/rfc4122#section-4.4). + +``` sql +generateUUIDv4() +``` + +**Döndürülen değer** + +UUID türü değeri. + +**Kullanım örneği** + +Bu örnek, UUID türü sütunuyla bir tablo oluşturma ve tabloya bir değer ekleme gösterir. + +``` sql +CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog + +INSERT INTO t_uuid SELECT generateUUIDv4() + +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┐ +│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 │ +└──────────────────────────────────────┘ +``` + +## toUUİD (x) {#touuid-x} + +Dize türü değerini UUID türüne dönüştürür. + +``` sql +toUUID(String) +``` + +**Döndürülen değer** + +UUID türü değeri. 
+ +**Kullanım örneği** + +``` sql +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid +``` + +``` text +┌─────────────────────────────────uuid─┐ +│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │ +└──────────────────────────────────────┘ +``` + +## UUİDStringToNum {#uuidstringtonum} + +Biçiminde 36 karakter içeren bir dize kabul eder `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` ve bir bayt kümesi olarak döndürür [FixedString (16)](../../sql_reference/data_types/fixedstring.md). + +``` sql +UUIDStringToNum(String) +``` + +**Döndürülen değer** + +FixedString (16) + +**Kullanım örnekleri** + +``` sql +SELECT + '612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid, + UUIDStringToNum(uuid) AS bytes +``` + +``` text +┌─uuid─────────────────────────────────┬─bytes────────────┐ +│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │ +└──────────────────────────────────────┴──────────────────┘ +``` + +## UUİDNumToString {#uuidnumtostring} + +Kabul eder bir [FixedString (16)](../../sql_reference/data_types/fixedstring.md) değer ve metin biçiminde 36 karakter içeren bir dize döndürür. + +``` sql +UUIDNumToString(FixedString(16)) +``` + +**Döndürülen değer** + +Dize. + +**Kullanım örneği** + +``` sql +SELECT + 'a/<@];!~p{jTj={)' AS bytes, + UUIDNumToString(toFixedString(bytes, 16)) AS uuid +``` + +``` text +┌─bytes────────────┬─uuid─────────────────────────────────┐ +│ a/<@];!~p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ +└──────────────────┴──────────────────────────────────────┘ +``` + +## Ayrıca bakınız {#see-also} + +- [dictGetUUİD](ext_dict_functions.md#ext_dict_functions-other) + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/tr/sql_reference/functions/ym_dict_functions.md b/docs/tr/sql_reference/functions/ym_dict_functions.md new file mode 100644 index 00000000000..46384107029 --- /dev/null +++ b/docs/tr/sql_reference/functions/ym_dict_functions.md @@ -0,0 +1,155 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 59 +toc_title: "Yandex ile \xE7al\u0131\u015Fmak.Metrica S\xF6zl\xFCkleri" +--- + +# Yandex ile çalışmak için fonksiyonlar.Metrica sözlükleri {#functions-for-working-with-yandex-metrica-dictionaries} + +Aşağıdaki işlevlerin çalışması için, sunucu yapılandırmasının tüm Yandex'i almak için yolları ve adresleri belirtmesi gerekir.Metrica sözlükler. Sözlükler, bu işlevlerden herhangi birinin ilk çağrısında yüklenir. Başvuru listeleri yüklenemiyorsa, bir özel durum atılır. + +Başvuru listeleri oluşturma hakkında daha fazla bilgi için bölüme bakın “Dictionaries”. + +## Çoklu geobazlar {#multiple-geobases} + +ClickHouse, belirli bölgelerin hangi ülkelere ait olduğu konusunda çeşitli perspektifleri desteklemek için aynı anda birden fazla alternatif jeobaz (bölgesel hiyerarşiler) ile çalışmayı destekler. + +Bu ‘clickhouse-server’ config, dosyayı bölgesel hiyerarşi ile belirtir::`/opt/geo/regions_hierarchy.txt` + +Bu dosyanın yanı sıra, yakındaki \_ sembolüne ve isme eklenen herhangi bir sonek (dosya uzantısından önce) olan dosyaları da arar. +Örneğin, dosyayı da bulacaktır `/opt/geo/regions_hierarchy_ua.txt` varsa. + +`ua` sözlük anahtarı denir. Soneksiz bir sözlük için anahtar boş bir dizedir. + +Tüm sözlükler çalışma zamanında yeniden yüklenir (buıltın\_dıctıonarıes\_reload\_ınterval yapılandırma parametresinde tanımlandığı gibi belirli sayıda saniyede bir kez veya varsayılan olarak saatte bir kez). 
Ancak, sunucu başladığında kullanılabilir sözlüklerin listesi bir kez tanımlanır. + +All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase. +Örnek: + +``` sql +regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt +regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt +regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt +``` + +### regionToCity (id \[, geobase\]) {#regiontocityid-geobase} + +Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0. + +### regionToArea (id \[, geobase\]) {#regiontoareaid-geobase} + +Bir bölgeyi bir alana dönüştürür (geobase içinde 5 yazın). Diğer her şekilde, bu işlev aynıdır ‘regionToCity’. + +``` sql +SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua')) +FROM system.numbers +LIMIT 15 +``` + +``` text +┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐ +│ │ +│ Moscow and Moscow region │ +│ St. Petersburg and Leningrad region │ +│ Belgorod region │ +│ Ivanovsk region │ +│ Kaluga region │ +│ Kostroma region │ +│ Kursk region │ +│ Lipetsk region │ +│ Orlov region │ +│ Ryazan region │ +│ Smolensk region │ +│ Tambov region │ +│ Tver region │ +│ Tula region │ +└──────────────────────────────────────────────────────┘ +``` + +### regionToDistrict (id \[, geobase\]) {#regiontodistrictid-geobase} + +Bir bölgeyi federal bir bölgeye dönüştürür (geobase içinde tip 4). Diğer her şekilde, bu işlev aynıdır ‘regionToCity’. + +``` sql +SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua')) +FROM system.numbers +LIMIT 15 +``` + +``` text +┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐ +│ │ +│ Central federal district │ +│ Northwest federal district │ +│ South federal district │ +│ North Caucases federal district │ +│ Privolga federal district │ +│ Ural federal district │ +│ Siberian federal district │ +│ Far East federal district │ +│ Scotland │ +│ Faroe Islands │ +│ Flemish region │ +│ Brussels capital region │ +│ Wallonia │ +│ Federation of Bosnia and Herzegovina │ +└──────────────────────────────────────────────────────────┘ +``` + +### regionToCountry (ıd \[, geobase\]) {#regiontocountryid-geobase} + +Bir bölgeyi bir ülkeye dönüştürür. Diğer her şekilde, bu işlev aynıdır ‘regionToCity’. +Örnek: `regionToCountry(toUInt32(213)) = 225` Moskova'yı (213) Rusya'ya (225) dönüştürür. + +### regionToContinent (id \[, geobase\]) {#regiontocontinentid-geobase} + +Bir bölgeyi bir kıtaya dönüştürür. Diğer her şekilde, bu işlev aynıdır ‘regionToCity’. +Örnek: `regionToContinent(toUInt32(213)) = 10001` Moskova'yı (213) Avrasya'ya (10001) dönüştürür. + +### regionToTopContinent (\#regiontotopcontinent) {#regiontotopcontinent-regiontotopcontinent} + +Bölgenin hiyerarşisinde en yüksek kıtayı bulur. + +**Sözdizimi** + +``` sql +regionToTopContinent(id[, geobase]); +``` + +**Parametre** + +- `id` — Region ID from the Yandex geobase. [Uİnt32](../../sql_reference/data_types/int_uint.md). +- `geobase` — Dictionary key. See [Çoklu Geobazlar](#multiple-geobases). [Dize](../../sql_reference/data_types/string.md). İsteğe bağlı. + +**Döndürülen değer** + +- Üst düzey kıtanın tanımlayıcısı (bölgeler hiyerarşisine tırmandığınızda ikincisi). +- 0, yoksa. + +Tür: `UInt32`. 
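+
+Aşağıda varsayımsal bir kullanım taslağı verilmiştir (sonuç, yüklü geobaza bağlıdır; bu belgedeki `regionToContinent` örneğine göre 213 Moskova'yı, 10001 ise en üst düzey kıta olan Avrasya'yı temsil eder):
+
+``` sql
+SELECT regionToTopContinent(toUInt32(213)) -- geobaz yüklüyse 10001 (Avrasya) döndürmesi beklenir
+```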
+
+### regionToPopulation (id \[, geobase\]) {#regiontopopulationid-geobase}
+
+Bir bölge için nüfusu alır.
+Nüfus, geobase içeren dosyalarda kayıtlı olabilir. Bkz. “External dictionaries” bölümü.
+Bölge için nüfus kaydedilmezse, 0 döndürür.
+Yandex geobase'de, nüfus alt bölgeler için kaydedilebilir, ancak üst bölgeler için kaydedilemez.
+
+### regionIn (lhs, rhs \[, geobase\]) {#regioninlhs-rhs-geobase}
+
+Bir ‘lhs’ bölgesinin bir ‘rhs’ bölgesine ait olup olmadığını denetler. Aitse 1'e eşit bir UInt8 sayısı, ait değilse 0 döndürür.
+The relationship is reflexive – any region also belongs to itself.
+
+### regionHierarchy (id \[, geobase\]) {#regionhierarchyid-geobase}
+
+Accepts a UInt32 number – the region ID from the Yandex geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain.
+Örnek: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`.
+
+### regionToName (id \[, lang\]) {#regiontonameid-lang}
+
+Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn't exist, an empty string is returned.
+
+`ua` ve `uk` her ikisi de Ukraynaca anlamına gelir.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/)
diff --git a/docs/tr/sql_reference/index.md b/docs/tr/sql_reference/index.md
new file mode 100644
index 00000000000..25b886b9c9d
--- /dev/null
+++ b/docs/tr/sql_reference/index.md
@@ -0,0 +1,18 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "SQL ba\u015Fvurusu"
+toc_hidden: true
+toc_priority: 28
+toc_title: "gizlenmi\u015F"
+---
+
+# SQL başvurusu {#sql-reference}
+
+- [SELECT](statements/select.md)
+- [INSERT INTO](statements/insert_into.md)
+- [CREATE](statements/create.md)
+- [ALTER](statements/alter.md#query_language_queries_alter)
+- [Diğer sorgu türleri](statements/misc.md)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/)
diff --git a/docs/tr/sql_reference/operators.md b/docs/tr/sql_reference/operators.md
new file mode 100644
index 00000000000..63100e9e9ea
--- /dev/null
+++ b/docs/tr/sql_reference/operators.md
@@ -0,0 +1,277 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 37
+toc_title: "Operat\xF6rler"
+---
+
+# Operatörler {#operators}
+
+ClickHouse, sorgu ayrıştırma aşamasında işleçleri, öncelik ve ilişkilendirme (associativity) kurallarına göre karşılık gelen işlevlere dönüştürür.
+
+## Erişim Operatörleri {#access-operators}
+
+`a[N]` – Access to an element of an array. The `arrayElement(a, N)` işlev.
+
+`a.N` – Access to a tuple element. The `tupleElement(a, N)` işlev.
+
+## Sayısal Olumsuzlama Operatörü {#numeric-negation-operator}
+
+`-a` – The `negate (a)` işlev.
+
+## Çarpma ve bölme operatörleri {#multiplication-and-division-operators}
+
+`a * b` – The `multiply (a, b)` işlev.
+
+`a / b` – The `divide(a, b)` işlev.
+
+`a % b` – The `modulo(a, b)` işlev.
+
+## Toplama ve çıkarma operatörleri {#addition-and-subtraction-operators}
+
+`a + b` – The `plus(a, b)` işlev.
+
+`a - b` – The `minus(a, b)` işlev.
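+
+Örneğin, aşağıdaki taslak sorgu, bir operatörün karşılık gelen işlevle birebir aynı sonucu verdiğini gösterir:
+
+``` sql
+SELECT
+    7 % 3 AS operator_sonucu,      -- ayrıştırma sırasında modulo(7, 3) çağrısına dönüştürülür
+    modulo(7, 3) AS islev_sonucu   -- her iki ifade de 1 döndürür
+```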
+ +## Karşılaştırma Operatörleri {#comparison-operators} + +`a = b` – The `equals(a, b)` işlev. + +`a == b` – The `equals(a, b)` işlev. + +`a != b` – The `notEquals(a, b)` işlev. + +`a <> b` – The `notEquals(a, b)` işlev. + +`a <= b` – The `lessOrEquals(a, b)` işlev. + +`a >= b` – The `greaterOrEquals(a, b)` işlev. + +`a < b` – The `less(a, b)` işlev. + +`a > b` – The `greater(a, b)` işlev. + +`a LIKE s` – The `like(a, b)` işlev. + +`a NOT LIKE s` – The `notLike(a, b)` işlev. + +`a BETWEEN b AND c` – The same as `a >= b AND a <= c`. + +`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. + +## Veri kümeleriyle çalışmak için operatörler {#operators-for-working-with-data-sets} + +*Görmek [Operatör İNLERDE](statements/select.md#select-in-operators).* + +`a IN ...` – The `in(a, b)` işlev. + +`a NOT IN ...` – The `notIn(a, b)` işlev. + +`a GLOBAL IN ...` – The `globalIn(a, b)` işlev. + +`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` işlev. + +## Tarih ve Saatlerle çalışmak için operatörler {#operators-datetime} + +### EXTRACT {#operator-extract} + +``` sql +EXTRACT(part FROM date); +``` + +Belirli bir tarihten parçaları ayıklayın. Örneğin, belirli bir tarihten bir ay veya bir zamandan bir saniye alabilirsiniz. + +Bu `part` parametre almak için tarihin hangi bölümünü belirtir. Aşağıdaki değerler kullanılabilir: + +- `DAY` — The day of the month. Possible values: 1–31. +- `MONTH` — The number of a month. Possible values: 1–12. +- `YEAR` — The year. +- `SECOND` — The second. Possible values: 0–59. +- `MINUTE` — The minute. Possible values: 0–59. +- `HOUR` — The hour. Possible values: 0–23. + +Bu `part` parametre büyük / küçük harf duyarsızdır. + +Bu `date` parametre, işlenecek tarihi veya saati belirtir. Ya [Tarihli](../sql_reference/data_types/date.md) veya [DateTime](../sql_reference/data_types/datetime.md) türü desteklenir. + +Örnekler: + +``` sql +SELECT EXTRACT(DAY FROM toDate('2017-06-15')); +SELECT EXTRACT(MONTH FROM toDate('2017-06-15')); +SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); +``` + +Aşağıdaki örnekte bir tablo oluşturuyoruz ve içine bir değer ekliyoruz `DateTime` tür. + +``` sql +CREATE TABLE test.Orders +( + OrderId UInt64, + OrderName String, + OrderDate DateTime +) +ENGINE = Log; +``` + +``` sql +INSERT INTO test.Orders VALUES (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44')); +``` + +``` sql +SELECT + toYear(OrderDate) AS OrderYear, + toMonth(OrderDate) AS OrderMonth, + toDayOfMonth(OrderDate) AS OrderDay, + toHour(OrderDate) AS OrderHour, + toMinute(OrderDate) AS OrderMinute, + toSecond(OrderDate) AS OrderSecond +FROM test.Orders; +``` + +``` text +┌─OrderYear─┬─OrderMonth─┬─OrderDay─┬─OrderHour─┬─OrderMinute─┬─OrderSecond─┐ +│ 2008 │ 10 │ 11 │ 13 │ 23 │ 44 │ +└───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ +``` + +Daha fazla örnek görebilirsiniz [testler](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). + +### INTERVAL {#operator-interval} + +Oluşturur bir [Aralıklı](../sql_reference/data_types/special_data_types/interval.md)- aritmetik işlemlerde kullanılması gereken tip değeri [Tarihli](../sql_reference/data_types/date.md) ve [DateTime](../sql_reference/data_types/datetime.md)- tip değerleri. + +Aralık türleri: +- `SECOND` +- `MINUTE` +- `HOUR` +- `DAY` +- `WEEK` +- `MONTH` +- `QUARTER` +- `YEAR` + +!!! warning "Uyarıcı" + Farklı tiplere sahip aralıklar birleştirilemez. Gibi ifadeler kullanamazsınız `INTERVAL 4 DAY 1 HOUR`. 
Aralıkların, örneğin aralığın en küçük birimine eşit veya daha küçük olan birimlerdeki aralıkları belirtin, `INTERVAL 25 HOUR`. Aşağıdaki örnekte olduğu gibi ardışık işlemleri kullanabilirsiniz. + +Örnek: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +**Ayrıca Bakınız** + +- [Aralıklı](../sql_reference/data_types/special_data_types/interval.md) veri türü +- [toİnterval](../sql_reference/functions/type_conversion_functions.md#function-tointerval) tip dönüştürme işlevleri + +## Mantıksal Olumsuzlama Operatörü {#logical-negation-operator} + +`NOT a` – The `not(a)` işlev. + +## Mantıksal ve operatör {#logical-and-operator} + +`a AND b` – The`and(a, b)` işlev. + +## Mantıksal veya operatör {#logical-or-operator} + +`a OR b` – The `or(a, b)` işlev. + +## Koşullu Operatör {#conditional-operator} + +`a ? b : c` – The `if(a, b, c)` işlev. + +Not: + +Koşullu işleç B ve c değerlerini hesaplar, ardından a koşulunun karşılanıp karşılanmadığını kontrol eder ve ardından karşılık gelen değeri döndürür. Eğer `b` veya `C` is an [arrayJoin()](../sql_reference/functions/array_join.md#functions_arrayjoin) işlev, her satır ne olursa olsun çoğaltılır “a” koşul. + +## Koşullu İfade {#operator_case} + +``` sql +CASE [x] + WHEN a THEN b + [WHEN ... THEN ...] + [ELSE c] +END +``` + +Eğer `x` belirtilen sonra `transform(x, [a, ...], [b, ...], c)` function is used. Otherwise – `multiIf(a, b, ..., c)`. + +Eğer herhangi bir `ELSE c` ifadedeki yan tümce, varsayılan değer `NULL`. + +Bu `transform` fonksiyonu ile çalışmıyor `NULL`. + +## Birleştirme Operatörü {#concatenation-operator} + +`s1 || s2` – The `concat(s1, s2) function.` + +## Lambda Oluşturma Operatörü {#lambda-creation-operator} + +`x -> expr` – The `lambda(x, expr) function.` + +Parantez oldukları için aşağıdaki operatörler bir önceliğe sahip değildir: + +## Dizi Oluşturma Operatörü {#array-creation-operator} + +`[x1, ...]` – The `array(x1, ...) function.` + +## Tuple Oluşturma Operatörü {#tuple-creation-operator} + +`(x1, x2, ...)` – The `tuple(x2, x2, ...) function.` + +## İlişkisellik {#associativity} + +Tüm ikili operatörler ilişkisellikten ayrıldı. Mesela, `1 + 2 + 3` dönüştür toülür `plus(plus(1, 2), 3)`. +Bazen bu beklediğiniz gibi çalışmaz. Mesela, `SELECT 4 > 2 > 3` 0 ile sonuç willlanır. + +Verimlilik için, `and` ve `or` işlevler herhangi bir sayıda bağımsız değişkeni kabul eder. İlgili zincirler `AND` ve `OR` operatörler bu işlevlerin tek bir çağrısına dönüştürülür. + +## İçin kontrol `NULL` {#checking-for-null} + +ClickHouse destekler `IS NULL` ve `IS NOT NULL` operatörler. + +### IS NULL {#operator-is-null} + +- İçin [Nullable](../sql_reference/data_types/nullable.md) türü değerleri `IS NULL` operatör döner: + - `1` değeri ise `NULL`. + - `0` başka. +- Diğer değerler için, `IS NULL` operatör her zaman döner `0`. + + + +``` sql +SELECT x+100 FROM t_null WHERE y IS NULL +``` + +``` text +┌─plus(x, 100)─┐ +│ 101 │ +└──────────────┘ +``` + +### IS NOT NULL {#is-not-null} + +- İçin [Nullable](../sql_reference/data_types/nullable.md) türü değerleri `IS NOT NULL` operatör döner: + - `0` değeri ise `NULL`. + - `1` başka. +- Diğer değerler için, `IS NOT NULL` operatör her zaman döner `1`. 
+ + + +``` sql +SELECT * FROM t_null WHERE y IS NOT NULL +``` + +``` text +┌─x─┬─y─┐ +│ 2 │ 3 │ +└───┴───┘ +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/operators/) diff --git a/docs/tr/sql_reference/statements/alter.md b/docs/tr/sql_reference/statements/alter.md new file mode 100644 index 00000000000..b61a3784af9 --- /dev/null +++ b/docs/tr/sql_reference/statements/alter.md @@ -0,0 +1,504 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 36 +toc_title: ALTER +--- + +## ALTER {#query_language_queries_alter} + +Bu `ALTER` sorgu yalnızca için desteklenir `*MergeTree` tablo gibi `Merge`ve`Distributed`. Sorgunun çeşitli varyasyonları vardır. + +### Sütun Manipülasyonları {#column-manipulations} + +Tablo yapısını değiştirme. + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ... +``` + +Sorguda, bir veya daha fazla virgülle ayrılmış eylemlerin bir listesini belirtin. +Her eylem bir sütun üzerinde bir işlemdir. + +Aşağıdaki eylemler desteklenir: + +- [ADD COLUMN](#alter_add-column) — Adds a new column to the table. +- [DROP COLUMN](#alter_drop-column) — Deletes the column. +- [CLEAR COLUMN](#alter_clear-column) — Resets column values. +- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column. +- [MODIFY COLUMN](#alter_modify-column) — Changes column's type, default expression and TTL. + +Bu eylemler aşağıda ayrıntılı olarak açıklanmıştır. + +#### ADD COLUMN {#alter_add-column} + +``` sql +ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after] +``` + +Belirtilen tabloya yeni bir sütun ekler `name`, `type`, [`codec`](create.md#codecs) ve `default_expr` (bkz [Varsayılan ifadeler](create.md#create-default-values)). + +Eğer... `IF NOT EXISTS` yan tümcesi dahil, sütun zaten varsa sorgu bir hata döndürmez. Belirtir specifyseniz `AFTER name_after` (başka bir sütunun adı), sütun tablo sütunları listesinde belirtilen sonra eklenir. Aksi takdirde, sütun tablonun sonuna eklenir. Bir tablonun başına bir sütun eklemek için bir yol olduğunu unutmayın. Bir eylem zinciri için, `name_after` önceki eylemlerden birine eklenen bir sütunun adı olabilir. + +Bir sütun eklemek, verilerle herhangi bir işlem yapmadan tablo yapısını değiştirir. Sonra veriler diskte görünmüyor `ALTER`. Tablodan okurken bir sütun için veri eksikse, varsayılan değerlerle doldurulur (varsa, varsayılan ifadeyi gerçekleştirerek veya sıfır veya boş dizeler kullanarak). Sütun, veri parçalarını birleştirdikten sonra diskte görünür (bkz. [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)). + +Bu yaklaşım bize tamamlamak için izin verir `ALTER` eski verilerin hacmini arttırmadan anında sorgulayın. + +Örnek: + +``` sql +ALTER TABLE visits ADD COLUMN browser String AFTER user_id +``` + +#### DROP COLUMN {#alter_drop-column} + +``` sql +DROP COLUMN [IF EXISTS] name +``` + +Sütun adı ile siler `name`. Eğer... `IF EXISTS` yan tümcesi belirtilir, sütun yoksa sorgu bir hata döndürmez. + +Dosya sisteminden veri siler. Bu, tüm dosyaları sildiğinden, sorgu neredeyse anında tamamlanır. + +Örnek: + +``` sql +ALTER TABLE visits DROP COLUMN browser +``` + +#### CLEAR COLUMN {#alter_clear-column} + +``` sql +CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name +``` + +Belirtilen bölüm için bir sütundaki tüm verileri sıfırlar. Bölümdeki bölüm adını ayarlama hakkında daha fazla bilgi edinin [Bölüm ifadesi nasıl belirlenir](#alter-how-to-specify-part-expr). 
+ +Eğer... `IF EXISTS` yan tümcesi belirtilir, sütun yoksa sorgu bir hata döndürmez. + +Örnek: + +``` sql +ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() +``` + +#### COMMENT COLUMN {#alter_comment-column} + +``` sql +COMMENT COLUMN [IF EXISTS] name 'comment' +``` + +Sütuna bir yorum ekler. Eğer... `IF EXISTS` yan tümcesi belirtilir, sütun yoksa sorgu bir hata döndürmez. + +Her sütunun bir yorumu olabilir. Sütun için bir yorum zaten varsa, yeni bir yorum önceki yorumun üzerine yazar. + +Yorumlar saklanır `comment_expression` tarafından döndürülen sütun [DESCRIBE TABLE](misc.md#misc-describe-table) sorgu. + +Örnek: + +``` sql +ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' +``` + +#### MODIFY COLUMN {#alter_modify-column} + +``` sql +MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] +``` + +Bu sorgu değişiklikleri `name` sütun özellikleri: + +- Tür + +- Varsayılan ifade + +- TTL + + For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl). + +Eğer... `IF EXISTS` yan tümcesi belirtilir, sütun yoksa sorgu bir hata döndürmez. + +Türü değiştirirken, değerler sanki [toType](../../sql_reference/functions/type_conversion_functions.md) fonksiyonlar onlara uygulandı. Yalnızca varsayılan ifade değiştirilirse, sorgu karmaşık bir şey yapmaz ve neredeyse anında tamamlanır. + +Örnek: + +``` sql +ALTER TABLE visits MODIFY COLUMN browser Array(String) +``` + +Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time. + +Birkaç işlem aşaması vardır: + +- Geçici (yeni) dosyaları değiştirilmiş verilerle hazırlama. +- Eski dosyaları yeniden adlandırma. +- Geçici (yeni) dosyaları eski adlara yeniden adlandırma. +- Eski dosyaları silme. + +Sadece ilk aşama zaman alır. Bu aşamada bir hata varsa, veriler değişmez. +Ardışık aşamalardan biri sırasında bir hata varsa, veriler el ile geri yüklenebilir. Eski dosyalar dosya sisteminden silindi, ancak yeni dosyaların verileri diske yazılmadı ve kaybolduysa istisnadır. + +Bu `ALTER` sütunları değiştirmek için sorgu çoğaltılır. Talimatlar ZooKeeper kaydedilir, daha sonra her kopya bunları uygular. Tüm `ALTER` sorgular aynı sırada çalıştırılır. Sorgu, diğer yinelemeler üzerinde tamamlanması uygun eylemleri bekler. Ancak, yinelenen bir tablodaki sütunları değiştirmek için bir sorgu kesilebilir ve tüm eylemler zaman uyumsuz olarak gerçekleştirilir. + +#### Sorgu sınırlamalarını değiştir {#alter-query-limitations} + +Bu `ALTER` sorgu oluşturmak ve iç içe veri yapıları, ancak tüm iç içe veri yapıları ayrı öğeleri (sütunlar) silmenizi sağlar. İç içe geçmiş bir veri yapısı eklemek için, aşağıdaki gibi bir ada sahip sütunlar ekleyebilirsiniz `name.nested_name` ve türü `Array(T)`. İç içe geçmiş bir veri yapısı, noktadan önce aynı öneki olan bir ada sahip birden çok dizi sütununa eşdeğerdir. + +Birincil anahtardaki veya örnekleme anahtarındaki sütunları silmek için destek yoktur. `ENGINE` ifade). Birincil anahtarda bulunan sütunların türünü değiştirmek, yalnızca bu değişiklik verilerin değiştirilmesine neden olmazsa mümkündür (örneğin, bir numaraya değer eklemenize veya bir türden değiştirmenize izin verilir `DateTime` -e doğru `UInt32`). + +Eğer... `ALTER` sorgu, ihtiyacınız olan tablo değişikliklerini yapmak için yeterli değildir, yeni bir tablo oluşturabilir, verileri kullanarak kopyalayabilirsiniz. 
[INSERT SELECT](insert_into.md#insert_query_insert-select) sorgusu ile verileri aktarın, ardından tabloları [RENAME](misc.md#misc_operations-rename) sorgusu ile değiştirin ve eski tabloyu silin. `INSERT SELECT` sorgusuna alternatif olarak [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) aracını kullanabilirsiniz.
+
+`ALTER` sorgusu, tablo için tüm okuma ve yazma işlemlerini engeller. Başka bir deyişle, `ALTER` sorgusu sırasında uzun bir `SELECT` çalışıyorsa, `ALTER` sorgusu onun tamamlanmasını bekler. Aynı zamanda, aynı tabloya gelen tüm yeni sorgular bu `ALTER` çalışırken bekler.
+
+Verileri kendileri saklamayan tablolar için (örneğin `Merge` ve `Distributed`), `ALTER` sadece tablo yapısını değiştirir ve alt tabloların yapısını değiştirmez. Örneğin, bir `Distributed` tablo için ALTER çalıştırırken, ayrıca tüm uzak sunuculardaki tablolar için de `ALTER` çalıştırmanız gerekir.
+
+### Anahtar İfadelerle Manipülasyonlar {#manipulations-with-key-expressions}
+
+Aşağıdaki komut desteklenir:
+
+``` sql
+MODIFY ORDER BY new_expression
+```
+
+Sadece [`MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) ailesindeki tablolar için çalışır ([çoğaltılmış](../../engines/table_engines/mergetree_family/replication.md) tablolar dahil). Komut, tablonun [sıralama anahtarını](../../engines/table_engines/mergetree_family/mergetree.md) `new_expression` olarak değiştirir (bir ifade veya ifadelerden oluşan bir tuple). Birincil anahtar aynı kalır.
+
+Komut, yalnızca meta verileri değiştirmesi anlamında hafiftir. Veri parçalarındaki satırların sıralama anahtarı ifadesine göre sıralı kalması özelliğini korumak için, sıralama anahtarına var olan sütunları içeren ifadeler ekleyemezsiniz (yalnızca aynı `ALTER` sorgusundaki `ADD COLUMN` komutuyla eklenen sütunlar eklenebilir).
+
+### Veri Atlama Endeksleri İle Manipülasyonlar {#manipulations-with-data-skipping-indices}
+
+Sadece [`*MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) ailesindeki tablolar için çalışır ([çoğaltılmış](../../engines/table_engines/mergetree_family/replication.md) tablolar dahil). Aşağıdaki işlemler mevcuttur:
+
+- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Tablo meta verilerine dizin açıklaması ekler.
+
+- `ALTER TABLE [db].name DROP INDEX name` - Tablo meta verilerinden dizin açıklamasını kaldırır ve dizin dosyalarını diskten siler.
+
+Bu komutlar, yalnızca meta verileri değiştirdikleri veya dosyaları kaldırdıkları için hafiftir.
+Ayrıca çoğaltılırlar (dizin meta verileri ZooKeeper aracılığıyla eşitlenir).
+
+### Kısıtlamalar İle Manipülasyonlar {#manipulations-with-constraints}
+
+Daha fazla bilgi için bkz. [kısıtlamalar](create.md#constraints).
+
+Kısıtlamalar aşağıdaki sözdizimi kullanılarak eklenebilir veya silinebilir:
+
+``` sql
+ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
+ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
+```
+
+Kısıtlama ekleyen veya silen sorgular hemen işlenir; yalnızca tablodaki kısıtlama meta verilerini ekler veya kaldırırlar.
+
+Kısıtlama kontrolü, sonradan eklenen bir kısıtlama için mevcut veriler üzerinde *çalıştırılmaz*.
+
+Çoğaltılmış tablolardaki tüm değişiklikler ZooKeeper'a yayınlanır ve böylece diğer kopyalara uygulanır.
+
+### Bölümler ve parçalar ile manipülasyonlar {#alter_manipulations-with-partitions}
+
+Aşağıdaki işlemler [bölümler](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) ile kullanılabilir:
+
+- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` dizin ve unutun.
+- [DROP PARTITION](#alter_drop-partition) – Deletes a partition. +- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` tabloya dizin. +- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds. +- [REPLACE PARTITION](#alter_replace-partition) - Veri bölümünü bir tablodan diğerine kopyalar ve değiştirir. +- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) (\#alter\_move\_to\_table-partition) - veri bölümünü bir tablodan diğerine taşıyın. +- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) - Bir bölümdeki belirtilen sütunun değerini sıfırlar. +- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) - Bir bölümde belirtilen ikincil dizini sıfırlar. +- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition. +- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server. +- [MOVE PARTITION\|PART](#alter_move-partition) – Move partition/data part to another disk or volume. + + + +#### Bölüm ayırmak {\#alter\_detach-bölüm} {#detach-partition-alter-detach-partition} + +``` sql +ALTER TABLE table_name DETACH PARTITION partition_expr +``` + +Belirtilen bölüm için tüm verileri `detached` dizin. Sunucu, yok gibi ayrılmış veri Bölümü hakkında unutur. Sunucu, bu verileri siz yapana kadar bilmeyecektir. [ATTACH](#alter_attach-partition) sorgu. + +Örnek: + +``` sql +ALTER TABLE visits DETACH PARTITION 201901 +``` + +Bir bölümdeki bölüm ifadesini ayarlama hakkında bilgi edinin [Bölüm ifadesi nasıl belirlenir](#alter-how-to-specify-part-expr). + +Sorgu yürütüldükten sonra, veri ile istediğiniz her şeyi yapabilirsiniz `detached` directory — delete it from the file system, or just leave it. + +This query is replicated – it moves the data to the `detached` tüm kopyalarda dizin. Bu sorguyu yalnızca bir lider yinelemesinde yürütebileceğinizi unutmayın. Bir kopya bir lider olup olmadığını öğrenmek için `SELECT` sorgu için [sistem.yinelemeler](../../operations/system_tables.md#system_tables-replicas) Tablo. Alternatif olarak, bir yapmak daha kolaydır `DETACH` tüm yinelemelerde sorgu - tüm yinelemeler, lider yinelemesi dışında bir özel durum oluşturur. + +#### DROP PARTITION {#alter_drop-partition} + +``` sql +ALTER TABLE table_name DROP PARTITION partition_expr +``` + +Belirtilen bölümü tablodan siler. Bu sorgu bölümü etkin olarak etiketler ve verileri tamamen yaklaşık 10 dakika içinde siler. + +Bir bölümdeki bölüm ifadesini ayarlama hakkında bilgi edinin [Bölüm ifadesi nasıl belirlenir](#alter-how-to-specify-part-expr). + +The query is replicated – it deletes data on all replicas. + +#### DROP DETACHED PARTITION\|PART {#alter_drop-detached} + +``` sql +ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr +``` + +Belirtilen bölümü veya belirtilen bölümün tüm bölümlerini kaldırır `detached`. +Bir bölümdeki bölüm ifadesini ayarlama hakkında daha fazla bilgi edinin [Bölüm ifadesi nasıl belirlenir](#alter-how-to-specify-part-expr). + +#### ATTACH PARTITION\|PART {#alter_attach-partition} + +``` sql +ALTER TABLE table_name ATTACH PARTITION|PART partition_expr +``` + +Tablodan veri ekler `detached` dizin. Tüm bir bölüm veya ayrı bir bölüm için veri eklemek mümkündür. Örnekler: + +``` sql +ALTER TABLE visits ATTACH PARTITION 201901; +ALTER TABLE visits ATTACH PART 201901_2_2_0; +``` + +Bir bölümdeki bölüm ifadesini ayarlama hakkında daha fazla bilgi edinin [Bölüm ifadesi nasıl belirlenir](#alter-how-to-specify-part-expr). 
+ +Bu sorgu çoğaltılır. Çoğaltma başlatıcısı, veri olup olmadığını denetler. `detached` dizin. Veri varsa, sorgu bütünlüğünü denetler. Her şey doğruysa, sorgu verileri tabloya ekler. Diğer tüm yinelemeler, çoğaltma başlatıcısından verileri karşıdan yükleyin. + +Böylece veri koyabilirsiniz `detached` bir kopya üzerinde dizin ve `ALTER ... ATTACH` tüm yinelemelerde tabloya eklemek için sorgu. + +#### ATTACH PARTITION FROM {#alter_attach-partition-from} + +``` sql +ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 +``` + +Bu sorgu, veri bölümünü `table1` -e doğru `table2` exsisting için veri ekler `table2`. Verilerin silinmeyeceğini unutmayın `table1`. + +Sorgunun başarıyla çalışması için aşağıdaki koşulların karşılanması gerekir: + +- Her iki tablo da aynı yapıya sahip olmalıdır. +- Her iki tablo da aynı bölüm anahtarına sahip olmalıdır. + +#### REPLACE PARTITION {#alter_replace-partition} + +``` sql +ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 +``` + +Bu sorgu, veri bölümünü `table1` -e doğru `table2` ve mevcut bölümün yerini alır `table2`. Verilerin silinmeyeceğini unutmayın `table1`. + +Sorgunun başarıyla çalışması için aşağıdaki koşulların karşılanması gerekir: + +- Her iki tablo da aynı yapıya sahip olmalıdır. +- Her iki tablo da aynı bölüm anahtarına sahip olmalıdır. + +#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition} + +``` sql +ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest +``` + +Bu sorgu, veri bölümünü `table_source` -e doğru `table_dest` verileri silme ile `table_source`. + +Sorgunun başarıyla çalışması için aşağıdaki koşulların karşılanması gerekir: + +- Her iki tablo da aynı yapıya sahip olmalıdır. +- Her iki tablo da aynı bölüm anahtarına sahip olmalıdır. +- Her iki tablo da aynı motor ailesi olmalıdır. (çoğaltılmış veya çoğaltılmamış) +- Her iki tablo da aynı depolama ilkesine sahip olmalıdır. + +#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} + +``` sql +ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr +``` + +Bir bölümdeki belirtilen sütundaki tüm değerleri sıfırlar. Eğer... `DEFAULT` bir tablo oluştururken yan tümcesi belirlendi, bu sorgu sütun değerini belirtilen varsayılan değere ayarlar. + +Örnek: + +``` sql +ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902 +``` + +#### FREEZE PARTITION {#alter_freeze-partition} + +``` sql +ALTER TABLE table_name FREEZE [PARTITION partition_expr] +``` + +Bu sorgu, belirtilen bir bölümün yerel yedeğini oluşturur. Eğer... `PARTITION` yan tümcesi atlandı, sorgu aynı anda tüm bölümlerin yedeğini oluşturur. + +!!! note "Not" + Tüm yedekleme işlemi sunucuyu durdurmadan gerçekleştirilir. + +Eski tarz tablolar için bölüm adının önekini belirtebileceğinizi unutmayın (örneğin, ‘2019’)- daha sonra sorgu tüm ilgili bölümler için yedek oluşturur. Bir bölümdeki bölüm ifadesini ayarlama hakkında bilgi edinin [Bölüm ifadesi nasıl belirlenir](#alter-how-to-specify-part-expr). + +Yürütme sırasında, bir veri anlık görüntüsü için sorgu, bir tablo verilerine sabit bağlantılar oluşturur. Hardlinks dizine yerleştirilir `/var/lib/clickhouse/shadow/N/...`, nere: + +- `/var/lib/clickhouse/` yapılandırmada belirtilen çalışma ClickHouse dizinidir. +- `N` yedeklemenin artımlı sayısıdır. + +!!! 
note "Not" + Kullanıyorsanız [bir tablodaki veri depolama için disk kümesi](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes), bu `shadow/N` dizin tarafından eşleşen veri parçalarını depolamak, her diskte görünür `PARTITION` ifade. + +Dizinlerin aynı yapısı, içinde olduğu gibi yedek içinde oluşturulur `/var/lib/clickhouse/`. Sorgu gerçekleştirir ‘chmod’ tüm dosyalar için, onlara yazmayı yasaklamak. + +Yedeklemeyi oluşturduktan sonra, verileri `/var/lib/clickhouse/shadow/` uzak sunucuya ve sonra yerel sunucudan silin. Not `ALTER t FREEZE PARTITION` sorgu çoğaltılmaz. Yalnızca yerel sunucuda yerel bir yedekleme oluşturur. + +Sorgu neredeyse anında yedekleme oluşturur (ancak önce geçerli sorguları ilgili tabloya çalışmayı bitirmek için bekler). + +`ALTER TABLE t FREEZE PARTITION` tablo meta verilerini değil, yalnızca verileri kopyalar. Tablo meta verilerinin yedeğini almak için dosyayı kopyalayın `/var/lib/clickhouse/metadata/database/table.sql` + +Bir yedekten veri geri yüklemek için aşağıdakileri yapın: + +1. Yoksa tablo oluşturun. Sorguyu görüntülemek için kullanın .sql dosyası (değiştir `ATTACH` içinde ile `CREATE`). +2. Veri kopyalama `data/database/table/` yedekleme içindeki dizin `/var/lib/clickhouse/data/database/table/detached/` dizin. +3. Koşmak `ALTER TABLE t ATTACH PARTITION` verileri bir tabloya eklemek için sorgular. + +Yedeklemeden geri yükleme, sunucuyu durdurmayı gerektirmez. + +Yedekleme ve geri yükleme verileri hakkında daha fazla bilgi için bkz: [Veri Yedekleme](../../operations/backup.md) bölme. + +#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} + +``` sql +ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr +``` + +Sorgu benzer çalışır `CLEAR COLUMN`, ancak bir sütun verileri yerine bir dizini sıfırlar. + +#### FETCH PARTITION {#alter_fetch-partition} + +``` sql +ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' +``` + +Başka bir sunucudan bir bölüm indirir. Bu sorgu yalnızca çoğaltılmış tablolar için çalışır. + +Sorgu aşağıdakileri yapar: + +1. Bölümü belirtilen parçadan indirir. İçinde ‘path-in-zookeeper’ zookeeper içinde shard için bir yol belirtmeniz gerekir. +2. Sonra sorgu indirilen verileri `detached` directory of the `table_name` Tablo. Kullan... [ATTACH PARTITION\|PART](#alter_attach-partition) tabloya veri eklemek için sorgu. + +Mesela: + +``` sql +ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits'; +ALTER TABLE users ATTACH PARTITION 201902; +``` + +Not thate that: + +- Bu `ALTER ... FETCH PARTITION` sorgu çoğaltılmaz. Bu bölüm için yerleştirir `detached` yalnızca yerel sunucuda dizin. +- Bu `ALTER TABLE ... ATTACH` sorgu çoğaltılır. Verileri tüm yinelemelere ekler. Veriler, kopyalardan birine eklenir. `detached` dizin ve diğerlerine - komşu kopyalardan. + +İndirmeden önce, sistem bölümün olup olmadığını ve tablo yapısının eşleşip eşleşmediğini kontrol eder. En uygun yineleme, sağlıklı yinelemeler otomatik olarak seçilir. + +Sorgu çağrılsa da `ALTER TABLE`, tablo yapısını değiştirmez ve tabloda bulunan verileri hemen değiştirmez. + +#### MOVE PARTITION\|PART {#alter_move-partition} + +Bölümleri veya veri parçalarını başka bir birime veya diske taşır. `MergeTree`- motor masaları. Görmek [Veri depolama için birden fazla blok cihazı kullanma](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes). 
+ +``` sql +ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' +``` + +Bu `ALTER TABLE t MOVE` sorgu: + +- Çoğaltılamaz, çünkü farklı çoğaltmalar farklı depolama ilkelerine sahip olabilir. +- Belirtilen disk veya birim yapılandırılmamışsa bir hata döndürür. Depolama ilkesinde belirtilen veri taşıma koşulları uygulanamazsa, sorgu da bir hata döndürür. +- Durumda bir hata döndürebilir, taşınacak veriler zaten bir arka plan işlemi tarafından taşındığında, eşzamanlı `ALTER TABLE t MOVE` sorgu veya arka plan veri birleştirme sonucu. Bir kullanıcı bu durumda herhangi bir ek eylem gerçekleştirmemelidir. + +Örnek: + +``` sql +ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow' +ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' +``` + +#### Bölüm İfadesi Nasıl Ayarlanır {#alter-how-to-specify-part-expr} + +Bölüm ifadesini şu şekilde belirtebilirsiniz `ALTER ... PARTITION` farklı şekillerde sorgular: + +- Bu gibi bir değer `partition` sütun `system.parts` Tablo. Mesela, `ALTER TABLE visits DETACH PARTITION 201901`. +- Tablo sütunundan ifade olarak. Sabitler ve sabit ifadeler desteklenir. Mesela, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. +- Bölüm kimliğini kullanma. Partition ID, dosya sistemindeki ve Zookeeper'daki bölümlerin adları olarak kullanılan bölümün (mümkünse insan tarafından okunabilir) bir dize tanımlayıcısıdır. Bölüm kimliği belirtilmelidir `PARTITION ID` fık .ra, tek tırnak içinde. Mesela, `ALTER TABLE visits DETACH PARTITION ID '201901'`. +- İn the [ALTER ATTACH PART](#alter_attach-partition) ve [DROP DETACHED PART](#alter_drop-detached) sorgu, bir parçanın adını belirtmek için, bir değer ile dize literal kullanın `name` sütun [sistem.detached\_parts](../../operations/system_tables.md#system_tables-detached_parts) Tablo. Mesela, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. + +Bölüm belirtilirken tırnak kullanımı bölüm ifadesi türüne bağlıdır. Örneğin, için `String` yazın, adını tırnak içinde belirtmeniz gerekir (`'`). İçin `Date` ve `Int*` türleri hiçbir tırnak gereklidir. + +Eski stil tablolar için, bölümü bir sayı olarak belirtebilirsiniz `201901` veya bir dize `'201901'`. Yeni stil tabloları için sözdizimi türleri ile daha sıkı (değerleri giriş biçimi için ayrıştırıcı benzer). + +Yukarıdaki tüm kurallar için de geçerlidir [OPTIMIZE](misc.md#misc_operations-optimize) sorgu. Bölümlenmemiş bir tabloyu en iyi duruma getirirken tek bölümü belirtmeniz gerekiyorsa, ifadeyi ayarlayın `PARTITION tuple()`. Mesela: + +``` sql +OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; +``` + +Örnekleri `ALTER ... PARTITION` sorgular testlerde gösterilmiştir [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) ve [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). + +### Tablo TTL ile manipülasyonlar {#manipulations-with-table-ttl} + +Değiştirebilirsiniz [tablo TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-table-ttl) aşağıdaki formun bir isteği ile: + +``` sql +ALTER TABLE table-name MODIFY TTL ttl-expression +``` + +### Alter sorgularının eşzamanlılığı {#synchronicity-of-alter-queries} + +Replicatable olmayan tablolar için, tüm `ALTER` sorgular eşzamanlı olarak gerçekleştirilir. 
Replicatable tablolar için, sorgu yalnızca uygun eylemler için yönergeler ekler `ZooKeeper` ve eylemlerin kendileri mümkün olan en kısa sürede gerçekleştirilir. Ancak, sorgu tüm yinelemeler üzerinde tamamlanması için bu eylemleri bekleyebilir. + +İçin `ALTER ... ATTACH|DETACH|DROP` sorgular, kullanabilirsiniz `replication_alter_partitions_sync` bekleyen kurmak için ayarlama. +Olası değerler: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. + +### Mutasyonlar {#alter-mutations} + +Mutasyonlar, bir tablodaki satırların değiştirilmesine veya silinmesine izin veren bir alter query varyantıdır. Standart aksine `UPDATE` ve `DELETE` nokta veri değişikliklerine yönelik sorgular, mutasyonlar, bir tablodaki çok sayıda satırı değiştiren ağır işlemler için tasarlanmıştır. İçin desteklenen `MergeTree` çoğaltma desteği olan motorlar da dahil olmak üzere tablo motorları ailesi. + +Varolan tablolar olduğu gibi mutasyonlar için hazırdır(dönüştürme gerekmez), ancak ilk mutasyon bir tabloya uygulandıktan sonra Meta Veri formatı önceki sunucu sürümleriyle uyumsuz hale gelir ve önceki bir sürüme geri dönmek imkansız hale gelir. + +Şu anda mevcut komutlar: + +``` sql +ALTER TABLE [db.]table DELETE WHERE filter_expr +``` + +Bu `filter_expr` tip olmalıdır `UInt8`. Sorgu, bu ifadenin sıfır olmayan bir değer aldığı tablodaki satırları siler. + +``` sql +ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr +``` + +Bu `filter_expr` tip olmalıdır `UInt8`. Bu sorgu, belirtilen sütunların değerlerini, satırlardaki karşılık gelen ifadelerin değerlerine güncelleştirir. `filter_expr` sıfır olmayan bir değer alır. Değerleri kullanarak sütun türüne döküm `CAST` operatör. Birincil veya bölüm anahtarının hesaplanmasında kullanılan sütunları güncelleştirme desteklenmiyor. + +``` sql +ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name +``` + +Sorgu ikincil dizini yeniden oluşturur `name` bölümünde `partition_name`. + +Bir sorgu virgülle ayrılmış birkaç komut içerebilir. + +\* MergeTree tabloları mutasyonları için tüm veri parçalarını yeniden yazarak yürütün. Atomiklik yoktur-parçalar, hazır oldukları anda mutasyona uğramış parçalar için ikame edilir ve bir `SELECT` bir mutasyon sırasında yürütülmeye başlayan sorgu, henüz mutasyona uğramamış olan parçalardan gelen verilerle birlikte mutasyona uğramış olan parçalardan gelen verileri görecektir. + +Mutasyonlar tamamen yaratılış sırasına göre sıralanır ve her bir parçaya bu sırayla uygulanır. Mutasyonlar da kısmen ekler ile sıralanır-mutasyon gönderilmeden önce tabloya eklenen veriler mutasyona uğrayacak ve bundan sonra eklenen veriler mutasyona uğramayacaktır. Mutasyonların ekleri hiçbir şekilde engellemediğini unutmayın. + +Mutasyon girişi eklendikten hemen sonra bir mutasyon sorgusu döner(çoğaltılmış tablolar Zookeeper'a, çoğaltılmamış tablolar için-dosya sistemine). Mutasyonun kendisi sistem profili ayarlarını kullanarak eşzamansız olarak yürütür. Mutasyonların ilerlemesini izlemek için kullanabilirsiniz [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) Tablo. Başarıyla gönderilen BIR mutasyon, ClickHouse sunucuları yeniden başlatılmış olsa bile yürütmeye devam edecektir. Gönderildikten sonra mutasyonu geri almanın bir yolu yoktur, ancak mutasyon herhangi bir nedenle sıkışmışsa, [`KILL MUTATION`](misc.md#kill-mutation) sorgu. + +Bitmiş mutasyonlar için girişler hemen silinmez (korunmuş girişlerin sayısı, `finished_mutations_to_keep` depolama motoru parametresi). 
Eski mutasyon girişleri silinir. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/alter/) diff --git a/docs/tr/sql_reference/statements/create.md b/docs/tr/sql_reference/statements/create.md new file mode 100644 index 00000000000..5479034ad98 --- /dev/null +++ b/docs/tr/sql_reference/statements/create.md @@ -0,0 +1,305 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 35 +toc_title: CREATE +--- + +# Sorgu oluştur {#create-queries} + +## CREATE DATABASE {#query-language-create-database} + +Veritabanı oluşturur. + +``` sql +CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)] +``` + +### Yanlar {#clauses} + +- `IF NOT EXISTS` + Eğer... `db_name` veritabanı zaten var, daha sonra ClickHouse yeni bir veritabanı oluşturmuyor ve: + + - If yan tümcesi belirtilmişse bir istisna atmaz. + - Bir istisna atar if yan tümcesi belirtilmemiş. + +- `ON CLUSTER` + ClickHouse oluşturur `db_name` belirtilen bir kümenin tüm sunucularında veritabanı. + +- `ENGINE` + + - [MySQL](../../engines/database_engines/mysql.md) + Uzak MySQL sunucusundan veri almanızı sağlar. + Varsayılan olarak, ClickHouse kendi kullanır [Veritabanı Altyapısı](../../engines/database_engines/index.md). + +## CREATE TABLE {#create-table-query} + +Bu `CREATE TABLE` sorgu çeşitli formlara sahip olabilir. + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2], + ... +) ENGINE = engine +``` + +Adlı bir tablo oluşturur ‘name’ in the ‘db’ veritabanı veya geçerli veritabanı ise ‘db’ küme değil, parantez içinde belirtilen yapı ve ‘engine’ motor. +Tablonun yapısı sütun açıklamalarının bir listesidir. Dizinler altyapısı tarafından destekleniyorsa, tablo altyapısı için parametreler olarak gösterilir. + +Bir sütun açıklaması `name type` en basit durumda. Örnek: `RegionID UInt32`. +İfadeler varsayılan değerler için de tanımlanabilir (aşağıya bakın). + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine] +``` + +Başka bir tablo ile aynı yapıya sahip bir tablo oluşturur. Tablo için farklı bir motor belirtebilirsiniz. Motor belirtilmemişse, aynı motor için olduğu gibi kullanılacaktır `db2.name2` Tablo. + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() +``` + +Yapısı ve veri tarafından döndürülen bir tablo oluşturur. [tablo fonksiyonu](../table_functions/index.md). + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ... +``` + +Sonucu gibi bir yapıya sahip bir tablo oluşturur `SELECT` Sorgu, ile ‘engine’ motor ve SELECT verilerle doldurur. + +Her durumda, eğer `IF NOT EXISTS` tablo zaten varsa, sorgu bir hata döndürmez. Bu durumda, sorgu hiçbir şey yapmaz. + +Sonra başka maddeler olabilir `ENGINE` sorguda yan tümcesi. Açıklamalarda tabloların nasıl oluşturulacağına ilişkin ayrıntılı belgelere bakın [masa motorları](../../engines/table_engines/index.md#table_engines). + +### Varsayılan Değerler {#create-default-values} + +Sütun açıklaması, aşağıdaki yollardan biriyle varsayılan değer için bir ifade belirtebilir:`DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`. +Örnek: `URLDomain String DEFAULT domain(URL)`. 
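+
+Aşağıdaki taslak, üç varsayılan ifade türünü bir arada gösterir (tablo ve sütun adları örnek amaçlı varsayımsaldır):
+
+``` sql
+CREATE TABLE default_example
+(
+    EventTime DateTime,
+    EventDate Date DEFAULT toDate(EventTime),   -- INSERT'te atlanırsa hesaplanır
+    Hour UInt8 MATERIALIZED toHour(EventTime),  -- her zaman hesaplanır; INSERT ile belirtilemez
+    Day Date ALIAS EventDate                    -- diskte saklanmaz; sorgu sırasında genişletilir
+) ENGINE = MergeTree()
+ORDER BY EventTime
+```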
+ +Varsayılan değer için bir ifade tanımlanmamışsa, varsayılan değerler sayılar için sıfırlar, dizeler için boş dizeler, diziler için boş diziler ve `0000-00-00` tarihler için veya `0000-00-00 00:00:00` zamanla tarihler için. Boş alanlar desteklenmez. + +Varsayılan ifade tanımlanmışsa, sütun türü isteğe bağlıdır. Açıkça tanımlanmış bir tür yoksa, varsayılan ifade türü kullanılır. Örnek: `EventDate DEFAULT toDate(EventTime)` – the ‘Date’ türü için kullanılacak ‘EventDate’ sütun. + +Veri türü ve varsayılan ifade açıkça tanımlanırsa, bu ifade type casting işlevleri kullanılarak belirtilen türe aktarılır. Örnek: `Hits UInt32 DEFAULT 0` aynı şeyi ifade eder `Hits UInt32 DEFAULT toUInt32(0)`. + +Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions don't contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed. + +`DEFAULT expr` + +Normal varsayılan değer. INSERT sorgusu karşılık gelen sütunu belirtmezse, ilgili ifadeyi hesaplayarak doldurulur. + +`MATERIALIZED expr` + +Somut ifade. Böyle bir sütun INSERT için belirtilemez, çünkü her zaman hesaplanır. +Sütun listesi olmayan bir ekleme için bu sütunlar dikkate alınmaz. +Buna ek olarak, bir SELECT sorgusunda Yıldız İşareti kullanıldığında bu sütun değiştirilmez. Bu, dökümü kullanarak elde edilen değişmezi korumaktır `SELECT *` sütun listesini belirtmeden INSERT kullanarak tabloya geri eklenebilir. + +`ALIAS expr` + +Eşanlamlı sözcük. Böyle bir sütun tabloda hiç depolanmaz. +Değerleri bir tabloya eklenemez ve bir SELECT sorgusunda Yıldız İşareti kullanılırken değiştirilmez. +Sorgu ayrıştırma sırasında diğer ad genişletilirse, seçimlerde kullanılabilir. + +Yeni sütunlar eklemek için ALTER sorgusunu kullanırken, bu sütunlar için eski veriler yazılmaz. Bunun yerine, yeni sütunlar için değerleri olmayan eski verileri okurken, ifadeler varsayılan olarak anında hesaplanır. Ancak, ifadeleri çalıştırmak sorguda belirtilmeyen farklı sütunlar gerektiriyorsa, bu sütunlar ayrıca okunur, ancak yalnızca buna ihtiyaç duyan veri blokları için okunur. + +Bir tabloya yeni bir sütun eklerseniz, ancak daha sonra varsayılan ifadesini değiştirirseniz, eski veriler için kullanılan değerler değişir (değerlerin diskte depolanmadığı veriler için). Arka plan birleştirmeleri çalıştırırken, birleştirme parçalarından birinde eksik olan sütunların verileri birleştirilmiş parçaya yazıldığını unutmayın. + +İç içe geçmiş veri yapılarındaki öğeler için varsayılan değerleri ayarlamak mümkün değildir. + +### Kısıtlamalar {#constraints} + +Sütun açıklamaları kısıtlamaları ile birlikte tanımlanabilir: + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1], + ... + CONSTRAINT constraint_name_1 CHECK boolean_expr_1, + ... +) ENGINE = engine +``` + +`boolean_expr_1` herhangi bir Boole ifadesi ile olabilir. Tablo için kısıtlamalar tanımlanırsa, her biri her satır için kontrol edilir `INSERT` query. If any constraint is not satisfied — server will raise an exception with constraint name and checking expression. + +Büyük miktarda kısıtlama eklemek, büyük `INSERT` sorgular. + +### TTL ifadesi {#ttl-expression} + +Değerler için depolama süresini tanımlar. Sadece MergeTree-family tabloları için belirtilebilir. Ayrıntılı açıklama için, bkz. 
[Sütunlar ve tablolar için TTL](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl). + +### Sütun Sıkıştırma Kodekleri {#codecs} + +Varsayılan olarak, ClickHouse `lz4` sıkıştırma yöntemi. İçin `MergeTree`- motor ailesi varsayılan sıkıştırma yöntemini değiştirebilirsiniz [sıkıştırma](../../operations/server_configuration_parameters/settings.md#server-settings-compression) bir sunucu yapılandırması bölümü. Her bir sütun için sıkıştırma yöntemini de tanımlayabilirsiniz. `CREATE TABLE` sorgu. + +``` sql +CREATE TABLE codec_example +( + dt Date CODEC(ZSTD), + ts DateTime CODEC(LZ4HC), + float_value Float32 CODEC(NONE), + double_value Float64 CODEC(LZ4HC(9)) + value Float32 CODEC(Delta, ZSTD) +) +ENGINE = +... +``` + +Bir codec bileşeni belirtilmişse, varsayılan codec bileşeni geçerli değildir. Kodekler bir boru hattında birleştirilebilir, örneğin, `CODEC(Delta, ZSTD)`. Projeniz için en iyi codec kombinasyonunu seçmek için, Altınlıkta açıklanana benzer kriterler geçirin [ClickHouse verimliliğini artırmak için yeni Kodlamalar](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) makale. + +!!! warning "Uyarıcı" + ClickHouse veritabanı dosyalarını harici yardımcı programlarla açamazsınız `lz4`. Bunun yerine, özel kullanın [clickhouse-kompresör](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) program. + +Sıkıştırma Aşağıdaki tablo motorları için desteklenir: + +- [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) aile. Sütun sıkıştırma kodeklerini destekler ve varsayılan sıkıştırma yöntemini seçerek [sıkıştırma](../../operations/server_configuration_parameters/settings.md#server-settings-compression) ayarlar. +- [Günlük](../../engines/table_engines/log_family/log_family.md) aile. Kullanır `lz4` sıkıştırma yöntemi varsayılan olarak ve sütun sıkıştırma codec destekler. +- [Koymak](../../engines/table_engines/special/set.md). Yalnızca varsayılan sıkıştırmayı destekledi. +- [Katmak](../../engines/table_engines/special/join.md). Yalnızca varsayılan sıkıştırmayı destekledi. + +ClickHouse ortak amaçlı codec ve özel codec destekler. + +#### Özel Kodekler {#create-query-specialized-codecs} + +Bu kodekler, verilerin belirli özelliklerini kullanarak sıkıştırmayı daha etkili hale getirmek için tasarlanmıştır. Bu kodeklerden bazıları verileri kendileri sıkıştırmaz. Bunun yerine, verileri ortak bir amaç için hazırlarlar codec, bu hazırlık olmadan daha iyi sıkıştırır. + +Özel kodekler: + +- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` delta değerlerini saklamak için kullanılır, böylece `delta_bytes` ham değerlerin maksimum boyutudur. Olası `delta_bytes` değerler: 1, 2, 4, 8. İçin varsayılan değer `delta_bytes` oluyor `sizeof(type)` 1, 2, 4 veya 8'e eşitse. Diğer tüm durumlarda, 1. +- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: Hızlı, Ölçeklenebilir, Bellek İçi Zaman Serisi Veritabanı](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). 
+- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla: Hızlı, Ölçeklenebilir, Bellek İçi Zaman Serisi Veritabanı](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` ve `DateTime`). Algoritmasının her adımında, codec 64 değerden oluşan bir blok alır, 64x64 bit matrisine koyar, aktarır, kullanılmayan değer bitlerini kırpar ve gerisini bir dizi olarak döndürür. Kullanılmayan bitler, sıkıştırmanın kullanıldığı tüm veri bölümündeki maksimum ve minimum değerler arasında farklılık göstermeyen bitlerdir. + +`DoubleDelta` ve `Gorilla` kodekler, Gorilla TSDB'DE sıkıştırma algoritmasının bileşenleri olarak kullanılır. Gorilla yaklaşımı, zaman damgaları ile yavaş yavaş değişen değerler dizisi olduğunda senaryolarda etkilidir. Zaman damgaları tarafından etkili bir şekilde sıkıştırılır `DoubleDelta` codec ve değerler etkin bir şekilde sıkıştırılır `Gorilla` codec. Örneğin, etkili bir şekilde saklanan bir tablo elde etmek için, aşağıdaki yapılandırmada oluşturabilirsiniz: + +``` sql +CREATE TABLE codec_example +( + timestamp DateTime CODEC(DoubleDelta), + slow_values Float32 CODEC(Gorilla) +) +ENGINE = MergeTree() +``` + +#### Ortak Amaç {#create-query-common-purpose-codecs} + +Cod codecsec codecs'ler: + +- `NONE` — No compression. +- `LZ4` — Lossless [veri sıkıştırma algoritması](https://github.com/lz4/lz4) varsayılan olarak kullanılır. Lz4 hızlı sıkıştırma uygular. +- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` varsayılan düzeyi uygular. Olası seviyeleri: \[1, 12\]. Önerilen seviye aralığı: \[4, 9\]. +- `ZSTD[(level)]` — [Zstd sıkıştırma algoritması](https://en.wikipedia.org/wiki/Zstandard) yapılandırılabilir ile `level`. Olası seviyeler: \[1, 22\]. Varsayılan değer: 1. + +Yüksek Sıkıştırma seviyeleri asimetrik senaryolar için kullanışlıdır, örneğin bir kez sıkıştırın, tekrar tekrar sıkıştırın. Daha yüksek seviyeler daha iyi sıkıştırma ve daha yüksek CPU kullanımı anlamına gelir. + +## Geçici Tablolar {#temporary-tables} + +ClickHouse aşağıdaki özelliklere sahip geçici tabloları destekler: + +- Bağlantı kaybolursa da dahil olmak üzere oturum sona erdiğinde geçici tablolar kaybolur. +- Geçici bir tablo yalnızca bellek altyapısını kullanır. +- DB geçici bir tablo için belirtilemez. Veritabanları dışında oluşturulur. +- Tüm küme sunucularında dağıtılmış DDL sorgusu ile geçici bir tablo oluşturmak imkansız (kullanarak `ON CLUSTER`): bu tablo yalnızca geçerli oturumda bulunur. +- Geçici bir tablo başka bir ile aynı ada sahip ve bir sorgu DB belirtmeden tablo adını belirtir, geçici tablo kullanılır. +- Dağıtılmış sorgu işleme için bir sorguda kullanılan geçici tablolar uzak sunuculara geçirilir. + +Geçici bir tablo oluşturmak için aşağıdaki sözdizimini kullanın: + +``` sql +CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... 
+) +``` + +Çoğu durumda, geçici tablolar el ile oluşturulmaz, ancak bir sorgu için veya dağıtılmış için dış verileri kullanırken `(GLOBAL) IN`. Daha fazla bilgi için uygun bölümlere bakın + +İle tabloları kullanmak mümkündür [Motor = bellek](../../engines/table_engines/special/memory.md) geçici tablolar yerine. + +## Dağıtılmış DDL sorguları (küme yan tümcesinde) {#distributed-ddl-queries-on-cluster-clause} + +Bu `CREATE`, `DROP`, `ALTER`, ve `RENAME` sorgular, bir kümede dağıtılmış yürütmeyi destekler. +Örneğin, aşağıdaki sorgu oluşturur `all_hits` `Distributed` her ana bilgisayarda tablo `cluster`: + +``` sql +CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits) +``` + +Bu sorguları doğru bir şekilde çalıştırmak için, her ana bilgisayarın aynı küme tanımına sahip olması gerekir (senkronizasyon yapılandırmalarını basitleştirmek için zookeeper'dan değiştirmeleri kullanabilirsiniz). Ayrıca ZooKeeper sunucularına bağlanmaları gerekir. +Bazı ana bilgisayarlar şu anda mevcut olmasa bile, sorgunun yerel sürümü sonunda kümedeki her ana bilgisayarda uygulanır. Tek bir ana makine içinde sorguları yürütme sırası garanti edilir. + +## CREATE VIEW {#create-view} + +``` sql +CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... +``` + +Bir görünüm oluşturur. İki tür görüş vardır: normal ve SOMUTLAŞTIRILMIŞ. + +Normal görünümler herhangi bir veri depolamaz, ancak başka bir tablodan bir okuma gerçekleştirir. Başka bir deyişle, normal bir görünüm kaydedilmiş bir sorgudan başka bir şey değildir. Bir görünümden okurken, bu kaydedilmiş sorgu FROM yan tümcesinde bir alt sorgu olarak kullanılır. + +Örnek olarak, bir görünüm oluşturduğunuzu varsayalım: + +``` sql +CREATE VIEW view AS SELECT ... +``` + +ve bir sorgu yazdı: + +``` sql +SELECT a, b, c FROM view +``` + +Bu sorgu, alt sorguyu kullanmaya tam olarak eşdeğerdir: + +``` sql +SELECT a, b, c FROM (SELECT ...) +``` + +Materialized görünümler, ilgili SELECT sorgusu tarafından dönüştürülmüş verileri depolar. + +Olmadan hayata bir görünüm oluştururken `TO [db].[table]`, you must specify ENGINE – the table engine for storing data. + +İle somutlaştırılmış bir görünüm oluştururken `TO [db].[table]`, kullanma mustmalısınız `POPULATE`. + +Materialized görünüm aşağıdaki gibi düzenlenmiştir: SELECT belirtilen tabloya veri eklerken, eklenen verilerin bir kısmı bu SELECT sorgusu tarafından dönüştürülür ve sonuç görünümde eklenir. + +Doldur belirtirseniz, varolan tablo verilerini oluştururken görünümde, sanki bir `CREATE TABLE ... AS SELECT ...` . Aksi takdirde, sorgu yalnızca görünümü oluşturduktan sonra tabloya eklenen verileri içerir. Görünüm oluşturma sırasında tabloya eklenen veriler EKLENMEYECEĞİNDEN, doldur kullanmanızı önermiyoruz. + +A `SELECT` sorgu içerebilir `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` ayarlanır, veri ekleme sırasında toplanır, ancak yalnızca tek bir eklenen veri paketi içinde toplanır. Veriler daha fazla toplanmayacaktır. Özel durum, bağımsız olarak veri toplama, gibi gerçekleştiren bir motor kullanırken olur `SummingMergeTree`. + +Yürütme `ALTER` somut görünümlerle ilgili sorgular tam olarak geliştirilmemiştir, bu nedenle rahatsız edici olabilirler. 
Eğer hayata görünüm inşaat kullanıyorsa `TO [db.]name` yapabilirsiniz `DETACH` the view, run `ALTER` hedef tablo için ve sonra `ATTACH` daha önce müstakil (`DETACH`) görünüm. + +Görünümler normal tablolarla aynı görünür. Örneğin, bunlar sonucu listelenir `SHOW TABLES` sorgu. + +Görünümleri silmek için ayrı bir sorgu yok. Bir görünümü silmek için şunları kullanın `DROP TABLE`. + +## CREATE DICTIONARY {#create-dictionary-query} + +``` sql +CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] +( + key1 type1 [DEFAULT|EXPRESSION expr1] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID], + key2 type2 [DEFAULT|EXPRESSION expr2] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID], + attr1 type2 [DEFAULT|EXPRESSION expr3], + attr2 type2 [DEFAULT|EXPRESSION expr4] +) +PRIMARY KEY key1, key2 +SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN])) +LAYOUT(LAYOUT_NAME([param_name param_value])) +LIFETIME([MIN val1] MAX val2) +``` + +Oluşturuyor [dış sözlük](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) verilen ile [yapılı](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md), [kaynaklı](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md), [düzen](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) ve [ömür](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md). + +Dış sözlük yapısı özniteliklerden oluşur. Sözlük öznitelikleri tablo sütunlarına benzer şekilde belirtilir. Tek gerekli öznitelik özelliği türüdür, diğer tüm özelliklerin varsayılan değerleri olabilir. + +Sözlüğe bağlı olarak [düzen](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) bir veya daha fazla öznitelik sözlük anahtarları olarak belirtilebilir. + +Daha fazla bilgi için, bkz. [Dış Söz Dictionarieslükler](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) bölme. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/create/) diff --git a/docs/tr/sql_reference/statements/index.md b/docs/tr/sql_reference/statements/index.md new file mode 100644 index 00000000000..0298948b26e --- /dev/null +++ b/docs/tr/sql_reference/statements/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_folder_title: Deyimler +toc_priority: 31 +--- + + diff --git a/docs/tr/sql_reference/statements/insert_into.md b/docs/tr/sql_reference/statements/insert_into.md new file mode 100644 index 00000000000..1aafdc368e9 --- /dev/null +++ b/docs/tr/sql_reference/statements/insert_into.md @@ -0,0 +1,80 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 34 +toc_title: INSERT INTO +--- + +## INSERT {#insert} + +Veri ekleme. + +Temel sorgu biçimi: + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... +``` + +Sorgu eklemek için sütunların bir listesini belirtebilirsiniz `[(c1, c2, c3)]`. Bu durumda, sütunların geri kalanı ile doldurulur: + +- Hesaplanan değerler `DEFAULT` tablo tanımında belirtilen ifadeler. +- Sıfırlar ve boş dizeler, eğer `DEFAULT` ifadeler tanımlanmamıştır. + +Eğer [strict\_ınsert\_defaults = 1](../../operations/settings/settings.md), sahip olmayan sütunlar `DEFAULT` tanımlanan sorguda listelenmelidir. + +Veri herhangi bir İNSERT geçirilebilir [biçimli](../../interfaces/formats.md#formats) ClickHouse tarafından desteklenmektedir. 
Biçim sorguda açıkça belirtilmelidir: + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set +``` + +For example, the following query format is identical to the basic version of INSERT … VALUES: + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ... +``` + +ClickHouse, veriden önce tüm boşlukları ve bir satır beslemesini (varsa) kaldırır. Bir sorgu oluştururken, sorgu işleçlerinden sonra verileri yeni bir satıra koymanızı öneririz (veriler boşluklarla başlarsa bu önemlidir). + +Örnek: + +``` sql +INSERT INTO t FORMAT TabSeparated +11 Hello, world! +22 Qwerty +``` + +Komut satırı istemcisini veya HTTP arabirimini kullanarak verileri sorgudan ayrı olarak ekleyebilirsiniz. Daha fazla bilgi için bölüme bakın “[Arabirimler](../../interfaces/index.md#interfaces)”. + +### Kısıtlamalar {#constraints} + +Tablo varsa [kısıtlamalar](create.md#constraints), their expressions will be checked for each row of inserted data. If any of those constraints is not satisfied — server will raise an exception containing constraint name and expression, the query will be stopped. + +### Sonuçları Ekleme `SELECT` {#insert_query_insert-select} + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... +``` + +Sütunlar, SELECT yan tümcesindeki konumlarına göre eşleştirilir. Ancak, SELECT ifadesi ve INSERT için tablo adları farklı olabilir. Gerekirse, tip döküm yapılır. + +Değerler dışındaki veri biçimlerinin hiçbiri, aşağıdaki gibi ifadelere değerler ayarlamasına izin vermez `now()`, `1 + 2` ve bu yüzden. Değerler biçimi, ifadelerin sınırlı kullanımına izin verir, ancak bu önerilmez, çünkü bu durumda verimsiz kod yürütme için kullanılır. + +Veri bölümlerini değiştirmek için diğer sorgular desteklenmiyor: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`. +Ancak, eski verileri kullanarak silebilirsiniz `ALTER TABLE ... DROP PARTITION`. + +`FORMAT` yan tümcesi sorgu sonunda belirtilmelidir eğer `SELECT` yan tümcesi tablo işlevi içerir [girdi()](../table_functions/input.md). + +### Performans Konuları {#performance-considerations} + +`INSERT` giriş verilerini birincil anahtarla sıralar ve bunları bir bölüm anahtarı ile bölümlere ayırır. Bir kerede birkaç bölüme veri eklerseniz, bu veri tabanının performansını önemli ölçüde azaltabilir. `INSERT` sorgu. Bunu önlemek için: + +- Bir seferde 100.000 satır gibi oldukça büyük gruplar halinde veri ekleyin. +- Clickhouse'a yüklemeden önce verileri bir bölüm anahtarıyla gruplandırın. + +Eğer performans azalmaz: + +- Veri gerçek zamanlı olarak eklenir. +- Genellikle zamana göre sıralanır veri yükleyin. + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/insert_into/) diff --git a/docs/tr/sql_reference/statements/misc.md b/docs/tr/sql_reference/statements/misc.md new file mode 100644 index 00000000000..689fc8cd77d --- /dev/null +++ b/docs/tr/sql_reference/statements/misc.md @@ -0,0 +1,252 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 39 +toc_title: "Di\u011Fer" +--- + +# Çeşitli Sorgular {#miscellaneous-queries} + +## ATTACH {#attach} + +Bu sorgu tam olarak aynıdır `CREATE`, ama + +- Kelime yerine `CREATE` kelime kullanır `ATTACH`. +- Sorgu diskte veri oluşturmaz, ancak verilerin zaten uygun yerlerde olduğunu ve yalnızca tablo hakkında bilgi sunucuya eklediğini varsayar. + Bir ekleme sorgusu çalıştırdıktan sonra, sunucu tablonun varlığı hakkında bilgi sahibi olacaktır. 
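+
+Küçük ve varsayımsal bir örnek: veri dosyaları diskte zaten varsa, tablo yapısı `CREATE` ile aynı sözdizimiyle verilerek eklenebilir (tablo adı ve şema burada tamamen varsayımsaldır):
+
+``` sql
+-- Veri dosyalarının uygun dizinde zaten var olduğu varsayılır;
+-- sorgu diske veri yazmaz, yalnızca tabloyu sunucuya tanıtır.
+ATTACH TABLE visits
+(
+    CounterID UInt32,
+    EventDate Date
+)
+ENGINE = MergeTree()
+ORDER BY (CounterID, EventDate)
+```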
+
+Tablo daha önce ayrılmışsa (`DETACH`), yapısı zaten bilindiği için yapıyı tanımlamadan kısa biçimi kullanabilirsiniz:
+
+``` sql
+ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+Bu sorgu sunucu başlatılırken kullanılır. Sunucu, tablo meta verilerini başlangıçta çalıştırdığı `ATTACH` sorguları biçiminde dosyalar olarak depolar (sunucuda açıkça oluşturulan sistem tabloları hariç).
+
+## CHECK TABLE {#check-table}
+
+Tablodaki verilerin bozuk olup olmadığını denetler.
+
+``` sql
+CHECK TABLE [db.]name
+```
+
+`CHECK TABLE` sorgusu, gerçek dosya boyutlarını sunucuda depolanan beklenen değerlerle karşılaştırır. Dosya boyutları depolanan değerlerle eşleşmiyorsa veriler bozulmuş demektir. Bu, örneğin sorgu yürütme sırasında yaşanan bir sistem çökmesinden kaynaklanabilir.
+
+Sorgu yanıtı, tek satırlık `result` sütununu içerir. Satır,
+[Boolean](../../sql_reference/data_types/boolean.md) türünde bir değere sahiptir:
+
+- 0 — tablodaki veriler bozuk.
+- 1 — veri bütünlüğü korunuyor.
+
+`CHECK TABLE` sorgusu aşağıdaki tablo motorlarını destekler:
+
+- [Log](../../engines/table_engines/log_family/log.md)
+- [TinyLog](../../engines/table_engines/log_family/tinylog.md)
+- [StripeLog](../../engines/table_engines/log_family/stripelog.md)
+- [MergeTree ailesi](../../engines/table_engines/mergetree_family/mergetree.md)
+
+Başka tablo motorlarına sahip tablolar üzerinde çalıştırıldığında bir özel durum (exception) oluşur.
+
+`*Log` ailesindeki motorlar, arıza durumunda otomatik veri kurtarma sağlamaz. Veri kaybını zamanında izlemek için `CHECK TABLE` sorgusunu kullanın.
+
+`MergeTree` ailesindeki motorlar için `CHECK TABLE` sorgusu, yerel sunucudaki bir tablonun her bir veri parçası için ayrı bir denetim durumu gösterir.
+
+**Veri bozuksa**
+
+Tablo bozuksa, bozuk olmayan verileri başka bir tabloya kopyalayabilirsiniz. Bunun için:
+
+1. Bozuk tabloyla aynı yapıya sahip yeni bir tablo oluşturun. Bunun için `CREATE TABLE <new_table_name> AS <damaged_table_name>` sorgusunu yürütün.
+2. Bir sonraki sorgunun tek bir iş parçacığında işlenmesi için [max\_threads](../../operations/settings/settings.md#settings-max_threads) değerini 1 yapın. Bunun için `SET max_threads = 1` sorgusunu çalıştırın.
+3. `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>` sorgusunu yürütün. Bu istek, bozuk olmayan verileri bozuk tablodan başka bir tabloya kopyalar. Yalnızca bozuk kısımdan önceki veriler kopyalanır.
+4. `max_threads` değerini sıfırlamak için `clickhouse-client` istemcisini yeniden başlatın.
+
+## DESCRIBE TABLE {#misc-describe-table}
+
+``` sql
+DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
+```
+
+Aşağıdaki `String` türünde sütunları döndürür:
+
+- `name` — Sütun adı.
+- `type` — Sütun türü.
+- `default_type` — [Varsayılan ifadede](create.md#create-default-values) kullanılan yan tümce (`DEFAULT`, `MATERIALIZED` veya `ALIAS`). Varsayılan ifade belirtilmemişse sütun boş bir dize içerir.
+- `default_expression` — `DEFAULT` yan tümcesinde belirtilen değer.
+- `comment_expression` — Açıklama metni.
+
+İç içe veri yapıları çıktıda "genişletilmiş" biçimde gösterilir: her sütun, adının noktadan sonraki kısmıyla ayrı ayrı listelenir.
+
+## DETACH {#detach}
+
+Sunucudan 'name' tablosu hakkındaki bilgileri siler. Sunucu, tablonun varlığından haberdar olmayı bırakır.
+
+``` sql
+DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+Bu sorgu tablonun verilerini veya meta verilerini silmez. Bir sonraki sunucu başlatılışında sunucu meta verileri okur ve tabloyu yeniden tanır.
+
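+Döngünün tamamını gösteren küçük ve varsayımsal bir örnek (tablo adı varsayımsaldır):
+
+``` sql
+DETACH TABLE hits;   -- sunucu tabloyu "unutur"; diskteki veriler ve meta veriler kalır
+ATTACH TABLE hits;   -- tablo, saklanan meta verilerden yeniden eklenir
+```
+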
+Benzer şekilde, ayrılmış ("detached") bir tablo `ATTACH` sorgusu kullanılarak yeniden eklenebilir (kendileri için meta veri saklanmayan sistem tabloları hariç).
+
+`DETACH DATABASE` diye bir sorgu yoktur.
+
+## DROP {#drop}
+
+Bu sorgunun iki türü vardır: `DROP DATABASE` ve `DROP TABLE`.
+
+``` sql
+DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
+```
+
+'db' veritabanındaki tüm tabloları, ardından 'db' veritabanının kendisini siler.
+`IF EXISTS` belirtilmişse, veritabanı yoksa hata döndürmez.
+
+``` sql
+DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+Tabloyu siler.
+`IF EXISTS` belirtilmişse, tablo veya veritabanı yoksa hata döndürmez.
+
+    DROP DICTIONARY [IF EXISTS] [db.]name
+
+Sözlüğü siler.
+`IF EXISTS` belirtilmişse, sözlük veya veritabanı yoksa hata döndürmez.
+
+## EXISTS {#exists}
+
+``` sql
+EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format]
+```
+
+Tek bir `UInt8` türünde sütun döndürür: tablo veya veritabanı yoksa `0`, tablo belirtilen veritabanında varsa `1` değerini içerir.
+
+## KILL QUERY {#kill-query}
+
+``` sql
+KILL QUERY [ON CLUSTER cluster]
+  WHERE <where expression to SELECT FROM system.processes query>
+  [SYNC|ASYNC|TEST]
+  [FORMAT format]
+```
+
+Şu anda çalışan sorguları zorla sonlandırmaya çalışır.
+Sonlandırılacak sorgular, `KILL` sorgusunun `WHERE` yan tümcesinde tanımlanan ölçütler kullanılarak `system.processes` tablosundan seçilir.
+
+Örnekler:
+
+``` sql
+-- Forcibly terminates all queries with the specified query_id:
+KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
+
+-- Synchronously terminates all queries run by 'username':
+KILL QUERY WHERE user='username' SYNC
+```
+
+Salt okunur kullanıcılar yalnızca kendi sorgularını durdurabilir.
+
+Varsayılan olarak sorguların zaman uyumsuz sürümü (`ASYNC`) kullanılır; bu sürüm sorguların durduğuna dair onay beklemez.
+
+Eşzamanlı sürüm (`SYNC`) tüm sorguların durmasını bekler ve her süreç durdukça onun hakkında bilgi görüntüler.
+Yanıt, aşağıdaki değerleri alabilen `kill_status` sütununu içerir:
+
+1. 'finished' – Sorgu başarıyla sonlandırıldı.
+2. 'waiting' – Sonlandırma sinyali gönderilen sorgunun bitmesi bekleniyor.
+3. Diğer değerler, sorgunun neden durdurulamadığını açıklar.
+
+Test sorgusu (`TEST`) yalnızca kullanıcının haklarını denetler ve durdurulacak sorguların listesini görüntüler.
+
+## KILL MUTATION {#kill-mutation}
+
+``` sql
+KILL MUTATION [ON CLUSTER cluster]
+  WHERE <where expression to SELECT FROM system.mutations query>
+  [TEST]
+  [FORMAT format]
+```
+
+Şu anda yürütülmekte olan [mutasyonları](alter.md#alter-mutations) iptal edip kaldırmaya çalışır. İptal edilecek mutasyonlar, `KILL` sorgusunun `WHERE` yan tümcesinde belirtilen filtre kullanılarak [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) tablosundan seçilir.
+
+Test sorgusu (`TEST`) yalnızca kullanıcının haklarını denetler ve durdurulacak mutasyonların listesini görüntüler.
+
+Örnekler:
+
+``` sql
+-- Cancel and remove all mutations of the single table:
+KILL MUTATION WHERE database = 'default' AND table = 'table'
+
+-- Cancel the specific mutation:
+KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
+```
+
+Bu sorgu, bir mutasyon takılıp kaldığında ve tamamlanamadığında (örneğin, mutasyon sorgusundaki bir işlev tablodaki verilere uygulanınca özel durum fırlatıyorsa) yararlıdır.
+
+Mutasyonun o ana kadar yaptığı değişiklikler geri alınmaz. Takılan mutasyonları bulmak için `system.mutations` tablosunu sorgulayabilirsiniz; aşağıda küçük bir örnek verilmiştir.
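+
+Henüz tamamlanmamış mutasyonları incelemek için küçük bir sorgu taslağı:
+
+``` sql
+-- Tamamlanmamış mutasyonları ve varsa son hata nedenini listeler
+SELECT mutation_id, command, is_done, latest_fail_reason
+FROM system.mutations
+WHERE is_done = 0;
+```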
+
+## OPTIMIZE {#misc_operations-optimize}
+
+``` sql
+OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE]
+```
+
+Bu sorgu, [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) ailesinden bir tablo motoruna sahip tablolar için veri parçalarının plan dışı birleştirilmesini başlatmaya çalışır.
+
+`OPTIMIZE` sorgusu ayrıca [MaterializedView](../../engines/table_engines/special/materializedview.md) ve [Buffer](../../engines/table_engines/special/buffer.md) motorları için de desteklenir. Diğer tablo motorları desteklenmez.
+
+`OPTIMIZE`, [ReplicatedMergeTree](../../engines/table_engines/mergetree_family/replication.md) ailesinden tablo motorlarıyla kullanıldığında ClickHouse birleştirme için bir görev oluşturur ve (`replication_alter_partitions_sync` ayarı etkinse) bu görevin tüm düğümlerde yürütülmesini bekler.
+
+- `OPTIMIZE` herhangi bir nedenle birleştirme gerçekleştirmezse istemciye bildirmez. Bildirimleri etkinleştirmek için [optimize\_throw\_if\_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) ayarını kullanın.
+- Bir `PARTITION` belirtirseniz yalnızca belirtilen bölüm optimize edilir. [Bölüm ifadesi nasıl belirtilir](alter.md#alter-how-to-specify-part-expr).
+- `FINAL` belirtirseniz, tüm veriler zaten tek bir parçada olsa bile optimizasyon gerçekleştirilir.
+- `DEDUPLICATE` belirtirseniz, tamamen özdeş satırlar tekilleştirilir (tüm sütunlar karşılaştırılır); bu yalnızca MergeTree motoru için anlamlıdır.
+
+!!! warning "Uyarı"
+    `OPTIMIZE`, "Too many parts" hatasını düzeltemez.
+
+## RENAME {#misc_operations-rename}
+
+Bir veya daha fazla tabloyu yeniden adlandırır.
+
+``` sql
+RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster]
+```
+
+Tüm tablolar genel bir kilit altında yeniden adlandırılır. Tabloları yeniden adlandırmak hafif bir işlemdir. `TO`'dan sonra başka bir veritabanı belirttiyseniz tablo o veritabanına taşınır. Ancak veritabanı dizinlerinin aynı dosya sisteminde bulunması gerekir (aksi takdirde hata döndürülür).
+
+## SET {#query-set}
+
+``` sql
+SET param = value
+```
+
+Geçerli oturum için `param` [ayarına](../../operations/settings/index.md) `value` değerini atar. [Sunucu ayarları](../../operations/server_configuration_parameters/index.md) bu şekilde değiştirilemez.
+
+Belirtilen ayar profilindeki tüm değerleri tek bir sorguyla da ayarlayabilirsiniz:
+
+``` sql
+SET profile = 'profile-name-from-the-settings-file'
+```
+
+Daha fazla bilgi için bkz. [Ayarlar](../../operations/settings/settings.md).
+
+## TRUNCATE {#truncate}
+
+``` sql
+TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+Bir tablodaki tüm verileri kaldırır. `IF EXISTS` yan tümcesi atlanırsa ve tablo mevcut değilse sorgu hata döndürür.
+
+`TRUNCATE` sorgusu [Görünüm](../../engines/table_engines/special/view.md), [Dosya](../../engines/table_engines/special/file.md), [URL](../../engines/table_engines/special/url.md) ve [Boş](../../engines/table_engines/special/null.md) tablo motorları için desteklenmez.
+
+## USE {#use}
+
+``` sql
+USE db
+```
+
+Oturum için geçerli veritabanını ayarlamanızı sağlar.
+Geçerli veritabanı, sorguda tablo adından önce nokta ile açıkça belirtilmemişse tabloları aramak için kullanılır.
+Oturum kavramı olmadığından, bu sorgu HTTP protokolü kullanılırken çalıştırılamaz. Aşağıda küçük bir kullanım örneği verilmiştir.
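+
+Varsayımsal adlarla küçük bir örnek (`db1`, `db2` ve tablo adları varsayımsaldır):
+
+``` sql
+USE db1;
+SELECT count() FROM my_table;        -- db1.my_table okunur
+SELECT count() FROM db2.other_table; -- nokta ile tam ad her zaman kullanılabilir
+```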
+ +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/misc/) diff --git a/docs/tr/sql_reference/statements/select.md b/docs/tr/sql_reference/statements/select.md new file mode 100644 index 00000000000..287a8029ee9 --- /dev/null +++ b/docs/tr/sql_reference/statements/select.md @@ -0,0 +1,610 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 33 +toc_title: SELECT +--- + +# Select Queries sözdizimi {#select-queries-syntax} + +`SELECT` veri alma gerçekleştirir. + +``` sql +[WITH expr_list|(subquery)] +SELECT [DISTINCT] expr_list +[FROM [db.]table | (subquery) | table_function] [FINAL] +[SAMPLE sample_coeff] +[ARRAY JOIN ...] +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list +[PREWHERE expr] +[WHERE expr] +[GROUP BY expr_list] [WITH TOTALS] +[HAVING expr] +[ORDER BY expr_list] +[LIMIT [offset_value, ]n BY columns] +[LIMIT [n, ]m] +[UNION ALL ...] +[INTO OUTFILE filename] +[FORMAT format] +``` + +Tüm yan tümceleri isteğe bağlıdır, hemen sonra ifadelerin gerekli listesi hariç seçin. +Aşağıdaki yan tümceleri sorgu yürütme konveyör hemen hemen aynı sırada açıklanmıştır. + +Sorgu atlarsa `DISTINCT`, `GROUP BY` ve `ORDER BY` CLA andus Andes and the `IN` ve `JOIN` alt sorgular, sorgu o (1) RAM miktarını kullanarak tamamen akış işlenecektir. +Aksi takdirde, uygun kısıtlamalar belirtilmezse, sorgu çok fazla RAM tüketebilir: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. Daha fazla bilgi için bölüme bakın “Settings”. Harici sıralama (geçici tabloları bir diske kaydetme) ve harici toplama kullanmak mümkündür. `The system does not have "merge join"`. + +### Fık WİTHRA ile {#with-clause} + +Bu bölüm, ortak tablo ifadeleri için destek sağlar ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), bazı sınırlamalar ile: +1. Özyinelemeli sorgular desteklenmiyor +2. Alt sorgu bölüm ile birlikte kullanıldığında, sonuç tam olarak bir satır ile skaler olmalıdır +3. İfadenin sonuçları alt sorgularda kullanılamaz +WITH yan tümcesi ifadeleri sonuçları SELECT yan tümcesi içinde kullanılabilir. + +Örnek 1: Sabit ifadeyi aşağıdaki gibi kullanma “variable” + +``` sql +WITH '2019-08-01 15:23:00' as ts_upper_bound +SELECT * +FROM hits +WHERE + EventDate = toDate(ts_upper_bound) AND + EventTime <= ts_upper_bound +``` + +Örnek 2: SELECT yan tümcesi sütun listesinden toplam(bayt) ifade sonucunu çıkarma + +``` sql +WITH sum(bytes) as s +SELECT + formatReadableSize(s), + table +FROM system.parts +GROUP BY table +ORDER BY s +``` + +Örnek 3: skaler alt sorgu sonuçlarını kullanma + +``` sql +/* this example would return TOP 10 of most huge tables */ +WITH + ( + SELECT sum(bytes) + FROM system.parts + WHERE active + ) AS total_disk_usage +SELECT + (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, + table +FROM system.parts +GROUP BY table +ORDER BY table_disk_usage DESC +LIMIT 10 +``` + +Örnek 4: alt sorguda ifadeyi yeniden kullanma +Alt sorgularda ifade kullanımı için geçerli sınırlama için bir geçici çözüm olarak çoğaltabilirsiniz. 
+ +``` sql +WITH ['hello'] AS hello +SELECT + hello, + * +FROM +( + WITH ['hello'] AS hello + SELECT hello +) +``` + +``` text +┌─hello─────┬─hello─────┐ +│ ['hello'] │ ['hello'] │ +└───────────┴───────────┘ +``` + +### Fık FROMRAS FROMINDAN {#select-from} + +FROM yan tümcesi atlanırsa, veriler `system.one` Tablo. +Bu `system.one` tablo tam olarak bir satır içerir (bu tablo diğer Dbms'lerde bulunan çift tablo ile aynı amacı yerine getirir). + +Bu `FROM` yan tümcesi veri okumak için kaynak belirtir: + +- Tablo +- Alt sorgu +- [Tablo fonksiyonu](../table_functions/index.md) + +`ARRAY JOIN` ve düzenli `JOIN` ayrıca dahil edilebilir (aşağıya bakınız). + +Bunun yerine bir tablo, `SELECT` alt sorgu parantez içinde belirtilebilir. +Standart SQL aksine, bir eşanlamlı bir alt sorgudan sonra belirtilmesi gerekmez. + +Bir sorguyu yürütmek için, sorguda listelenen tüm sütunlar uygun tablodan ayıklanır. Dış sorgu için gerekli olmayan tüm sütunlar alt sorgulardan atılır. +Bir sorgu herhangi bir sütun listelemezse (örneğin, `SELECT count() FROM t`), satır sayısını hesaplamak için yine de tablodan bir sütun çıkarılır (en küçük olanı tercih edilir). + +#### Son değiştirici {#select-from-final} + +Tablolardan veri seçerken uygulanabilir [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)- motor ailesi dışında `GraphiteMergeTree`. Ne zaman `FINAL` belirtilen, ClickHouse sonucu döndürmeden önce verileri tam olarak birleştirir ve böylece verilen tablo altyapısı için birleştirmeler sırasında gerçekleşen tüm veri dönüşümlerini gerçekleştirir. + +Ayrıca için desteklenen: +- [Çoğaltıyordu](../../engines/table_engines/mergetree_family/replication.md) sürümleri `MergeTree` motorlar. +- [Görünüm](../../engines/table_engines/special/view.md), [Arabellek](../../engines/table_engines/special/buffer.md), [Dağılı](../../engines/table_engines/special/distributed.md), ve [MaterializedView](../../engines/table_engines/special/materializedview.md) üzerinden oluşturul ,maları koşuluyla diğer motorlar üzerinde çalışan motorlar `MergeTree`- motor masaları. + +Kullanan sorgular `FINAL` olmayan benzer sorgular kadar hızlı Yürüt ,ülür, çünkü: + +- Sorgu tek bir iş parçacığında yürütülür ve veri sorgu yürütme sırasında birleştirilir. +- İle sorgular `FINAL` sorguda belirtilen sütunlara ek olarak birincil anahtar sütunlarını okuyun. + +Çoğu durumda, kullanmaktan kaçının `FINAL`. + +### Örnek Madde {#select-sample-clause} + +Bu `SAMPLE` yan tümcesi yaklaşık sorgu işleme için izin verir. + +Veri örneklemesi etkinleştirildiğinde, sorgu tüm veriler üzerinde değil, yalnızca belirli bir veri kesirinde (örnek) gerçekleştirilir. Örneğin, tüm ziyaretler için istatistikleri hesaplamanız gerekiyorsa, sorguyu tüm ziyaretlerin 1/10 kesirinde yürütmek ve ardından sonucu 10 ile çarpmak yeterlidir. + +Yaklaşık sorgu işleme aşağıdaki durumlarda yararlı olabilir: + +- Sıkı zamanlama gereksinimleriniz olduğunda (\<100ms gibi), ancak bunları karşılamak için ek donanım kaynaklarının maliyetini haklı çıkaramazsınız. +- Ham verileriniz doğru olmadığında, yaklaşım kaliteyi belirgin şekilde düşürmez. +- İş gereksinimleri yaklaşık sonuçları hedef alır (maliyet etkinliği için veya kesin sonuçları premium kullanıcılara pazarlamak için). + +!!! 
note "Not" + Örneklemeyi yalnızca aşağıdaki tablolarla kullanabilirsiniz: [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) tablo oluşturma sırasında örnekleme ifadesi belirtilmişse (bkz [MergeTree motoru](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table)). + +Veri örneklemesinin özellikleri aşağıda listelenmiştir: + +- Veri örneklemesi deterministik bir mekanizmadır. Aynı sonucu `SELECT .. SAMPLE` sorgu her zaman aynıdır. +- Örnekleme, farklı tablolar için sürekli olarak çalışır. Tek bir örnekleme anahtarına sahip tablolar için, aynı katsayıya sahip bir örnek her zaman olası verilerin aynı alt kümesini seçer. Örneğin, kullanıcı kimlikleri örneği, farklı tablolardan olası tüm kullanıcı kimliklerinin aynı alt kümesine sahip satırları alır. Bu, örneği alt sorgularda kullanabileceğiniz anlamına gelir. [IN](#select-in-operators) yan. Ayrıca, kullanarak örnekleri katılabilir [JOIN](#select-join) yan. +- Örnekleme, bir diskten daha az veri okumayı sağlar. Örnekleme anahtarını doğru belirtmeniz gerektiğini unutmayın. Daha fazla bilgi için, bkz. [MergeTree tablosu oluşturma](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). + +İçin `SAMPLE` yan tümcesi aşağıdaki sözdizimi desteklenir: + +| SAMPLE Clause Syntax | Açıklama | +|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SAMPLE k` | Burada `k` 0'dan 1'e kadar olan sayıdır.
    Sorgu, verilerin `k` kesri üzerinde yürütülür. Mesela, `SAMPLE 0.1` sorguyu verilerin %10'u üzerinde çalıştırır. [Daha fazla bilgi edinin](#select-sample-k) | +| `SAMPLE n` | Burada `n` yeterince büyük bir tamsayıdır.
    Sorgu, en az `n` satırlık bir örnek üzerinde yürütülür (ancak bundan önemli ölçüde fazlası üzerinde değil). Mesela, `SAMPLE 10000000` sorguyu en az 10.000.000 satır üzerinde çalıştırır. [Daha fazla bilgi edinin](#select-sample-n) | +| `SAMPLE k OFFSET m` | Burada `k` ve `m` 0'dan 1'e kadar olan sayılardır.
    Sorgu, verilerin `k` kesrinden alınan bir örnek üzerinde yürütülür. Örnekte kullanılan veriler `m` kesri kadar kaydırılır. [Daha fazla bilgi edinin](#select-sample-offset) |
+
+#### SAMPLE K {#select-sample-k}
+
+Burada `k` 0'dan 1'e kadar olan sayıdır (hem kesirli hem de ondalık gösterimler desteklenir). Mesela, `SAMPLE 1/2` veya `SAMPLE 0.5`.
+
+Bir `SAMPLE k` yan tümcesinde örnek, verilerin `k` kesrinden alınır. Örnek aşağıda gösterilmiştir:
+
+``` sql
+SELECT
+    Title,
+    count() * 10 AS PageViews
+FROM hits_distributed
+SAMPLE 0.1
+WHERE
+    CounterID = 34
+GROUP BY Title
+ORDER BY PageViews DESC LIMIT 1000
+```
+
+Bu örnekte sorgu, verilerin 0.1'lik (%10) bir örneği üzerinde yürütülür. Toplama işlevlerinin değerleri otomatik olarak düzeltilmez; bu nedenle yaklaşık bir sonuç elde etmek için `count()` değeri elle 10 ile çarpılır.
+
+#### SAMPLE N {#select-sample-n}
+
+Burada `n` yeterince büyük bir tamsayıdır. Mesela, `SAMPLE 10000000`.
+
+Bu durumda sorgu, en az `n` satırlık bir örnek üzerinde yürütülür (ancak bundan önemli ölçüde fazlası üzerinde değil). Mesela, `SAMPLE 10000000` sorguyu en az 10.000.000 satır üzerinde çalıştırır.
+
+Veri okuma için en küçük birim bir granül olduğundan (boyutu `index_granularity` ayarıyla belirlenir), granül boyutundan çok daha büyük bir örnek ayarlamak mantıklıdır.
+
+`SAMPLE n` yan tümcesini kullanırken verilerin görece hangi yüzdesinin işlendiğini bilemezsiniz; dolayısıyla toplama işlevlerinin hangi katsayıyla çarpılması gerektiğini de bilemezsiniz. Yaklaşık sonucu almak için `_sample_factor` sanal sütununu kullanın.
+
+`_sample_factor` sütunu, dinamik olarak hesaplanan göreli katsayıları içerir. Bu sütun, belirtilen örnekleme anahtarına sahip bir tablo [oluşturduğunuzda](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) otomatik olarak oluşturulur. `_sample_factor` sütununun kullanım örnekleri aşağıda gösterilmiştir.
+
+Site ziyaretleriyle ilgili istatistikleri içeren `visits` tablosunu ele alalım. İlk örnek, sayfa görüntüleme sayısının nasıl hesaplanacağını gösterir:
+
+``` sql
+SELECT sum(PageViews * _sample_factor)
+FROM visits
+SAMPLE 10000000
+```
+
+Bir sonraki örnek, toplam ziyaret sayısının nasıl hesaplanacağını gösterir:
+
+``` sql
+SELECT sum(_sample_factor)
+FROM visits
+SAMPLE 10000000
+```
+
+Aşağıdaki örnek, ortalama oturum süresinin nasıl hesaplanacağını gösterir. Ortalama değerleri hesaplamak için göreli katsayıyı kullanmanız gerekmediğini unutmayın.
+
+``` sql
+SELECT avg(Duration)
+FROM visits
+SAMPLE 10000000
+```
+
+#### SAMPLE K OFFSET M {#select-sample-offset}
+
+Burada `k` ve `m` 0'dan 1'e kadar olan sayılardır. Örnekler aşağıda gösterilmiştir.
+
+**Örnek 1**
+
+``` sql
+SAMPLE 1/10
+```
+
+Bu örnekte örnek, tüm verilerin 1/10'udur:
+
+`[++------------]`
+
+**Örnek 2**
+
+``` sql
+SAMPLE 1/10 OFFSET 1/2
+```
+
+Burada, verilerin ikinci yarısından %10'luk bir örnek alınır.
+
+`[------++------]`
+
+### ARRAY JOIN Yan Tümcesi {#select-array-join-clause}
+
+Bir dizi veya iç içe veri yapısıyla `JOIN` yürütmenizi sağlar. Amacı [arrayJoin](../../sql_reference/functions/array_join.md#functions_arrayjoin) işlevine benzer, ancak işlevselliği daha geniştir.
+
+``` sql
+SELECT <expr_list>
+FROM <left_subquery>
+[LEFT] ARRAY JOIN <array>
+[WHERE|PREWHERE <expr>]
+...
+```
+
+Bir sorguda yalnızca tek bir `ARRAY JOIN` yan tümcesi belirtebilirsiniz.
+
+`ARRAY JOIN` çalıştırılırken sorgu yürütme sırası en iyilenir.
Rağmen `ARRAY JOIN` her zaman önce belirtilmelidir `WHERE/PREWHERE` fık ,ra, daha önce de yapılabilir `WHERE/PREWHERE` (sonuç bu maddede gerekliyse) veya tamamladıktan sonra (hesaplamaların hacmini azaltmak için). İşlem sırası sorgu iyileştiricisi tarafından denetlenir. + +Desteklenen türleri `ARRAY JOIN` aşağıda listelenmiştir: + +- `ARRAY JOIN` - Bu durumda, boş diziler sonucu dahil değildir `JOIN`. +- `LEFT ARRAY JOIN` Bunun sonucu `JOIN` boş dizilere sahip satırlar içerir. Boş bir dizinin değeri, dizi öğesi türü için varsayılan değere ayarlanır (genellikle 0, boş dize veya NULL). + +Aşağıdaki örnekler kullanımını göstermektedir `ARRAY JOIN` ve `LEFT ARRAY JOIN` yanlar. Bir tablo oluşturalım [Dizi](../../sql_reference/data_types/array.md) sütun yazın ve içine değerler ekleyin: + +``` sql +CREATE TABLE arrays_test +( + s String, + arr Array(UInt8) +) ENGINE = Memory; + +INSERT INTO arrays_test +VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); +``` + +``` text +┌─s───────────┬─arr─────┐ +│ Hello │ [1,2] │ +│ World │ [3,4,5] │ +│ Goodbye │ [] │ +└─────────────┴─────────┘ +``` + +Aşağıdaki örnek kullanır `ARRAY JOIN` yan: + +``` sql +SELECT s, arr +FROM arrays_test +ARRAY JOIN arr; +``` + +``` text +┌─s─────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +└───────┴─────┘ +``` + +Sonraki örnek kullanımlar `LEFT ARRAY JOIN` yan: + +``` sql +SELECT s, arr +FROM arrays_test +LEFT ARRAY JOIN arr; +``` + +``` text +┌─s───────────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +│ Goodbye │ 0 │ +└─────────────┴─────┘ +``` + +#### Takma Ad Kullanma {#using-aliases} + +Bir dizi için bir diğer ad belirtilebilir `ARRAY JOIN` yan. Bu durumda, bir dizi öğesine bu diğer adla erişilebilir, ancak dizinin kendisine özgün adla erişilir. Örnek: + +``` sql +SELECT s, arr, a +FROM arrays_test +ARRAY JOIN arr AS a; +``` + +``` text +┌─s─────┬─arr─────┬─a─┐ +│ Hello │ [1,2] │ 1 │ +│ Hello │ [1,2] │ 2 │ +│ World │ [3,4,5] │ 3 │ +│ World │ [3,4,5] │ 4 │ +│ World │ [3,4,5] │ 5 │ +└───────┴─────────┴───┘ +``` + +Takma adlar kullanarak şunları yapabilirsiniz `ARRAY JOIN` harici bir dizi ile. Mesela: + +``` sql +SELECT s, arr_external +FROM arrays_test +ARRAY JOIN [1, 2, 3] AS arr_external; +``` + +``` text +┌─s───────────┬─arr_external─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ Hello │ 3 │ +│ World │ 1 │ +│ World │ 2 │ +│ World │ 3 │ +│ Goodbye │ 1 │ +│ Goodbye │ 2 │ +│ Goodbye │ 3 │ +└─────────────┴──────────────┘ +``` + +Birden çok diziler virgülle ayrılmış olabilir `ARRAY JOIN` yan. Bu durumda, `JOIN` onlarla aynı anda gerçekleştirilir (doğrudan toplam, kartezyen ürün değil). Tüm dizilerin aynı boyuta sahip olması gerektiğini unutmayın. 
Örnek: + +``` sql +SELECT s, arr, a, num, mapped +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ +│ Hello │ [1,2] │ 1 │ 1 │ 2 │ +│ Hello │ [1,2] │ 2 │ 2 │ 3 │ +│ World │ [3,4,5] │ 3 │ 1 │ 4 │ +│ World │ [3,4,5] │ 4 │ 2 │ 5 │ +│ World │ [3,4,5] │ 5 │ 3 │ 6 │ +└───────┴─────────┴───┴─────┴────────┘ +``` + +Aşağıdaki örnek kullanır [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) işlev: + +``` sql +SELECT s, arr, a, num, arrayEnumerate(arr) +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ +│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ +│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ +│ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ +│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ +│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ +└───────┴─────────┴───┴─────┴─────────────────────┘ +``` + +#### İç içe veri yapısı ile dizi birleştirme {#array-join-with-nested-data-structure} + +`ARRAY`Jo "in " ile de çalışır [iç içe veri yapıları](../../sql_reference/data_types/nested_data_structures/nested.md). Örnek: + +``` sql +CREATE TABLE nested_test +( + s String, + nest Nested( + x UInt8, + y UInt32) +) ENGINE = Memory; + +INSERT INTO nested_test +VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); +``` + +``` text +┌─s───────┬─nest.x──┬─nest.y─────┐ +│ Hello │ [1,2] │ [10,20] │ +│ World │ [3,4,5] │ [30,40,50] │ +│ Goodbye │ [] │ [] │ +└─────────┴─────────┴────────────┘ +``` + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +İç içe geçmiş veri yapılarının adlarını belirtirken `ARRAY JOIN` anlam aynıdır `ARRAY JOIN` içerdiği tüm dizi öğeleri ile. Örnekler aşağıda listelenmiştir: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`, `nest.y`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +Bu varyasyon da mantıklı: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─────┐ +│ Hello │ 1 │ [10,20] │ +│ Hello │ 2 │ [10,20] │ +│ World │ 3 │ [30,40,50] │ +│ World │ 4 │ [30,40,50] │ +│ World │ 5 │ [30,40,50] │ +└───────┴────────┴────────────┘ +``` + +Bir diğer ad, iç içe geçmiş bir veri yapısı için kullanılabilir. `JOIN` sonuç veya kaynak dizi. 
Örnek: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest AS n; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ +└───────┴─────┴─────┴─────────┴────────────┘ +``` + +Kullanma örneği [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) işlev: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num +FROM nested_test +ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ +└───────┴─────┴─────┴─────────┴────────────┴─────┘ +``` + +### Jo {#select-join} + +Verileri normal olarak birleştirir [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) anlama. + +!!! info "Not" + İlgili [ARRAY JOIN](#select-array-join-clause). + +``` sql +SELECT +FROM +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN +(ON )|(USING ) ... +``` + +Tablo adları yerine belirtilebilir `` ve ``. Bu eşdeğerdir `SELECT * FROM table` alt sorgu, tablonun sahip olduğu özel bir durum dışında [Katmak](../../engines/table_engines/special/join.md) engine – an array prepared for joining. + +#### Desteklenen Türleri `JOIN` {#select-join-types} + +- `INNER JOIN` (veya `JOIN`) +- `LEFT JOIN` (veya `LEFT OUTER JOIN`) +- `RIGHT JOIN` (veya `RIGHT OUTER JOIN`) +- `FULL JOIN` (veya `FULL OUTER JOIN`) +- `CROSS JOIN` (veya `,` ) + +Standarda bakın [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) açıklama. + +#### Çoklu birleştirme {#multiple-join} + +Sorguları gerçekleştiren ClickHouse, çoklu tablo birleşimlerini iki tablo birleşimlerinin sırasına yeniden yazar. Örneğin, JOIN ClickHouse için dört tablo varsa birinci ve ikinci katılır, ardından üçüncü tablo ile sonuç katılır ve son adımda dördüncü bir katılır. + +Bir sorgu içeriyorsa `WHERE` yan tümcesi, ClickHouse Ara birleştirme aracılığıyla bu yan tümcesi filtreleri pushdown çalışır. Filtreyi her Ara birleşime uygulayamazsa, tüm birleşimler tamamlandıktan sonra clickhouse filtreleri uygular. + +Biz tavsiye `JOIN ON` veya `JOIN USING` sorguları oluşturmak için sözdizimi. Mesela: + +``` sql +SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a +``` + +Virgülle ayrılmış tablo listelerini kullanabilirsiniz. `FROM` yan. Mesela: + +``` sql +SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a +``` + +Bu sözdizimleri karıştırmayın. + +ClickHouse virgülle sözdizimini doğrudan desteklemez, bu yüzden bunları kullanmanızı önermiyoruz. Algoritma, sorguyu şu şekilde yeniden yazmaya çalışır: `CROSS JOIN` ve `INNER JOIN` yan tümceleri ve sonra sorgu işleme devam eder. Sorguyu yeniden yazarken, ClickHouse performansı ve bellek tüketimini en iyi duruma getirmeye çalışır. Varsayılan olarak, ClickHouse virgülleri bir `INNER JOIN` CLA anduse and conver andts `INNER JOIN` -e doğru `CROSS JOIN` algoritma bunu garanti edemez zaman `INNER JOIN` gerekli verileri döndürür. + +#### Katılık {#select-join-strictness} + +- `ALL` — If the right table has several matching rows, ClickHouse creates a [Kartezyen ürün](https://en.wikipedia.org/wiki/Cartesian_product) eşleşen satırlardan. 
Bu, standart SQL `JOIN` davranışıdır.
+- `ANY` — Sağ tabloda birden fazla eşleşen satır varsa yalnızca ilk bulunan satır birleştirilir. Sağ tabloda yalnızca bir eşleşen satır varsa, `ANY` ve `ALL` anahtar kelimeleriyle yazılan sorguların sonuçları aynıdır.
+- `ASOF` — Tam eşleşme olmayan dizileri birleştirmek için. `ASOF JOIN` kullanımı aşağıda açıklanmıştır.
+
+**ASOF JOIN kullanımı**
+
+`ASOF JOIN`, tam olarak eşleşmeyen kayıtları birleştirmeniz gerektiğinde kullanışlıdır.
+
+`ASOF JOIN` için kullanılan tabloların sıralı bir sıra sütunu olmalıdır. Bu sütun tabloda tek başına olamaz ve şu veri türlerinden biri olmalıdır: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date` ve `DateTime`.
+
+`ASOF JOIN ... ON` sözdizimi:
+
+``` sql
+SELECT expressions_list
+FROM table_1
+ASOF LEFT JOIN table_2
+ON equi_cond AND closest_match_cond
+```
+
+Herhangi bir sayıda eşitlik koşulu ve tam olarak bir en yakın eşleşme koşulu kullanabilirsiniz. Mesela, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`.
+
+En yakın eşleşme için desteklenen koşullar: `>`, `>=`, `<`, `<=`.
+
+`ASOF JOIN ... USING` sözdizimi:
+
+``` sql
+SELECT expressions_list
+FROM table_1
+ASOF JOIN table_2
+USING (equi_column1, ... equi_columnN, asof_column)
+```
+
+`ASOF JOIN`, eşitlik üzerinden birleştirmek için `equi_columnX` sütunlarını, en yakın eşleşme üzerinden birleştirmek için ise `table_1.asof_column >= table_2.asof_column` koşuluyla `asof_column` sütununu kullanır. `asof_column` sütunu her zaman `USING` yan tümcesindeki son sütundur.
+
+Örneğin, aşağıdaki tabloları göz önünde bulundurun:
+
+``` text
+     table_1                          table_2
+  olay  | ev_time | user_id        olay  | ev_time | user_id
diff --git a/docs/tr/sql_reference/statements/show.md b/docs/tr/sql_reference/statements/show.md
new file mode 100644
index 00000000000..155b28886d9
--- /dev/null
+++ b/docs/tr/sql_reference/statements/show.md
@@ -0,0 +1,105 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 38
+toc_title: SHOW
+---
+
+# Sorguları göster {#show-queries}
+
+## SHOW CREATE TABLE {#show-create-table}
+
+``` sql
+SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format]
+```
+
+Tek bir `String` türünde 'statement' sütunu döndürür; bu sütun tek bir değer içerir: belirtilen nesneyi oluşturmak için kullanılan `CREATE` sorgusu.
+
+## SHOW DATABASES {#show-databases}
+
+``` sql
+SHOW DATABASES [INTO OUTFILE filename] [FORMAT format]
+```
+
+Tüm veritabanlarının bir listesini yazdırır.
+Bu sorgu, `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]` sorgusuyla aynıdır.
+
+## SHOW PROCESSLIST {#show-processlist}
+
+``` sql
+SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format]
+```
+
+[system.processes](../../operations/system_tables.md#system_tables-processes) tablosunun içeriğini döndürür; bu tablo, `SHOW PROCESSLIST` sorguları hariç, şu anda işlenmekte olan sorguların listesini içerir.
+
+`SELECT * FROM system.processes` sorgusu, geçerli tüm sorgular hakkında veri döndürür.
+
+İpucu (konsolda yürütün):
+
+``` bash
+$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'"
+```
+
+## SHOW TABLES {#show-tables}
+
+Tabloların listesini görüntüler.
+
+``` sql
+SHOW [TEMPORARY] TABLES [{FROM | IN} <db>] [LIKE '<pattern>' | WHERE expr] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+`FROM` yan tümcesi belirtilmemişse sorgu, geçerli veritabanındaki tabloların listesini döndürür.
+
+Aynı sonuçları `SHOW TABLES` sorgusuyla şu şekilde de elde edebilirsiniz:
+
+``` sql
+SELECT name FROM system.tables WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+**Örnek**
+
+Aşağıdaki sorgu, `system` veritabanındaki tablo listesinden, adı `co` içeren ilk iki satırı seçer.
+
+``` sql
+SHOW TABLES FROM system LIKE '%co%' LIMIT 2
+```
+
+``` text
+┌─name───────────────────────────┐
+│ aggregate_function_combinators │
+│ collations                     │
+└────────────────────────────────┘
+```
+
+## SHOW DICTIONARIES {#show-dictionaries}
+
+[Dış sözlüklerin](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) bir listesini görüntüler.
+
+``` sql
+SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+`FROM` yan tümcesi belirtilmemişse sorgu, geçerli veritabanındaki sözlüklerin listesini döndürür.
+
+Aynı sonuçları `SHOW DICTIONARIES` sorgusuyla şu şekilde de elde edebilirsiniz:
+
+``` sql
+SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+**Örnek**
+
+Aşağıdaki sorgu, `system` veritabanındaki sözlük listesinden, adı `reg` içeren ilk iki satırı seçer.
+
+``` sql
+SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
+```
+
+``` text
+┌─name─────────┐
+│ regions      │
+│ region_names │
+└──────────────┘
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/show/)
diff --git a/docs/tr/sql_reference/statements/system.md b/docs/tr/sql_reference/statements/system.md
new file mode 100644
index 00000000000..761f6e77737
--- /dev/null
+++ b/docs/tr/sql_reference/statements/system.md
@@ -0,0 +1,113 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 37
+toc_title: SYSTEM
+---
+
+# Sistem sorguları {#query-language-system}
+
+- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
+- [RELOAD DICTIONARY](#query_language-system-reload-dictionary)
+- [DROP DNS CACHE](#query_language-system-drop-dns-cache)
+- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
+- [FLUSH LOGS](#query_language-system-flush_logs)
+- [RELOAD CONFIG](#query_language-system-reload-config)
+- [SHUTDOWN](#query_language-system-shutdown)
+- [KILL](#query_language-system-kill)
+- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends)
+- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed)
+- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends)
+- [STOP MERGES](#query_language-system-stop-merges)
+- [START MERGES](#query_language-system-start-merges)
+
+## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries}
+
+Daha önce başarıyla yüklenmiş tüm sözlükleri yeniden yükler.
+Varsayılan olarak sözlükler tembel yüklenir (bkz. [dictionaries\_lazy\_load](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)); yani başlangıçta otomatik olarak yüklenmek yerine, dictGet işlevi aracılığıyla ilk erişimde veya ENGINE = Dictionary olan tablolardan SELECT yapıldığında başlatılırlar. `SYSTEM RELOAD DICTIONARIES` sorgusu bu (LOADED durumundaki) sözlükleri yeniden yükler.
+Sözlük güncellemesinin sonucu ne olursa olsun her zaman `Ok.` döndürür.
+
+## RELOAD DICTIONARY Dictionary\_name {#query_language-system-reload-dictionary}
+
+Sözlük durumuna bakılmaksızın (LOADED / NOT\_LOADED / FAILED) `dictionary_name` sözlüğünü tamamen yeniden yükler.
+Sözlük güncellemesinin sonucu ne olursa olsun her zaman `Ok.` döndürür.
+Sözlüğün durumu sorgulanarak kontrol edilebilir `system.dictionaries` Tablo. + +``` sql +SELECT name, status FROM system.dictionaries; +``` + +## DROP DNS CACHE {#query_language-system-drop-dns-cache} + +Clickhouse'un iç DNS önbelleğini sıfırlar. Bazen (eski ClickHouse sürümleri için) altyapıyı değiştirirken (başka bir ClickHouse sunucusunun IP adresini veya sözlükler tarafından kullanılan sunucuyu değiştirirken) bu komutu kullanmak gerekir. + +Daha uygun (otomatik) önbellek yönetimi için bkz: disable\_internal\_dns\_cache, dns\_cache\_update\_period parametreleri. + +## DROP MARK CACHE {#query_language-system-drop-mark-cache} + +İşaret önbelleğini sıfırlar. ClickHouse ve performans testlerinin geliştirilmesinde kullanılır. + +## FLUSH LOGS {#query_language-system-flush_logs} + +Flushes buffers of log messages to system tables (e.g. system.query\_log). Allows you to not wait 7.5 seconds when debugging. + +## RELOAD CONFIG {#query_language-system-reload-config} + +ClickHouse yapılandırmasını yeniden yükler. Yapılandırma ZooKeeeper saklandığında kullanılır. + +## SHUTDOWN {#query_language-system-shutdown} + +Normalde Clickhouse'u kapatır (gibi `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`) + +## KILL {#query_language-system-kill} + +ClickHouse işlemini iptal eder (gibi `kill -9 {$ pid_clickhouse-server}`) + +## Dağıtılmış Tabloları Yönetme {#query-language-system-distributed} + +ClickHouse yönetebilir [dağılı](../../engines/table_engines/special/distributed.md) Tablolar. Bir kullanıcı bu tablolara veri eklediğinde, ClickHouse önce küme düğümlerine gönderilmesi gereken verilerin bir sırası oluşturur, sonra zaman uyumsuz olarak gönderir. İle kuyruk işleme yönetebilirsiniz [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), ve [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) sorgular. Ayrıca, dağıtılmış verileri eşzamanlı olarak `insert_distributed_sync` ayar. + +### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends} + +Dağıtılmış tablolara veri eklerken arka plan veri dağıtımını devre dışı bırakır. + +``` sql +SYSTEM STOP DISTRIBUTED SENDS [db.] +``` + +### FLUSH DISTRIBUTED {#query_language-system-flush-distributed} + +Küme düğümlerine eşzamanlı olarak veri göndermek için Clickhouse'u zorlar. Herhangi bir düğüm kullanılamıyorsa, ClickHouse bir özel durum atar ve sorgu yürütülmesini durdurur. Tüm düğümler tekrar çevrimiçi olduğunda gerçekleşecek olan başarılı olana kadar sorguyu yeniden deneyebilirsiniz. + +``` sql +SYSTEM FLUSH DISTRIBUTED [db.] +``` + +### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends} + +Dağıtılmış tablolara veri eklerken arka plan veri dağıtımını etkinleştirir. + +``` sql +SYSTEM START DISTRIBUTED SENDS [db.] +``` + +### STOP MERGES {#query_language-system-stop-merges} + +MergeTree ailesindeki tablolar için arka plan birleşmelerini durdurma imkanı sağlar: + +``` sql +SYSTEM STOP MERGES [[db.]merge_tree_family_table_name] +``` + +!!! note "Not" + `DETACH / ATTACH` tablo, daha önce tüm MergeTree tabloları için birleştirmeler durdurulduğunda bile tablo için arka plan birleştirmelerini başlatır. 
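+
+Nottaki davranışı gösteren varsayımsal bir sıralama (tablo adı varsayımsaldır):
+
+``` sql
+SYSTEM STOP MERGES my_table;  -- arka plan birleştirmeleri durur
+DETACH TABLE my_table;
+ATTACH TABLE my_table;        -- dikkat: bu tablo için birleştirmeler yeniden başlar
+SYSTEM START MERGES my_table; -- durumu açıkça geri almak yine de iyi bir alışkanlıktır
+```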
+ +### START MERGES {#query_language-system-start-merges} + +MergeTree ailesindeki tablolar için arka plan birleştirmelerini başlatma imkanı sağlar: + +``` sql +SYSTEM START MERGES [[db.]merge_tree_family_table_name] +``` + +[Orijinal makale](https://clickhouse.tech/docs/en/query_language/system/) diff --git a/docs/tr/sql_reference/syntax.md b/docs/tr/sql_reference/syntax.md new file mode 100644 index 00000000000..a7d725fe696 --- /dev/null +++ b/docs/tr/sql_reference/syntax.md @@ -0,0 +1,187 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 31 +toc_title: "S\xF6zdizimi" +--- + +# Sözdizimi {#syntax} + +Sistemde iki tür ayrıştırıcı vardır: tam SQL ayrıştırıcısı (özyinelemeli bir iniş ayrıştırıcısı) ve veri biçimi ayrıştırıcısı (hızlı akış ayrıştırıcısı). +Dışında her durumda `INSERT` sorgu, sadece tam SQL ayrıştırıcı kullanılır. +Bu `INSERT` sorgu her iki ayrıştırıcıyı da kullanır: + +``` sql +INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') +``` + +Bu `INSERT INTO t VALUES` parça tam ayrıştırıcı tarafından ayrıştırılır ve veriler `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` hızlı akış ayrıştırıcısı tarafından ayrıştırılır. Ayrıca kullanarak veriler için tam ayrıştırıcı açabilirsiniz [ınput\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) ayar. Ne zaman `input_format_values_interpret_expressions = 1`, ClickHouse önce hızlı akış ayrıştırıcısı ile değerleri ayrıştırmaya çalışır. Başarısız olursa, ClickHouse veriler için tam ayrıştırıcıyı kullanmaya çalışır ve bir SQL gibi davranır [ifade](#syntax-expressions). + +Veri herhangi bir biçime sahip olabilir. Bir sorgu alındığında, sunucu daha fazla hesaplar [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) istek bayt RAM (varsayılan olarak, 1 MB) ve geri kalanı akış ayrıştırılır. +Bu büyük sorunları önlemek için izin verir `INSERT` sorgular. + +Kullanırken `Values` biçim içinde bir `INSERT` sorgu, verilerin bir ifadedeki ifadelerle aynı şekilde ayrıştırıldığı görünebilir `SELECT` sorgu, ancak bu doğru değil. Bu `Values` biçim çok daha sınırlıdır. + +Bu makalenin geri kalanı tam çözümleyici kapsar. Biçim ayrıştırıcıları hakkında daha fazla bilgi için bkz: [Biçimliler](../interfaces/formats.md) bölme. + +## Alanlar {#spaces} + +Sözdizimsel yapılar arasında (bir sorgunun başlangıcı ve sonu dahil) herhangi bir sayıda boşluk simgesi olabilir. Boşluk sembolleri boşluk, sekme, satır beslemesi, CR ve form beslemesini içerir. + +## Yorumlar {#comments} + +ClickHouse, SQL stili ve C stili yorumlarını destekler. +SQL tarzı yorumlar ile başlar `--` ve hattın sonuna kadar devam, bir boşluk sonra `--` atlanmış olabilir. +C-style dan `/*` -e doğru `*/`ve çok satırlı olabilir, boşluklar da gerekli değildir. + +## Kelimeler {#syntax-keywords} + +Anahtar kelimeler karşılık geldiğinde büyük / küçük harf duyarsızdır: + +- SQL standardı. Mesela, `SELECT`, `select` ve `SeLeCt` hepsi geçerlidir. +- Bazı popüler DBMS'DE (MySQL veya Postgres) uygulama. Mesela, `DateTime` ile aynıdır `datetime`. + +Veri türü adı büyük / küçük harf duyarlı olup olmadığını denetlenebilir `system.data_type_families` Tablo. + +Standart SQL'İN aksine, diğer tüm anahtar kelimeler (işlev adları dahil) şunlardır **büyük küçük harf duyarlı**. + +Anahtar kelimeler ayrılmış değildir; sadece karşılık gelen bağlamda bu şekilde ele alınır. 
Kullanıyorsanız [tanıtıcılar](#syntax-identifiers) anahtar kelimelerle aynı ada sahip olarak, bunları çift tırnak veya backticks içine alın. Örneğin, sorgu `SELECT "FROM" FROM table_name` tablo geçerli ise `table_name` adı ile sütun vardır `"FROM"`. + +## Tanıtıcılar {#syntax-identifiers} + +Tanımlay areıcılar: + +- Küme, veritabanı, tablo, bölüm ve sütun adları. +- İşlevler. +- Veri türleri. +- [İfade takma adları](#syntax-expression_aliases). + +Tanımlayıcılar alıntılanabilir veya alıntılanamaz. İkincisi tercih edilir. + +Alıntılanmamış tanımlayıcılar regex ile eşleşmelidir `^[a-zA-Z_][0-9a-zA-Z_]*$` ve eşit olamaz [kelimeler](#syntax-keywords). Örnekler: `x, _1, X_y__Z123_.` + +Tanımlayıcıları anahtar kelimelerle aynı şekilde kullanmak istiyorsanız veya tanımlayıcılarda başka semboller kullanmak istiyorsanız, örneğin çift tırnak işaretleri veya backticks kullanarak alıntı yapın, `"id"`, `` `id` ``. + +## Harfler {#literals} + +Sayısal, dize, bileşik ve `NULL` harfler. + +### Sayısal {#numeric} + +Sayısal literal ayrıştırılmaya çalışılıyor: + +- İlk olarak, 64-bit imzalı bir sayı olarak, [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) işlev. +- Başarısız olursa, 64-bit imzasız bir sayı olarak, [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) işlev. +- Başarısız olursa, kayan noktalı sayı olarak [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) işlev. +- Aksi takdirde, bir hata döndürür. + +Hazır bilgi değeri, değerin sığdığı en küçük türe sahiptir. +Örneğin, 1 olarak ayrıştırılır `UInt8`, ancak 256 olarak ayrıştırılır `UInt16`. Daha fazla bilgi için, bkz. [Veri türleri](../sql_reference/data_types/index.md). + +Örnekler: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. + +### Dize {#syntax-string-literal} + +Tek tırnak yalnızca dize değişmezleri desteklenir. Kapalı karakterler ters eğik çizgi kaçabilir. Aşağıdaki kaçış dizileri karşılık gelen özel bir değere sahiptir: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Diğer tüm durumlarda, çıkış dizileri biçiminde `\c`, nere `c` herhangi bir karakter, dönüştürülür `c`. Bu dizileri kullanabileceğiniz anlamına gelir `\'`ve`\\`. Değeri olacak [Dize](../sql_reference/data_types/string.md) tür. + +Dize değişmezlerinde, en azından kaçmanız gerekir `'` ve `\`. Tek tırnak tek Alıntı ile kaçabilir, değişmez `'It\'s'` ve `'It''s'` eşittir. + +### Bileşik {#compound} + +Diziler köşeli parantez ile inşa edilmiştir `[1, 2, 3]`. Nuples yuvarlak parantez ile inşa edilmiştir `(1, 'Hello, world!', 2)`. +Teknik olarak bunlar değişmezler değil, sırasıyla dizi oluşturma işleci ve tuple oluşturma işleci ile ifadeler. +Bir dizi en az bir öğeden oluşmalı ve bir tuple en az iki öğeye sahip olmalıdır. +İçinde tuples göründüğünde ayrı bir durum var `IN` CLA ause of a `SELECT` sorgu. Sorgu sonuçları tuples içerebilir, ancak tuples bir veritabanına kaydedilemez (tablolar hariç [Bellek](../engines/table_engines/special/memory.md) motor). + +### NULL {#null-literal} + +Değerin eksik olduğunu gösterir. + +Saklamak için `NULL` bir tablo alanında, bu olmalıdır [Nullable](../sql_reference/data_types/nullable.md) tür. + +Veri formatına bağlı olarak (giriş veya çıkış), `NULL` farklı bir temsili olabilir. Daha fazla bilgi için belgelere bakın [veri formatları](../interfaces/formats.md#formats). + +İşleme için birçok nüans var `NULL`. Örneğin, bir karşılaştırma işleminin argümanlarından en az biri ise `NULL`, bu işlemin sonucu da `NULL`. 
Aynı şey çarpma, toplama ve diğer işlemler için de geçerlidir. Daha fazla bilgi için her işlem için belgeleri okuyun. + +Sorgularda, kontrol edebilirsiniz `NULL` kullanarak [IS NULL](operators.md#operator-is-null) ve [IS NOT NULL](operators.md) operatörler ve ilgili fonksiyonlar `isNull` ve `isNotNull`. + +## İşlevler {#functions} + +İşlev çağrıları, yuvarlak parantez içinde bir argüman listesi (muhtemelen boş) olan bir tanımlayıcı gibi yazılır. Standart SQL'İN aksine, boş bir argüman listesi için bile parantezler gereklidir. Örnek: `now()`. +Düzenli ve agrega işlevleri vardır (bkz. “Aggregate functions”). Bazı toplama işlevleri parantez içinde iki bağımsız değişken listesi içerebilir. Örnek: `quantile (0.9) (x)`. Bu toplama fonksiyonları denir “parametric” fonksiyonlar ve ilk listedeki argümanlar çağrılır “parameters”. Parametresiz toplama işlevlerinin sözdizimi, normal işlevlerle aynıdır. + +## Operatörler {#operators} + +Operatörler, sorgu ayrıştırma sırasında önceliklerini ve ilişkilendirmelerini dikkate alarak karşılık gelen işlevlerine dönüştürülür. +Örneğin, ifade `1 + 2 * 3 + 4` dönüştür toülür `plus(plus(1, multiply(2, 3)), 4)`. + +## Veri türleri ve veritabanı tablosu motorları {#data_types-and-database-table-engines} + +Veri türleri ve tablo motorları `CREATE` sorgu tanımlayıcıları veya işlevleri aynı şekilde yazılır. Başka bir deyişle, parantez içinde bir argüman listesi içerebilir veya içermeyebilir. Daha fazla bilgi için bölümlere bakın “Data types,” “Table engines,” ve “CREATE”. + +## İfade Takma Adları {#syntax-expression_aliases} + +Diğer ad, sorgudaki ifade için kullanıcı tanımlı bir addır. + +``` sql +expr AS alias +``` + +- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` kullanmadan fık thera `AS` kelime. + + For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. + + In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. + +- `expr` — Any expression supported by ClickHouse. + + For example, `SELECT column_name * 2 AS double FROM some_table`. + +- `alias` — Name for `expr`. Takma adlar ile uyumlu olmalıdır [tanıtıcılar](#syntax-identifiers) sözdizimi. + + For example, `SELECT "table t".column_name FROM table_name AS "table t"`. + +### Kullanımı ile ilgili notlar {#notes-on-usage} + +Diğer adlar bir sorgu veya alt sorgu için geneldir ve herhangi bir ifade için sorgunun herhangi bir bölümünde bir diğer ad tanımlayabilirsiniz. Mesela, `SELECT (1 AS n) + 2, n`. + +Diğer adlar alt sorgularda ve alt sorgular arasında görünmez. Örneğin, sorgu yürütülürken `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouse istisna oluşturur `Unknown identifier: num`. + +Sonuç sütunları için bir diğer ad tanımlanmışsa `SELECT` bir alt sorgunun yan tümcesi, bu sütunlar dış sorguda görülebilir. Mesela, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. + +Sütun veya tablo adlarıyla aynı olan diğer adlara dikkat edin. Aşağıdaki örneği ele alalım: + +``` sql +CREATE TABLE t +( + a Int, + b Int +) +ENGINE = TinyLog() +``` + +``` sql +SELECT + argMax(a, b), + sum(b) AS b +FROM t +``` + +``` text +Received exception from server (version 18.14.17): +Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. +``` + +Bu örnekte, tablo ilan ettik `t` sütun ile `b`. 
Ardından, veri seçerken, `sum(b) AS b` takma ad. Takma adlar küresel olduğundan, ClickHouse literal yerine `b` ifad theesinde `argMax(a, b)` ifad theesiyle `sum(b)`. Bu ikame istisnaya neden oldu. + +## Yıldız işareti {#asterisk} + +İn a `SELECT` sorgu, bir yıldız ifadesinin yerini alabilir. Daha fazla bilgi için bölüme bakın “SELECT”. + +## İfadeler {#syntax-expressions} + +Bir ifade, bir işlev, tanımlayıcı, değişmez, bir operatörün uygulaması, parantez içindeki ifade, alt sorgu veya yıldız işaretidir. Ayrıca bir takma ad içerebilir. +İfadelerin listesi, virgülle ayrılmış bir veya daha fazla ifadedir. +Fonksiyonlar ve operatörler, sırayla, argüman olarak ifadelere sahip olabilirler. + +[Orijinal makale](https://clickhouse.tech/docs/en/sql_reference/syntax/) diff --git a/docs/tr/sql_reference/table_functions/file.md b/docs/tr/sql_reference/table_functions/file.md new file mode 100644 index 00000000000..67d67ccd7cd --- /dev/null +++ b/docs/tr/sql_reference/table_functions/file.md @@ -0,0 +1,121 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 37 +toc_title: Dosya +--- + +# Dosya {#file} + +Bir dosyadan bir tablo oluşturur. Bu tablo işlevi benzer [url](url.md) ve [hdf'ler](hdfs.md) biri. + +``` sql +file(path, format, structure) +``` + +**Giriş parametreleri** + +- `path` — The relative path to the file from [user\_files\_path](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-user_files_path). Readonly modunda glob'ları takip eden dosya desteğine giden yol: `*`, `?`, `{abc,def}` ve `{N..M}` nerede `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [biçimli](../../interfaces/formats.md#formats) dosya. +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. + +**Döndürülen değer** + +Belirtilen dosyada veri okumak veya yazmak için belirtilen yapıya sahip bir tablo. + +**Örnek** + +Ayar `user_files_path` ve dosyanın içeriği `test.csv`: + +``` bash +$ grep user_files_path /etc/clickhouse-server/config.xml + /var/lib/clickhouse/user_files/ + +$ cat /var/lib/clickhouse/user_files/test.csv + 1,2,3 + 3,2,1 + 78,43,45 +``` + +Tablo fromdan`test.csv` ve ondan ilk iki satır seçimi: + +``` sql +SELECT * +FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') +LIMIT 2 +``` + +``` text +┌─column1─┬─column2─┬─column3─┐ +│ 1 │ 2 │ 3 │ +│ 3 │ 2 │ 1 │ +└─────────┴─────────┴─────────┘ +``` + +``` sql +-- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file +SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10 +``` + +**Yolda Globs** + +Birden çok yol bileşenleri globs olabilir. İşlenmek için dosya var olmalı ve tüm yol deseniyle eşleşmelidir (sadece sonek veya önek değil). + +- `*` — Substitutes any number of any characters except `/` boş dize dahil. +- `?` — Substitutes any single character. +- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Substitutes any number in range from N to M including both borders. + +İle yapılar `{}` benzer olan [uzaktan masa fonksiyonu](../../sql_reference/table_functions/remote.md)). + +**Örnek** + +1. 
+1. Aşağıdaki göreli yollara sahip birkaç dosyamız olduğunu varsayalım:
+
+- ‘some\_dir/some\_file\_1’
+- ‘some\_dir/some\_file\_2’
+- ‘some\_dir/some\_file\_3’
+- ‘another\_dir/some\_file\_1’
+- ‘another\_dir/some\_file\_2’
+- ‘another\_dir/some\_file\_3’
+
+1. Bu dosyalardaki toplam satır sayısını sorgulayın:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+```
+
+1. Bu iki dizindeki tüm dosyalardaki toplam satır sayısını sorgulayın:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+```
+
+!!! warning "Uyarı"
+    Dosya listenizde önde gelen sıfırlar içeren sayı aralıkları varsa, her basamak için küme parantezli yapıyı ayrı ayrı kullanın veya `?` kullanın.
+
+**Örnek**
+
+`file000`, `file001`, … , `file999` adlı dosyalardan verileri sorgulama:
+
+``` sql
+SELECT count(*)
+FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+```
+
+## Sanal Sütunlar {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**Ayrıca Bakınız**
+
+- [Sanal sütunlar](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/file/)
diff --git a/docs/tr/sql_reference/table_functions/generate.md b/docs/tr/sql_reference/table_functions/generate.md
new file mode 100644
index 00000000000..f9fc1fc9b21
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/generate.md
@@ -0,0 +1,45 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 47
+toc_title: generateRandom
+---
+
+# generateRandom {#generaterandom}
+
+Verilen şema ile rastgele veri üretir.
+Test tablolarını verilerle doldurmaya izin verir.
+`LowCardinality` ve `AggregateFunction` dışında, tabloda saklanabilen tüm veri türlerini destekler.
+
+``` sql
+generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]);
+```
+
+**Parametreler**
+
+- `name` — Name of corresponding column.
+- `TypeName` — Type of corresponding column.
+- `limit` — Number of rows to generate.
+- `max_array_length` — Maximum array length for all generated arrays. Defaults to `10`.
+- `max_string_length` — Maximum string length for all generated strings. Defaults to `10`.
+- `random_seed` — Specify random seed manually to produce stable results. If NULL — seed is randomly generated.
+
+**Döndürülen Değer**
+
+İstenen şema ile bir tablo nesnesi.
+
+## Kullanım Örneği {#usage-example}
+
+``` sql
+SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2);
+```
+
+``` text
+┌─a────────┬────────────d─┬─c──────────────────────────────────────────────────────────────────┐
+│ [77]     │ -124167.6723 │ ('2061-04-17 21:59:44.573','3f72f405-ec3e-13c8-44ca-66ef335f7835') │
+│ [32,110] │ -141397.7312 │ ('1979-02-09 03:43:48.526','982486d1-5a5d-a308-e525-7bd8b80ffa73') │
+│ [68]     │  -67417.0770 │ ('2080-03-12 14:17:31.269','110425e5-413f-10a6-05ba-fa6b3e929f15') │
+└──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘
+```
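+
+Belgede belirtildiği gibi test tablolarını doldurmak için işlev bir `INSERT ... SELECT` içinde kullanılabilir. Varsayımsal bir `test_table` tablosu varsayan küçük bir taslak:
+
+``` sql
+-- hypothetical target table for the generated data
+CREATE TABLE test_table (a Array(Int8), d Decimal32(4)) ENGINE = Memory;
+
+-- fill it with 100 rows of random data, using random seed 1 for reproducible results
+INSERT INTO test_table SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4)', 1) LIMIT 100;
+```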
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/generate/)
diff --git a/docs/tr/sql_reference/table_functions/hdfs.md b/docs/tr/sql_reference/table_functions/hdfs.md
new file mode 100644
index 00000000000..e15d721135a
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/hdfs.md
@@ -0,0 +1,104 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 45
+toc_title: hdfs
+---
+
+# hdfs {#hdfs}
+
+HDFS'deki dosyalardan bir tablo oluşturur. Bu tablo işlevi, [url](url.md) ve [dosya](file.md) tablo işlevlerine benzer.
+
+``` sql
+hdfs(URI, format, structure)
+```
+
+**Giriş parametreleri**
+
+- `URI` — The relative URI to the file in HDFS. Dosya yolu salt okunur modda şu glob'ları destekler: `*`, `?`, `{abc,def}` ve `{N..M}`; burada `N`, `M` — sayılar, `'abc', 'def'` — dizelerdir.
+- `format` — Dosyanın [biçimi](../../interfaces/formats.md#formats).
+- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
+
+**Döndürülen değer**
+
+Belirtilen dosyada veri okumak veya yazmak için belirtilen yapıya sahip bir tablo.
+
+**Örnek**
+
+`hdfs://hdfs1:9000/test` dosyasından tablo oluşturma ve ilk iki satırını seçme:
+
+``` sql
+SELECT *
+FROM hdfs('hdfs://hdfs1:9000/test', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
+LIMIT 2
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+**Yolda Globs**
+
+Birden çok yol bileşeni glob olabilir. İşlenebilmesi için dosyanın var olması ve tüm yol deseniyle eşleşmesi gerekir (yalnızca sonek veya önek değil).
+
+- `*` — `/` dışında, boş dize dahil herhangi bir sayıda herhangi bir karakterin yerine geçer.
+- `?` — Herhangi bir tek karakterin yerine geçer.
+- `{some_string,another_string,yet_another_one}` — `'some_string', 'another_string', 'yet_another_one'` dizelerinden herhangi birinin yerine geçer.
+- `{N..M}` — Her iki sınır dahil olmak üzere N'den M'ye kadar herhangi bir sayının yerine geçer.
+
+`{}` içeren yapılar, [uzak tablo işlevindekilere](../../sql_reference/table_functions/remote.md) benzer.
+
+**Örnek**
+
+1. HDFS'de aşağıdaki URI'lere sahip birkaç dosyamız olduğunu varsayalım:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+1. Bu dosyalardaki toplam satır sayısını sorgulayın:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+```
+
+1. Bu iki dizindeki tüm dosyalardaki toplam satır sayısını sorgulayın:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+```
+
+!!! warning "Uyarı"
+    Dosya listenizde önde gelen sıfırlar içeren sayı aralıkları varsa, her basamak için küme parantezli yapıyı ayrı ayrı kullanın veya `?` kullanın.
+
+**Örnek**
+
+`file000`, `file001`, … , `file999` adlı dosyalardan verileri sorgulama:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+```
+
+## Sanal Sütunlar {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**Ayrıca Bakınız**
+
+- [Sanal sütunlar](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/)
diff --git a/docs/tr/sql_reference/table_functions/index.md b/docs/tr/sql_reference/table_functions/index.md
new file mode 100644
index 00000000000..3108903713f
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/index.md
@@ -0,0 +1,38 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: "Tablo Fonksiyonlar\u0131"
+toc_priority: 34
+toc_title: "Giri\u015F"
+---
+
+# Tablo Fonksiyonları {#table-functions}
+
+Tablo işlevleri, tablo oluşturma yöntemleridir.
+
+Tablo işlevlerini şuralarda kullanabilirsiniz:
+
+- `SELECT` sorgusunun [FROM](../statements/select.md#select-from) yan tümcesinde.
+
+    The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
+
+- [CREATE TABLE AS \<table\_function()\>](../statements/create.md#create-table-query) sorgusunda.
+
+    It's one of the methods of creating a table.
+
+!!! warning "Uyarı"
+    [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) ayarı devre dışı bırakılmışsa tablo işlevlerini kullanamazsınız.
+
+| İşlev                    | Açıklama                                                                                                                             |
+|--------------------------|--------------------------------------------------------------------------------------------------------------------------------------|
+| [Dosya](file.md)         | Bir [File](../../engines/table_engines/special/file.md) motorlu tablo oluşturur.                                                       |
+| [birleştirmek](merge.md) | Bir [Merge](../../engines/table_engines/special/merge.md) motorlu tablo oluşturur.                                                     |
+| [sayılar](numbers.md)    | Tamsayılarla dolu tek bir sütun içeren bir tablo oluşturur.                                                                            |
+| [uzak](remote.md)        | Bir [Distributed](../../engines/table_engines/special/distributed.md) motorlu tablo oluşturmadan uzak sunuculara erişmenizi sağlar.    |
+| [url](url.md)            | Bir [URL](../../engines/table_engines/special/url.md) motorlu tablo oluşturur.                                                         |
+| [mysql](mysql.md)        | Bir [MySQL](../../engines/table_engines/integrations/mysql.md) motorlu tablo oluşturur.                                                |
+| [jdbc](jdbc.md)          | Bir [JDBC](../../engines/table_engines/integrations/jdbc.md) motorlu tablo oluşturur.                                                  |
+| [odbc](odbc.md)          | Bir [ODBC](../../engines/table_engines/integrations/odbc.md) motorlu tablo oluşturur.                                                  |
+| [hdfs](hdfs.md)          | Bir [HDFS](../../engines/table_engines/integrations/hdfs.md) motorlu tablo oluşturur.                                                  |
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/)
diff --git a/docs/tr/sql_reference/table_functions/input.md b/docs/tr/sql_reference/table_functions/input.md
new file mode 100644
index 00000000000..5639e05eb81
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/input.md
@@ -0,0 +1,47 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 46
+toc_title: girdi
+---
+
+# girdi {#input}
+
+`input(structure)` - sunucuya gönderilen, verilen yapıya sahip verileri etkin bir şekilde dönüştürüp
+başka bir yapıya sahip bir tabloya eklemeyi sağlayan tablo işlevi.
+
+`structure` - sunucuya gönderilen verilerin `'column1_name column1_type, column2_name column2_type, ...'` biçimindeki yapısı.
+Örneğin, `'id UInt32, name String'`.
+
+Bu işlev yalnızca `INSERT SELECT` sorgusunda ve yalnızca bir kez kullanılabilir; bunun dışında sıradan bir tablo işlevi gibi davranır
+(örneğin, alt sorguda kullanılabilir vb.).
+
+Veri, sıradan bir `INSERT` sorgusunda olduğu gibi herhangi bir şekilde gönderilebilir ve sorgunun sonunda belirtilmesi gereken
+herhangi bir kullanılabilir [biçimde](../../interfaces/formats.md#formats) geçirilebilir (sıradan `INSERT SELECT` sorgusunun aksine).
+
+Bu işlevin ana özelliği, sunucunun istemciden veri alırken onu aynı anda `SELECT` yan tümcesindeki ifade listesine göre
+dönüştürmesi ve hedef tabloya eklemesidir. Aktarılan tüm verilerle geçici bir tablo oluşturulmaz.
+
+**Örnekler**
+
+- `test` tablosunun `(a String, b String)` yapısına sahip olduğunu ve `data.csv` içindeki verilerin farklı bir
+    `(col1 String, col2 Date, col3 Int32)` yapısına sahip olduğunu varsayalım. `data.csv` içindeki bu verileri eşzamanlı
+    dönüşümle `test` tablosuna ekleme sorgusu şöyle görünür:
+
+``` bash
+$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV";
+```
+
+- Eğer `data.csv`, `test` tablosuyla aynı `test_structure` yapısındaki verileri içeriyorsa, bu iki sorgu eşdeğerdir:
+
+``` bash
+$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV"
+$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV"
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/input/)
diff --git a/docs/tr/sql_reference/table_functions/jdbc.md b/docs/tr/sql_reference/table_functions/jdbc.md
new file mode 100644
index 00000000000..451fdefc013
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/jdbc.md
@@ -0,0 +1,29 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 43
+toc_title: jdbc
+---
+
+# jdbc {#table-function-jdbc}
+
+`jdbc(jdbc_connection_uri, schema, table)` - JDBC sürücüsü aracılığıyla bağlanılan tabloyu döndürür.
+
+Bu tablo işlevi, ayrı bir `clickhouse-jdbc-bridge` programının çalışıyor olmasını gerektirir.
+Nullable türleri destekler (sorgulanan uzak tablonun DDL'sine dayalı olarak).
+
+**Örnekler**
+
+``` sql
+SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
+```
+
+``` sql
+SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
+```
+
+``` sql
+SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table')
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/)
diff --git a/docs/tr/sql_reference/table_functions/merge.md b/docs/tr/sql_reference/table_functions/merge.md
new file mode 100644
index 00000000000..67e1355383c
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/merge.md
@@ -0,0 +1,14 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 38
+toc_title: "birle\u015Ftirmek"
+---
+
+# birleştirmek {#merge}
+
+`merge(db_name, 'tables_regexp')` – Geçici bir Merge tablosu oluşturur. Daha fazla bilgi için “Table engines, Merge” bölümüne bakın.
+
+Tablo yapısı, normal ifadeyle eşleşen ilk tablodan alınır.
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/merge/)
diff --git a/docs/tr/sql_reference/table_functions/mysql.md b/docs/tr/sql_reference/table_functions/mysql.md
new file mode 100644
index 00000000000..aee7311dc56
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/mysql.md
@@ -0,0 +1,86 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 42
+toc_title: mysql
+---
+
+# mysql {#mysql}
+
+Uzak bir MySQL sunucusunda depolanan veriler üzerinde `SELECT` sorguları gerçekleştirilmesine izin verir.
+
+``` sql
+mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
+```
+
+**Parametreler**
+
+- `host:port` — MySQL server address.
+
+- `database` — Remote database name.
+
+- `table` — Remote table name.
+
+- `user` — MySQL user.
+
+- `password` — User password.
+
+- `replace_query` — `INSERT INTO` sorgularını `REPLACE INTO` sorgularına dönüştüren bayrak. `replace_query=1` ise sorgu değiştirilir.
+
+- `on_duplicate_clause` — `INSERT` sorgusuna eklenen `ON DUPLICATE KEY on_duplicate_clause` ifadesi.
+
+    Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
+
+    To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception.
+
+`=, !=, >, >=, <, <=` gibi basit `WHERE` koşulları şu anda MySQL sunucusunda yürütülür.
+
+Geri kalan koşullar ve `LIMIT` örnekleme kısıtlaması, MySQL'e yapılan sorgu bittikten sonra yalnızca ClickHouse'da yürütülür.
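+
+Örneğin, aşağıdaki sorguda (aşağıdaki kullanım örneğindeki `test.test` tablosu ve kimlik bilgileri varsayılmıştır) `int_id = 1` eşitlik koşulu MySQL sunucusunda yürütülür, bu nedenle uzak tablodan yalnızca eşleşen satırlar okunur:
+
+``` sql
+-- the simple equality condition is pushed down and executed on the MySQL server
+SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') WHERE int_id = 1
+```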
+
+**Döndürülen Değer**
+
+Orijinal MySQL tablosu ile aynı sütunlara sahip bir tablo nesnesi.
+
+## Kullanım Örneği {#usage-example}
+
+MySQL tablosu:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++--------+--------------+-------+----------------+
+| int_id | int_nullable | float | float_nullable |
++--------+--------------+-------+----------------+
+|      1 |         NULL |     2 |           NULL |
++--------+--------------+-------+----------------+
+1 row in set (0,00 sec)
+```
+
+ClickHouse'dan veri seçme:
+
+``` sql
+SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
+│      1 │         ᴺᵁᴸᴸ │     2 │           ᴺᵁᴸᴸ │
+└────────┴──────────────┴───────┴────────────────┘
+```
+
+## Ayrıca Bakınız {#see-also}
+
+- [‘MySQL’ tablo motoru](../../engines/table_engines/integrations/mysql.md)
+- [Harici sözlük kaynağı olarak MySQL kullanma](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/)
diff --git a/docs/tr/sql_reference/table_functions/numbers.md b/docs/tr/sql_reference/table_functions/numbers.md
new file mode 100644
index 00000000000..45ca2f5f45d
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/numbers.md
@@ -0,0 +1,30 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 39
+toc_title: "say\u0131lar"
+---
+
+# sayılar {#numbers}
+
+`numbers(N)` – 0'dan N-1'e kadar tamsayıları içeren tek bir ‘number’ sütununa (UInt64) sahip bir tablo döndürür.
+`numbers(N, M)` – N'den (N + M - 1)'e kadar tamsayıları içeren tek bir ‘number’ sütununa (UInt64) sahip bir tablo döndürür.
+
+`system.numbers` tablosuna benzer şekilde, ardışık değerleri test etmek ve üretmek için kullanılabilir; `numbers(N, M)`, `system.numbers` tablosundan daha verimlidir.
+
+Aşağıdaki sorgular eşdeğerdir:
+
+``` sql
+SELECT * FROM numbers(10);
+SELECT * FROM numbers(0, 10);
+SELECT * FROM system.numbers LIMIT 10;
+```
+
+Örnekler:
+
+``` sql
+-- Generate a sequence of dates from 2010-01-01 to 2010-12-31
+select toDate('2010-01-01') + number as d FROM numbers(365);
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/)
diff --git a/docs/tr/sql_reference/table_functions/odbc.md b/docs/tr/sql_reference/table_functions/odbc.md
new file mode 100644
index 00000000000..d250ce21311
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/odbc.md
@@ -0,0 +1,108 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 44
+toc_title: odbc
+---
+
+# odbc {#table-functions-odbc}
+
+[ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity) üzerinden bağlanılan tabloyu döndürür.
+
+``` sql
+odbc(connection_settings, external_database, external_table)
+```
+
+Parametreler:
+
+- `connection_settings` — `odbc.ini` dosyasındaki bağlantı ayarları bölümünün adı.
+- `external_database` — Name of a database in an external DBMS.
+- `external_table` — Name of a table in the `external_database`.
+
+ODBC bağlantılarını güvenli bir şekilde gerçekleştirmek için ClickHouse, `clickhouse-odbc-bridge` adlı ayrı bir program kullanır.
+ODBC sürücüsü doğrudan `clickhouse-server` içine yüklenseydi, sürücü sorunları ClickHouse sunucusunun çökmesine neden olabilirdi. ClickHouse, gerektiğinde `clickhouse-odbc-bridge` programını otomatik olarak başlatır. ODBC köprüsü programı, `clickhouse-server` ile aynı paketten yüklenir.
+
+Dış tablodaki `NULL` değerlere sahip alanlar, temel veri türünün varsayılan değerlerine dönüştürülür. Örneğin, uzak bir MySQL tablosundaki `INT NULL` türünde bir alan 0'a dönüştürülür (ClickHouse `Int32` veri türünün varsayılan değeri).
+
+## Kullanım örneği {#usage-example}
+
+**ODBC üzerinden yerel MySQL kurulumundan veri alma**
+
+Bu örnek, Ubuntu Linux 18.04 ve MySQL server 5.7 üzerinde doğrulanmıştır.
+
+unixODBC ve MySQL Connector'ın yüklü olduğundan emin olun.
+
+Varsayılan olarak (paketlerden yüklendiyse), ClickHouse `clickhouse` kullanıcısı olarak başlar. Bu nedenle, bu kullanıcıyı MySQL sunucusunda oluşturmanız ve yapılandırmanız gerekir.
+
+``` bash
+$ sudo mysql
+```
+
+``` sql
+mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+```
+
+Ardından bağlantıyı `/etc/odbc.ini` içinde yapılandırın.
+
+``` bash
+$ cat /etc/odbc.ini
+[mysqlconn]
+DRIVER = /usr/local/lib/libmyodbc5w.so
+SERVER = 127.0.0.1
+PORT = 3306
+DATABASE = test
+USERNAME = clickhouse
+PASSWORD = clickhouse
+```
+
+Bağlantıyı, unixODBC kurulumundaki `isql` yardımcı programını kullanarak kontrol edebilirsiniz.
+
+``` bash
+$ isql -v mysqlconn
++-------------------------+
+| Connected!              |
+|                         |
+...
+```
+
+MySQL tablosu:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++--------+--------------+-------+----------------+
+| int_id | int_nullable | float | float_nullable |
++--------+--------------+-------+----------------+
+|      1 |         NULL |     2 |           NULL |
++--------+--------------+-------+----------------+
+1 row in set (0,00 sec)
+```
+
+ClickHouse'da MySQL tablosundan veri alma:
+
+``` sql
+SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test')
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
+│      1 │            0 │     2 │              0 │
+└────────┴──────────────┴───────┴────────────────┘
+```
+
+## Ayrıca Bakınız {#see-also}
+
+- [ODBC harici sözlükler](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
+- [ODBC tablo motoru](../../engines/table_engines/integrations/odbc.md).
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/)
diff --git a/docs/tr/sql_reference/table_functions/remote.md b/docs/tr/sql_reference/table_functions/remote.md
new file mode 100644
index 00000000000..58c4154643c
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/remote.md
@@ -0,0 +1,83 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 40
+toc_title: uzak
+---
+
+# uzak, remoteSecure {#remote-remotesecure}
+
+Bir `Distributed` tablo oluşturmadan uzak sunuculara erişmenizi sağlar.
+
+İmzalar:
+
+``` sql
+remote('addresses_expr', db, table[, 'user'[, 'password']])
+remote('addresses_expr', db.table[, 'user'[, 'password']])
+```
+
+`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address.
+Sunucu adresi `host:port` ya da yalnızca `host` biçimindedir. Ana bilgisayar, sunucu adı ya da IPv4 veya IPv6 adresi olarak belirtilebilir. IPv6 adresi köşeli parantez içinde belirtilir. Bağlantı noktası, uzak sunucudaki TCP bağlantı noktasıdır. Bağlantı noktası atlanırsa, sunucunun yapılandırma dosyasındaki `tcp_port` değeri kullanılır (varsayılan olarak 9000).
+
+!!! important "Önemli"
+    Bir IPv6 adresi için bağlantı noktası gereklidir.
+
+Örnekler:
+
+``` text
+example01-01-1
+example01-01-1:9000
+localhost
+127.0.0.1
+[::]:9000
+[2a02:6b8:0:1111::11]:9000
+```
+
+Birden çok adres virgülle ayrılabilir. Bu durumda ClickHouse dağıtılmış işleme kullanır ve sorguyu belirtilen tüm adreslere (farklı verilere sahip kırıklara olduğu gibi) gönderir.
+
+Örnek:
+
+``` text
+example01-01-1,example01-02-1
+```
+
+İfadenin bir kısmı küme parantezleri içinde belirtilebilir. Önceki örnek aşağıdaki gibi yazılabilir:
+
+``` text
+example01-0{1,2}-1
+```
+
+Küme parantezleri, iki nokta ile ayrılmış bir sayı aralığı (negatif olmayan tamsayılar) içerebilir. Bu durumda aralık, kırık adresleri üreten bir değer kümesine genişletilir. İlk sayı sıfırla başlıyorsa, değerler aynı sıfır hizalamasıyla oluşturulur. Önceki örnek aşağıdaki gibi yazılabilir:
+
+``` text
+example01-{01..02}-1
+```
+
+Birden fazla küme parantezi çiftiniz varsa, ilgili kümelerin doğrudan çarpımı oluşturulur.
+
+Adresler ve küme parantezleri içindeki adres parçaları boru simgesiyle (\|) ayrılabilir. Bu durumda, karşılık gelen adres kümeleri yinelemeler (replica) olarak yorumlanır ve sorgu ilk sağlıklı yinelemeye gönderilir. Ancak yinelemeler, şu anda [load\_balancing](../../operations/settings/settings.md) ayarında belirlenen sırayla dolaşılır.
+
+Örnek:
+
+``` text
+example01-{01..02}-{1|2}
+```
+
+Bu örnek, her birinin iki yinelemesi olan iki kırığı belirtir.
+
+Oluşturulan adres sayısı bir sabitle sınırlıdır. Şu anda bu sınır 1000 adrestir.
+
+`remote` tablo işlevini kullanmak, bir `Distributed` tablo oluşturmaktan daha az optimaldir; çünkü bu durumda sunucu bağlantısı her istek için yeniden kurulur. Ayrıca, ana bilgisayar adları kullanılıyorsa adlar her seferinde çözümlenir ve çeşitli yinelemelerle çalışılırken hatalar sayılmaz. Çok sayıda sorgu işlerken, `Distributed` tabloyu her zaman önceden oluşturun ve `remote` tablo işlevini kullanmayın.
+
+`remote` tablo işlevi aşağıdaki durumlarda yararlı olabilir:
+
+- Veri karşılaştırma, hata ayıklama ve sınama için belirli bir sunucuya erişme.
+- Araştırma amacıyla çeşitli ClickHouse kümeleri arasında sorgular.
+- El ile yapılan seyrek dağıtılmış istekler.
+- Sunucu kümesinin her seferinde yeniden tanımlandığı dağıtılmış istekler.
+
+Kullanıcı belirtilmemişse `default` kullanılır.
+Parola belirtilmemişse boş bir parola kullanılır.
+
+`remoteSecure` - `remote` ile aynıdır, ancak güvenli bağlantı kullanır. Varsayılan bağlantı noktası — yapılandırmadaki [tcp\_port\_secure](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) değeri veya 9440.
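+
+Örneğin, yukarıdaki desenlerden birini kullanarak iki kırığa tek bir sorgu göndermek (varsayımsal sunucu adları ve varsayımsal bir `default.hits` tablosu varsayan küçük bir taslak):
+
+``` sql
+-- queries both shards and returns the combined row count
+SELECT count() FROM remote('example01-0{1,2}-1', default.hits)
+```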
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/remote/)
diff --git a/docs/tr/sql_reference/table_functions/url.md b/docs/tr/sql_reference/table_functions/url.md
new file mode 100644
index 00000000000..bb841fd344c
--- /dev/null
+++ b/docs/tr/sql_reference/table_functions/url.md
@@ -0,0 +1,26 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 41
+toc_title: url
+---
+
+# url {#url}
+
+`url(URL, format, structure)` - verilen `URL`'den, verilen `format` ve `structure` ile oluşturulan bir tablo döndürür.
+
+URL - `GET` ve/veya `POST` isteklerini kabul edebilen bir HTTP veya HTTPS sunucusunun adresi.
+
+format - verilerin [biçimi](../../interfaces/formats.md#formats).
+
+structure - `'UserID UInt64, Name String'` biçiminde tablo yapısı. Sütun adlarını ve türlerini belirler.
+
+**Örnek**
+
+``` sql
+-- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format.
+SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3
+```
+
+[Orijinal makale](https://clickhouse.tech/docs/en/query_language/table_functions/url/)
diff --git a/docs/tr/whats_new/changelog/2017.md b/docs/tr/whats_new/changelog/2017.md
new file mode 100644
index 00000000000..2f31be7f6de
--- /dev/null
+++ b/docs/tr/whats_new/changelog/2017.md
@@ -0,0 +1,268 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 79
+toc_title: '2017'
+---
+
+### ClickHouse sürümü 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21}
+
+Bu sürüm, önceki sürüm 1.1.54318 için hata düzeltmeleri içerir:
+
+- Çoğaltmada veri kaybına yol açabilecek olası bir yarış koşuluna ilişkin hata düzeltildi. Bu sorun 1.1.54310 ve 1.1.54318 sürümlerini etkiler. Bu sürümlerden birini çoğaltılmış tablolarla kullanıyorsanız, güncelleme önerilir. Bu sorun, günlüklerde `Part ... from own log doesn't exist.` gibi uyarı iletileriyle kendini gösterir. Bu iletileri günlüklerde görmeseniz bile sorun geçerlidir.
+
+### ClickHouse sürümü 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30}
+
+Bu sürüm, önceki sürüm 1.1.54310 için hata düzeltmeleri içerir:
+
+- SummingMergeTree motorunda birleştirmeler sırasındaki hatalı satır silme işlemleri düzeltildi
+- Çoğaltılmamış MergeTree motorlarındaki bir bellek sızıntısı düzeltildi
+- MergeTree motorlarında sık eklemelerle oluşan performans düşüşü düzeltildi
+- Çoğaltma kuyruğunun çalışmayı durdurmasına neden olan bir sorun giderildi
+- Sunucu günlüklerinin döndürülmesi ve arşivlenmesi düzeltildi
+
+### ClickHouse sürümü 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01}
+
+#### Yenilik: {#new-features}
+
+- MergeTree ailesi tablo motorları için özel bölümleme anahtarı.
+- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) tablo motoru.
+- [CatBoost](https://catboost.yandex/) modellerini yükleme ve ClickHouse'da saklanan verilere uygulama desteği eklendi.
+- UTC olmayan tamsayı uzaklıklı saat dilimleri için destek eklendi.
+- Zaman aralıklarıyla aritmetik işlemler için destek eklendi.
+- Date ve DateTime türleri için değer aralığı 2105 yılına genişletildi.
+- `CREATE MATERIALIZED VIEW x TO y` sorgusu eklendi (materyalleştirilmiş bir görünümün verilerini depolamak için var olan bir tabloyu belirtir).
+- Argümansız `ATTACH TABLE` sorgusu eklendi.
+- Bir SummingMergeTree tablosunda adları -Map ile biten iç içe sütunların işleme mantığı, sumMap toplama işlevine ayıklandı. Artık bu tür sütunları açıkça belirtebilirsiniz.
+- IP trie sözlüğünün maksimum boyutu 128M girişe yükseltildi.
+- getSizeOfEnumType işlevi eklendi.
+- sumWithOverflow toplama işlevi eklendi.
+- Cap'n Proto giriş biçimi için destek eklendi.
+- Artık zstd algoritmasını kullanırken sıkıştırma düzeyini özelleştirebilirsiniz.
+
+#### Geriye dönük uyumsuz değişiklikler: {#backward-incompatible-changes}
+
+- Memory dışında bir motorla geçici tablo oluşturulmasına izin verilmez.
+- View veya MaterializedView motorlarıyla tabloların açıkça oluşturulmasına izin verilmez.
+- Tablo oluşturma sırasında, yeni bir denetim örnekleme anahtarı ifadesinin birincil anahtara dahil olduğunu doğrular.
+
+#### Hata düzeltmeleri: {#bug-fixes}
+
+- Dağıtılmış bir tabloya eşzamanlı ekleme sırasında oluşan takılmalar düzeltildi.
+- Çoğaltılmış tablolarda parçaların atomik olmayan şekilde eklenmesi ve silinmesi düzeltildi.
+- Materyalleştirilmiş görünüme eklenen veriler gereksiz tekilleştirmeye tabi tutulmaz.
+- Yerel yinelemenin geciktiği ve uzak yinelemelerin kullanılamadığı dağıtılmış bir tabloya sorgu yürütmek artık hataya neden olmaz.
+- Kullanıcıların geçici tablo oluşturmak için artık `default` veritabanına erişim iznine ihtiyacı yoktur.
+- Argümansız Array türü belirtilirken oluşan çökme düzeltildi.
+- Sunucu günlüklerini içeren disk birimi dolu olduğunda oluşan takılmalar düzeltildi.
+- Unix döneminin ilk haftası için toRelativeWeekNum işlevindeki bir taşma düzeltildi.
+
+#### İyileştirmeler oluşturun: {#build-improvements}
+
+- Birkaç üçüncü taraf kütüphanesi (özellikle Poco) güncellendi ve git alt modüllerine dönüştürüldü.
+
+### ClickHouse sürümü 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19}
+
+#### Yenilik: {#new-features-1}
+
+- Yerel protokolde TLS desteği (etkinleştirmek için `config.xml` içinde `tcp_ssl_port` ayarını yapın).
+
+#### Hata düzeltmeleri: {#bug-fixes-1}
+
+- Çoğaltılmış tablolar için `ALTER` artık mümkün olan en kısa sürede çalışmaya başlamaya çalışır.
+- `preferred_block_size_bytes=0.` ayarı ile veri okurken oluşan çökme düzeltildi.
+- `Page Down` tuşuna basıldığında oluşan `clickhouse-client` çökmeleri düzeltildi.
+- `GLOBAL IN` ve `UNION ALL` içeren bazı karmaşık sorguların doğru yorumlanması.
+- `FREEZE PARTITION` artık her zaman atomik olarak çalışır.
+- Boş POST istekleri artık 411 koduyla yanıt döndürür.
+- `CAST(1 AS Nullable(UInt8)).` gibi ifadeler için yorumlama hataları düzeltildi.
+- `MergeTree` tablolarından `Array(Nullable(String))` sütunları okunurken oluşan bir hata düzeltildi.
+- `SELECT dummy AS dummy, dummy AS b` gibi sorgular ayrıştırılırken oluşan çökme düzeltildi.
+- Geçersiz `users.xml` olduğunda kullanıcılar doğru şekilde güncellenir.
+- Yürütülebilir bir sözlük sıfır olmayan bir yanıt kodu döndürdüğünde doğru işleme.
+
+### ClickHouse yayın 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20}
+
+#### Yenilik: {#new-features-2}
+
+- Koordinat düzleminde koordinatlarla çalışmak için `pointInPolygon` işlevi eklendi.
+- `SummingMergeTree`'ye benzer şekilde dizilerin toplamını hesaplamak için `sumMap` toplama işlevi eklendi.
+- `trunc` işlevi eklendi. Yuvarlama işlevlerinin (`round`, `floor`, `ceil`, `roundToExp2`) performansı iyileştirildi ve çalışma mantıkları düzeltildi. `roundToExp2` işlevinin kesirler ve negatif sayılar için mantığı değiştirildi.
+- ClickHouse yürütülebilir dosyası artık libc sürümüne daha az bağımlıdır. Aynı ClickHouse yürütülebilir dosyası çok çeşitli Linux sistemlerinde çalışabilir. Derlenmiş sorgular kullanılırken hâlâ bir bağımlılık vardır (`compile = 1` ayarı ile; varsayılan olarak kullanılmaz).
+- Sorguların dinamik derlenmesi için gereken süre azaltıldı.
+
+#### Hata düzeltmeleri: {#bug-fixes-2}
+
+- Bazen `part ... intersects previous part` iletileri üreten ve yineleme tutarlılığını zayıflatan bir hata düzeltildi.
+- Kapatma sırasında ZooKeeper kullanılamıyorsa sunucunun kilitlenmesine neden olan bir hata düzeltildi.
+- Yinelemeler geri yüklenirken oluşan aşırı günlük kaydı kaldırıldı.
+- UNION ALL uygulamasındaki bir hata düzeltildi.
+- Bir bloktaki ilk sütun Array türündeyse concat işlevinde oluşan bir hata düzeltildi.
+- İlerleme artık system.merges tablosunda doğru görüntüleniyor.
+ +### ClickHouse yayın 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13} + +#### Yenilik: {#new-features-3} + +- `SYSTEM` sunucu yönetimi için sorgular: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. +- Dizilerle çalışmak için işlevler eklendi: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. +- Katma `root` ve `identity` ZooKeeper yapılandırması için parametreler. Bu, aynı ZooKeeper kümesinde tek tek kullanıcıları izole etmenizi sağlar. +- Toplam fonksiyonları eklendi `groupBitAnd`, `groupBitOr`, ve `groupBitXor` (uyumluluk için, isimler altında da mevcuttur `BIT_AND`, `BIT_OR`, ve `BIT_XOR`). +- Dış sözlükler dosya sisteminde bir soket belirterek Mysql'den yüklenebilir. +- Harici sözlükler SSL üzerinden MySQL yüklenebilir (`ssl_cert`, `ssl_key`, `ssl_ca` parametre). +- Add theed the `max_network_bandwidth_for_user` kullanıcı başına sorgular için genel bant genişliği kullanımını kısıtlamak için ayarlama. +- İçin destek `DROP TABLE` geçici tablolar için. +- Okuma desteği `DateTime` Unix zaman damgası biçimindeki değerler `CSV` ve `JSONEachRow` biçimliler. +- Dağıtılmış sorgularda gecikmeli yinelemeler artık varsayılan olarak dışlanır (varsayılan eşik 5 dakikadır). +- FIFO kilitleme sırasında kullanılır ALTER: bir ALTER sorgusu sürekli çalışan sorgular için süresiz olarak engellenmez. +- Seçeneği ayarlamak için `umask` yapılandırma dosyasında. +- İle sorgular için geliştirilmiş performans `DISTINCT` . + +#### Hata düzeltmeleri: {#bug-fixes-3} + +- ZooKeeper eski düğümleri silme işlemi geliştirildi. Daha önce, eski düğümler bazen çok sık ekler varsa silinmedi, bu da sunucunun diğer şeylerin yanı sıra kapanması için yavaş olmasına neden oldu. +- ZooKeeper bağlantı için ana seçerken sabit randomizasyon. +- Çoğaltma localhost ise, dağıtılmış sorgularda gecikmiş kopyaların dışlanması düzeltildi. +- Bir veri parçası bir hata düzeltildi `ReplicatedMergeTree` tablo çalıştırdıktan sonra kırık olabilir `ALTER MODIFY` bir element üzerinde `Nested` yapılı. +- SELECT sorgularına neden olabilecek bir hata düzeltildi “hang”. +- Dağıtılmış DDL sorguları için iyileştirmeler. +- Sorgu düzeltildi `CREATE TABLE ... AS `. +- Çık themaz theı Çöz thedü `ALTER ... CLEAR COLUMN IN PARTITION` sorgu için `Buffer` Tablolar. +- İçin geçersiz varsayılan değer düzeltildi `Enum` kullanırken s (minimum yerine 0) `JSONEachRow` ve `TSKV` biçimliler. +- Bir sözlük ile bir sözlük kullanırken zombi süreçlerinin görünümünü çözdü `executable` kaynaklı. +- Kafa sorgusu için sabit segfault. + +#### ClickHouse geliştirmek ve birleştirmek için geliştirilmiş iş akışı: {#improved-workflow-for-developing-and-assembling-clickhouse} + +- Kullanabilirsiniz `pbuilder` ClickHouse inşa etmek. +- Kullanabilirsiniz `libc++` yerine `libstdc++` Linux üzerine inşa edilmiştir. +- Statik kod analiz araçlarını kullanma talimatları eklendi: `Coverage`, `clang-tidy`, `cppcheck`. + +#### Yükseltme yaparken lütfen unutmayın: {#please-note-when-upgrading} + +- MergeTree ayarı için artık daha yüksek bir varsayılan değer var `max_bytes_to_merge_at_max_space_in_pool` (bayt olarak birleştirilecek veri parçalarının maksimum toplam boyutu): 100 Gib'den 150 Gib'e yükseldi. Bu, disk Alt Sisteminde artan bir yüke neden olabilecek sunucu yükseltmesinden sonra çalışan büyük birleştirmelere neden olabilir. 
Sunucuda kullanılabilir boş alan, çalışan birleştirmelerin toplam miktarının iki katından azsa, küçük veri parçalarının birleştirmeleri de dahil olmak üzere diğer tüm birleştirmeler çalışmayı durdurur. Sonuç olarak, INSERT sorguları “Merges are processing significantly slower than inserts.” iletisiyle başarısız olur. Durumu izlemek için `SELECT * FROM system.merges` sorgusunu kullanın. Ayrıca `system.metrics` tablosundaki veya Graphite'teki `DiskSpaceReservedForMerge` metriğini kontrol edebilirsiniz. Büyük birleştirmeler bittikten sonra sorun kendiliğinden çözüleceğinden, bunu düzeltmek için bir şey yapmanıza gerek yoktur. Bunu kabul edilemez bulursanız, `max_bytes_to_merge_at_max_space_in_pool` ayarının önceki değerini geri yükleyebilirsiniz. Bunu yapmak için config.xml içindeki `<merge_tree>` bölümüne gidin, `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` değerini ayarlayın ve sunucuyu yeniden başlatın.
+
+### ClickHouse sürümü 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
+
+- Bu, önceki 1.1.54282 sürümü için bir hata düzeltme sürümüdür. ZooKeeper'daki parçalar dizinindeki sızıntıları giderir.
+
+### ClickHouse yayın 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
+
+Bu sürüm, önceki sürüm 1.1.54276 için hata düzeltmeleri içerir:
+
+- Dağıtılmış bir tabloya eklerken oluşan `DB::Exception: Assertion violation: !_path.empty()` hatası düzeltildi.
+- Giriş verileri ';' ile başlıyorsa RowBinary biçiminde eklerken ayrıştırma düzeltildi.
+- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
+
+### ClickHouse Yayın 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16}
+
+#### Yenilik: {#new-features-4}
+
+- SELECT sorgusu için isteğe bağlı WITH bölümü eklendi. Örnek sorgu: `WITH 1+1 AS a SELECT a, a*a`
+- INSERT, dağıtılmış bir tabloda eşzamanlı olarak gerçekleştirilebilir: OK yanıtı, yalnızca tüm veriler tüm kırıklara kaydedildikten sonra döndürülür. Bu, insert\_distributed\_sync = 1 ayarı ile etkinleştirilir.
+- 16 baytlık tanımlayıcılarla çalışmak için UUID veri türü eklendi.
+- Tableau ile uyumluluk için CHAR, FLOAT ve diğer türlerin takma adları eklendi.
+- Zamanı sayılara dönüştürmek için toYYYYMM, toYYYYMMDD ve toYYYYMMDDhhmmss işlevleri eklendi.
+- Kümelenmiş DDL sorguları için sunucuları tanımlamada IP adreslerini (ana bilgisayar adıyla birlikte) kullanabilirsiniz.
+- `substring(str, pos, len).` işlevinde sabit olmayan argümanlar ve negatif uzaklıklar için destek eklendi.
+- `groupArray(max_size)(column)` toplama işlevine max\_size parametresi eklendi ve performansı optimize edildi.
+
+#### Ana değişiklikler: {#main-changes}
+
+- Güvenlik iyileştirmeleri: tüm sunucu dosyaları 0640 izinleriyle oluşturulur (config parametresiyle değiştirilebilir).
+- Geçersiz sözdizimine sahip sorgular için geliştirilmiş hata iletileri.
+- MergeTree verilerinin büyük bölümleri birleştirilirken bellek tüketimi önemli ölçüde azaltıldı ve performans artırıldı.
+- ReplacingMergeTree motoru için veri birleştirmelerinin performansı önemli ölçüde artırıldı.
+- Dağıtılmış bir tablodan yapılan zaman uyumsuz eklemeler için, birden çok kaynak eklemeyi birleştirerek performans iyileştirildi. Bu işlevi etkinleştirmek için distributed\_directory\_monitor\_batch\_inserts = 1 ayarını kullanın.
+
+#### Geriye dönük uyumsuz değişiklikler: {#backward-incompatible-changes-1}
+
+- Diziler için `groupArray(array_column)` işlevlerinin toplama durumlarının ikili biçimi değiştirildi.
+ +#### Değişikliklerin tam listesi: {#complete-list-of-changes} + +- Add theed the `output_format_json_quote_denormals` json formatında nan ve ınf değerleri çıktısı sağlayan ayar. +- Dağıtılmış bir tablodan okurken optimize edilmiş akış tahsisi. +- Değer değişmezse ayarlar salt okunur modda yapılandırılabilir. +- Preferred\_block\_size\_bytes ayarında belirtilen blok boyutu üzerindeki kısıtlamaları karşılamak için mergetree motorunun tamsayı olmayan granüllerini alma yeteneği eklendi. Amaç, RAM tüketimini azaltmak ve büyük sütunlu tablolardan gelen sorguları işlerken önbellek konumunu arttırmaktır. +- Gibi ifadeler içeren dizinlerin verimli kullanımı `toStartOfHour(x)` gibi koşullar için `toStartOfHour(x) op сonstexpr.` +- MergeTree motorları için yeni ayarlar eklendi (yapılandırmada merge\_tree bölümü.xml): + - replicated\_deduplication\_window\_seconds yinelenen tablolar ekler tekilleştirme için izin verilen saniye sayısını ayarlar. + - cleanup\_delay\_period, eski verileri kaldırmak için temizleme işleminin ne sıklıkta başlatılacağını ayarlar. + - replicated\_can\_become\_leader, bir kopyanın lider olmasını (ve birleştirme atamasını) engelleyebilir. +- Hızlandırılmış Temizleme ZooKeeper eski verileri kaldırmak için. +- Kümelenmiş DDL sorguları için birden fazla iyileştirme ve düzeltme. Özellikle ilgi çekici olan yeni ayardır distributed\_ddl\_task\_timeout, kümedeki sunuculardan bir yanıt beklemek için zamanı sınırlar. Tüm ana bilgisayarlarda bir ddl isteği gerçekleştirilmediyse, bir yanıt bir zaman aşımı hatası içerir ve bir zaman uyumsuz modunda bir istek yürütülür. +- Sunucu günlüklerinde yığın izlerinin geliştirilmiş gösterimi. +- Add theed the “none” sıkıştırma yöntemi için değer. +- Yapılandırmada birden çok dictionaries\_config bölümünü kullanabilirsiniz.xml. +- Bu dosya sisteminde bir soket üzerinden MySQL bağlanmak mümkündür. +- Sistem.parçalar tablosu, bayt cinsinden işaretlerin boyutu hakkında bilgi içeren yeni bir sütuna sahiptir. + +#### Hata düzeltmeleri: {#bug-fixes-4} + +- Bir birleştirme tablosu kullanarak dağıtılmış tablolar artık bir koşulla bir SELECT sorgusu için doğru şekilde çalışır. `_table` alan. +- Veri parçalarını kontrol ederken ReplicatedMergeTree nadir bir yarış durumu düzeltildi. +- Sabit Olası donma “leader election” bir sunucu başlatırken. +- Veri kaynağının yerel bir kopyasını kullanırken max\_replica\_delay\_for\_distributed\_queries ayarı göz ardı edildi. Bu sorun giderildi. +- Sabit yanlış davranış `ALTER TABLE CLEAR COLUMN IN PARTITION` varolan olmayan bir sütunu temizlemeye çalışırken. +- Boş diziler veya dizeleri kullanırken multiİf işlevinde bir istisna düzeltildi. +- Sabit aşırı bellek ayırmaları yerel biçimi serisini kaldırırken. +- Trie sözlüklerin sabit yanlış otomatik güncelleme. +- Örnek kullanırken bir birleştirme tablosundan bir GROUP BY yan tümcesi ile sorguları çalıştırırken bir özel durum düzeltildi. +- Distributed\_aggregation\_memory\_efficient = 1 kullanırken grup çökmesi düzeltildi. +- Şimdi veritabanını belirtebilirsiniz.ın ve JOİN sağ tarafında tablo. +- Paralel toplama için çok fazla iplik kullanıldı. Bu sorun giderildi. +- Sabit nasıl “if” işlev FixedString argümanları ile çalışır. +- 0 ağırlığında kırıkları için dağıtılmış bir tablodan yanlış çalıştı seçin. Bu sorun giderildi. +- Çalışma `CREATE VIEW IF EXISTS no longer causes crashes.` +- Sabit yanlış davranış input\_format\_skip\_unknown\_fields=1 ayarlanır ve negatif sayılar vardır. 
+- Sabit bir sonsuz döngü içinde `dictGetHierarchy()` sözlükte bazı geçersiz veriler varsa işlev. +- Sabit `Syntax error: unexpected (...)` bir In veya JOIN yan tümcesi ve birleştirme tablolarındaki alt sorgularla dağıtılmış sorguları çalıştırırken hatalar. +- Sözlük tablolarından bir seçme sorgusunun yanlış yorumlanması düzeltildi. +- Sabit “Cannot mremap” 2 milyardan fazla öğe içeren ın ve JOIN yan tümcelerinde diziler kullanılırken hata oluştu. +- Kaynak olarak MySQL ile sözlükler için yük devretme düzeltildi. + +#### ClickHouse geliştirmek ve birleştirmek için geliştirilmiş iş akışı: {#improved-workflow-for-developing-and-assembling-clickhouse-1} + +- Yapılar Arcadia'da monte edilebilir. +- Clickhouse'u derlemek için gcc 7'yi kullanabilirsiniz. +- Ccache+distcc kullanarak paralel yapılar artık daha hızlı. + +### ClickHouse yayın 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04} + +#### Yenilik: {#new-features-5} + +- Dağıtılmış DDL (örneğin, `CREATE TABLE ON CLUSTER`) +- Çoğaltılan sorgu `ALTER TABLE CLEAR COLUMN IN PARTITION.` +- Sözlük tabloları için motor (bir tablo şeklinde sözlük verilerine erişim). +- Sözlük veritabanı motoru (bu tür veritabanı otomatik olarak bağlı tüm dış sözlükler için sözlük tabloları vardır). +- Kaynağa bir istek göndererek sözlükteki güncellemeleri kontrol edebilirsiniz. +- Nitelikli sütun adları +- Çift tırnak işaretleri kullanarak tanımlayıcıları alıntı. +- HTTP arabirimindeki oturumlar. +- Çoğaltılmış bir tablo için en iyi duruma getirme sorgusu yalnızca lider üzerinde çalışabilir. + +#### Geriye dönük uyumsuz değişiklikler: {#backward-incompatible-changes-2} + +- Kaldırıldı set GLOBAL. + +#### Küçük değişiklikler: {#minor-changes} + +- Şimdi bir uyarı tetiklendikten sonra günlük tam yığın izleme yazdırır. +- Başlangıçta hasarlı / ekstra veri parçalarının sayısının doğrulanmasını rahatlattı(çok fazla yanlış pozitif vardı). + +#### Hata düzeltmeleri: {#bug-fixes-5} + +- Kötü bir bağlantı düzeltildi “sticking” dağıtılmış bir tabloya eklerken. +- GLOBAL IN şimdi dağıtılmış bir tabloya bakan bir birleştirme tablosundan bir sorgu için çalışır. +- Bir Google Compute Engine sanal makinesinde yanlış sayıda çekirdek tespit edildi. Bu sorun giderildi. +- Önbelleğe alınmış harici sözlüklerin yürütülebilir bir kaynağının nasıl çalıştığındaki değişiklikler. +- Null karakter içeren dizelerin karşılaştırması düzeltildi. +- Float32 birincil anahtar alanlarının sabitler ile karşılaştırılması düzeltildi. +- Önceden, bir alanın boyutunun yanlış bir tahmini, aşırı büyük tahsisatlara neden olabilir. +- Alter kullanarak bir tabloya eklenen null bir sütun sorgularken bir kilitlenme düzeltildi. +- Satır sayısı sınırdan az ise, null bir sütuna göre sıralama yaparken bir kilitlenme düzeltildi. +- Yalnızca sabit değerlerden oluşan alt sorgu ile bir sipariş düzeltildi. +- Daha önce, çoğaltılmış bir tablo başarısız bir bırakma tablosundan sonra geçersiz durumda kalabilir. +- Boş sonuçları olan skaler alt sorgular için takma adlar artık kaybolmaz. +- Şimdi derleme kullanılan bir sorgu .so dosyası zarar görürse bir hata ile başarısız değil. 
diff --git a/docs/tr/whats_new/changelog/2018.md b/docs/tr/whats_new/changelog/2018.md new file mode 100644 index 00000000000..884c1725bbd --- /dev/null +++ b/docs/tr/whats_new/changelog/2018.md @@ -0,0 +1,1063 @@ +--- +machine_translated: true +machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818 +toc_priority: 78 +toc_title: '2018' +--- + +## ClickHouse sürümü 18.16 {#clickhouse-release-18-16} + +### ClickHouse sürümü 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21} + +#### Hata düzeltmeleri: {#bug-fixes} + +- ODBC kaynağı ile sözlükleri güncelleme ile ilgili sorunlara yol açan bir hata düzeltildi. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- Toplama işlevlerinin JIT derlemesi artık LowCardinality sütunlarıyla çalışır. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) + +#### Geliştirmeler: {#improvements} + +- Add theed the `low_cardinality_allow_in_native_format` ayar (varsayılan: etkin). Devre dışı bırakıldığında, LOWCARDİNALİTY sütunları, SELECT sorguları için sıradan sütunlara dönüştürülür ve INSERT sorguları için sıradan sütunlar beklenir. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) + +#### İyileştirmeler oluşturun: {#build-improvements} + +- MacOS ve ARM üzerine inşa için düzeltmeler. + +### ClickHouse yayın 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14} + +#### Yenilik: {#new-features} + +- `DEFAULT` ifadeler, yarı yapılandırılmış giriş biçimlerinde veri yüklenirken eksik alanlar için değerlendirilir (`JSONEachRow`, `TSKV`). Özelliği ile etkin `insert_sample_with_metadata` ayar. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) +- Bu `ALTER TABLE` sorgu şimdi var `MODIFY ORDER BY` bir tablo sütunu eklerken veya kaldırırken sıralama anahtarını değiştirme eylemi. Bu tablolar için yararlıdır `MergeTree` bu sıralama anahtarına dayalı olarak birleştirilirken ek görevler gerçekleştiren aile `SummingMergeTree`, `AggregatingMergeTree` ve bu yüzden. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) +- Tablolar için `MergeTree` aile, şimdi farklı bir sıralama anahtarı belirtebilirsiniz (`ORDER BY` ve dizin (`PRIMARY KEY`). Sıralama anahtarı dizin daha uzun olabilir. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) +- Add theed the `hdfs` tablo fonksiyonu ve `HDFS` hdfs'ye veri içe ve dışa aktarmak için tablo motoru. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) +- Base64 ile çalışmak için fonksiyonlar eklendi: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350) +- Şimdi hassasiyetini yapılandırmak için bir parametre kullanabilirsiniz `uniqCombined` toplama işlevi (HyperLogLog hücrelerinin sayısını seçin). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) +- Add theed the `system.contributors` ClickHouse taahhüt yapılan herkesin adlarını içeren tablo. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) +- Bölüm için ihmal yeteneği eklendi `ALTER TABLE ... FREEZE` tüm bölümleri bir kerede yedeklemek için sorgu. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) +- Katma `dictGet` ve `dictGetOrDefault` dönüş değeri türünü belirtmeyi gerektirmeyen işlevler. Tür sözlük açıklamasından otomatik olarak belirlenir. 
[Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564)
+- Artık tablo açıklamasında bir sütun için yorum belirtebilir ve `ALTER` kullanarak değiştirebilirsiniz. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377)
+- Basit anahtarlara sahip `Join` türü tablolardan okuma desteklendi. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Artık bir `Join` türü tablo oluştururken `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join` ve `join_overflow_mode` seçeneklerini belirtebilirsiniz. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Bir `Join` türü tabloyu sözlük gibi kullanmanıza olanak sağlayan `joinGet` işlevi eklendi. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Tablo anahtarları hakkında bilgi vermek için `system.tables` tablosuna `partition_key`, `sorting_key`, `primary_key` ve `sampling_key` sütunları eklendi. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
+- `system.columns` tablosuna `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key` ve `is_in_sampling_key` sütunları eklendi. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
+- `system.parts` tablosuna `min_time` ve `max_time` sütunları eklendi. Bölümleme anahtarı `DateTime` sütunlarından oluşan bir ifade olduğunda bu sütunlar doldurulur. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800)
+
+#### Hata düzeltmeleri: {#bug-fixes-1}
+
+- `LowCardinality` veri türü için düzeltmeler ve performans iyileştirmeleri. `LowCardinality(Nullable(...))` ile `GROUP BY`. `extremes` değerlerinin alınması. Yüksek mertebeden işlevlerin işlenmesi. `LEFT ARRAY JOIN`. Dağıtılmış `GROUP BY`. `Array` döndüren işlevler. `ORDER BY` yürütülmesi. `Distributed` tablolara yazma (nicelulu). `Native` protokolü uygulayan eski istemcilerden gelen `INSERT` sorguları için geriye dönük uyumluluk. `JOIN` için `LowCardinality` desteği. Tek bir akışta çalışırken geliştirilmiş performans. [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518)
+- `select_sequential_consistency` seçeneğinin çalışma şekli düzeltildi. Daha önce bu ayar etkinleştirildiğinde, yeni bir bölüme yazmaya başladıktan sonra bazen eksik bir sonuç döndürülüyordu. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863)
+- `ON CLUSTER` sorguları ve `ALTER UPDATE/DELETE` için DDL yürütülürken veritabanları doğru belirtilir. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
+- Bir görünüm içindeki alt sorgular için veritabanları doğru belirtilir. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521)
+- `VersionedCollapsingMergeTree` için `FINAL` ile `PREWHERE` kullanımındaki bir hata düzeltildi.
[7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1)
+- Artık henüz başlamamış, tablo kilidinin alınmasını bekleyen sorguları iptal etmek için `KILL QUERY` kullanabilirsiniz. [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517)
+- Saatlerin gece yarısı geri alınması durumu için tarih ve saat hesaplamaları düzeltildi (bu İran'da olur; Moskova'da 1981-1983 yılları arasında olmuştur). Daha önce bu, zamanın gerekenden bir gün önce sıfırlanmasına ve ayrıca tarih ile saatin metin biçiminde yanlış biçimlendirilmesine yol açıyordu. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819)
+- `VIEW` ve veritabanını atlayan alt sorgularla ilgili bazı durumlarda oluşan hatalar düzeltildi. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521)
+- Bir `MATERIALIZED VIEW`'dan eşzamanlı okuma ile, iç tablo kilitlenmediği için `MATERIALIZED VIEW` silme arasındaki yarış durumu düzeltildi. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694)
+- `Lock handler cannot be nullptr.` hatası düzeltildi. [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689)
+- `compile_expressions` seçeneği etkinken (varsayılan olarak etkindir) sorgu işleme düzeltildi. `now` işlevi gibi belirlenimci olmayan sabit ifadeler artık açılmıyor. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457)
+- `toDecimal32/64/128` işlevlerine sabit olmayan bir ölçek argümanı belirtilirken oluşan çökme düzeltildi.
+- `Nullable` olan `Array` türündeki bir sütuna `Values` biçiminde `NULL` öğeli bir dizi eklemeye çalışırken oluşan hata düzeltildi (eğer `input_format_values_interpret_expressions` = 1 ise). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503)
+- ZooKeeper kullanılamadığında `DDLWorker`'daki sürekli hata günlüğü düzeltildi. [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2)
+- `Date` ve `DateTime` argüman türlerinden `quantile*` işlevleri için dönüş türü düzeltildi. [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580)
+- İfade olmadan basit bir takma ad belirtildiğinde `WITH` yan tümcesi düzeltildi. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570)
+- `enable_optimize_predicate_expression` etkinken adlandırılmış alt sorgular ve nitelikli sütun adları içeren sorguların işlenmesi düzeltildi. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588)
+- Materyalleştirilmiş görünümlerle çalışırken oluşan `Attempt to attach to nullptr thread group` hatası düzeltildi. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623)
+- `arrayReverse` işlevine belirli yanlış argümanlar iletilirken oluşan çökme düzeltildi. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- `extractURLParameter` işlevindeki arabellek taşması düzeltildi. Performans iyileştirildi. Sıfır bayt içeren dizelerin doğru işlenmesi eklendi. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5)
+- `lowerUTF8` ve `upperUTF8` işlevlerindeki arabellek taşması düzeltildi. Bu işlevleri `FixedString` türü argümanlar üzerinde yürütme yeteneği kaldırıldı. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662)
+- `MergeTree` tabloları silinirken oluşan nadir bir yarış durumu düzeltildi.
[\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680)
+- `Buffer` tablolardan okuma ile hedef tablolarda eşzamanlı `ALTER` veya `DROP` yürütme sırasındaki yarış durumu düzeltildi. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719)
+- `max_temporary_non_const_columns` sınırı aşıldığında oluşan segfault düzeltildi. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
+
+#### Geliştirmeler: {#improvements-1}
+
+- Sunucu, işlenmiş yapılandırma dosyalarını artık `/etc/clickhouse-server/` dizinine yazmıyor. Bunun yerine onları `path` içindeki `preprocessed_configs` dizinine kaydediyor. Bu, `clickhouse` kullanıcısının `/etc/clickhouse-server/` dizinine yazma erişimine sahip olmadığı anlamına gelir ve güvenliği artırır. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443)
+- `min_merge_bytes_to_use_direct_io` seçeneği varsayılan olarak 10 GiB'e ayarlandı. MergeTree ailesinden tabloların büyük bölümlerini oluşturan birleştirmeler, aşırı sayfa önbelleği tahliyesini önleyen `O_DIRECT` modunda gerçekleştirilir. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504)
+- Çok sayıda tablo olduğunda sunucu başlatma hızlandırıldı. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398)
+- Yinelemeler arasındaki bağlantılar için bir bağlantı havuzu ve HTTP `Keep-Alive` eklendi. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594)
+- Sorgu sözdizimi geçersizse, `HTTP` arabiriminde `400 Bad Request` kodu döndürülür (daha önce 500 döndürülüyordu). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab)
+- `join_default_strictness` seçeneği, uyumluluk için varsayılan olarak `ALL` değerine ayarlandı. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe)
+- Geçersiz veya karmaşık düzenli ifadeler için `re2` kütüphanesinden `stderr`'e günlük kaydı kaldırıldı. [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723)
+- `Kafka` tablo motoru için eklendi: Kafka'dan okumaya başlamadan önce abonelikler denetlenir; tablo için kafka\_max\_block\_size ayarı. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396)
+- `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32` ve `murmurHash3_64` işlevleri artık herhangi bir sayıda argümanla ve demet (tuple) biçimindeki argümanlarla çalışıyor. [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519)
+- `arrayReverse` işlevi artık her tür diziyle çalışıyor. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- `timeSlots` işlevine isteğe bağlı bir parametre eklendi: yuva boyutu. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724)
+- `FULL` ve `RIGHT JOIN` için `max_block_size` ayarı, sağ tablodan gelen birleştirilmemiş veri akışı için kullanılır. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699)
+- TLS'yi etkinleştirmek için `clickhouse-benchmark` ve `clickhouse-performance-test` araçlarına `--secure` komut satırı parametresi eklendi. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690)
+- Bir `Buffer` türü tablonun yapısı hedef tablonun yapısıyla eşleşmediğinde tür dönüştürme yapılır. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603)
+- Belirtilen zaman aralığı boyunca hareketsizlikten sonra canlı tutma paketlerini etkinleştirmek için `tcp_keep_alive_timeout` seçeneği eklendi.
+- The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724)
+- For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for the stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699)
+- Added the `--secure` command line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690)
+- Type conversion when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603)
+- Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441)
+- Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652)
+- The modulo function works for `Date` and `DateTime` data types. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385)
+- Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL (both are illustrated after this list). [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804)
+- Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553)
+- Improved performance when reading with `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405)
+- Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563)
+- The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
+- Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`. [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612)
+- Correct return code for the clickhouse-server init script. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
+- The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added row `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
+- Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where quotes around the date are omitted by mistake. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+- Fixed the behavior of stateful functions like `rowNumberInAllBlocks`. They previously output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
+- If the `force_restore_data` file can't be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
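+
+A brief sketch of the synonyms and the `SUBSTRING` sugar mentioned in the list above; the string literal is arbitrary:
+
+```sql
+SELECT
+    SUBSTRING('ClickHouse' FROM 6 FOR 5) AS s1, -- 'House', SQL-standard form
+    substr('ClickHouse', 1, 5)           AS s2, -- 'Click', synonym of substring
+    LCASE('ClickHouse')                  AS s3, -- 'clickhouse', case-insensitive synonym of lower
+    POWER(2, 10)                         AS p;  -- 1024, synonym of pow
+```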
+
+#### Build improvements: {#build-improvements-1}
+
+- Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
+- Profiling with `jemalloc` is enabled by default for debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
+- Added the ability to run integration tests when only `Docker` is installed on the system. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
+- Added fuzz expression tests in SELECT queries. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
+- Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions. [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
+- Improved the method for starting clickhouse-server in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663)
+- For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695)
+- Fixes for builds on ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)
+
+#### Backward incompatible changes: {#backward-incompatible-changes}
+
+- Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use explicit type conversion `= toDate(17883)` [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+
+## ClickHouse release 18.14 {#clickhouse-release-18-14}
+
+### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
+
+#### Bug fixes: {#bug-fixes-2}
+
+- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
+- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
+- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
+
+#### Build improvements: {#build-improvements-2}
+
+- Fixes for builds on ARM.
+
+### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}
+
+#### Bug fixes: {#bug-fixes-3}
+
+- Fixed an error in the `dictGet...` function for dictionaries of type `range`, if one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
+- Fixed an error that caused messages `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log; it happened only on fresh enough versions of the Linux kernel. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
+- Fixed segfault in the function `empty` for an argument of `FixedString` type. [Daniel Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
+- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)
+
+#### Build changes: {#build-changes}
+
+- Fixed build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}
+
+#### Bug fixes: {#bug-fixes-4}
+
+- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
+- Fixed synchronous insertion into a `Distributed` table with a column list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
+- Fixed a rare race condition that could lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed a query deadlock in the case when query thread creation fails with the `Resource temporarily unavailable` error. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)
+
+### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}
+
+#### Bug fixes: {#bug-fixes-5}
+
+- The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to “Memory limit exceeded” errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)
+
+### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}
+
+#### Bug fixes: {#bug-fixes-6}
+
+- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)
+
+#### Build changes: {#build-changes-1}
+
+- Fixed problems (llvm-7 from system, macos) [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}
+
+#### Bug fixes: {#bug-fixes-7}
+
+- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
+- Fixed `ON CLUSTER` queries in the case of secure connections turned on in the cluster configuration (the `<secure>` flag). [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
+- Fixed an error in queries that used `SAMPLE`, `PREWHERE` and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
+- Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [3544](https://github.com/ClickHouse/ClickHouse/pull/3544)
+
+#### Performance improvements: {#performance-improvements}
+
+- Fixed a performance regression of queries with `GROUP BY` of columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
+- Fixed a performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)
+
+#### Build improvements: {#build-improvements-3}
+
+- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)
+
+### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}
+
+#### Bug fixes: {#bug-fixes-8}
+
+- Fixed a crash on joining two unnamed subqueries. [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
+- Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
+- Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)
+
+### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}
+
+#### Bug fixes: {#bug-fixes-9}
+
+- Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
+- Fixed errors when merging data in tables containing arrays inside Nested structures. [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
+- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
+- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)
+
+### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}
+
+- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
+- The `enable_optimize_predicate_expression` setting is disabled by default.
+
+### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}
+
+#### New features: {#new-features-1}
+
+- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available; see the example after this list). [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172)
+- Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770)
+- Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
+- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
+- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
+- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`) [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
+- Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
+- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
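+
+A minimal sketch of the `WITH CUBE` modifier noted in the list above; the `sales` table and its columns are hypothetical, used only for illustration:
+
+```sql
+SELECT region, product, sum(amount) AS total
+FROM sales
+GROUP BY region, product WITH CUBE;
+
+-- Alternative spelling of the same query:
+--   GROUP BY CUBE(region, product)
+-- Both produce subtotals for every combination of the grouping keys.
+```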
+
+#### Experimental features: {#experimental-features}
+
+- GROUP BY clause for `LowCardinality data types.` [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
+- Optimized calculation of expressions for `LowCardinality data types.` [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)
+
+#### Improvements: {#improvements-2}
+
+- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- In the absence of a `JOIN` kind (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
+- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
+- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
+- Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message “File … already exists”, and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101)
+- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257).
+- `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
+- Improved performance for reading data in `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332)
+- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306)
+- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390)
+- There is always space reserved for query\_id in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
+- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- Added compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171)
+- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166)
+- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto format`. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216)
+- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219)
+- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330)
+- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
+- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281)
+- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
+- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
+- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
+- Reduced the number of `open` and `close` system calls when reading from a `MergeTree table`. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
+- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
+
+#### Bug fixes: {#bug-fixes-10}
+
+- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
+- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
+- If, after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it could not be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
+- Fixed incorrect calculation of total memory consumption by queries (because of the incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
+- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
+- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
+- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
+- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect to another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server could not start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
+- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
+- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229)
+- Corrected type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211)
+- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231)
+- Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;` [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155)
+- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163)
+- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165)
+- Bug fixes in the `ALTER UPDATE` query.
+- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197)
+- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188)
+- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
+- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224)
+- Fixed the segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
+- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241)
+- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
+- Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254)
+- Fixed the use of substitutions in `clickhouse-performance-test`, if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263)
+- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337)
+- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357)
+- Fixed a segfault that could occur in rare cases after an optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339)
+- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for default use. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329)
+
+## ClickHouse release 18.12 {#clickhouse-release-18-12}
+
+### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16}
+
+#### New features: {#new-features-2}
+
+- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126)
+- Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123)
+- The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
+- The `Decimal` type now supports mathematical functions (`exp`, `sin` and so on). [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
+- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
+
+#### Bug fixes: {#bug-fixes-11}
+
+- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159)
+- Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130)
+- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
+- Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn't happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-2}
+
+- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
+
+### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13}
+
+#### New features: {#new-features-3}
+
+- Added support for `ALTER UPDATE` queries. [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035)
+- Added the `allow_ddl` option, which restricts the user's access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104)
+- Added the `min_merge_bytes_to_use_direct_io` option for the `MergeTree` engines, which allows you to set a threshold for the total size of a merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117)
+- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099)
+
+#### Improvements {#improvements-3}
+
+- If a data part remains unchanged during a mutation, it isn't downloaded by replicas. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103)
+- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106)
+
+#### Bug fixes: {#bug-fixes-12}
+
+- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118)
+- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
+- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098)
+- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097)
+
+### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10}
+
+#### New features: {#new-features-4}
+
+- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the `allow_experimental_decimal_type` setting. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047)
+- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948)
+- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787)
+- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447)
+- Ctrl+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877)
+- Added the `join_default_strictness` setting (values: `"`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982)
+- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that include the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016)
+- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975)
+- Added the `arrayCumSumNonNegative` and `arrayDifference` functions (sketched below). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942)
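+
+A quick sketch of the two array functions from the previous entry, with the expected results in comments (input values chosen arbitrarily):
+
+```sql
+SELECT
+    arrayDifference([1, 4, 2, 5])         AS diff, -- [0, 3, -2, 3]: difference with the previous element
+    arrayCumSumNonNegative([1, -3, 2, 4]) AS cums; -- [1, 0, 2, 6]: running sum, clamped at zero
+```
+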
+- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887)
+- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034)
+- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
+
+#### Experimental features: {#experimental-features-1}
+
+- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830)
+- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077)
+
+#### Improvements: {#improvements-4}
+
+- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
+- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is string and the others are fixed length.
+- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
+- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980)
+- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955)
+- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953)
+- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn't using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938)
+- Conditions are cast into the View (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985)
+- The `UUID` data type is supported in The-Alchemist dictionaries. [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822)
+- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974)
+- When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958)
+- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL` (see the sketch below). [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920)
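+
+A one-line sketch of the shortened `CASE` form from the previous entry; rows that match no condition come back as `NULL`:
+
+```sql
+SELECT number, CASE WHEN number % 2 = 0 THEN 'even' END AS parity
+FROM system.numbers LIMIT 4;
+-- Equivalent to writing ... ELSE NULL END explicitly.
+```
+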
+- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971)
+- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m` (both spellings are sketched after this list). [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
+- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
+- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn't happen as often.
+- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801)
+- Duplicate columns can be used in a `USING` clause for `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006)
+- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003)
+- The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885)
+- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909)
+- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
+- `ALTER DELETE` queries work for materialized views.
+- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables.
+- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025)
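+
+The two limit spellings mentioned in the list above, sketched against the built-in `system.numbers` table:
+
+```sql
+SELECT number FROM system.numbers LIMIT 5 OFFSET 10; -- same as LIMIT 10, 5: rows 10..14
+SELECT TOP 5 number FROM system.numbers;             -- same as LIMIT 5
+```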
+
+#### Bug fixes: {#bug-fixes-13}
+
+- Fixed an issue with `Dictionary` tables (which throw a `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913)
+- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during a merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049)
+- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038)
+- Fixed the possibility of data loss when inserting in `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964)
+- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000)
+- Fixed the error searching column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994)
+- Fixed the “Not found column” error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087)
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093)
+- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910)
+- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024)
+- Fixed an error in the `zlib-ng` library that could lead to segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854)
+- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084)
+- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
+- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989)
+- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960)
+- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937)
+- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929)
+- Fixed the segfault when re-initializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917)
+- Fixed potential blocking when working with ZooKeeper.
+- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
+- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808)
+
+#### Security fix: {#security-fix}
+
+- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921)
+- Fixed incorrect validation of the file path in the `catBoostPool` table function. [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894)
+- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user's configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-3}
+
+- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level.
+
+#### Build changes: {#build-changes-2}
+
+- Most integration tests can now be run by commit.
+- Code style checks can also be run by commit.
+- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912)
+- When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957)
+- Debugging the build uses the `jemalloc` debug option.
+- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950)
+
+## ClickHouse release 18.10 {#clickhouse-release-18-10}
+
+### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13}
+
+#### New features: {#new-features-5}
+
+- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
+- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791)
+- Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834)
+- Support for `UUID` in the key columns.
+
+#### Improvements: {#improvements-5}
+
+- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777)
+- External dictionaries can be removed without restarting the server when they are removed from config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779)
+- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781)
+- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618)
+- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815)
+- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784)
+- Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841)
+- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
+- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782)
+- Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812)
+- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831)
+- Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832)
+- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855)
+
+#### Bug fixes: {#bug-fixes-14}
+
+- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
+- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814)
+- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930)
+- Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823)
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a subquery if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094)
+- Fixed a memory leak if an exception occurred when connecting to a MySQL server.
+- Fixed incorrect clickhouse-client response code in case of a query error.
+- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795)
+
+#### Backward incompatible changes {#backward-incompatible-changes-4}
+
+- Removed support for CHECK TABLE queries for Distributed tables.
+
+#### Build changes: {#build-changes-3}
+
+- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. With highly competitive loads, CPU usage in userspace and in system shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773)
+- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807)
+- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789)
+- Use of mariadb-connector-c from a submodule. [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785)
+- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).
+
+## ClickHouse release 18.6 {#clickhouse-release-18-6}
+
+### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02}
+
+#### New features: {#new-features-6}
+
+- Added support for ON expressions for the JOIN ON syntax (see the sketch after this list):
+  `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
+  The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table. [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742)
+- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
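+
+A minimal sketch of the JOIN ON syntax described above; the tables `orders` and `payments` and their columns are hypothetical, used only for illustration:
+
+```sql
+SELECT o.id, p.amount
+FROM orders AS o
+INNER JOIN payments AS p
+    ON o.id = p.order_id AND o.region_id + 1 = p.region_code;
+-- Each side of every equality is an expression over the columns of one table,
+-- and the equalities are chained with AND, as the entry above requires.
+```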
+
+#### Improvements: {#improvements-6}
+
+- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646)
+
+## ClickHouse release 18.5 {#clickhouse-release-18-5}
+
+### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31}
+
+#### New features: {#new-features-7}
+
+- Added the hash function `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756).
+
+#### Improvements: {#improvements-7}
+
+- Now you can use the `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables.
+- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf functions` [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752).
+
+#### Bug fixes: {#bug-fixes-15}
+
+- Fixed a possible bug when starting a replica [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759).
+
+## ClickHouse release 18.4 {#clickhouse-release-18-4}
+
+### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28}
+
+#### New features: {#new-features-8}
+
+- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721).
+- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster table function` [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708).
+- Support for `HTTP Basic` authentication in the replication protocol [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727).
+- The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699).
+- Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701).
+
+#### Improvements: {#improvements-8}
+
+- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694).
+- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
+- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689).
+
+#### Bug fixes: {#bug-fixes-16}
+
+- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
+- Fixed a bug in the `windowFunnel aggregate function` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735).
+- Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
+- Fixed server crash when using the `countArray()` aggregate function.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-5}
+
+- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the `''` value (a sketch follows).
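+
+A sketch of the migration described above; the broker, topic, and schema values are placeholders:
+
+```sql
+-- Old declaration (before this change):
+--   ENGINE = Kafka('broker:9092', 'topic1', 'group1', 'CapnProto', 'schema:Message', 1)
+-- New declaration: kafka_row_delimiter ('' here) is inserted before kafka_schema:
+CREATE TABLE queue (s String)
+ENGINE = Kafka('broker:9092', 'topic1', 'group1', 'CapnProto', '', 'schema:Message', 1);
+```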
+- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)).
+- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't written in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
+- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-6}
+
+- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
+
+## ClickHouse release 1.1 {#clickhouse-release-1-1}
+
+### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12}
+
+#### New features: {#new-features-10}
+
+- Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)).
+
+#### Bug fixes: {#bug-fixes-18}
+
+- Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
+- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
+- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)).
+- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table.
+- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
+- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
+
+### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06}
+
+#### New features: {#new-features-11}
+
+- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
+- Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this with the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).
+
+#### Improvements: {#improvements-10}
+
+- Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
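+
+    A minimal sketch of the kind of query this helps; the `hits` table and its `UserID` primary-key prefix are hypothetical:
+
+    ``` sql
+    -- When UserID is a prefix of the table's primary key, the IN filter
+    -- can be evaluated with the index instead of a full scan.
+    SELECT count()
+    FROM hits
+    WHERE UserID IN (123, 456, 789)
+    ```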
+- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
+- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
+- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
+- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
+- Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
+- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).
+
+#### Bug fixes: {#bug-fixes-19}
+
+- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
+- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
+- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
+- Fixed a bug in the index for MergeTree tables if the primary key column is located inside a function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
+- Fixed a segfault if `macros` are used but they aren't in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
+- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
+- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
+
+#### Security fix: {#security-fix-1}
+
+- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
+
+### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28}
+
+#### New features: {#new-features-12}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of this type of queries.
+- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
+- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260))
+- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
+- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
+- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
+- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
+- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
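+
+    A quick, self-contained sketch of the two new functions (constant arguments, so it runs anywhere):
+
+    ``` sql
+    SELECT
+        startsWith('ClickHouse', 'Click') AS s, -- 1
+        endsWith('ClickHouse', 'House')   AS e  -- 1
+    ```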
+- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)).
+- The password to `clickhouse-client` can be entered interactively.
+- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
+- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
+- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263))
+- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
+- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
+
+#### Experimental features: {#experimental-features-2}
+
+- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272))
+- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).
+
+#### Bug fixes: {#bug-fixes-20}
+
+- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
+- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
+- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
+- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
+- Fixed an error when analyzing queries with recursive aliases.
+- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
+- User profile settings were not applied when using sessions in the HTTP interface.
+- Fixed how settings are applied from the command line parameters in clickhouse-local.
+- The ZooKeeper client library now uses the session timeout received from the server.
+- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
+- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
+- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
+- Fixed type mapping in the ODBC table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
+- Fixed type comparisons for `DateTime` with and without a time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
+- Fixed syntactic parsing and formatting of the `CAST` operator.
+- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
+- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
+- Fixed SSRF in the remote() table function.
+- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).
+
+#### Improvements: {#improvements-11}
+
+- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
+- Improved LZ4 compression performance.
+- Faster analysis for queries with a large number of JOINs and subqueries.
+- The DNS cache is now updated automatically when there are too many network errors.
+- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
+- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
+- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
+- A server with replicated tables can start even if you haven't configured ZooKeeper.
+- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
+- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).
+
+#### Build changes: {#build-changes-4}
+
+- The gcc8 compiler can be used for builds.
+- Added the ability to build llvm from a submodule.
+- The version of the librdkafka library has been updated to v0.11.4.
+- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
+- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
+- CMake now generates files for ninja by default (like when using `-G Ninja`).
+- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
+- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-7}
+
+- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
+- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query, and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: update the server on the entire cluster.
+
+### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01}
+
+#### Bug fixes: {#bug-fixes-21}
+
+- Fixed an error that in some cases caused ZooKeeper operations to block.
+
+### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22}
+
+#### Bug fixes: {#bug-fixes-22}
+
+- Fixed a slowdown of the replication queue if a table has many replicas.
+
+### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14}
+
+#### Bug fixes: {#bug-fixes-23}
+
+- Fixed a node leak in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server.
+
+### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21}
+
+#### New features: {#new-features-13}
+
+- Added the table function `file(path, format, structure)`.
An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`, then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
+
+#### Improvements: {#improvements-12}
+
+- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
+- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
+
+#### Bug fixes: {#bug-fixes-24}
+
+- Fixed incorrect behavior of the `IN` operator when selecting from `MATERIALIZED VIEW`.
+- Fixed incorrect filtering by the partition index in expressions like `partition_key_column IN (...)`.
+- Fixed inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
+- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
+- Fixed freezing of `KILL QUERY`.
+- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-8}
+
+- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
+
+### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16}
+
+#### New features: {#new-features-14}
+
+- The logging level can be changed without restarting the server.
+- Added the `SHOW CREATE DATABASE` query.
+- The `query_id` can be passed to `clickhouse-client` (elBroom).
+- New setting: `max_network_bandwidth_for_all_users`.
+- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
+- Added information about the size of data parts in uncompressed form in the system table.
+- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
+- Configuration of the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`
+- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed; it's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
+- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson)
+- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
+
+#### Improvements: {#improvements-13}
+
+- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
+- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
+- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
+- The `lengthUTF8` function runs faster (zhang2014).
+- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
+- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
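+
+    A minimal sketch of setting these from the client side; the values are illustrative, and both are existing session settings:
+
+    ``` sql
+    -- The client sends these to the server, which applies them in mirrored
+    -- order on its own socket (its send_timeout takes the client's
+    -- receive_timeout, and vice versa).
+    SET send_timeout = 300;
+    SET receive_timeout = 300;
+    ```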
+- More robust crash recovery for asynchronous insertion into `Distributed` tables.
+- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
+
+#### Bug fixes: {#bug-fixes-25}
+
+- Fixed an error with `IN` when the left side of the expression is `Nullable`.
+- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
+- The `max_execution_time` limit now works correctly with distributed queries.
+- Fixed errors when calculating the size of composite columns in the `system.columns` table.
+- Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS.`
+- Fixed errors in `StorageKafka` (\#\#2075)
+- Fixed server crashes from invalid arguments of certain aggregate functions.
+- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
+- The `Too many parts` state is less likely to happen when inserting into aggregating materialized views (\#\#2084).
+- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
+- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
+- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
+- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
+
+#### Build changes: {#build-changes-5}
+
+- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
+- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-9}
+
+- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
+- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
+
+### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16}
+
+#### New features: {#new-features-15}
+
+- Added the `system.macros` table and auto updating of macros when the config file is changed.
+- Added the `SYSTEM RELOAD CONFIG` query.
+- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)).
+
+#### Improvements: {#improvements-14}
+
+- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
+- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
+
+#### Bug fixes: {#bug-fixes-26}
+
+- Fixed the `Illegal PREWHERE` error when reading from Merge tables over `Distributed` tables.
+- Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
+- Fixed a race condition when reading from the `system.parts_columns` table.
+- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to time out.
+- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
+- Fixed incorrect dates in the `system.parts` table.
+- Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
+- Fixed the vertical merging algorithm for an empty `ORDER BY` table.
+- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
+- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
+- Removed extraneous error-level logging of `Not found column ... in block`.
+
+### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11}
+
+#### New features: {#new-features-16}
+
+- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
+- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
+- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
+- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
+- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
+- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
+- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
+- Added the `arrayCumSum` function (Javi Santana).
+- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read a DateTime from a string containing text in a wide variety of possible formats.
+- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
+- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
+- The `remote` and `cluster` table functions can be used in `INSERT` queries.
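+
+    A minimal sketch of an insert through a table function; the host and the target table's layout are placeholders:
+
+    ``` sql
+    -- Write rows to default.hits on a remote server instead of a local table.
+    INSERT INTO TABLE FUNCTION remote('example-host:9000', default.hits)
+    VALUES (1, '2018-03-11')
+    ```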
+- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
+- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
+- Added additional information about merges in the `system.part_log` table.
+- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
+- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
+- Added the `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
+- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
+- Added the `system_profile` configuration parameter for the settings used by internal processes.
+- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
+- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
+- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
+- Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
+- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
+- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
+- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
+- Added the `VersionedCollapsingMergeTree` table engine.
+- Support for rows and arbitrary numeric types for the `library` dictionary source.
+- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`).
+- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
+- `RENAME TABLE` can be performed for `VIEW`.
+- Added the `throwIf` function.
+- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
+- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
+
+#### Improvements: {#improvements-15}
+
+- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
+- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
+- Added the `allow_distributed_ddl` option.
+- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
+- Files with substitutions from `config.d` directories are loaded in alphabetical order.
+- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
+- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
+- When running a query, table valued functions run once. Previously, `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server.
+- The `MkDocs` documentation generator is used.
+- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
+- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
+- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in release 1.1.54337.
+- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
+
+#### Bug fixes: {#bug-fixes-27}
+
+- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
+- Fixed a bug in merges for `ReplacingMergeTree` tables.
+- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
+- Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
+- Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`.
+- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
+- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
+- Fixed the `DROP DATABASE` query for `Dictionary` databases.
+- Fixed the low precision of the `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
+- Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014).
+- Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
+- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
+- Fixed a slight performance regression with functions that use regular expressions.
+- Fixed a performance regression when creating multidimensional arrays from complex expressions.
+- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
+- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
+- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
+- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
+- Fixed a bug when filtering tables by the virtual `_table` column in queries to `Merge` tables.
+- Fixed a bug when using `ALIAS` columns in `Distributed` tables.
+- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
+- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
+- Fixed a crash when passing arrays of different sizes to the `arrayReduce` function when using aggregate functions from multiple arguments.
+- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
+- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-10}
+
+- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
+- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
+- Removed the `UnsortedMergeTree` engine.
+
+### ClickHouse release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05}
+
+- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
+- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
+- Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.
+
+### ClickHouse release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22}
+
+This release contains bug fixes for the previous release 1.1.54337:
+
+- Fixed a regression in 1.1.54337: if the default user has readonly access, the server refuses to start up with the message `Cannot create database in readonly mode`.
+- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
+- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
+- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
+- Fixed a bug that may lead to inconsistent merges after an OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
+- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
+- Fixed a bug in the implementation of NULL.
+
+### ClickHouse release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18}
+
+#### New features: {#new-features-17}
+
+- Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables.
+- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
+- Improved support for time zones. The `DateTime` data type can be annotated with the time zone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When time zones are specified in functions for `DateTime` arguments, the return type will track the time zone, and the value will be displayed as expected.
+- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
+- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
+- Added the `clickhouse format` tool for formatting queries.
+- Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
+- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
+- Added a column with documentation for the `system.settings` table (Kirill Shvakov).
+- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
+- Added the `system.models` table with information about loaded `CatBoost` machine learning models.
+- Added the `mysql` and `odbc` table functions and the corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
+- Added the possibility to pass an argument of type `AggregateFunction` to the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
+- Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
+- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
+- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
+- Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov).
+- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov).
+- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
+- Added the `intExp3` and `intExp4` functions.
+- Added the `sumKahan` aggregate function.
+- Added the to\*Number\*OrNull functions, where \*Number\* is a numeric type.
+- Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014).
+- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
+- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
+- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
+- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
+- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
+
+#### Performance optimizations: {#performance-optimizations}
+
+- Improved performance of the aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
+- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
+- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
+- Improved performance and precision of parsing floating point numbers.
+- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
+- Improved performance of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability.
The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
+
+#### Bug fixes: {#bug-fixes-28}
+
+- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
+- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
+- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
+- Fixed unexpected results of passing the `Date` argument to `toStartOfDay`.
+- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
+- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
+- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
+- Various fixes for the `Kafka` engine (Marek Vavruša).
+- Fixed incorrect behavior of the `Join` table engine (Amos Bird).
+- Fixed incorrect allocator behavior under FreeBSD and OS X.
+- The `extractAll` function now supports empty matches.
+- Fixed an error that blocked usage of `libressl` instead of `openssl`.
+- Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
+- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
+- Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod).
+- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
+- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
+- Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
+- Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
+- Redesigned support for `Nullable` types. Fixed bugs that could lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, the `join_use_nulls` mode, Nullable types as arguments of the `OR` operator, etc.
+- Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
+- Stricter checks for allowed combinations of composite columns.
+- Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
+- Fixed a bug in the `topK` aggregate function in a generic case.
+- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
+- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
+- Fixed the precision of the `exp10` function.
+- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
+- Fixed the crash when incorrect data types are specified.
+- Fixed the behavior of `DISTINCT` in the case when all columns are constants.
+- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
+- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
+- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
+- Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
+- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
+- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
+
+#### Build improvements: {#build-improvements-4}
+
+- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
+- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
+- Added the `clickhouse-test` package. It can be used to run functional tests.
+- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
+- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
+- Added support for `Cap'n'Proto` in the default build.
+- Changed the format of documentation sources from `Restricted Text` to `Markdown`.
+- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
+- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
+- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang`, the default is `libc++` instead of `libstdc++`.
+- Extracted the `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-11}
+
+- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
+- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
+- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion.
+- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
+- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
+- Changed the state format for the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
+- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you could still specify it without parameters in the AggregateFunction data type.
Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
+- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
+
+#### Please note when upgrading: {#please-note-when-upgrading}
+
+- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
+- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
+
+## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017}
diff --git a/docs/tr/whats_new/changelog/2019.md b/docs/tr/whats_new/changelog/2019.md
new file mode 100644
index 00000000000..065cb822bc1
--- /dev/null
+++ b/docs/tr/whats_new/changelog/2019.md
@@ -0,0 +1,2074 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 77
+toc_title: '2019'
+---
+
+## ClickHouse release v19.17 {#clickhouse-release-v19-17}
+
+### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27}
+
+#### Bug Fix {#bug-fix}
+
+- Fixed potential buffer overflow in decompression. A malicious user could pass fabricated compressed data that would cause a read after the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of String data type (that require UTF-8 validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed dictionaries with a ClickHouse `VIEW` source; now reading such dictionaries doesn't cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed checking whether a client host is allowed by the host\_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fixed the `INSERT INTO table SELECT ... FROM mysql(...)` table function.
[\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fixed a segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file that doesn't exist. Now in this case the file is created and the insert is then processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+- Fixed a segfault when an `EXISTS` query was used without a `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the return type for the functions `rand` and `randConstant` in case of an invalid argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed `DROP DICTIONARY IF EXISTS db.dict`; now it doesn't throw an exception if `db` doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- If a table wasn't completely dropped because of a server crash, the server will try to restore and load it [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed a trivial count query for a distributed table if there are more than two shard local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving of a part that the specified part merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Expressions for dictionaries can now be specified as strings. This is useful for calculating attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fixed a very rare race in `clickhouse-copier` because of an overflow in zxid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Fixed the bug when, after a query failed (due to “Too many simultaneous queries”, for example), it would not read external tables info, and the
+  next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Avoid null dereference after “Unknown packet X from server” [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Restored support of all ICU locales, added the ability to apply collations for constant expressions, and added the language name to the system.collations table.
[\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fixed the `CHECK TABLE` query for `*MergeTree` tables without a key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
+- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Fixed the bug that mutations are skipped for some attached parts due to their data\_version being larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang))
+- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Now an exception is thrown in case of using WITH TIES alongside LIMIT BY. And now it's possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed dictionary reload if it has an `invalidate_query`, which stopped updates and produced some exception on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Using column instead of AST to store scalar subquery results for better performance. The setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make it possible to upgrade from 19.16 and older versions without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird))
+
+#### New Feature {#new-feature}
+
+- Added the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin))
+- Made the `bloom_filter` type of index support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added the function `isValidJSON` to check that the passed string is valid JSON. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir))
+- Implemented the `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr))
+- Created the function `hex` for Decimal numbers.
It works like `hex(reinterpretAsString())`, but doesn't delete last zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb))
+- Added the `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz))
+- Added `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat))
+- Implemented the `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li))
+- Added the `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values; the result is a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang))
+- Implemented the `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab))
+- Added the `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat))
+
+#### Experimental Feature {#experimental-feature}
+
+- Support for processors (the new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-1}
+
+- Fixed incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fixed a rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Prevent message duplication when producing to a Kafka table if there are any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
+- Added handling of `SQL_TINYINT` and `SQL_BIGINT`, and fixed handling of the `SQL_FLOAT` data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Fixed aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Fixed `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Made `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations.
+- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is being found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a bug with tables that contain only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
+- Do not account memory for the Buffer engine in the max\_memory\_usage limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
+- Fix final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug in mutations that have a predicate with actions that require context (for example, functions for JSON), which could lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
+- Fix the mismatch of database and table name escaping in the `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fix a crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix `Not found column in block` when joining on an expression with RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
+- One more attempt to fix the infinite loop in the `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix a bug in the `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
+- Fixed an exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the scope of InterpreterSelectQuery for views with a query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
+
+#### Improvement {#improvement}
+
+- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Write the current batch for distributed sends atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
+- Throw an exception if we cannot detect the table for a column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
+- Queries with `HAVING` and without `GROUP BY` assume grouping by constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
+- Support parsing `(X,)` as a tuple, similar to Python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
+- Make the `range` function behave almost like the pythonic one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li))
+- Add `constraints` columns to the `system.settings` table [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
+- Better Null format for the TCP handler, so that it’s possible to use `select ignore() from table format Null` for perf measurement via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
+- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
+
+#### Performance Improvement {#performance-improvement}
+
+- The performance of aggregation over short string keys is improved. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
+- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
+- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
+- Vectorize processing of `arrayReduce` similar to the Aggregator’s `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- Minor improvements in the performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Add support for cross-compiling to the AARCH64 CPU architecture. Refactor the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
+- Unpack the darwin-x86\_64 and linux-aarch64 toolchains into the mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
+- Update the Docker image for the binary packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
+- Fixed compile errors on macOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
+- Some refactoring in query analysis logic: split the complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
+- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
+- Remove hardcoded paths in the `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
+- Allow using the mysql format without ssl [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
+
+#### Other {#other}
+
+- Added an ANTLR4 grammar for the ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release v19.16 {#clickhouse-release-v19-16}
+
+#### ClickHouse release v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
+
+- Fixed a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.
+
+#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}
+
+- Fix distributed subqueries incompatibility with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) [(tavplubix)](https://github.com/tavplubix)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix the check for local address in `ClickHouseDictionarySource`. [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fix a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
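+
+  A minimal sketch of the pattern this enables (all table names are hypothetical and must exist):
+
+  ```sql
+  CREATE MATERIALIZED VIEW kafka_consumer TO dest AS
+  SELECT key, count() AS cnt
+  FROM (SELECT key FROM kafka_source)  -- subquery over the Kafka table
+  GROUP BY key;
+  ```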
+#### New Feature {#new-feature-1}
+
+- Add the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
+
+### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Add missing arity validation for count/countIf. [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
+- Remove the legacy `asterisk_left_columns_only` setting (it was disabled by default). [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
+- Format strings for the Template data format are now specified in files. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
+
+#### New Feature {#new-feature-2}
+
+- Introduce uniqCombined64() to calculate cardinality greater than UINT\_MAX. [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
+- Support Bloom filter indexes on Array columns. [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) ([achimbab](https://github.com/achimbab))
+- Add a function `getMacro(name)` that returns a String with the value of the corresponding `<macros>` from the server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Set two configuration options for a dictionary based on an HTTP source: `credentials` and `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add a new ProfileEvent `Merge` that counts the number of launched background merges. [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
+- Add the fullHostName function that returns the fully qualified domain name. [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li))
+- Add functions `arraySplit` and `arrayReverseSplit`, which split an array by “cut off” conditions. They are useful in time sequence handling. [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
+- Add new functions that return the array of all matched indices in the multiMatch family of functions. [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
+- Add a new database engine `Lazy` that is optimized for storing a large number of small -Log tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita Vasilev](https://github.com/nikvas0))
+- Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the aggregate function combinators -OrNull and -OrDefault, which return null or default values when there is nothing to aggregate. [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) ([hcz](https://github.com/hczhcz))
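+
+  For illustration, a small invented query showing the two combinators above:
+
+  ```sql
+  SELECT maxOrNull(number), maxOrDefault(number)
+  FROM numbers(10)
+  WHERE number > 100;  -- nothing to aggregate: returns NULL and 0
+  ```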
+- Introduce the CustomSeparated data format that supports custom escaping and delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
+- Support Redis as a source of external dictionaries. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton Popov](https://github.com/CurtizJ))
+
+#### Bug Fix {#bug-fix-2}
+
+- Fix a wrong query result if it has a `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton Popov](https://github.com/CurtizJ))
+- Disabled the MariaDB authentication plugin, which depends on files outside of the project. [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy Baranov](https://github.com/yurriy))
+- Fix the exception `Cannot convert column ... because it is constant but values of constants are different in source and result`, which could rarely happen when the functions `now()`, `today()`, `yesterday()`, `randConstant()` are used. [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolay Kochetov](https://github.com/KochetovNicolai))
+- Fixed the issue of using the HTTP keep-alive timeout instead of the TCP keep-alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang Yu](https://github.com/yuzhichang))
+- For materialized views, the commit for Kafka is called after all data was written. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Fixed a wrong `duration_ms` value in the `system.part_log` table. It was off by a factor of ten. [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir Chebotarev](https://github.com/excitoon))
+- A quick fix to resolve the crash in the LIVE VIEW table and re-enable all LIVE VIEW tests. [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) ([vzakaznikov](https://github.com/vzakaznikov))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don’t put virtual columns into the .sql metadata when the table is created as `CREATE TABLE AS`. [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7))
+- Fix a segmentation fault in the `ATTACH PART` query. [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) ([alesapin](https://github.com/alesapin))
+- Fix the wrong result for some queries given by the optimization of empty IN subqueries and empty INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolay Kochetov](https://github.com/KochetovNicolai))
+- Fixing an AddressSanitizer error in the LIVE VIEW getHeader() method. [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) ([vzakaznikov](https://github.com/vzakaznikov))
+
+#### Improvement {#improvement-1}
+
+- Add a message in case a queue\_wait\_max\_ms wait takes place. [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat Khuzhin](https://github.com/azat))
+- Made the setting `s3_min_upload_part_size` table-level. [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) ([sundyli](https://github.com/sundy-li))
+- Squash left-hand blocks in partial merge join (optimization). [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not allow non-deterministic functions in mutations of Replicated table engines, because this can introduce inconsistencies between replicas. [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander Kazakov](https://github.com/Akazz))
+- Disable the memory tracker while converting an exception stack trace to a string. It can prevent the loss of error messages of type `Memory limit exceeded` on the server, which caused the `Attempt to read after eof` exception on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Miscellaneous format improvements. Resolves [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) ([tavplubix](https://github.com/tavplubix))
+- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left side type. Make it work properly for compound types – Array and Tuple. [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Support missing inequalities for ASOF JOIN. It’s possible to join the less-or-equal variant and the strict greater and less variants for the ASOF column in ON syntax. [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem Zuikov](https://github.com/4ertus2))
+- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not use more than 98K of memory in uniqCombined functions. [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat Khuzhin](https://github.com/azat))
+- Flush parts of the right-hand joining table on disk in PartialMergeJoin (if there is not enough memory). Load the data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Speed up joinGet with const arguments by avoiding data duplication. [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos Bird](https://github.com/amosbird))
+- Return early if the subquery is empty. [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu))
+- Optimize parsing of SQL expressions in Values. [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) ([tavplubix](https://github.com/tavplubix))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Disable some contribs for cross-compilation to Mac OS. [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7))
+- Add missing linking with PocoXML for clickhouse\_common\_io. [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat Khuzhin](https://github.com/azat))
+- Accept multiple test filter arguments in clickhouse-test. [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) ([Amos Bird](https://github.com/amosbird))
+- Added the `--client-option` parameter to `clickhouse-test` to pass additional parameters to the client. [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolay Kochetov](https://github.com/KochetovNicolai))
+- Preserve existing configs on rpm package upgrade. [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) ([filimonov](https://github.com/filimonov))
+- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) ([Ivan](https://github.com/abyss7))
+- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos Bird](https://github.com/amosbird))
+- Make sure dh\_clean does not touch potential source files. [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos Bird](https://github.com/amosbird))
+- Attempt to avoid conflicts when updating from the altinity rpm – it has the config file packaged separately in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) ([filimonov](https://github.com/filimonov))
+- Optimize some header files for faster rebuilds. [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily Nemkov](https://github.com/Enmk))
+- Fix some tests that contained non-deterministic mutations. [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander Kazakov](https://github.com/Akazz))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Fix some problems in Fields found by MemorySanitizer. [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) ([tavplubix](https://github.com/tavplubix))
+- Fixed constant expression folding for external database engines (MySQL, ODBC, JDBC). In previous versions it wasn’t working for multiple constant expressions and wasn’t working at all for Date, DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixing a ThreadSanitizer data race error in the LIVE VIEW when accessing the no\_users\_thread variable. [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) ([vzakaznikov](https://github.com/vzakaznikov))
+- Get rid of malloc symbols in libcommon [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos Bird](https://github.com/amosbird))
+- Add the global flag ENABLE\_LIBRARIES for disabling all libraries. [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) ([proller](https://github.com/proller))
+
+#### Code cleanup {#code-cleanup}
+
+- Generalize the configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) ([alesapin](https://github.com/alesapin))
+- Parser for dictionaries DDL without any semantics. [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) ([alesapin](https://github.com/alesapin))
+- Split ParserCreateQuery into different smaller parsers. [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) ([alesapin](https://github.com/alesapin))
+- Small refactoring and renaming near external dictionaries. [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) ([alesapin](https://github.com/alesapin))
+- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Some improvements in DatabaseOrdinary code. [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita Vasilev](https://github.com/nikvas0))
+- Do not use iterators in the find() and emplace() methods of hash tables. [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fix getMultipleValuesFromConfig in case when the parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Remove some copy-paste (TemporaryFile and TemporaryFileStream) [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Wait for all scheduled jobs which use local objects if `ThreadPool::schedule(...)` throws an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and fix comments to make it obvious that it may throw. [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+
+## ClickHouse release 19.15 {#clickhouse-release-19-15}
+
+### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31}
+
+#### Bug Fix {#bug-fix-3}
+
+- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fixed handling of the SQL\_FLOAT data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Allowed having some parts on the destination disk or volume in MOVE PARTITION. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed NULL values in nullable columns through the ODBC bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed INSERT into a Distributed non-local node with MATERIALIZED columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Fixed the getMultipleValuesFromConfig function. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Fixed the issue of using the HTTP keep-alive timeout instead of the TCP keep-alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Wait for all jobs to finish on exception (fixes rare segfaults). [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+- Don’t push to MVs when inserting into a Kafka table. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Disable the memory tracker for the exception stack. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed bad code in transforming queries for external databases. [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Added an example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed bad\_variant in hashed dictionary. ([alesapin](https://github.com/alesapin))
+- Fixed a bug with a segmentation fault in the ATTACH PART query. ([alesapin](https://github.com/alesapin))
+- Fixed time calculation in `MergeTreeData`. ([Vladimir Chebotarev](https://github.com/excitoon))
+- Commit to Kafka explicitly after the writing is finalized. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01}
+
+#### New Feature {#new-feature-3}
+
+- Tiered storage: support using multiple storage volumes for tables with the MergeTree engine. It’s possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
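+
+  A minimal sketch of how this is typically wired up (the policy name `ssd_to_hdd` is hypothetical and must be declared in the server storage configuration):
+
+  ```sql
+  CREATE TABLE hits (d Date, id UInt64) ENGINE = MergeTree
+  PARTITION BY toYYYYMM(d) ORDER BY id
+  SETTINGS storage_policy = 'ssd_to_hdd';
+  ```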
+- Add the table function `input` for reading incoming data in an `INSERT SELECT` query. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
+- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout, but more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
+- Implement the ability to define a list of users for access to dictionaries. Only the currently connected database is used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add the `LIMIT` option to the `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Add the `bitmapSubsetLimit(bitmap, range_start, limit)` function, which returns a subset of the smallest `limit` values in the set that are no smaller than `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648) [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Implement an (in-memory) Merge Join variant that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
+
+#### Improvement {#improvement-2}
+
+- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
+- Improvements for failover of Distributed queries. Shorten the recovery time; it is also now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
+- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
+- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
+- Add an information message when a client with an older version connects to a server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Remove the maximum backoff sleep time limit for sending data in Distributed tables [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
+- Add the ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `<events_cumulative>` in the server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
+- Automatically cast type `T` to `LowCardinality(T)` while inserting data into a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add the ability to use the function `hex` without `reinterpretAsString` for `Float32`, `Float64`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Add a gdb-index to the clickhouse binary with debug info. It will speed up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
+- Speed up deb packaging with a patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
+- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
+- Add a split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Replace `libsparsehash` with `sparsehash-c11` [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
+
+#### Bug Fix {#bug-fix-5}
+
+- Fixed a performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a logical error causing segfaults when selecting from an empty Kafka topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fix a too-early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Returned support for very old Linux kernels (fix [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)) [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible data loss in an `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix for the function `АrrayEnumerateUniqRanked` with empty arrays in params [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+- Fix the `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed an `MSan` warning while executing a function with a `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Backward Incompatible Change {#backward-incompatible-change-2}
+
+- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
+
+## ClickHouse release 19.14 {#clickhouse-release-19-14}
+
+### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
+
+#### Bug Fix {#bug-fix-6}
+
+- This release also contains all bug fixes from 19.11.12.69.
+- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
+
+#### Bug Fix {#bug-fix-7}
+
+- Fix for the function `АrrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fixed the subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with an alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}
+
+- Fix the [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it as a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed a UBSan and MemSan failure in the function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell, because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
+
+### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
+
+#### New Feature {#new-feature-4}
+
+- `WITH FILL` modifier for `ORDER BY` (see the sketch below). (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- `WITH TIES` modifier for `LIMIT`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
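+
+  A minimal sketch of the `WITH FILL` modifier mentioned above (the data is invented):
+
+  ```sql
+  SELECT number AS n FROM numbers(5) WHERE n % 2 = 1
+  ORDER BY n WITH FILL FROM 0 TO 6;
+  -- rows for the missing n values 0, 2, 4, 5 are generated with default values
+  ```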
+- Support for wildcards in paths of the table functions `file` and `hdfs`. If the path contains wildcards, the table will be read-only. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
+- New `system.metric_log` table, which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about the file and line number in stack traces if debug info is present. Speedup of symbol name lookup with indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed the function `symbolizeAddress` to `addressToSymbol` for consistency. The function `addressToSymbol` returns the mangled name for performance reasons and you have to apply `demangle`. Added the setting `allow_introspection_functions`, which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Table function `values` (the name is case-insensitive). It allows reading from a `VALUES` list as proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+- Added an ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
+- Support for removal of detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
+- Table constraints. Allows adding a constraint to a table definition which will be checked at insert (see the sketch below). [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
+- Turn on the query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
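+
+  A minimal sketch of the table constraints feature flagged above (the table and constraint names are invented):
+
+  ```sql
+  CREATE TABLE purchases (
+      price UInt32,
+      CONSTRAINT price_is_positive CHECK price > 0
+  ) ENGINE = MergeTree ORDER BY price;
+  -- an INSERT that violates the CHECK is rejected with an exception
+  ```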
+- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
+- Added two new functions: `sigmoid` and `tanh` (useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if the given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
+- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data (see the sketch below). [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
+- Created a function `currentUser()` that returns the login of the authorized user. Added the alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
+- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive`, which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
+- Function `bitmapRange(bitmap, range_begin, range_end)`, which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
+- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)`, which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
+- Implement support for the INSERT query with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
+- Added support for the `_partition` and `_timestamp` virtual columns in the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
+- Possibility to remove sensitive data from `query_log`, server logs and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
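+
+  The `neighbor` sketch referenced above (the data is invented; note that the function operates within a block of data):
+
+  ```sql
+  SELECT number, neighbor(number, -1) AS prev, neighbor(number, 1) AS next
+  FROM numbers(4);
+  -- prev/next fall back to the type default (0) at block boundaries
+  ```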
+
+#### Experimental Feature {#experimental-feature-2}
+
+- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
+- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in next versions.
+
+#### Bug Fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fix a segmentation fault when the table has skip indices and a vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fix per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with the `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all – otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fix the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fix a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging in the MySQL interface [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix a crash in the `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to a Float type. It was caused by an incorrect monotonicity check of the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix for skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fix a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows) [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong behaviour and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the MySQL wire protocol (used while connecting to ClickHouse from a MySQL client). Caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed a memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix a rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
+- Allow protobuf messages with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
+- Resolve a bug with the `nullIf` function when we send a `NULL` argument as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix a rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields, which leads to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed Gorilla encoding on small sequences, which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
+- Allow using non-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
+- Disable `Poco::AbstractConfiguration` substitutions in queries in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using `arrayReduce` for constant arguments could lead to a segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix inconsistent parts which can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed a hang in the `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug with incorrect skip indices serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
+- Fix the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fix the initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+- Clearing the data buffer from the previous read operation that was completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
[\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed possible crash during server startup in case an exception happened in `libunwind` during an exception at access to the uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix crash in the `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix segfault when decoding the symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
+- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed extra quoting of descriptions in the `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid possible deadlock in `TRUNCATE` of a replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix reading in order of the sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
+- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fix bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. The behaviour was exactly as in the C or C++ language (integer promotion rules), which may be surprising; see the sketch after this list. Please note that the overflow is still possible when dividing a large signed number by a large unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Limit the maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `FormatFactory` behaviour for input streams which are not implemented as processors. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
+- Typo in the error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
+- Fixed an error while parsing a column list from a string if the type contained a comma (this issue was relevant for the `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
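+
+To make the integer-promotion pitfall above concrete, here is a minimal, hedged sketch (the exact values are illustrative, not taken from the report): under C/C++ promotion rules the signed operand is first converted to unsigned, which produced the surprising overflowed result this fix addresses.
+
+``` sql
+-- Assumed example: integer division of a signed value by an unsigned one.
+-- Before the fix, -8 could be promoted to a huge unsigned value first;
+-- after the fix the result is expected to stay mathematically sane.
+SELECT intDiv(toInt32(-8), toUInt32(2));
+-- expected: -4 (rather than an overflowed unsigned quotient)
+```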
+
+#### Security Fix {#security-fix}
+
+- This release also contains all bug security fixes from 19.13 and 19.11.
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-3}
+
+- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
+- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
+- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
+- Remove the `dry_run` flag from `InterpreterSelectQuery`.
… [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `ASOF JOIN` with the `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
+- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
+- Allow to `ATTACH` live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
+- When determining the shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
+- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction set. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support push-down predicate for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better `JOIN ON` keys extraction [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
+- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Optimize the selection of the smallest column for `SELECT count()` queries.
[\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
+- Added the `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
+- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
+- The options line size when executing with the `--help` option now corresponds with the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
+- Disable the “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
+- The HTTP status code for `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
+- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden by the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Rewritten MySQL compatibility server to prevent loading the full packet payload into memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
+- Move the AST alias interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
+- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-copier`: Allow using `where_condition` from the config with a `partition_key` alias in the query for checking partition existence (earlier it was used only in data-reading queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
+- Added an optional message argument in `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
+- A server exception that occurred while sending insertion data is now being processed in the client as well.
[\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
+- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move most of the JOINs preparation logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
+- Better information messages about the lack of Linux capabilities. Logging fatal errors with the “fatal” level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When dumping temporary data to disk to restrict memory usage during `GROUP BY`, `ORDER BY` is enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`; when the free disk space is smaller than this threshold, the query stops and throws `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed recursive rwlock by thread. It made no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This would lead to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
+- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` now builds `required_source_columns` itself. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
+- Add setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs).
[\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
+- Extract the `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
+- `system.query_log`: change the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
+- Avoid an extra dependency for the `compile` setting to work. In previous versions, the user may have gotten errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More validation of input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with a whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a deadlock occurring when we have at least two queries that read at least two tables in different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
+- Remove the unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
+- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the last one for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed duplicating input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` connection settings. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid the partial pruning of parts, so that they are dropped completely when all rows in a part are expired; see the sketch after this list. [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
+- Type checks for set index functions. Throw an exception if the function got a wrong type. This fixes the fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
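+
+A minimal sketch of the `ttl_only_drop_parts` option mentioned above (the table name, columns and TTL interval are hypothetical):
+
+``` sql
+-- With ttl_only_drop_parts = 1, expired rows are not pruned row-by-row;
+-- a data part is dropped as a whole once every row in it has expired.
+CREATE TABLE events_ttl
+(
+    d DateTime,
+    payload String
+)
+ENGINE = MergeTree
+ORDER BY d
+TTL d + INTERVAL 1 MONTH
+SETTINGS ttl_only_drop_parts = 1;
+```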
+
+#### Performance Improvement {#performance-improvement-2}
+
+- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a prefix coinciding with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting; see the sketch after this list. [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
+- Allow using multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use the `FastOps` library for the functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of the `exp` and `log` functions more than 6 times. The functions `exp` and `log` from a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may not be the nearest machine-representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using the Danila Kutenin variant to make fastops work [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable the consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
+- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
+- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
+- Fix performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
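+
+A minimal sketch of the read-in-order optimization described above (the table and column names are hypothetical):
+
+``` sql
+-- The table is sorted by (d, id); an ORDER BY over a prefix of the
+-- sorting key can be served by reading parts in order instead of sorting.
+CREATE TABLE hits_local (d Date, id UInt64, v String)
+ENGINE = MergeTree ORDER BY (d, id);
+
+SELECT d, id FROM hits_local
+ORDER BY d, id
+LIMIT 10
+SETTINGS optimize_read_in_order = 1;
+```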
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}
+
+- Remove the Compiler (runtime template instantiation) because we've won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions; see the sketch after this list. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Comparison mode in `clickhouse-benchmark` [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
+- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the doubled const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
+- Formatting changes for `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
+- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
+- Remove a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
+- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
+- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
+- Add comments for the `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
+- Increase the OOM score in the debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
+- HDFS HA now works in the debug build. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
+- Added a test for `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
+- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
+- Fixed the `test_external_dictionaries` integration test in case it is executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`.
[\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
+- Added a test for the `RENAME` table race condition [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add an integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
+- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Adding the `-Wshadow` switch to GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the deflateInit call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
+- Regression test for a bug in join which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
+- Fixed MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed false data race in the `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed timeouts in the fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for Oracle Linux in official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed json perftests from the `once` to the `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `odbc-bridge.cpp` defines `main()`, so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
+- Test for crash in `FULL|RIGHT JOIN` with nulls in the right table’s keys.
[\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a test for the limit on the expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added RPM packages to the website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test for the fixed `Unknown identifier` exception in the `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
+- Simplify `shared_ptr_helper` because people face difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
+- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix the PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added an option to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
+- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Increase the timeout for the “stack overflow” test because it may take a long time in a debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `new/delete` memory tracking when built with sanitizers. The tracking is not clear. It only prevents memory-limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
+- Enable back the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
+- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Don’t allow using the query profiler with sanitizers because it is not compatible.
[\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test for reloading a dictionary after it failed by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix inconsistency in the `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added more checks to the `CAST` function. This should get more information about the segmentation fault in the fuzz test. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed tests affected by slow stack-trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
+- Fixed index mutations tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
+- In the performance test, do not read the query log for queries we didn’t run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
+- A materialized view now can be created with any low-cardinality types regardless of the setting about suspicious low-cardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
+- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
+- Fixed the build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
+- Fix the shared build with the `rdkafka` library [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+- Fixes for the Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
+- Fix the “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))
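+
+As referenced above, a quick sketch of the `numbers_mt` table function (the row count is arbitrary):
+
+``` sql
+-- numbers_mt produces the same sequence as numbers(), but generates it
+-- in multiple threads, which makes it a fast data source for tests.
+SELECT count()
+FROM numbers_mt(100000000)
+WHERE number % 2 = 1;
+```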
+
+#### Backward Incompatible Change {#backward-incompatible-change-3}
+
+- Removed the rarely used table function `catBoostPool` and the storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them, as in the sketch below. [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
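+
+A minimal sketch of re-enabling the old behaviour (the table names are hypothetical):
+
+``` sql
+-- Opt back in to the legacy ANY RIGHT/FULL JOIN semantics per session.
+SET any_join_distinct_right_table_keys = 1;
+
+SELECT l.k, r.v
+FROM l
+ANY RIGHT JOIN r ON l.k = r.k;
+```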
+
+## ClickHouse release 19.13 {#clickhouse-release-19-13}
+
+### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}
+
+#### Bug Fix {#bug-fix-9}
+
+- This release also contains all bug fixes from 19.11.12.69.
+
+### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}
+
+#### Bug Fix {#bug-fix-10}
+
+- This release also contains all bug fixes from 19.14.6.12.
+- Fixed a possible inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix for a data race in StorageMerge [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug introduced in the query profiler which leads to endless recv from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
+- Fix excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixes the regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
+- The table function `url` had a vulnerability that allowed an attacker to inject arbitrary HTTP headers into the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the useless `AST` check in the Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed wrong behaviour of the `trim` functions family; see the sketch below. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))
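+
+A short sketch of the `trim` family whose behaviour was fixed above (the input strings are arbitrary):
+
+``` sql
+SELECT
+    trimLeft('  hello  '),        -- 'hello  '
+    trimRight('  hello  '),       -- '  hello'
+    trimBoth('  hello  '),        -- 'hello'
+    trim(BOTH 'x' FROM 'xxhixx'); -- 'hi'
+```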
+
+### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}
+
+#### Bug Fix {#bug-fix-11}
+
+- This release also contains all bug security fixes from 19.11.9.52 and 19.11.10.54.
+- Fixed data race in the `system.parts` table and the `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a mismatched header in streams that happened in case of reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed crash when using an `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fix the case with the same column names in the `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
+- Query transformation for the `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
+- Added previous declaration checks for the MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))
+
+#### Security Fix {#security-fix-1}
+
+- Fix two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that will lead to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}
+
+#### Bug Fix {#bug-fix-12}
+
+- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fix NPE when using an `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue where, if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed data race in the system.parts table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with a read of address `0x14c0` that may have happened due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Security Fix {#security-fix-2}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov of the information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}
+
+#### New Feature {#new-feature-5}
+
+- Sampling profiler on the query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
+- Allow to specify a list of columns with the `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk; see the sketch after this list. [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CREATE TABLE AS table_function()` is now possible [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
+- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
+- Added functions for working with the custom week number [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
+- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
+- Now the client receives logs from the server with any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
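+
+A minimal sketch of the `COLUMNS('regexp')` expression introduced above (the table is hypothetical):
+
+``` sql
+CREATE TABLE metrics (ts DateTime, value_cpu Float64, value_mem Float64, host String)
+ENGINE = MergeTree ORDER BY ts;
+
+-- Selects every column whose name matches the regular expression,
+-- here value_cpu and value_mem.
+SELECT COLUMNS('^value_') FROM metrics;
+```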
+
+#### Backward Incompatible Change {#backward-incompatible-change-4}
+
+- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before a rolling update). It enables the calculation of complex default expressions for omitted fields in the `JSONEachRow` and `CSV*` formats; see the sketch below. It should be the expected behaviour, but it may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
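+
+A small sketch of what this setting enables (the table and data are hypothetical):
+
+``` sql
+CREATE TABLE t (a UInt32, b UInt32 DEFAULT a * 2) ENGINE = MergeTree ORDER BY a;
+
+SET input_format_defaults_for_omitted_fields = 1;
+-- The omitted field "b" is computed from its default expression, so b = 2.
+INSERT INTO t FORMAT JSONEachRow {"a": 1}
+```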
+
+#### Experimental Features {#experimental-features}
+
+- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use it at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-13}
+
+- Kafka integration has been fixed in this version.
+- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values; improved `DoubleDelta` encoding for random data for `Int32` (a codec sketch follows this list). [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))
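+
+For context, a minimal sketch of columns using the `DoubleDelta` codec affected by the fix above (the table layout is hypothetical):
+
+``` sql
+-- DoubleDelta stores deltas of deltas and suits slowly changing,
+-- roughly monotonic sequences such as timestamps and counters.
+CREATE TABLE readings
+(
+    ts DateTime CODEC(DoubleDelta),
+    counter Int64 CODEC(DoubleDelta)
+)
+ENGINE = MergeTree ORDER BY ts;
+```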
+
+#### Improvement {#improvement-4}
+
+- Throws an exception if a `config.d` file doesn’t have the corresponding root element as the config file [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Performance Improvement {#performance-improvement-3}
+
+- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}
+
+- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
+- Fix the build with external `libcxx` [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
+- Fix the shared build with the `rdkafka` library [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse release 19.11 {#clickhouse-release-19-11}
+
+### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}
+
+#### Bug Fix {#bug-fix-14}
+
+- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed bug with the `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}
+
+#### Bug Fix {#bug-fix-15}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
+- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}
+
+- Fix a logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fix for the function `АrrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+
+### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}
+
+#### Bug Fix {#bug-fix-16}
+
+- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))
+
+### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}
+
+- Improve error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed bug in the function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
+- Fix the `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parquet: Fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behaviour of the `nullIf` function for constant arguments; see the sketch after this list. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an issue where a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
+- Fixed an error with processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
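+
+A short sketch of `nullIf` with constant arguments, as referenced above:
+
+``` sql
+SELECT
+    nullIf('a', 'b'),  -- 'a'   (the arguments differ)
+    nullIf('a', 'a'),  -- NULL  (the arguments are equal)
+    nullIf(1, NULL);   -- 1     (a comparison with NULL is never equal)
+```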
+- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed data race in the `system.parts` table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in the preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}
+
+#### Bug Fix {#bug-fix-18}
+
+- Kafka integration has been fixed in this version.
+- Fix segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Fix segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
+- Fixed logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
+- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix bug with memory allocation for string fields in a complex key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fix bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fix infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))
+
+#### Improvement {#improvement-5}
+
+- Allow the user to override `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}
+
+#### Bug Fix {#bug-fix-19}
+
+- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of signed type to unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Change {#backward-incompatible-change-5}
+
+- `Kafka` is still broken.
+
+### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}
+
+#### Bug Fix {#bug-fix-20}
+
+- Fix bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return an error `bad_function_call` for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed useless and incorrect condition on the update field for the initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix non-deterministic result of the “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Segfault when we set a little bit too high CIDR on the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the situation when the consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Clearing the Kafka data buffer from a previous read operation that completed with an error [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
+- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}
+
+- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
+- Add an ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
+- Fixes for the “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Change {#backward-incompatible-change-6}
+
+- `Kafka` is broken in this version.
+
+### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}
+
+#### New Feature {#new-feature-6}
+
+- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `DoubleDelta` and `Gorilla` column codecs [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
+- Added `os_thread_priority` setting that allows to control the “nice” value of query processing threads that is used by the OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implement `_topic`, `_offset`, `_key` columns for the Kafka engine [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Add aggregate function combinator `-Resample` [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
+- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate the moving sum/avg with or without window-size limitation (see the sketch after this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
+- Add synonym `arrayFlatten` \<-\> `flatten` [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
+- Integrate H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
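+
+A minimal sketch of the new moving-aggregate functions above (illustrative values only, not taken from the pull request):
+
+``` sql
+-- groupArrayMovingSum(2) sums over a sliding window of the last 2 values.
+SELECT groupArrayMovingSum(2)(value) AS moving_sum
+FROM (SELECT arrayJoin([1, 2, 3, 4]) AS value);
+-- Expected result: [1, 3, 5, 7]
+```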
+
+#### Bug Fix {#bug-fix-21}
+
+- Implement DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache with a period (setting `dns_cache_update_period`). It should help when the IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
+- Fix segfault in the `Delta` codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fix segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fix rare bug in checking of a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function, or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries are executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
+- Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
+- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix race condition which causes some queries to not appear in query\_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Prevent source and intermediate array join columns from masking same-name columns. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix INSERT and SELECT queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
+- Now the `CHECK TABLE` query can work with the MergeTree engine family (as illustrated below). It returns check status and message, if any, for each part (or file in the case of simpler engines). Also, fix bug in the fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
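+
+A minimal illustration of the `CHECK TABLE` query mentioned above (hypothetical table name; the exact result layout may differ between versions):
+
+``` sql
+-- Verifies the data parts of a MergeTree-family table and reports their status.
+CHECK TABLE visits;
+```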
+
+- Fix SPLIT\_SHARED\_LIBRARIES runtime [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow` [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- clickhouse-copier: fix use-after-free on shutdown [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
+- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes parsed successfully. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix shutdown of system logs [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
+- Fix hanging when the condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Improvement {#improvement-6}
+
+- Allow unresolvable addresses in cluster configuration. They will be considered unavailable and tried to resolve at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Close idle TCP connections (with a one-hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better quality of the `topK` function. Changed the SavingSpace set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
+- URL functions that work with domains now can work for incomplete URLs without a scheme [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
+- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
+- Full bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
+- Conditions on the `startsWith` function now can use the primary key (see the sketch after this list). This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
+- Allow to use `clickhouse-copier` with cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
+- Use `UTC` as the default timezone on a system without `tzdata` (e.g. bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Returned back support for a floating point argument in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show which table is missing the column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
+- Disallow running a query with the same query\_id by various users [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
+- More robust code for sending metrics to Graphite. It will work even during a long multiple `RENAME TABLE` operation. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More informative error messages will be displayed when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Inverting ngramSearch to be more intuitive [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
+- Add user parsing in the HDFS engine builder [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
+- Update the default value of `max_ast_elements parameter` [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
+- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
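+
+A sketch of the `startsWith` improvement above, assuming a table whose primary key starts with the string column (hypothetical schema):
+
+``` sql
+CREATE TABLE hits (URL String, EventTime DateTime)
+ENGINE = MergeTree ORDER BY URL;
+
+-- The prefix predicate can now be analyzed against the primary key
+-- instead of scanning the whole table.
+SELECT count() FROM hits WHERE startsWith(URL, 'https://example.com/');
+```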
+
+#### Performance Improvement {#performance-improvement-4}
+
+- Increase the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}
+
+- Add a backward compatibility test for client-server interaction with different versions of ClickHouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
+- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
+- Cooperate with the address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
+- Switch to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
+- Add two more warnings from -Weverything [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow to build ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed ubsan report about the `bitTest` function in fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Docker: added the possibility to init a ClickHouse instance which requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
+- Update librdkafka to version 1.1.0 [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
+- Add a global timeout for integration tests and disable some of them in the tests code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
+- Fix some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
+- The `--no-undefined` option forces the linker to check all external names for existence while linking. It's very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
+- Added performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error when libunwind can be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Convert BSD/Linux endian macros (‘be64toh’ and ‘htobe64’) to the Mac OS X equivalents [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
+- Improved integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixing build at macOS + gcc9 [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
+- Fix a hard-to-spot typo: aggreAGte -\> aggreGAte. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
+- Fix FreeBSD build [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
+- Add link to the experimental YouTube channel to the website [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
+- CMake: add option for coverage flags: WITH\_COVERAGE [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
+- Fix the initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
+- clickhouse-server.postinst: fix OS detection for centos 6 [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
+- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Split Common/config.h by libs (dbms) [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
+- Fixes for the “Arcadia” build platform [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
+- Fixes for unconventional build (gcc9, no submodules) [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
+- Require an explicit type in unalignedStore because it was proven to be bug-prone [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
+- Fixes macOS build [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
+- Performance test concerning the new JIT feature with a bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Run stateful tests in the stress test [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-7}
+
+- `Kafka` is broken in this version.
+- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrading to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
+- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.10 {#clickhouse-release-19-10}
+
+### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}
+
+#### New Feature {#new-feature-7}
+
+- Add new column codec: `T64`. Made for (U)IntX/EnumX/Data(Time)/DecimalX columns. It should be good for columns with constant or small-range values. The codec itself allows to enlarge or shrink the data type without re-compression. [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
+- Add database engine `MySQL` that allows to view all the tables in a remote MySQL server [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
+- `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
+- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
+- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables (see the sketch after this list). [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
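+
+A short sketch of two of the features above (hypothetical host, credentials and table names):
+
+``` sql
+-- Browse the tables of a remote MySQL server through the new MySQL database engine.
+CREATE DATABASE mysql_db ENGINE = MySQL('mysql-host:3306', 'shop', 'user', 'password');
+
+-- Pause and resume asynchronous inserts into a Distributed table.
+SYSTEM STOP DISTRIBUTED SENDS default.hits_distributed;
+SYSTEM START DISTRIBUTED SENDS default.hits_distributed;
+```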
+
+#### Bug Fix {#bug-fix-22}
+
+- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
+- Fix bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
+- Fix of function `arrayEnumerateUniqRanked` for arguments with empty arrays [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
+- Don't subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
+- Make setting `join_use_nulls` get no effect for types that cannot be inside Nullable [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed `Incorrect size of index granularity` errors [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
+- Fix Float-to-Decimal convert overflow [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
+- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))
+
+#### Improvement {#improvement-7}
+
+- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
+- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
+- Using “public\_suffix\_list” for functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain` (see the sketch after this list). It uses a perfect hash table generated by `gperf` with a list generated from the file https://publicsuffix.org/list/public\_suffix\_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Adopted `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log` [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
+- Support the `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
+- Allow to run a query with the `remote` table function in `clickhouse-local` [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
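+
+An illustrative query for the public-suffix-aware domain functions above (domain chosen to show the `ac.uk` case):
+
+``` sql
+SELECT
+    firstSignificantSubdomain('https://news.example.ac.uk/story'),      -- 'example'
+    cutToFirstSignificantSubdomain('https://news.example.ac.uk/story'); -- 'example.ac.uk'
+```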
+
+#### Performance Improvement {#performance-improvement-5}
+
+- Add the possibility to write the final mark at the end of MergeTree columns. It allows to avoid useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
+- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}
+
+- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
+- From now on `.sql` tests can be run isolated by server, in parallel, with a random database. It allows to run them faster, add new tests with custom server configurations, and be sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
+- Remove `` and `` from performance tests [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed the “select\_format” performance test for `Pretty` formats [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.9 {#clickhouse-release-19-9}
+
+### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}
+
+#### Bug Fix {#bug-fix-23}
+
+- Fix segfault in the Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fix rare bug in checking of a part with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Fix segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fix potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix how ClickHouse determines the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix bug about executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on the follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fix race condition which causes some queries to not appear in query\_log instantly after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}
+
+#### New Feature {#new-feature-8}
+
+- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
+- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
+- Implement `dictGet` and `dictGetOrDefault` functions for Decimal types. [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvement {#improvement-8}
+
+- Debian init: add a service stop timeout [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
+- Add a setting, forbidden by default, to create a table with suspicious types for LowCardinality [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
+- Regression functions return model weights when not used as State in the function `evalMLMethod`. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
+- Rename and improve regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
+- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Bug Fix {#bug-fix-24}
+
+- Fix potential data loss in Kafka [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
+- Fix potential infinite loop in the `PrettySpace` format when called with zero columns [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed UInt32 overflow bug in linear models. Allow eval of ML models for a non-const model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix segfault with `bitmapHasAny` in a scalar subquery [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed error when the replication connection pool doesn't retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
+- Fix INSERT into a Distributed table with a MATERIALIZED column [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+- Fixed bad alloc when truncating Join storage [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
+- In recent versions of the tzdata package, some files are symlinks now. The current mechanism for detecting the default timezone gets broken and gives wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Fix some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which can lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
+- Fix the issue when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data it can be interpreted as a table format, and in the query it can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fix bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Remove debug logging from the MySQL protocol [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Skip ZNONODE during DDL query processing [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix mixing `UNION ALL` result column type. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Throw an exception on wrong integers in `dictGetT` functions instead of crashing. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fix gcc9 warnings in hyperscan (the \#line directive is evil!) [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix a gcc9 ICE and submit it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed linking with lld [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove unused specializations in dictionaries [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
+- Improvement of performance tests for formatting and parsing tables for different types of files [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixes for parallel test runs [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
+- Docker: use configs from clickhouse-test [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
+- Fix compile for FreeBSD [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
+- Upgrade boost to 1.70 [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
+- Fix building clickhouse as a submodule [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
+- Improve JSONExtract performance tests [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.8 {#clickhouse-release-19-8}
+
+### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}
+
+#### New Features {#new-features}
+
+- Added functions to work with JSON [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
+- Add a function basename, with behaviour similar to a basename function, which exists in a lot of languages (`os.path.basename` in python, `basename` in PHP, etc…). Works with both UNIX-like paths and Windows paths. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the sketch after this list). [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
+- Added new data type `SimpleAggregateFunction`, which allows to have columns with light aggregation in an `AggregatingMergeTree` (see the sketch after this list). It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
+- Added support for non-constant arguments in the function `ngramDistance` [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
+- Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis, respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
+- Support the rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added server which allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimal types in functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, medianExactWeighted. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `toValidUTF8` function, which replaces all invalid UTF-8 characters by replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `format` function. Formatting a constant pattern (simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add an X-ClickHouse-Summary header when we send a query using HTTP with the enabled setting `send_progress_in_http_headers`. Returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
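+
+A minimal sketch of two of the new features above (hypothetical schema and table names):
+
+``` sql
+-- SimpleAggregateFunction stores a plain value that is re-aggregated on part merges.
+CREATE TABLE daily_totals
+(
+    site_id UInt64,
+    hits SimpleAggregateFunction(sum, UInt64)
+)
+ENGINE = AggregatingMergeTree ORDER BY site_id;
+
+-- LIMIT m OFFSET n BY: skip the first row per group, keep the next two.
+SELECT site_id, url
+FROM page_views
+ORDER BY hits DESC
+LIMIT 2 OFFSET 1 BY site_id;
+```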
+
+#### Improvements {#improvements}
+
+- Added `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after the transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the correspondent values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and doesn't inject nullables. Implemented fallback to RapidJSON in case AVX2 instructions are not available. The simdjson library was updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows to break the reading from the Kafka consumer more frequently and to check the time limits for the top-level streams [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with bundled SASL. It should allow to use SASL SCRAM authentication [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in `` [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Add new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows to set the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations (see the sketch after this list). [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
+- Allow to shadow the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Make `sequenceMatch` aggregate functions support other unsigned integer types [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge` [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL Wire Protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
+- clickhouse-client: allow to run with an unaccessable history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
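+
+A sketch of a Kafka engine table using the new commit-policy setting above (hypothetical broker, topic, and schema):
+
+``` sql
+CREATE TABLE queue (ts DateTime, message String)
+ENGINE = Kafka
+SETTINGS kafka_broker_list = 'localhost:9092',
+         kafka_topic_list = 'events',
+         kafka_group_name = 'clickhouse-consumer',
+         kafka_format = 'JSONEachRow',
+         kafka_commit_every_batch = 1; -- commit after every handled batch instead of after each written block
+```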
+ +#### Performance Improvements {#performance-improvements} + +- Parallelize the processing of parts in ALTER MODIFY queries for non-replicated MergeTree tables. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush)) +- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1)) +- Do not add the right join key column to the join result if it's used only in the JOIN ON section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2)) +- Freeze the Kafka buffer after the first empty response. It prevents multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7)) +- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1)) +- Query optimization. Allow pushing down an IN statement while rewriting a comma/cross join into an inner one. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2)) +- Upgrade our LZ4 implementation with the reference one to get faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1)) +- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty)) + +#### Bug Fixes {#bug-fixes} + +- Fix push require columns with join [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014)) +- Fixed a bug when ClickHouse is run by systemd: the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller)) +- Fix HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller)) +- Fix SimpleAggregateFunction for String longer than MAX\_SMALL\_STRING\_SIZE [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat)) +- Fix the error for the `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal-to-Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fix the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu)) +- Fix the result type of `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2)) + +#### Documentation {#documentation} + +- Translate the documentation for `CollapsingMergeTree` to Chinese.
[\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX)) +- Translate some documentation about table engines to Chinese. [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee)) + +#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements} + +- Fix some sanitizer reports that show a probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7)) +- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin)) +- Added a tool to calculate checksums caused by bit flips, for debugging hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340) [\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov)) +- Add small instructions on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin)) +- Add the ability to make substitutions in the create, fill and drop queries in performance tests [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia)) + +## ClickHouse release 19.7 {#clickhouse-release-19-7} + +### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05} + +#### Bug Fix {#bug-fix-25} + +- Fix a performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014)) + +### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09} + +#### New Features {#new-features-1} + +- Added the bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (a usage sketch follows below). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergey Vladykin](https://github.com/svladykin))
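+
+A short sketch of the new bitmap predicates above; the element values are illustrative:
+
+```sql
+SELECT
+    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS any_common,  -- 1
+    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([1, 2])) AS has_all;        -- 1
+```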
+ +#### Bug Fixes {#bug-fixes-1} + +- Fix a segfault on the `minmax` index with a Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0)) +- Mark all input columns in LIMIT BY as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap)) +- Fix the “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller)) +- Fix the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ)) +- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7)) +- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh)) +- Fix element\_count for hashed dictionaries (do not include duplicates) [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat)) +- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7)) +- Do not try to convert integers in the `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix settings in an ExternalData HTTP request. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1)) +- Fix the bug where parts were removed only from FS without dropping them in ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin)) +- Fix a segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang)) +- Fixed the error where the replication connection pool doesn't retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) +- Fixed the `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn)) +- Fix the UNION ALL supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2)) +- Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that had not processed it yet, but had already got the list of children, would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat)) +- Fix INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat)) + +### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30} + +#### New Features {#new-features-2} + +- Allow limiting the range of a setting that can be specified by the user. These constraints can be set up in the user settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar)) +- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behavior is similar to the `groupArray(max_size)(x)` function (a usage sketch follows after this list). [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE)) +- For the TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
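+
+A short sketch of the aggregation and format items above. The function and setting names come from these entries; the sample data is an illustrative assumption:
+
+```sql
+-- Keep at most 3 distinct values per group, like groupArray(max_size)(x).
+SELECT groupUniqArray(3)(number % 10) FROM numbers(100);
+
+-- Match columns by the CSVWithNames/TSVWithNames header instead of by position.
+SET input_format_with_names_use_header = 1;
+```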
+ +#### Bug Fixes {#bug-fixes-2} + +- Fix a deadlock with uncompressed\_cache + JOIN during merge (\#5197). [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1)) +- Fix a segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7)) +- Fix data loss on heavy load via KafkaEngine (\#4736). [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7)) +- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Performance Improvements {#performance-improvements-1} + +- Use radix sort when sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Documentation {#documentation-1} + +- Translate the documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee)) + +#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1} + +- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Update LZ4 [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1)) +- Add gperf to the build requirements for the upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller)) + +## ClickHouse release 19.6 {#clickhouse-release-19-6} + +### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13} + +#### Bug Fixes {#bug-fixes-3} + +- Fixed condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix a deadlock in ZooKeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc)) +- Allow quoted decimals in CSV.
[\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2)) +- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix a data race in the RENAME query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014)) +- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED while allocating UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1)) + +### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13} + +#### New Features {#new-features-3} + +- TTL expressions for columns and tables (a schema sketch follows at the end of the improvements list below). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ)) +- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br) [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin)) +- Added the new function `isValidUTF8` for checking whether a set of bytes is correctly UTF-8 encoded (see the sketch at the end of this release section). [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1)) +- Add the new load balancing policy `first_or_random`, which sends queries to the first specified host and, if it's inaccessible, sends queries to random hosts. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei)) + +#### Experimental Features {#experimental-features-1} + +- Add the setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* family of tables. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin)) + +#### Improvements {#improvements-1} + +- Added support for non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Disable push-down to the right table in left join, to the left table in right join, and to both tables in full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7)) +- `clickhouse-copier`: auto-upload the task configuration from the `--task-file` option [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller)) +- Added a typos handler for the storage factory and the table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1)) +- Support asterisks and qualified asterisks for multiple joins without subqueries [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2)) +- Make the missing-column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
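+
+A minimal schema sketch for the TTL expressions introduced above; the table, the column names and the retention periods are illustrative assumptions:
+
+```sql
+CREATE TABLE events
+(
+    d Date,
+    payload String TTL d + INTERVAL 30 DAY  -- column-level TTL
+)
+ENGINE = MergeTree
+ORDER BY d
+TTL d + INTERVAL 90 DAY;  -- table-level TTL
+```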
+ +#### Performance Improvements {#performance-improvements-2} + +- A significant speedup of ASOF JOIN [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy)) + +#### Backward Incompatible Changes {#backward-incompatible-changes} + +- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin)) + +#### Bug Fixes {#bug-fixes-4} + +- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller)) +- Fixed an error in a query with JOIN + ARRAY JOIN [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar)) +- Partially fix distributed\_product\_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It's not possible to access the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix a potentially wrong result for `SELECT DISTINCT` with `JOIN` [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2} + +- Fixed test failures when running clickhouse-server on a different host [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk)) +- clickhouse-test: disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin)) +- clickhouse-test: allow the use of any test database (remove the `test.` qualification where possible) [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller)) +- Fix UBSan errors [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar)) +- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1)) +- A Python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
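+
+For illustration, the new function and load-balancing policy from the 19.6.2.11 entries above can be exercised as sketched here; the byte literal is an arbitrary example of invalid UTF-8:
+
+```sql
+SELECT isValidUTF8('abc') AS ok, isValidUTF8('\xF0\x28') AS broken;  -- 1, 0
+
+-- Prefer the first replica in the configured order, falling back to a random one.
+SET load_balancing = 'first_or_random';
+```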
+ +## ClickHouse release 19.5 {#clickhouse-release-19-5} + +### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13} + +#### Bug Fixes {#bug-fixes-5} + +- Fixed a possible crash in bitmap\* functions [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh)) +- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Modification of the retention function: if a row satisfies both the first and the Nth condition, only the first satisfied condition is added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu)) + +### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18} + +#### Bug Fixes {#bug-fixes-6} + +- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat)) + +### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15} + +#### New Features {#new-features-4} + +- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1)) +- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1)) +- Implement the predefined expression filter per row for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7)) +- A new type of data skipping indices based on bloom filters (usable for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0)) +- Added `ASOF JOIN`, which allows running queries that join to the most recent value known (a query sketch follows after this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2)) +- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
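+
+A minimal `ASOF JOIN` sketch under assumed `trades` and `quotes` tables; in this early form the last column in `USING` is the one matched by temporal closeness:
+
+```sql
+-- For every trade, pick the quote row with the closest timestamp.
+SELECT symbol, ts, bid
+FROM trades
+ASOF JOIN quotes USING (symbol, ts);
+```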
+ +#### Improvement {#improvement-9} + +- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin)) +- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist)) +- Support for `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement (a sketch follows after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea)) +- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan)) +- Add the ability to start a replicated table without metadata in ZooKeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin)) +- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Allow disabling functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller)) +- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a limit on the number of arguments to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1)) +- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1)) +- Fill `system.graphite_detentions` from a table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) +- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1)) +- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0)) +- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
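+
+A minimal sketch of the `CREATE OR REPLACE VIEW` statement added above; the view name and definitions are illustrative:
+
+```sql
+CREATE OR REPLACE VIEW v AS SELECT 1 AS x;
+CREATE OR REPLACE VIEW v AS SELECT 2 AS x;  -- replaces the previous definition in one statement
+```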
+ +#### Bug Fix {#bug-fix-26} + +- Avoid `std::terminate` in case of a memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixes capnproto reading from buffer. Sometimes files were not loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs)) +- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird)) +- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz)) +- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller)) +- Fixed ignoring of the `UTC` timezone setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller)) +- Fix the `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv)) +- Fixed TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix recheck of parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Arithmetic operations on intermediate aggregate function states did not work for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason)) +- Fix a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014)) +- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fix a crash of `JOIN` on a not-nullable vs nullable column.
Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller)) +- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a rare data race that could happen during `RENAME` of a table of the MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a segmentation fault in the function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx)) +- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin)) +- Fixed the `arrayIntersect` function returning a wrong result in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn)) +- Fix an incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix duplicates in `GLOBAL JOIN` with an asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin)) +- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when the thread has to seek backwards in the column file.
[\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin)) +- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query replication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin)) + +#### Backward Incompatible Change {#backward-incompatible-change-8} + +- Renamed the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2)) +- Added the setting `max_partitions_per_insert_block` (with default value 100). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1)) + +#### Performance Improvement {#performance-improvement-6} + +- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1)) +- Fix a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all the read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10} + +- Hardening the debug build: more granular memory mappings and ASLR; add memory protection for the mark cache and the index. This allows finding more memory-stomping bugs in cases where ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add support for the CMake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI`, which allow enabling/disabling the above features (as we can do for librdkafka, mysql, etc.). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp)) +- Add the ability to print the process list and the stacktraces of all threads if some queries hang after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin)) +- Add retries on the `Connection loss` error in `clickhouse-test`.
[\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin)) +- Add a FreeBSD build with vagrant, and a build with the thread sanitizer, to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin)) +- Now the user is asked for a password for the user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller)) +- Suppress warnings in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Allow building without SSL. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller)) +- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) +- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller)) +- Disable the usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed the TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add a test checking the use of a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar)) + +## ClickHouse release 19.4 {#clickhouse-release-19-4} + +### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17} + +#### Bug Fixes {#bug-fixes-7} + +- Avoid `std::terminate` in case of a memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixes capnproto reading from buffer. Sometimes files were not loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs)) +- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird)) +- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz)) +- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller)) +- Fixed ignoring of the `UTC` timezone setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)).
[\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller)) +- Fix the `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv)) +- Fixed TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix recheck of parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Arithmetic operations on intermediate aggregate function states did not work for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason)) +- Fix a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014)) +- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller)) +- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a rare data race that could happen during `RENAME` of a table of the MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a segmentation fault in the function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx)) +- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix the `No message received` exception while fetching parts between replicas.
[\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin)) +- Fixed the `arrayIntersect` function returning a wrong result in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn)) +- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin)) +- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when the thread has to seek backwards in the column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin)) +- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query replication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin)) + +#### Improvements {#improvements-2} + +- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn)) + +### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02} + +#### Bug Fixes {#bug-fixes-8} + +- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11} + +- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) + +### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30} + +#### Bug Fixes {#bug-fixes-9} + +- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays.
[\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19} + +#### Bug Fixes {#bug-fixes-10} + +- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too-filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap)) + +### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09} + +#### New Features {#new-features-5} + +- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar)) +- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar)) +- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller)) +- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1)) +- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) +- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014)) +- Implemented the function `flatten` (a sketch follows after this list). [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon)) +- Added the functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq`, but they allow fine-tuning the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
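+
+A minimal sketch of the `flatten` function and the new execution-speed settings introduced above; the values are illustrative:
+
+```sql
+SELECT flatten([[1, 2], [3, 4]]) AS a;  -- [1, 2, 3, 4]
+
+-- Limit query execution to at most 1,000,000 rows per second.
+SET max_execution_speed = 1000000;
+```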
+ +#### Bug Fixes {#bug-fixes-11} + +- This release also contains all bug fixes from 19.3 and 19.1. +- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0)) +- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before that, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Correctly set update\_time on a full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon)) +- Fix the progress bar, broken in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov)) +- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn)) +- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller)) +- FreeBSD: fix for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef)) +- The `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin)) +- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn)) +- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin)) +- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed the issue where `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Avoid a deadlock when the `invalidate_query` for a dictionary with a `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixes for CROSS JOIN with an empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed a segfault in the function “replicate” when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix the lambda function with the predicate optimizer.
[\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014)) +- Multiple JOINs fixes. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2)) + +#### Improvements {#improvements-3} + +- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2)) +- The result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2)) +- Improve the push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7)) + +#### Performance Improvements {#performance-improvements-3} + +- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Use proper lookup tables that use the HashTable API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird)) +- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Clean up the distributed DDL queue in a separate thread so that it doesn't slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn)) +- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with O\_DIRECT mode, because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12} + +- Added support for clang-9 [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix wrong `__asm__` instructions (again) [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok)) +- Add the ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin)) +- Add dictionaries tests to the integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin)) +- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- `xxhash.h` does not exist in external lz4, because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej)) +- Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating-point argument (this fixes a fuzz test with the undefined behavior sanitizer).
+- Fix spelling errors. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}
+
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### Bug fixes {#bug-fixes-13}
+
+- Fixed the bug in \#3920. This bug manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. It first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### Bug fixes {#bug-fixes-14}
+
+- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults and tables with column codecs without comments and defaults. Also fix comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- Fixed crash on JOIN with an array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- Incorrect column numbers were printed in the error message about text-format parsing for columns with a number greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}
+
+- Fixed build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable extended accounting and IO accounting based on a well-known kernel version instead of the kernel under which the binary is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
+- Allow to skip the setting of core\_dump.size\_limit, with a warning instead of a throw if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### Bug fixes {#bug-fixes-15}
+
+- Fixed bug with large http insert queries processing. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- Fixed backward incompatibility with old versions due to wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### Improvements {#improvements-4}
+
+- Table index size is not accounted for memory limits when doing an `ATTACH TABLE` query. Avoided the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Slightly raised the limit on the maximum string and array size received from ZooKeeper. It allows to continue working with increased `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow to repair an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add one required argument to the `SET` index (the maximum number of stored rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Bug Fixes {#bug-fixes-16}
+
+- Fixed the `WITH ROLLUP` result for group by a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}
+
+- Add the ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
+
+### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### New Features {#new-features-6}
+
+- Added the `KILL MUTATION` statement that allows removing mutations that are for some reason stuck. Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added the aggregate function `entropy` which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- Added the ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting on `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- Implemented `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Added support of `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
+- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- Added the function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- Added `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
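+A minimal illustration of the new function (the timestamp is arbitrary):
+
+``` sql
+-- rounds a DateTime down to the start of its ten-minute interval
+SELECT toStartOfTenMinutes(toDateTime('2019-02-13 12:34:56'));
+-- returns 2019-02-13 12:30:00
+```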
+- Added the `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added brotli support for the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
+- Added hints when the user makes a typo in a function name or types in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- Added `Query-Id` to the server's HTTP response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
+
+#### Experimental features {#experimental-features-2}
+
+- Added `minmax` and `set` data skipping indices for the MergeTree table engines family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-17}
+
+- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segmentation fault with `use_uncompressed_cache=1` and an exception with wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed a `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
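+A quick way to exercise these functions (the string literal is arbitrary):
+
+``` sql
+SELECT base64Encode('Hello'), base64Decode(base64Encode('Hello'));
+-- returns 'SGVsbG8=' and 'Hello'
+```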
+- Fixed a `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixes segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition where selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` could segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Added the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Fixed segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare race condition where removal of old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}
+
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
+- Added the ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
+- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
+- The package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Refactoring of performance-test. Better logging and signals handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
+- Added docs to anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
+- Added tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
+- Added a script which creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai)) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added a puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
+- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller)) ([proller](https://github.com/proller))
+- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
+- The init script will wait for the server until it starts. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reduce mark cache size and uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin)
+- Added the keyword `INDEX` in the `CREATE TABLE` query. A column with the name `index` must be quoted with backticks or double quotes: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+
+#### Performance Improvements {#performance-improvements-4}
+
+- `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
+- Now the server reuses threads from a global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-5}
+
+- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow the `-C` option of the client to work the same as the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
+- Now the option `--password` used without a value requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
+- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added cancelling of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
+- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
+- Slightly better message with the reason for an OPTIMIZE query with the `optimize_throw_if_noop` setting enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support of the `--version` option for the clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won't try to become leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1}
+
+### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- Fixed the error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (a rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+This release contains exactly the same set of patches as 19.3.7.
+
+### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+This release contains exactly the same set of patches as 19.3.6.
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### Bug fixes {#bug-fixes-18}
+
+- Fixed backward incompatibility with old versions due to wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### Bug Fixes {#bug-fixes-19}
+
+- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### Bug Fixes {#bug-fixes-20}
+
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` could segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition where selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixes segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed segmentation fault with `uncompressed_cache=1` and an exception with wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Fixed fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
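+The command pair as usually invoked (a sketch; the `SYSTEM` prefix form is assumed here):
+
+``` sql
+SYSTEM STOP REPLICATED SENDS;  -- pause shipping parts to other replicas
+SYSTEM START REPLICATED SENDS; -- the fixed command: resumes replicated sends
+```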
+- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### New Features {#new-features-7}
+
+- Custom per-column compression codecs for tables. [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
+- Added the compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- Allow to `ALTER` compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- Support for writes in `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- Pruning of unused shards if a `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- Added support for `CatBoost` multiclass model evaluation. The function `modelEvaluate` returns a tuple with per-class raw predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
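+These can be combined with `formatReadableSize` for human-readable output (illustrative only):
+
+``` sql
+SELECT
+    formatReadableSize(filesystemAvailable()) AS available,
+    formatReadableSize(filesystemCapacity()) AS capacity;
+```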
+- Added hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- Added the `gccMurmurHash` hashing function (GCC-flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191) [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
+- Added hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- Added the table function `remoteSecure`. The function works like `remote`, but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
+
+#### Experimental features {#experimental-features-3}
+
+- Added multiple JOINs emulation (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-21}
+
+- Make the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
+- Fix a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix a bug where `from_zk` config elements weren't refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix a bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
+- Fixed crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
+- Fix a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix a bug with closing the `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
+- Fixed String-to-UInt monotonic conversion in case of usage of String in primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segfault in the `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
+- Fixed segfault in functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error: functions `round`, `floor`, `trunc`, `ceil` may return a bogus result when executed on an integer argument and a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug induced by ‘kill query sync’ which leads to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
+- Fix a bug with a long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
+- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed `LowCardinality` serialization for the `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed an incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed specialized aggregation with a LowCardinality key (in case the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fix user and password forwarding for replicated table queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
+- Fixed a very rare race condition that can happen when listing tables in a Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an incorrect result when HAVING was used with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
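+A minimal query of the kind that was affected (synthetic data via `numbers`, purely illustrative):
+
+``` sql
+SELECT number % 3 AS k, count() AS c
+FROM numbers(10)
+GROUP BY k WITH ROLLUP
+HAVING c > 3;  -- HAVING must also filter the ROLLUP total row correctly
+```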
+- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-6}
+
+- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
+- Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
+- Usability improvement: added a check that the server process is started from the data directory's owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
+- Better logic of checking required columns during the analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
+- Decreased the number of connections in case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
+- Supported totals row for a `WITH TOTALS` query for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
+- Allowed to use `Enum`s as integers inside the if function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
+- Added the `low_cardinality_allow_in_native_format` setting. If disabled, the `LowCardinality` type is not used in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
+- Add a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
+- Fixed the data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
+
+#### Performance Improvements {#performance-improvements-5}
+
+- Add a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn't support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
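+The setting can be enabled per table; a sketch (the ZooKeeper path and macros are placeholders):
+
+``` sql
+CREATE TABLE t (id UInt64)
+ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/t', '{replica}')
+ORDER BY id
+SETTINGS use_minimalistic_part_header_in_zookeeper = 1;
+```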
+- Add a DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case the pattern doesn't contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
+- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It's used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
+- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-2}
+
+- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
+- Removed the function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Forbid using scalar subqueries with result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}
+
+- Added support for PowerPC (`ppc64le`) builds. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
+- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error where the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
+- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added per-commit runs of tests with the UBSan build.
+- Added per-commit runs of the PVS-Studio static analyzer.
+- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move Docker images to 18.10 and add a compatibility file for glibc \>= 2.28 [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
+- Add an env variable if the user doesn't want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
+- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
+- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
+- The `clickhouse-server` debian package will recommend the `libcap2-bin` package to use the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
+- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
+- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
+- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
+- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed misspells in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
+- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))
+
+## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018}
diff --git a/docs/tr/whats_new/changelog/index.md b/docs/tr/whats_new/changelog/index.md
new file mode 100644
index 00000000000..5f446b526c7
--- /dev/null
+++ b/docs/tr/whats_new/changelog/index.md
@@ -0,0 +1,668 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: Changelog
+toc_priority: 74
+toc_title: '2020'
+---
+
+## ClickHouse release v20.3 {#clickhouse-release-v20-3}
+
+### ClickHouse release v20.3.4.10, 2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20}
+
+#### Bug Fix {#bug-fix}
+
+- This release also contains all bug fixes from 20.1.8.41
+- Fix missing `rows_before_limit_at_least` for queries over http (with the processors pipeline). This fixes [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
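+The field in question appears in JSON-style output formats; for example (illustrative):
+
+``` sql
+SELECT number FROM numbers(100) LIMIT 10 FORMAT JSON;
+-- the JSON payload includes a "rows_before_limit_at_least" field (here at least 10)
+```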
+
+### ClickHouse release v20.3.3.6, 2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17}
+
+#### Bug Fix {#bug-fix-1}
+
+- This release also contains all bug fixes from 20.1.7.38
+- Fix a bug in replication that doesn't allow replication to work if the user has executed mutations on the previous version. This fixes [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
+- Add the setting `use_compact_format_in_distributed_parts_names` which allows to write files for `INSERT` queries into `Distributed` tables with a more compact format. This fixes [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
+
+### ClickHouse release v20.3.2.1, 2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Fixed the issue `file name too long` when sending data for `Distributed` tables for a large number of replicas. Fixed the issue that replica credentials were exposed in the server log. The format of the directory name on disk was changed to `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) After you upgrade to the new version, you will not be able to downgrade without manual intervention, because the old server version does not recognize the new directory format. If you want to downgrade, you have to manually rename the corresponding directories to the old format. This change is relevant only if you have used asynchronous `INSERT`s to `Distributed` tables. In version 20.3.3 we will introduce a setting that will allow you to enable the new format gradually.
+- Changed the format of replication log entries for mutation commands. You have to wait for old mutations to be processed before installing the new version.
+- Implement a simple memory profiler that dumps stacktraces to `system.trace_log` every N bytes over the soft allocation limit [\#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([Ivan](https://github.com/abyss7)) [\#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([alexey-milovidov](https://github.com/alexey-milovidov)) The column of `system.trace_log` was renamed from `timer_type` to `trace_type`. This will require changes in third-party performance analysis and flamegraph processing tools.
+- Use the OS thread id everywhere instead of the internal thread number. This fixes [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477) Old `clickhouse-client` cannot receive logs that are sent from the server when the setting `send_logs_level` is enabled, because the names and types of the structured log messages were changed. On the other hand, different server versions can send logs with different types to each other. When you don't use the `send_logs_level` setting, you should not care. [\#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([alexey-milovidov](https://github.com/alexey-milovidov))
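+The setting in question is enabled per session, e.g. (illustrative):
+
+``` sql
+SET send_logs_level = 'trace';  -- the server now streams its log messages to the client
+SELECT count() FROM numbers(1000000);
+```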
+- Remove the `indexHint` function [\#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the `findClusterIndex`, `findClusterValue` functions. This fixes [\#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com` [\#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's not allowed to create columns or add columns with a `SELECT` subquery as the default expression. [\#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin))
+- Require aliases for subqueries in JOIN. [\#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved `ALTER MODIFY/ADD` query logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of the type doesn't lose the default expression value. Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin))
+- Logging configuration changes require a server restart to be applied. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### New Feature {#new-feature}
+
+- Add `Avro` and `AvroConfluent` input/output formats [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add the query `ALTER ... MATERIALIZE TTL`. It runs a mutation that forces removal of expired data by TTL and recalculates meta-information about TTL in all parts. [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ))
+- Switch from HashJoin to MergeJoin (on disk) if needed [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `MOVE PARTITION` command for `ALTER TABLE` [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Reloading the storage configuration from the configuration file on the fly. [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon))
+
+#### New Feature {#new-feature}
+
+- Added `Avro` and `AvroConfluent` input/output formats [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added the query `ALTER ... MATERIALIZE TTL`. It runs a mutation that forces removing expired data by TTL and recalculates meta-information about TTL in all parts. [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ))
+- Switch from HashJoin to MergeJoin (on disk) if needed [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `MOVE PARTITION` command for `ALTER TABLE` [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Reloading the storage configuration from the configuration file on the fly. [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allowed changing `storage_policy` to a not less rich one. [\#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added support for globs/wildcards for the S3 storage and table function. [\#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Implement `bitAnd`, `bitOr`, `bitXor`, `bitNot` for the `FixedString(N)` datatype. [\#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added the function `bitCount`. This fixes [\#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). [\#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov))
+- Added the `generateRandom` table function to generate random rows with a given schema. Allows populating an arbitrary test table with data. [\#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega))
+- `JSONEachRowFormat`: support the special case when objects are enclosed in a top-level array. [\#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Kruglov Pavel](https://github.com/Avogar))
+- Now it's possible to create a column with a `DEFAULT` expression which depends on a column with a default `ALIAS` expression. [\#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin))
+- Allow specifying `--limit` larger than the source data size in `clickhouse-obfuscator`. The data will repeat itself with a different random seed. [\#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `groupArraySample` function (similar to `groupArray`) with the reservoir sampling algorithm. [\#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([Amos Bird](https://github.com/amosbird))
+- Now you can monitor the size of the update queue in `cache`/`complex_key_cache` dictionaries via system metrics. [\#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Allow using CRLF as a line separator in the CSV output format when the setting `output_format_csv_crlf_end_of_line` is set to 1 [\#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [\#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [\#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb))
+- Implement more functions of the [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` and `stringToH3` [\#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery))
+- New setting introduced: `max_parser_depth`, to control the maximum stack size and allow large complex queries. This fixes [\#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) and [\#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [\#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH))
+- Added the setting `force_optimize_skip_unused_shards` to throw if skipping of unused shards is not possible [\#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat))
+- Allow configuring multiple disks/volumes for storing data to send for the `Distributed` engine [\#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat))
+- Support a storage policy (``) for storing temporary data. [\#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat))
+- Added the `X-ClickHouse-Exception-Code` HTTP header that is set if an exception was thrown before sending data. This implements [\#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [\#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb))
+- Added the function `ifNotFinite`. It is just syntactic sugar: `ifNotFinite(x, y) = isFinite(x) ? x : y`; see the sketch after this list. [\#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `last_successful_update_time` column to the `system.dictionaries` table [\#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added the `blockSerializedSize` function (size on disk without compression) [\#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat))
+- Added the function `moduloOrZero` [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz))
+- Added the system tables `system.zeros` and `system.zeros_mt` as well as the table functions `zeros()` and `zeros_mt()`. Tables (and table functions) contain a single column with the name `zero` and type `UInt8`. This column contains zeros. It is needed for test purposes as the fastest method to generate many rows. This fixes [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
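+An illustrative use of the `ifNotFinite` sugar described in the list above (the values are arbitrary):
+
+```sql
+SELECT ifNotFinite(1 / 0, -1);  -- 1/0 is inf for Float64, so the fallback -1 is returned
+SELECT ifNotFinite(42, -1);     -- 42 is finite, so it is returned as-is
+```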
+
+#### Experimental Feature {#experimental-feature}
+
+- Added a new compact format of parts in `MergeTree`-family tables, in which all columns are stored in one file. It helps to increase the performance of small and frequent inserts. The old format (one file per column) is now called wide. The data storing format is controlled by the settings `min_bytes_for_wide_part` and `min_rows_for_wide_part`; see the sketch after this list. [\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ))
+- Support for S3 storage for `Log`, `TinyLog` and `StripeLog` tables. [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser))
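+A minimal sketch of enabling compact parts via the settings above; the table name and the 10 MiB threshold are illustrative:
+
+```sql
+CREATE TABLE hits_compact (dt DateTime, url String)
+ENGINE = MergeTree
+ORDER BY dt
+SETTINGS min_bytes_for_wide_part = 10485760;  -- parts smaller than 10 MiB stay in the compact format
+```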
+
+#### Bug Fix {#bug-fix-2}
+
+- Fixed inconsistent whitespaces in log messages. [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in which arrays of unnamed tuples were flattened as Nested structures on table creation. [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2))
+- Fixed the issue when the “Too many open files” error could happen if there are too many files matching the glob pattern in a `File` table or `file` table function. Now files are opened lazily. This fixes [\#8857](https://github.com/ClickHouse/ClickHouse/issues/8857) [\#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `DROP TEMPORARY TABLE` now drops only the temporary table. [\#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar))
+- Remove an outdated partition when we shut down the server or DETACH/ATTACH a table. [\#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([Guillaume Tassery](https://github.com/YiuRULE))
+- For the default disk, calculate the free space from the `data` subdirectory. Fixed the issue when the amount of free space was not calculated correctly if the `data` directory is mounted to a separate device (a rare case). This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb))
+- Allow comma (cross) join with `IN ()` inside. [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow rewriting CROSS to INNER JOIN if there's a `[NOT] LIKE` operator in the WHERE section. [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix possible incorrect results after `GROUP BY` with the enabled setting `distributed_aggregation_memory_efficient`. Fixes [\#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [\#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Found keys were counted as missed in the metrics of cache dictionaries. [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fixed a race condition on `queue_task_handle` at the startup of `ReplicatedMergeTree` tables. [\#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The token `NOT` didn't work in the `SHOW TABLES NOT LIKE` query [\#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [\#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a range check to the function `h3EdgeLengthM`. Without this check, a buffer overflow is possible. [\#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz))
+- Fix a bug of the PREWHERE optimization which could lead to segfaults or the `Inconsistent number of columns got from MergeTreeRangeReader` exception. [\#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the unexpected `Timeout exceeded while reading from socket` exception, which randomly happened on secure connections before the timeout was actually exceeded and when the query profiler is enabled. Also add the `connect_timeout_with_failover_secure_ms` setting (default 100 ms), which is similar to `connect_timeout_with_failover_ms` but is used for secure connections (because the SSL handshake is slower than an ordinary TCP connection) [\#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix))
+- Fix a bug with mutations finalization, when a mutation may hang in a state with `parts_to_do=0` and `is_done=0`. [\#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin))
+- Use the new ANY JOIN logic with the `partial_merge_join` setting. It's now possible to make `ANY|ALL|SEMI LEFT` and `ALL INNER` joins with `partial_merge_join=1`. [\#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2))
+- The shard now clamps the settings received from the initiator to the shard's constraints instead of throwing an exception. This fix allows sending queries to a shard with different constraints. [\#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a memory management problem in `MergeTreeReadPool`. [\#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the `toDecimal*OrNull()` functions family when called with the string `e`. Fixes [\#8312](https://github.com/ClickHouse/ClickHouse/issues/8312) [\#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2))
+- Make sure that `FORMAT Null` sends no data to the client. [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fix the bug that the timestamp in `LiveViewBlockInputStream` was not updated. `LIVE VIEW` is an experimental feature. [\#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [\#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider))
+- Fixed the wrong behavior of `ALTER MODIFY TTL`, which did not allow deleting old TTL expressions. [\#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a UBSan report in `MergeTreeIndexSet`. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid throwing from the destructor in the Apache Avro 3rd-party library. [\#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Don't commit a batch polled from `Kafka` partially, as it can lead to holes in the data. [\#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov))
+- Fix `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird))
+- Fix data incompatibility when compressed with the `T64` codec. [\#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fix data type ids in the `T64` compression codec that led to wrong (de)compression in affected versions. [\#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `enable_early_constant_folding` and disabled it in some cases that led to errors. [\#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the pushdown predicate optimizer with VIEW and enable the test [\#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a segfault in `Merge` tables, which can happen when reading from `File` storages [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix))
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise it could make the data of a part inaccessible after restart and prevent ClickHouse from starting. [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix alters if there is a TTL set for the table. [\#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionary is being modified/added/removed. [\#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar))
+- In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in the data directory of the database (e.g. `/var/lib/clickhouse/db_name`). [\#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix))
+- Fixed wrong log messages about a missing default disk or policy. [\#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix not(has()) for the bloom_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Allow the first column(s) in a table with the `Log` engine to be an alias [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7))
+- Fix the order of ranges while reading from a `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ))
+- Make `reinterpretAsFixedString` return `FixedString` instead of `String`. [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Avoid extremely rare cases when the user could get a wrong error message (`Success` instead of a detailed error description). [\#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not crash when using the `Template` format with an empty row template. [\#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Metadata files for system tables could be created in the wrong place [\#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [\#8581](https://github.com/ClickHouse/ClickHouse/issues/8581).
+- Fix the data race on exception_ptr in the cache dictionary [\#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [\#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Do not throw an exception for the query `ATTACH TABLE IF NOT EXISTS`. Previously it was thrown if the table already exists, despite the `IF NOT EXISTS` clause. [\#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a missing closing paren in an exception message. [\#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the message `Possible deadlock avoided` at the startup of clickhouse-client in interactive mode. [\#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when the padding at the end of a base64-encoded value could be malformed. Update the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before the commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov))
+- Fixed an exception in `DROP TABLE IF EXISTS` [\#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fix a crash when a user tries `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engines family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Support for UInt64 numbers that don't fit in Int64 in JSON-related functions. Update simdjson to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed execution of inversed predicates when a non-strictly monotonic functional index is used. [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([Alexander Kazakov](https://github.com/Akazz))
+- Don't try to fold the `IN` constant in `GROUP BY` [\#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([Amos Bird](https://github.com/amosbird))
+- Fix the bug in `ALTER DELETE` mutations which led to index corruption. This fixes [\#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) and [\#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). Additionally fix extremely rare race conditions in `ReplicatedMergeTree` `ALTER` queries. [\#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([alesapin](https://github.com/alesapin))
+- When the setting `compile_expressions` is enabled, you could get `unexpected column` in `LLVMExecutableFunction` when using a `Nullable` type [\#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Multiple fixes for the `Kafka` engine: 1) fix duplicates that appeared during consumer group rebalance. 2) Fix rare ‘holes’ that appeared when data was polled from several partitions with one poll and committed partially (now we always process/commit the whole polled block of messages). 3) Fix flushes by block size (before that, only flushing by timeout worked properly). 4) Better subscription procedure (with assignment feedback). 5) Make tests work faster (with default intervals and timeouts). Because data was not flushed by block size before (as it should according to the documentation), this PR may lead to some performance degradation with default settings (due to more frequent and smaller flushes, which are less optimal). If you encounter a performance issue after this change, please increase `kafka_max_block_size` in the table to a bigger value (for example `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). Fixes [\#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [\#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov))
+- Fix the `Parameter out of bound` exception in some queries after PREWHERE optimizations. [\#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard))
+- Fixed the case of mixed constness of arguments of the function `arrayZip`. [\#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix))
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed a bug with double move which may corrupt the original part. This is relevant if you use `ALTER TABLE MOVE` [\#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allow the `interval` identifier to be parsed correctly without backticks. Fixed the issue when a query could not be executed even if the `interval` identifier was enclosed in backticks or double quotes. This fixes [\#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [\#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a fuzz test and incorrect behaviour of the `bitTestAll`/`bitTestAny` functions. [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are many rows equal to the n-th row; see the sketch after this list. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fix mutations with parts written with enabled `insert_quorum`. [\#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([alesapin](https://github.com/alesapin))
+- Fix the data race at destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [\#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2))
+- Fixed the `Parameters are out of bound` exception in some rare cases in the `SELECT` clause when we have an `ORDER BY` and a `LIMIT` clause. [\#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix mutations finalization, when an already done mutation could have the status `is_done=0`. [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin))
+- Prevent executing `ALTER ADD INDEX` for MergeTree tables with the old syntax, because it doesn't work. [\#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb))
+- During server startup, do not access the table which `LIVE VIEW` depends on, so the server will be able to start. Also remove `LIVE VIEW` dependencies when detaching a `LIVE VIEW`. `LIVE VIEW` is an experimental feature. [\#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix))
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ))
+- Fix possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug when parts were not moved in the background by TTL rules in case there is only one volume. [\#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the issue `Method createColumn() is not implemented for data type Set`. This fixes [\#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [\#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we will try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fix `intDiv` by a minus-one constant [\#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz))
+- Fix a possible race condition in `BlockIO`. [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the bug leading to server termination when trying to use/drop a `Kafka` table created with wrong parameters. [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov))
+- Added a workaround if the OS returns a wrong result for the `timer_create` function. [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error in the usage of the `min_marks_for_seek` parameter. Fixed the error message when there is no sharding key in a Distributed table and we try to skip unused shards. [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat))
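+An illustrative query for the `LIMIT ... WITH TIES` fix above: rows equal to the n-th row in the `ORDER BY` key are also returned.
+
+```sql
+SELECT x
+FROM (SELECT arrayJoin([1, 2, 2, 2, 3]) AS x)
+ORDER BY x
+LIMIT 2 WITH TIES;  -- returns 1, 2, 2, 2: rows tied with the 2nd row are kept
+```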
+
+#### Improvement {#improvement}
+
+- Implement `ALTER MODIFY/DROP` queries on top of mutations for the `ReplicatedMergeTree*` engines family. Now `ALTERS` block only at the metadata update stage, and don't block after that. [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin))
+- Add the ability to rewrite CROSS to INNER JOINs with a `WHERE` section containing unqualified names. [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2))
+- Make `SHOW TABLES` and `SHOW DATABASES` queries support `WHERE` expressions and `FROM`/`IN`; see the sketch after this list [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li))
+- Added a setting `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy))
+- After recent changes the MySQL client started to print binary strings in hex, thereby making them unreadable ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always, but usually, the case. [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy))
+- Add support of String and FixedString keys for `sumMap` [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard))
+- Support string keys in SummingMergeTree maps [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard))
+- Signal termination of a thread to the thread pool even if the thread has thrown an exception [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Allow setting `query_id` in `clickhouse-benchmark` [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ))
+- Don't allow strange expressions in an `ALTER TABLE ... PARTITION partition` query. This addresses [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober))
+- Enable `system.metric_log` by default. It will contain rows with values of ProfileEvents and CurrentMetrics collected with the “collect\_interval\_milliseconds” interval (one second by default). The table is very small (usually in the order of megabytes) and collecting this data by default is reasonable. [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [\#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [\#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7))
+- Now a temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...` [\#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix))
+- Add the `text_log.level` configuration parameter to limit entries that go into the `system.text_log` table [\#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat))
+- Allow putting a downloaded part onto disks/volumes according to TTL rules [\#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon))
+- For external MySQL dictionaries, allow mutualizing the MySQL connection pool to “share” it among dictionaries. This option significantly reduces the number of connections to MySQL servers. [\#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Show the nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [\#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Possibility to add a key and timestamp for the message when inserting data into Kafka. Fixes [\#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [\#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov))
+- If the server is run from a terminal, highlight the thread number, query id and log priority by colors. This is for improved readability of correlated log messages for developers. [\#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message while loading tables for an `Ordinary` database. [\#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implement `arraySlice` for arrays with aggregate function states. This fixes [\#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [\#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow constant functions and constant arrays to be used on the right side of the IN operator. [\#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ))
+- If a ZooKeeper exception happened while fetching data for `system.replicas`, display it in a separate column. This implements [\#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [\#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Atomically remove MergeTree data parts on destroy. [\#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Support row-level security for Distributed tables. [\#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7))
+- Now we recognize suffixes (like KB, KiB…) in settings values. [\#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb))
+- Prevent out of memory while constructing the result of a large JOIN. [\#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2))
+- Added names of clusters to suggestions in interactive mode in `clickhouse-client`. [\#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [\#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7))
+- Added the `exception_code` column to the `system.query_log` table. [\#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb))
+- Enabled the MySQL compatibility server on port `9004` in the default server configuration file. Fixed the password generation command in the example in the configuration. [\#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy))
+- Prevent abort on shutdown if the filesystem is readonly. This fixes [\#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [\#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message when length is required in an HTTP POST query. [\#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added `_path` and `_file` virtual columns to the `HDFS` and `File` engines and the `hdfs` and `file` table functions [\#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix the error `Cannot find column` while inserting into a `MATERIALIZED VIEW` in case a new column was added to the view's internal table. [\#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [\#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [\#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [\#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix progress over the native client-server protocol by sending progress after the final update (like logs). This may be relevant only to some third-party tools that use the native protocol. [\#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat))
+- Add a system metric tracking the number of client connections using the MySQL protocol ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach))
+- From now on, HTTP responses will have the `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon))
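+A small sketch of the extended `SHOW` syntax mentioned in the list above (the database and patterns are arbitrary):
+
+```sql
+SHOW TABLES FROM system WHERE name LIKE '%log%';
+SHOW DATABASES WHERE name != 'system';
+```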
+
+#### Performance Improvement {#performance-improvement}
+
+- Improve performance of analysing an index with IN [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ))
+- Simpler and more efficient code in logical functions plus code cleanups. A follow-up to [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz))
+- Overall performance improvement (in the range of 5%..200% for affected queries) by ensuring even stricter aliasing with C++20 features. [\#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird))
+- Stricter aliasing for the inner loops of comparison functions. [\#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Stricter aliasing for the inner loops of arithmetic functions. [\#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A ~3 times faster implementation of ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. It will also be useful in tests when materializing constants. [\#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz))
+- Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher-order functions), an even further improvement to [\#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [\#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz))
+- Improved performance of the `stochasticLinearRegression` aggregate function. This patch was contributed by Intel. [\#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improve performance of the `reinterpretAsFixedString` function. [\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not send blocks to the client for the `Null` format in the processors pipeline. [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+- Replace `readline` with `replxx` for interactive line editing in `clickhouse-client` [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7))
+- Better build time and fewer template instantiations in FunctionsComparison. [\#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added integration with `clang-tidy` in CI. See also [\#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [\#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we link ClickHouse in CI using `lld` even for `gcc`. [\#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin))
+- Allow randomizing thread scheduling and inserting glitches when the `THREAD_FUZZER_*` environment variables are set. This helps testing. [\#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable secure sockets in stateless tests [\#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix))
+- Make `SPLIT_SHARED_LIBRARIES=OFF` more robust [\#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat))
+- Make the “performance\_introspection\_and\_logging” test reliable against the server getting stuck randomly. This may happen in a CI environment. See also [\#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [\#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Validate XML in the style check. [\#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition in the test `00738_lock_for_inner_table`. This test relied on sleep. [\#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (more reliable). [\#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for arithmetic functions. [\#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for the `sumMap` and `sumMapWithOverflow` aggregate functions. A follow-up for [\#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [\#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Ensure the style of ErrorCodes by the style check. [\#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a script for tests history. [\#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin))
+- Add the GCC warning `-Wsuggest-override` to locate and fix all places where the `override` keyword must be used. [\#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Ignore a weak symbol under Mac OS X because it must be defined [\#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([Deleted user](https://github.com/ghost))
+- Normalize the running time of some queries in performance tests. This is done in preparation for running all the performance tests in comparison mode. [\#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix some tests to support pytest with query tests [\#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7))
+- Enable SSL in the build with MSan, so the server will not fail at startup when running stateless tests [\#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix))
+- Fix database substitution in test results [\#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega))
+- Build fixes for miscellaneous platforms [\#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [\#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [\#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller))
+- Added a disks section to the stateless-with-coverage test docker image [\#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser))
+- Get rid of in-source-tree files when building with gRPC [\#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird))
+- Slightly faster build time by removing SessionCleaner from Context. Make the code of SessionCleaner simpler. [\#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the check for hung queries in the clickhouse-test script [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz))
+- Removed some useless files from the repository. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the type of math perftests from `once` to `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add a docker image which allows building an interactive code browser HTML report for our codebase. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See the [Woboq Code Browser](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/src/index.html)
+- Suppress some test failures under MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Speed up the “exception while insert” test. This test often timed out in the debug-with-coverage build. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libcxx` and `libcxxabi` to master. In preparation for [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the flaky test `00910_zookeeper_test_alter_compression_codecs`. [\#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [\#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
+- Add the `clickhouse-odbc` driver into the test images. This allows testing the interaction of ClickHouse with ClickHouse via its own ODBC driver. [\#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
+- Fix several bugs in unit tests. [\#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
+- Enable the `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes, mostly a result of CMake scripting errors [\#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Describe the reasons if the query profiler cannot work. This is intended for [\#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [\#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update OpenSSL to upstream master. Fixed the issue when TLS connections may fail with the messages `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update the Dockerfile for the server [\#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray))
+- Minor fixes in the build-gcc-from-sources script [\#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach))
+- Replace `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [\#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the stack overflow issue when using `initializer_list` in column constructors. [\#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([Deleted user](https://github.com/ghost))
+- Upgrade librdkafka to v1.3.0. Enable the bundled `rdkafka` and `gsasl` libraries on Mac OS X. [\#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Fix build on gcc 9.2.0 [\#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider))
+
+## ClickHouse release v20.1 {#clickhouse-release-v20-1}
+
+### ClickHouse release v20.1.8.41, 2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20}
+
+#### Bug Fix {#bug-fix-3}
+
+- Fix a possible permanent `Cannot schedule a task` error (due to an unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). This fixes [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat))
+- Fix excessive memory consumption in `ALTER` queries (mutations). This fixes [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) and [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin))
+- Fix a bug with backquoting in external dictionaries DDL. This fixes [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v20.1.7.38, 2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. They led to an exception when these functions were used in remote queries. [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was in all ClickHouse releases.
+- Allow `ALTER ON CLUSTER` of `Distributed` tables with internal replication. This fixes [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was in all ClickHouse releases.
+- Fix the possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed the issue: the timezone was not preserved if you wrote a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`); see the sketch after this list. This fixes [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). This issue was in all ClickHouse releases.
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed the issue when the padding at the end of a base64-encoded value could be malformed. Update the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the data race at destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are many rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fix possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a crash when a user tries `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engines family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Now we will try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fix the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fix not(has()) for the bloom_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
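+An illustrative check for the timezone fix mentioned in the list above (the timezone value is arbitrary):
+
+```sql
+SELECT toDateTime('2020-03-01 00:00:00', 'Europe/Moscow') + 1 AS plus_one,
+       toDateTime('2020-03-01 00:00:00', 'Europe/Moscow') + INTERVAL 1 SECOND AS plus_interval;
+-- Both results now keep the 'Europe/Moscow' timezone of the argument.
+```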
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+
+### ClickHouse release v20.1.6.30, 2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05}
+
+#### Bug Fix {#bug-fix-5}
+
+- Fix data incompatibility when compressed with the `T64` codec.
+  [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7)
+- Fix the order of ranges while reading from a MergeTree table in one thread. Fixes [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964).
+  [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. Fixes [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064).
+  [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix `reinterpretAsFixedString` to return `FixedString` instead of `String`.
+  [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew)
+- Fix `joinGet` with nullable return types. Fixes [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919)
+  [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird)
+- Fix the fuzz test and incorrect behaviour of the bitTestAll/bitTestAny functions.
+  [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the behaviour of the match and extract functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. Fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160)
+  [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed execution of inversed predicates when a non-strictly monotonic functional index is used. Fixes [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034)
+  [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz)
+- Allow rewriting `CROSS` to `INNER JOIN` if there's a `[NOT] LIKE` operator in the `WHERE` section. Fixes [\#9191](https://github.com/ClickHouse/ClickHouse/issues/9191)
+  [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2)
+
+### ClickHouse release v20.1.6.30, 2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05}
+
+#### Bug Fix {#bug-fix-5}
+
+- Fix data incompatibility when compressed with the `T64` codec.
+  [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7)
+- Fix the order of ranges while reading from a MergeTree table in one thread. Fixes [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964).
+  [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. Fixes [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064).
+  [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix `reinterpretAsFixedString` to return `FixedString` instead of `String`.
+  [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew)
+- Fix `joinGet` with nullable return types. Fixes [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919)
+  [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird)
+- Fix the fuzz test and incorrect behaviour of the bitTestAll/bitTestAny functions.
+  [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the behaviour of the match and extract functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. Fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160)
+  [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed execution of inversed predicates when a non-strictly monotonic functional index is used. Fixes [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034)
+  [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz)
+- Allow rewriting `CROSS` to `INNER JOIN` if there is a `[NOT] LIKE` operator in the `WHERE` section. Fixes [\#9191](https://github.com/ClickHouse/ClickHouse/issues/9191)
+  [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2)
+- Allow the first column(s) in a table with the Log engine to be an alias.
+  [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7)
+- Allow comma join with `IN()` inside. Fixes [\#7314](https://github.com/ClickHouse/ClickHouse/issues/7314).
+  [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2)
+- Improve the `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of the type doesn't lose the default expression value (see the sketch after this list). Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669).
+  [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin)
+- Fix mutations finalization, when an already done mutation could have status is\_done = 0.
+  [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin)
+- Support the “Processors” pipeline for `system.numbers` and `system.numbers_mt`. This also fixes the bug when `max_execution_time` is not respected.
+  [\#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Fix wrong counting of the `DictCacheKeysRequestedFound` metric.
+  [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov)
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`, which otherwise could make the data of a part inaccessible after restart and prevent ClickHouse from starting.
+  [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon)
+- Fixed UBSan report in `MergeTreeIndexSet`. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250)
+  [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix possible datarace in BlockIO.
+  [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Support for `UInt64` numbers that don't fit in Int64 in JSON-related functions. Update `SIMDJSON` to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209)
+  [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the issue when the amount of free space is not calculated correctly if the data directory is mounted to a separate device. For the default disk, calculate the free space from the data subdirectory. This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441)
+  [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(millb)](https://github.com/millb)
+- Fix the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` Update OpenSSL to upstream master.
+  [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix the check for the local address in ClickHouseDictionarySource.
+  [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+- Fix segfault in `StorageMerge`, which can happen when reading from StorageFile.
+  [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tavplubix)](https://github.com/tavplubix)
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+  [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fix a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+  [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
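+
+As a sketch of the stricter `ALTER` semantics described in the list above (the table `t` and its columns are hypothetical):
+
+``` sql
+CREATE TABLE t (x UInt32, s String DEFAULT 'none') ENGINE = MergeTree ORDER BY x;
+
+-- Rejected now: ADD COLUMN must specify a type.
+-- ALTER TABLE t ADD COLUMN broken;
+
+-- Changing only the default no longer alters the column type ...
+ALTER TABLE t MODIFY COLUMN s DEFAULT 'empty';
+
+-- ... and changing only the type no longer drops the default value.
+ALTER TABLE t MODIFY COLUMN s LowCardinality(String);
+```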
+
+#### New Feature {#new-feature-1}
+
+- Add the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views (see the sketch after this list). This new feature was added to the bugfix release by a special request from Altinity.
+  [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
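+
+A sketch of the insert-deduplication behaviour this option controls (the table and view names are hypothetical; block deduplication itself applies to `Replicated*MergeTree` targets):
+
+``` sql
+SET deduplicate_blocks_in_dependent_materialized_views = 1;
+
+INSERT INTO source_table VALUES (1, 'a');  -- first attempt
+INSERT INTO source_table VALUES (1, 'a');  -- retried block: with the option on,
+-- it is deduplicated both in source_table and in its dependent materialized views.
+```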
+
+### ClickHouse release v20.1.2.4, 2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Make the setting `merge_tree_uniform_read_distribution` obsolete. The server still recognizes this setting but it has no effect. [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the return type of the function `greatCircleDistance` to `Float32` because now the result of the calculation is `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's expected that query parameters are represented in “escaped” format. For example, to pass the string `a<TAB>b` you have to write `a\tb` or `a\<tab>b` and, respectively, `a%5Ctb` or `a%5C%09b` in a URL. This is needed to add the possibility of passing NULL as `\N`. This fixes [\#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [\#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable the `use_minimalistic_part_header_in_zookeeper` setting for `ReplicatedMergeTree` by default. This will significantly reduce the amount of data stored in ZooKeeper. This setting is supported since version 19.1 and we have already used it in production in multiple services without any issues for more than half a year. Disable this setting if you have a chance to downgrade to versions older than 19.1. [\#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Data skipping indices are production ready and enabled by default. The settings `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` and `allow_experimental_multiple_joins_emulation` are now obsolete and do nothing. [\#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add new `ANY JOIN` logic for `StorageJoin` consistent with the `JOIN` operation. To upgrade without changes in behaviour, you need to add `SETTINGS any_join_distinct_right_table_keys = 1` to Engine Join tables metadata or recreate these tables after the upgrade (see the sketch after this list). [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
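+
+The upgrade path for Engine Join tables mentioned above, as a sketch (the table and column names are hypothetical):
+
+``` sql
+-- Keep the pre-20.1 ANY JOIN semantics for an existing Join-engine table:
+CREATE TABLE right_any (k UInt64, v String)
+ENGINE = Join(ANY, LEFT, k)
+SETTINGS any_join_distinct_right_table_keys = 1;
+```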
+
+#### New Feature {#new-feature-2}
+
+- Added information about part paths to `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Add the ability to execute `SYSTEM RELOAD DICTIONARY` queries in `ON CLUSTER` mode. [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add the ability to execute `CREATE DICTIONARY` queries in `ON CLUSTER` mode. [\#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([alesapin](https://github.com/alesapin))
+- Now a user's profile in `users.xml` can inherit multiple profiles. [\#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added the `system.stack_trace` table that allows looking at the stack traces of all server threads. This is useful for developers to introspect the server state. This fixes [\#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). [\#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add the `DateTime64` data type with configurable sub-second precision. [\#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([Vasily Nemkov](https://github.com/Enmk))
+- Add the table function `clusterAllReplicas` which allows querying all the nodes in the cluster. [\#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari))
+- Add the aggregate function `categoricalInformationValue` which calculates the information value of a discrete feature. [\#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz))
+- Speed up parsing of data files in `CSV`, `TSV` and `JSONEachRow` formats by doing it in parallel. [\#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add the function `bankerRound` which performs banker's rounding. [\#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz))
+- Support more languages in the embedded dictionary for region names: ‘ru’, ‘en’, ‘ua’, ‘uk’, ‘by’, ‘kz’, ‘tr’, ‘de’, ‘uz’, ‘lv’, ‘lt’, ‘et’, ‘pt’, ‘he’, ‘vi’. [\#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improvements in the consistency of `ANY JOIN` logic. Now `t1 ANY LEFT JOIN t2` equals `t2 ANY RIGHT JOIN t1`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the setting `any_join_distinct_right_table_keys` which enables the old behaviour for `ANY INNER JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add new `SEMI` and `ANTI JOIN`. The old `ANY INNER JOIN` behaviour is now available as `SEMI LEFT JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Added `Distributed` format for the `File` engine and the `file` table function, which allows reading from `.bin` files generated by asynchronous inserts into a `Distributed` table. [\#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add an optional reset-column argument for `runningAccumulate` which allows resetting aggregation results for each new key value. [\#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([Sergey Kononenko](https://github.com/kononencheg))
+- Add the ability to use ClickHouse as a Prometheus endpoint. [\#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([vdimir](https://github.com/Vdimir))
+- Add a section `remote_url_allow_hosts` in `config.xml` which restricts allowed hosts for the remote table engines and table functions `URL`, `S3`, `HDFS`. [\#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([Mikhail Korotov](https://github.com/millb))
+- Added the function `greatCircleAngle` which calculates the distance on a sphere in degrees. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the Earth radius to be consistent with the H3 library. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added `JSONCompactEachRow` and `JSONCompactEachRowWithNamesAndTypes` formats for input and output. [\#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([Mikhail Korotov](https://github.com/millb))
+- Added a feature for file-related table engines and table functions (`File`, `S3`, `URL`, `HDFS`) which allows reading and writing `gzip` files, based on an additional engine parameter or the file extension. [\#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([Andrey Bodrov](https://github.com/apbodrov))
+- Added the `randomASCII(length)` function, generating a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. [\#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([Bayonet](https://github.com/BayoNet))
+- Added the function `JSONExtractArrayRaw` which returns an array of unparsed JSON array elements from a `JSON` string. [\#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx))
+- Add the `arrayZip` function which allows combining multiple arrays of equal lengths into one array of tuples (see the sketch after this list). [\#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([Winter Zhang](https://github.com/zhang2014))
+- Add the ability to move data between disks according to configured `TTL` expressions for the `*MergeTree` table engines family. [\#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added the new aggregate function `avgWeighted` which allows calculating a weighted average. [\#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([Andrey Bodrov](https://github.com/apbodrov))
+- Now parallel parsing is enabled by default for the `TSV`, `TSKV`, `CSV` and `JSONEachRow` formats. [\#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add several geo functions from the `H3` library: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` and `h3kRing`. [\#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit))
+- Added support for brotli (`br`) compression in file-related storages and table functions. This fixes [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add `groupBit*` functions for the `SimpleAggregationFunction` type. [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE))
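+
+Two of the new functions above in action; the values are illustrative:
+
+``` sql
+SELECT arrayZip(['a', 'b', 'c'], [1, 2, 3]);
+-- [('a',1),('b',2),('c',3)]
+
+SELECT avgWeighted(x, w)
+FROM
+(
+    SELECT
+        arrayJoin([(4, 1), (1, 0), (10, 2)]) AS p,
+        p.1 AS x,
+        p.2 AS w
+);
+-- (4*1 + 1*0 + 10*2) / (1 + 0 + 2) = 8
+```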
+
+#### Bug Fix {#bug-fix-6}
+
+- Fix rename of tables with the `Distributed` engine. Fixes issue [\#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- Now dictionaries support `EXPRESSION` for attributes in an arbitrary string in a non-ClickHouse SQL dialect. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fix broken `INSERT SELECT FROM mysql(...)` queries. This fixes [\#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) and [\#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fix the error “Mismatch column sizes” when inserting a default `Tuple` from `JSONEachRow`. This fixes [\#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [\#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix))
+- Now an exception will be thrown in case of using `WITH TIES` alongside `LIMIT BY`. Also add the ability to use `TOP` with `LIMIT BY`. This fixes [\#7472](https://github.com/ClickHouse/ClickHouse/issues/7472). [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix an unintended dependency on a fresh glibc version in the `clickhouse-odbc-bridge` binary. [\#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([Amos Bird](https://github.com/amosbird))
+- Fix a bug in the check function of the `*MergeTree` engines family. Now it doesn't fail when we have an equal amount of rows in the last granule and the last mark (non-final). [\#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([alesapin](https://github.com/alesapin))
+- Fix inserts into `Enum*` columns after an `ALTER` query, when the underlying numeric type is equal to the table-specified type. This fixes [\#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [\#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([Anton Popov](https://github.com/CurtizJ))
+- Allowed a non-constant negative “size” argument for the function `substring`. It was disallowed by mistake. This fixes [\#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [\#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a parsing bug when the wrong number of arguments is passed to the `(O|J)DBC` table engine. [\#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([alesapin](https://github.com/alesapin))
+- Use the command name of the running clickhouse process when sending logs to syslog. In previous versions, an empty string was used instead of the command name. [\#8460](https://github.com/ClickHouse/ClickHouse/pull/8460) ([Michael Nacharov](https://github.com/mnach))
+- Fix the check of allowed hosts for `localhost`. This PR fixes the solution provided in [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241). [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare crash in the `argMin` and `argMax` functions for long string arguments, when the result is used in the `runningAccumulate` function. This fixes [\#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [\#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([dinosaur](https://github.com/769344359))
+- Fix memory overcommit for tables with the `Buffer` engine. [\#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a potential bug in functions that can take `NULL` as one of the arguments and return non-NULL. [\#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better metrics calculations in the thread pool for background processes of the `MergeTree` table engines. [\#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the function `IN` inside the `WHERE` statement when a row-level table filter is present. Fixes [\#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [\#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([Ivan](https://github.com/abyss7))
+- Now an exception is thrown if an integral value is not parsed completely for settings values. [\#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([Mikhail Korotov](https://github.com/millb))
+- Fix an exception when an aggregate function is used in a query to a distributed table with more than two local shards. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Now the bloom filter can handle zero-length arrays and doesn't perform redundant calculations. [\#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab))
+- Fixed checking whether a client host is allowed by matching the client host to `host_regexp` specified in `users.xml`. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([Vitaly Baranov](https://github.com/vitlibar))
+- Relax the ambiguous column check that led to false positives in multiple `JOIN ON` sections. [\#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible server crash (`std::terminate`) when the server cannot send or write data in `JSON` or `XML` format with values of the `String` data type (that require `UTF-8` validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. This fixes [\#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a race condition in `StorageDistributedDirectoryMonitor` found by CI. This fixes [\#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [\#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the table engine `Kafka` works properly with the `Native` format. This fixes [\#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [\#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [\#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed formats with headers (like `CSVWithNames`) which were throwing an exception about EOF for the table engine `Kafka`. [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed a bug with making a set from a subquery in the right part of an `IN` section. This fixes [\#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) and [\#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [\#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix a possible crash while reading from storage `File`. [\#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed reading of files in `Parquet` format containing columns of type `list`. [\#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([maxulan](https://github.com/maxulan))
+- Fix the error `Not found column` for distributed queries with a `PREWHERE` condition dependent on the sampling key if `max_parallel_replicas > 1`. [\#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the error `Not found column` if a query used `PREWHERE` dependent on a table's alias and the result set was empty because of the primary key condition. [\#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the return type for the functions `rand` and `randConstant` in case of a `Nullable` argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Disabled predicate push-down for `WITH FILL` expressions. This fixes [\#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [\#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an incorrect `count()` result for `SummingMergeTree` when the `FINAL` section is used. [\#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [\#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix a possible incorrect result for constant functions from remote servers. It happened for queries with functions like `version()`, `uptime()`, etc. which return different constant values for different servers. This fixes [\#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [\#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a complicated bug in push-down predicate optimization which led to wrong results. This fixes a lot of issues in push-down predicate optimization. [\#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a crash in the `CREATE TABLE .. AS dictionary` query. [\#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat))
+- Several improvements to the ClickHouse grammar in the `.g4` file. [\#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([taiyang-li](https://github.com/taiyang-li))
+- Fix a bug that led to crashes in `JOIN`s with tables with engine `Join`. This fixes [\#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [\#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [\#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [\#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [\#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix redundant dictionaries reload on `CREATE DATABASE`. [\#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat))
+- Limit the maximum number of streams for reading from `StorageFile` and `StorageHDFS`. Fixes https://github.com/ClickHouse/ClickHouse/issues/7650. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fix a bug in the `ALTER ... MODIFY ... CODEC` query, when the user specifies both a default expression and a codec. Fixes [8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [\#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([alesapin](https://github.com/alesapin))
+- Fix an error in the background merge of columns with `SimpleAggregateFunction(LowCardinality)` type. [\#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the type check in the function `toDateTime64`. [\#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([Vasily Nemkov](https://github.com/Enmk))
+- Now the server does not crash on `LEFT` or `FULL JOIN` with the Join engine and unsupported `join_use_nulls` settings. [\#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2))
+- Now the `DROP DICTIONARY IF EXISTS db.dict` query doesn't throw an exception if `db` doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix possible crashes in table functions (`file`, `mysql`, `remote`) caused by the usage of a reference to a removed `IStorage` object. Fix incorrect parsing of columns specified at insertion into a table function. [\#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix))
+- Ensure the network is up before starting `clickhouse-server`. This fixes [\#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). [\#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix timeout handling for secure connections, so queries don't hang indefinitely. This fixes [\#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [\#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `clickhouse-copier`'s redundant contention between concurrent workers. [\#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Now mutations don't skip attached parts, even if their mutation version is larger than the current mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Ignore redundant copies of `*MergeTree` data parts after a move to another disk and a server restart. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a crash in `FULL JOIN` with `LowCardinality` in the `JOIN` key. [\#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2))
+- It is now forbidden to use a column name more than once in an insert query, like `INSERT INTO tbl (x, y, x)` (see the sketch after this list). This fixes [\#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [\#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [\#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([alesapin](https://github.com/alesapin))
+- Added a fallback for detecting the number of physical CPU cores for unknown CPUs (using the number of logical CPU cores). This fixes [\#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [\#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the `There's no column` error for materialized and alias columns. [\#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a server crash when an `EXISTS` query was used without the `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. This fixes [\#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). This bug was introduced in version 19.17. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a rare bug with the error `"Sizes of columns doesn't match"` that might appear when using a `SimpleAggregateFunction` column. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Fix a bug where a user with an empty `allow_databases` got access to all databases (and the same for `allow_dictionaries`). [\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod))
+- Fix a client crash when the server has already disconnected from the client. [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Fix `ORDER BY` behaviour in case of sorting by a primary key prefix and a non-primary-key suffix. [\#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([Anton Popov](https://github.com/CurtizJ))
+- Check whether a qualified column is present in the table. This fixes [\#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [\#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed behavior with `ALTER MOVE` run immediately after a merge finished, which moved a superpart of the specified one. Fixes [\#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a possible server crash while using `UNION` with a different number of columns. Fixes [\#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [\#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the size of the result substring for the function `substr` with a negative size. [\#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the server does not execute a part mutation in `MergeTree` if there are not enough free threads in the background pool. [\#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix))
+- Fixed a minor typo in formatting `UNION ALL` AST. [\#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91))
+- Fixed incorrect bloom filter results for negative numbers. This fixes [\#8317](https://github.com/ClickHouse/ClickHouse/issues/8317). [\#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a potential buffer overflow in decompression. A malicious user could pass fabricated compressed data that would cause a read after the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix an incorrect result because of integer overflow in `arrayIntersect`. [\#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the `OPTIMIZE TABLE` query will not wait for offline replicas to perform the operation. [\#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([javi santana](https://github.com/javisantana))
+- Fixed the `ALTER TTL` parser for `Replicated*MergeTree` tables. [\#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix communication between server and client, so the server reads the temporary tables info after a query failure. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Fix a `bitmapAnd` function error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+- Refine the definition of `ZXid` according to the ZooKeeper Programmer's Guide, which fixes a bug in `clickhouse-cluster-copier`. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- The `odbc` table function now respects the `external_table_functions_use_nulls` setting. [\#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a bug that led to a rare data race. [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Now `SYSTEM RELOAD DICTIONARY` reloads a dictionary completely, ignoring `update_field`. This fixes [\#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). [\#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add the ability to check whether a dictionary exists in a CREATE query. [\#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([alesapin](https://github.com/alesapin))
+- Fix `Float*` parsing in the `Values` format. This fixes [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fix a crash when we cannot reserve space in some background operations of the `*MergeTree` table engines family. [\#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a crash of the merge operation when the table contains a `SimpleAggregateFunction(LowCardinality)` column. This fixes [\#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [\#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat))
+- Restore support of all ICU locales and add the ability to apply collations for constant expressions. Also add the language name to the `system.collations` table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- Fix a bug when external dictionaries with zero minimal lifetime (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)`) don't update in the background. [\#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([alesapin](https://github.com/alesapin))
+- Fix a crash when an external dictionary with a ClickHouse source has a subquery in the query. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix incorrect parsing of the file extension in a table with engine `URL`. This fixes [\#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [\#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([Andrey Bodrov](https://github.com/apbodrov))
+- Fix the `CHECK TABLE` query for `*MergeTree` tables without a key. Fixes [\#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
+- Fixed conversion of `Float64` to the MySQL type. [\#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([Yuriy Baranov](https://github.com/yurriy))
+- Now, if a table was not completely dropped because of a server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed a crash in the table function `file` while inserting into a file that doesn't exist. Now in this case the file is created and the insert is processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix a rare deadlock which can happen when `trace_log` is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Add the ability to work with different types besides `Date` in a `RangeHashed` external dictionary created from a DDL query. Fixes [7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fix a crash when `now64()` is called with the result of another function. [\#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a bug with detecting the client IP for connections over the MySQL wire protocol. [\#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz))
+- Fix empty array handling in the `arraySplit` function. This fixes [\#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). [\#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz))
+- Fixed the issue when the `pid-file` of another running `clickhouse-server` could be deleted. [\#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([Weiqing Xu](https://github.com/weiqxu))
+- Fix dictionary reload if it has `invalidate_query`, which stopped updates after some exception on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in the function `arrayReduce` that may lead to a “double free”, and an error in the aggregate function combinator `Resample` that may lead to a memory leak. Added the aggregate function `aggThrow`. This function can be used for testing purposes. [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov))
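+
+A sketch of the duplicate-column check from the list above (the table is hypothetical):
+
+``` sql
+CREATE TABLE tbl (x UInt8, y UInt8) ENGINE = Memory;
+
+-- Now rejected with an exception instead of being accepted:
+-- INSERT INTO tbl (x, y, x) VALUES (1, 2, 3);
+
+INSERT INTO tbl (x, y) VALUES (1, 2);  -- each column listed at most once
+```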
+
+#### Improvement {#improvement-1}
+
+- Improved logging when working with the `S3` table engine. [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- A help message is printed when no arguments are passed while calling `clickhouse-local`. This fixes [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic))
+- Add the setting `mutations_sync` which allows waiting for `ALTER UPDATE/DELETE` queries synchronously (see the sketch after this list). [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin))
+- Allow setting a relative `user_files_path` in `config.xml` (in a way similar to `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz))
+- Add an exception for illegal types for conversion functions with the `-OrZero` postfix. [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Simplify the format of the header of data sent to a shard in a distributed query. [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([Vitaly Baranov](https://github.com/vitlibar))
+- `Live View` table engine refactoring. [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
+- Add additional checks for external dictionaries created from DDL queries. [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
+- Fix the error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the first argument of the `joinGet` function can be a table identifier. [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
+- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+- Now background moves between disks run in a separate thread pool. [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
+- `SYSTEM RELOAD DICTIONARY` now executes synchronously. [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))
+- Stack traces now display physical addresses (offsets in the object file) instead of virtual memory addresses (where the object file was loaded). That allows the use of `addr2line` when the binary is position independent and ASLR is active. This fixes [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support the new syntax for row-level security filters: `<table name='table_name'>…</table>`. Fixes [\#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [\#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([Ivan](https://github.com/abyss7))
+- Now the `cityHash` function can work with `Decimal` and `UUID` types. Fixes [\#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [\#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([Mikhail Korotov](https://github.com/millb))
+- Removed the fixed index granularity (it was 1024) from system logs because it's obsolete after the implementation of adaptive granularity. [\#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enabled the MySQL compatibility server when ClickHouse is compiled without SSL. [\#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([Yuriy Baranov](https://github.com/yurriy))
+- Now the server checksums distributed batches, which gives more verbose errors in case of corrupted data in a batch. [\#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat))
+- Support `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` and `ATTACH TABLE` for the `MySQL` database engine. [\#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([Winter Zhang](https://github.com/zhang2014))
+- Add authentication in the S3 table function and table engine. [\#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added a check for extra parts of `MergeTree` on different disks, in order not to miss data parts on undefined disks. [\#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Enable SSL support for the Mac client and server. [\#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([Ivan](https://github.com/abyss7))
+- Now ClickHouse can work as a MySQL federated server (see https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html). [\#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- `clickhouse-client` now only enables `bracketed-paste` when multiquery is on and multiline is off. This fixes [\#7757](https://github.com/ClickHouse/ClickHouse/issues/7757). [\#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird))
+- Support `Array(Decimal)` in the `if` function. [\#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimals in the `arrayDifference`, `arrayCumSum` and `arrayCumSumNegative` functions. [\#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a `lifetime` column to the `system.dictionaries` table. [\#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [\#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekule](https://github.com/kekekekule))
+- Improved check for existing parts on different disks for the `*MergeTree` table engines. Addresses [\#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [\#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Integration with the `AWS SDK` for `S3` interactions, which allows using all S3 features out of the box. [\#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([Pavel Kovalenko](https://github.com/Jokser))
+- Added support for subqueries in `Live View` tables. [\#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov))
+- Removed the check for using `Date` or `DateTime` columns in `TTL` expressions. [\#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added information about the disk to the `system.detached_parts` table. [\#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the settings `max_(table|partition)_size_to_drop` can be changed without a restart. [\#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- Slightly better usability of error messages. Ask the user not to remove the lines below `Stack trace:`. [\#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better reading of messages from the `Kafka` engine in various formats after [\#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [\#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([Ivan](https://github.com/abyss7))
+- Better compatibility with MySQL clients which don't support the `sha2_password` auth plugin. [\#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more column types in the MySQL compatibility server. [\#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([Yuriy Baranov](https://github.com/yurriy))
+- Implement the `ORDER BY` optimization for `Merge`, `Buffer` and `Materialized View` storages with underlying `MergeTree` tables. [\#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([Anton Popov](https://github.com/CurtizJ))
+- Now we always use the POSIX implementation of `getrandom` to have better compatibility with old kernels (\< 3.17). [\#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([Amos Bird](https://github.com/amosbird))
+- Better check for a valid destination in a move TTL rule. [\#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Better checks for broken insert batches for the `Distributed` table engine. [\#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat))
+- Add a column with the array of part names which mutations must process in the future to the `system.mutations` table. [\#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([alesapin](https://github.com/alesapin))
+- Parallel merge sort optimization for processors. [\#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The setting `mark_cache_min_lifetime` is now obsolete and does nothing. In previous versions, the mark cache could grow in memory larger than `mark_cache_size` to accommodate data within `mark_cache_min_lifetime` seconds. That was leading to confusion and higher memory usage than expected, which is especially bad on memory-constrained systems. If you see performance degradation after installing this release, you should increase `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Preparation to use `tid` everywhere. This is needed for [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov))
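+
+A sketch of how `mutations_sync` turns `ALTER UPDATE/DELETE` from fire-and-forget into a synchronous call (the table `t` is hypothetical):
+
+``` sql
+SET mutations_sync = 1;  -- wait for the mutation on the current server
+
+-- Returns only after the mutation has finished, instead of immediately:
+ALTER TABLE t UPDATE s = 'fixed' WHERE x = 42;
+```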
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Performance optimizations in the processors pipeline. [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Non-blocking updates of expired keys in cache dictionaries (with permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Compile ClickHouse without `-fno-omit-frame-pointer` globally to spare one more register. [\#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([Amos Bird](https://github.com/amosbird))
+- Speed up the `greatCircleDistance` function and add performance tests for it. [\#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia))
+- Improved performance of the function `roundDown`. [\#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of `max`, `min`, `argMin`, `argMax` for the `DateTime64` data type. [\#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of sorting without a limit or with a big limit and external sorting. [\#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of formatting floating point numbers by up to 6 times. [\#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of the `modulo` function. [\#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([Amos Bird](https://github.com/amosbird))
+- Optimized `ORDER BY` and merging with a single column key. [\#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better implementation for `arrayReduce`, `-Array` and `-State` combinators. [\#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([Amos Bird](https://github.com/amosbird))
+- Now `PREWHERE` should be optimized to be at least as efficient as `WHERE`. [\#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([Amos Bird](https://github.com/amosbird))
+- Improve the way `round` and `roundBankers` handle negative numbers. [\#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz))
+- Improved decoding performance of the `DoubleDelta` and `Gorilla` codecs by roughly 30-40%. This fixes [\#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [\#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of `base64`-related functions. [\#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a function `geoDistance`. It is similar to `greatCircleDistance` but uses an approximation to the WGS-84 ellipsoid model. The performance of both functions is nearly the same. [\#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Faster `min` and `max` aggregation functions for the `Decimal` data type. [\#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2))
+- Vectorized processing of `arrayReduce`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- `if` chains are now optimized as `multiIf` (see the sketch after this list). [\#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([kamalov-ruslan](https://github.com/kamalov-ruslan))
+- Fix a performance regression of the `Kafka` table engine introduced in 19.15. This fixes [\#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [\#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov))
+- Removed “pie” code generation that `gcc` from Debian packages occasionally brings by default. [\#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parallel parsing of data formats. [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Enable the optimized parser of `Values` with expressions by default (`input_format_values_deduce_templates_of_expressions=1`). [\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix))
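+
+The `if`-chain rewrite from the list above, shown next to the equivalent hand-written `multiIf` (the values are illustrative):
+
+``` sql
+-- A nested if chain ...
+SELECT if(x < 10, 'small', if(x < 100, 'medium', 'large')) AS bucket
+FROM (SELECT arrayJoin([5, 50, 500]) AS x);
+
+-- ... is now evaluated like the equivalent multiIf:
+SELECT multiIf(x < 10, 'small', x < 100, 'medium', 'large') AS bucket
+FROM (SELECT arrayJoin([5, 50, 500]) AS x);
+```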
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Build fixes for `ARM` and in minimal mode. [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller))
+- Add a coverage file flush for `clickhouse-server` when std::atexit is not called. Also slightly improved logging in stateless tests with coverage. [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin))
+- Update the LLVM library in contrib. Avoid using LLVM from OS packages. [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the bundled `curl` build fully quiet. [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser))
+- Fix some `MemorySanitizer` warnings. [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Use the `add_warning` and `no_warning` macros in `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([Ivan](https://github.com/abyss7))
+- Add support of the Minio S3-compatible object store (https://min.io/) for better integration tests. [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([Pavel Kovalenko](https://github.com/Jokser))
+- Imported `libc` headers to contrib. It allows making builds more consistent across various systems (only for `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove `-fPIC` from some libraries. [\#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean up `CMakeLists.txt` for curl. See https://github.com/ClickHouse/ClickHouse/pull/8011\#issuecomment-569478910 [\#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Silence warnings in the `CapNProto` library. [\#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add performance tests for short-string-optimized hash tables. [\#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([Amos Bird](https://github.com/amosbird))
+- Now ClickHouse will build on `AArch64` even if `MADV_FREE` is not available. This fixes [\#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [\#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([Amos Bird](https://github.com/amosbird))
+- Update `zlib-ng` to fix MemorySanitizer problems. [\#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [\#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enable the internal MySQL library on non-Linux systems, because the usage of OS packages is very fragile and usually doesn't work at all. This fixes [\#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [\#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the build on some systems after enabling `libc++`. This supersedes [\#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [\#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make `Field` methods more type-safe to find more errors. [\#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [\#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Added missing files to the `libc-headers` submodule. [\#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `JSON` quoting in performance test output. [\#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the stack trace is displayed for `std::exception` and `Poco::Exception`. In previous versions it was available only for `DB::Exception`. This improves diagnostics. [\#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Porting `clock_gettime` and `clock_nanosleep` for fresh glibc versions. [\#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([Amos Bird](https://github.com/amosbird))
+- Enable `part_log` in the example config for developers. [\#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the async nature of reload in `01036_no_superfluous_dict_reload_on_create_database*`. [\#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat))
+- Fixed codec performance tests. [\#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([Vasily Nemkov](https://github.com/Enmk))
+- Add install scripts for the `.tgz` build and documentation for them. [\#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [\#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([alesapin](https://github.com/alesapin))
+- Removed the old `ZSTD` test (it was created in 2016 to reproduce the bug that the pre-1.0 version of ZSTD had). This fixes [\#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [\#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed build on Mac OS Catalina. [\#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob))
+- Increased the number of rows in codec performance tests to make results noticeable. [\#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([Vasily Nemkov](https://github.com/Enmk))
+- In debug builds, treat `LOGICAL_ERROR` exceptions as assertion failures, so that they are easier to notice. [\#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Make the formats-related performance test more deterministic. [\#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update `lz4` to fix a MemorySanitizer failure. [\#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Suppress a known MemorySanitizer false positive in exception handling. [\#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Update `gcc` and `g++` to version 9 in `build/docker/build.sh` [\#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([TLightSky](https://github.com/tlightsky))
+- Add a performance test case to test that `PREWHERE` is worse than `WHERE`. [\#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([Amos Bird](https://github.com/amosbird))
+- Progress towards fixing one flaky test. [\#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid MemorySanitizer reports for data coming from `libunwind`. [\#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libc++` to the latest version. [\#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Build the ICU library from sources. This fixes [\#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [\#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `libressl` to `openssl`. ClickHouse should support TLS 1.3 and SNI after this change. This fixes [\#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [\#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report when using `chacha20_poly1305` from SSL (happens on connect to https://yandex.ru/). [\#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the mode of the default password file for `.deb` Linux distros. [\#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller))
+- Improved the expression for getting the `clickhouse-server` PID in `clickhouse-test`. [\#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([Alexander Kazakov](https://github.com/Akazz))
+- Updated contrib/googletest to v1.10.0. [\#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Fixed a ThreadSanitizer report in the `base64` library. Also updated this library to the latest version, but it doesn't matter. This fixes [\#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [\#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `00600_replace_running_query` for processors. [\#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Remove support for `tcmalloc` to make `CMakeLists.txt` simpler. [\#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Release gcc builds now use `libc++` instead of `libstdc++`. Until recently `libc++` was used only with clang. This will improve the consistency of build configurations and portability. [\#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable the ICU library for builds with MemorySanitizer. [\#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Suppress warnings from the `CapNProto` library. [\#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed special cases of code for `tcmalloc`, because it's no longer supported. [\#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- In the CI coverage task, kill the server gracefully to allow it to save the coverage report. This fixes the incomplete coverage reports we've been seeing lately. [\#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([alesapin](https://github.com/alesapin))
+- Performance tests for all codecs against `Float64` and `UInt64` values. [\#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([Vasily Nemkov](https://github.com/Enmk))
+- `termcap` is very much deprecated and leads to various problems (e.g. missing the “up” cap and echoing `^J` instead of going multi-line). Favor `terminfo` or the bundled `ncurses`. [\#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([Amos Bird](https://github.com/amosbird))
+- Fix the `test_storage_s3` integration test. [\#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `StorageFile(<format>, null)` to insert blocks into the given-format file without actually writing to disk. This is required for performance tests. [\#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([Amos Bird](https://github.com/amosbird))
+- Added the argument `--print-time` to functional tests, which prints the execution time per test. [\#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added asserts to `KeyCondition` while evaluating RPN. This fixes the warning from gcc-9. [\#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Dump cmake options in CI builds. [\#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don't generate debug info for some fat libraries. [\#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make `log_to_console.xml` always log to stderr, regardless of whether it is interactive or not. [\#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Removed some unused features from the `clickhouse-performance-test` tool. [\#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we will also search for `lld-X` with the corresponding `clang-X` version. [\#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([alesapin](https://github.com/alesapin))
+- Parquet build improvement. [\#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([maxulan](https://github.com/maxulan))
+- More GCC warnings. [\#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- The package for Arch Linux now allows running the ClickHouse server, not only the client. [\#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the test with processors. Tiny performance fixes. [\#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Update contrib/protobuf. [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey))
+- In preparation for switching to C++20, as a New Year celebration. “May the C++ force be with ClickHouse.” [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird))
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Added the experimental setting `min_bytes_to_use_mmap_io` (see the sketch after this section). It allows reading big files without copying data from kernel to userspace. The setting is disabled by default. The recommended threshold is about 64 MB, because mmap/munmap is slow. [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reworked quotas as a part of the access control system. Added the new table `system.quotas`, new functions `currentQuota`, `currentQuotaKey`, and new SQL syntax `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar))
+- Allow skipping unknown settings with warnings instead of throwing exceptions. [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar))
+- Reworked row policies as a part of the access control system. Added the new table `system.row_policies`, the new function `currentRowPolicies()`, and new SQL syntax `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Security Fix {#security-fix}
+
+- Fixed the possibility of reading the directory structure in tables with the `File` table engine. This fixes [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov))
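+
+A minimal sketch of the experimental features above; the 64 MB threshold follows the recommendation in the entry, and the access-control objects are only queried here because the exact DDL of this experimental release may still change:
+
+``` sql
+-- Experimental mmap IO: read big files without copying data from kernel to userspace
+SET min_bytes_to_use_mmap_io = 67108864; -- about 64 MB, the recommended threshold
+
+-- Inspect the new access-control objects added above
+SHOW QUOTA;
+SELECT * FROM system.quotas;
+SELECT * FROM system.row_policies;
+```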
+
+## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019}
diff --git a/docs/tr/whats_new/index.md b/docs/tr/whats_new/index.md
new file mode 100644
index 00000000000..17464a36cb0
--- /dev/null
+++ b/docs/tr/whats_new/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_folder_title: What's New
+toc_priority: 72
+---
+
+
diff --git a/docs/tr/whats_new/roadmap.md b/docs/tr/whats_new/roadmap.md
new file mode 100644
index 00000000000..fc43396d834
--- /dev/null
+++ b/docs/tr/whats_new/roadmap.md
@@ -0,0 +1,19 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 74
+toc_title: "Roadmap"
+---
+
+# Roadmap {#roadmap}
+
+## Q1 2020 {#q1-2020}
+
+- Role-based access control
+
+## Q2 2020 {#q2-2020}
+
+- Integration with external authentication services
+- Resource pools for more precise distribution of cluster capacity between users
+
+{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##}
diff --git a/docs/tr/whats_new/security_changelog.md b/docs/tr/whats_new/security_changelog.md
new file mode 100644
index 00000000000..117d1766cdb
--- /dev/null
+++ b/docs/tr/whats_new/security_changelog.md
@@ -0,0 +1,76 @@
+---
+machine_translated: true
+machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
+toc_priority: 76
+toc_title: "Security Changelog"
+---
+
+## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}
+
+### CVE-2019-15024 {#cve-2019-15024}
+
+An attacker that has write access to ZooKeeper and who can run a custom server available from the network where ClickHouse runs, can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem.
+
+Credits: Eldar Zaitov of Yandex Information Security Team
+
+### CVE-2019-16535 {#cve-2019-16535}
+
+An OOB read, OOB write and integer underflow in decompression algorithms can be used to achieve RCE or DoS via the native protocol.
+
+Credits: Eldar Zaitov of Yandex Information Security Team
+
+### CVE-2019-16536 {#cve-2019-16536}
+
+A stack overflow leading to DoS can be triggered by a malicious authenticated client.
+
+Credits: Eldar Zaitov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 19.13.6.1, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20}
+
+### CVE-2019-18657 {#cve-2019-18657}
+
+The table function `url` had a vulnerability that allowed the attacker to inject arbitrary HTTP headers into the request.
+
+Credits: [Nikita Tikhomirov](https://github.com/NSTikhomirov)
+
+## Fixed in ClickHouse Release 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10}
+
+### CVE-2018-14672 {#cve-2018-14672}
+
+Functions for loading CatBoost models allowed path traversal and reading arbitrary files through error messages.
+
+Credits: Andrey Krasichkov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 18.10.3, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13}
+
+### CVE-2018-14671 {#cve-2018-14671}
+
+unixODBC allowed loading arbitrary shared objects from the file system, which led to a Remote Code Execution vulnerability.
+
+Credits: Andrey Krasichkov and Evgeny Sidorov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 1.1.54388, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28}
+
+### CVE-2018-14668 {#cve-2018-14668}
+
+The “remote” table function allowed arbitrary symbols in the “user”, “password” and “default\_database” fields, which led to Cross Protocol Request Forgery Attacks.
+
+Credits: Andrey Krasichkov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 1.1.54390, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06}
+
+### CVE-2018-14669 {#cve-2018-14669}
+
+The ClickHouse MySQL client had “LOAD DATA LOCAL INFILE” functionality enabled, which allowed a malicious MySQL database to read arbitrary files from the connected ClickHouse server.
+
+Credits: Andrey Krasichkov and Evgeny Sidorov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 1.1.54131, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10}
+
+### CVE-2018-14670 {#cve-2018-14670}
+
+Incorrect configuration in the deb package could lead to unauthorized use of the database.
+
+Credits: the UK's National Cyber Security Centre (NCSC)
+
+{## [Original article](https://clickhouse.tech/docs/en/security_changelog/) ##}
diff --git a/docs/zh/changelog/2017.md b/docs/zh/changelog/2017.md
deleted file mode 120000
index bf4fe14279d..00000000000
--- a/docs/zh/changelog/2017.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/changelog/2017.md
\ No newline at end of file
diff --git a/docs/zh/changelog/2018.md b/docs/zh/changelog/2018.md
deleted file mode 120000
index 20799251f43..00000000000
--- a/docs/zh/changelog/2018.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/changelog/2018.md
\ No newline at end of file
diff --git a/docs/zh/changelog/2019.md b/docs/zh/changelog/2019.md
deleted file mode 120000
index 105ca144fca..00000000000
--- a/docs/zh/changelog/2019.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/changelog/2019.md
\ No newline at end of file
diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md
deleted file mode 120000
index 79b747aee1b..00000000000
--- a/docs/zh/changelog/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../CHANGELOG.md
\ No newline at end of file
diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md
new file mode 100644
index 00000000000..c79e32ceaf3
--- /dev/null
+++ b/docs/zh/changelog/index.md
@@ -0,0 +1,666 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_title: 'Changelog'
+---
+
+## ClickHouse Release V20.3 {#clickhouse-release-v20-3}
+
+### ClickHouse Release v20.3.4.10, 2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20}
+
+#### Bug Fix {#bug-fix}
+
+- This release also contains all bug fixes from 20.1.8.41.
+- Fix missing `rows_before_limit_at_least` for queries over http (with the processors pipeline). This fixes [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse Release v20.3.3.6, 2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17}
+
+#### Bug Fix {#bug-fix-1}
+
+- This release also contains all bug fixes from 20.1.7.38.
+- Fix a bug in replication that doesn't allow replication to work if the user has executed mutations on the previous version. This fixes [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
+- Add the setting `use_compact_format_in_distributed_parts_names`, which allows writing files for `INSERT` queries into `Distributed` tables in a more compact format. This fixes [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([tavplubix](https://github.com/tavplubix)). It makes version 20.3 backward compatible again.
+
+### ClickHouse Release v20.3.2.1, 2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Fixed the issue `file name too long` when sending data for `Distributed` tables with a large number of replicas. Fixed the issue of replica credentials being exposed in the server log. The directory name format on disk was changed to `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) After you upgrade to the new version, you will not be able to downgrade without manual intervention, because the old server version does not recognize the new directory format. If you want to downgrade, you have to manually rename the corresponding directories to the old format. This change is relevant only if you have used asynchronous `INSERT`s into `Distributed` tables. In version 20.3.3 we will introduce a setting that allows you to enable the new format gradually.
+- Changed the format of replication log entries for mutation commands. You have to wait for old mutations to be processed before installing the new version.
+- Implement a simple memory profiler that dumps stack traces to `system.trace_log` every N bytes over the soft allocation limit. [\#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([Ivan](https://github.com/abyss7)) [\#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([alexey-milovidov](https://github.com/alexey-milovidov)) The column of `system.trace_log` was renamed from `timer_type` to `trace_type`. This will require changes in third-party performance analysis and flamegraph processing tools.
+- Use the OS thread id everywhere instead of the internal thread number. This fixes [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). Old `clickhouse-client` cannot receive logs sent from the server when the setting `send_logs_level` is enabled, because the names and types of the structured log messages were changed. On the other hand, different server versions can send logs with different types to each other. If you don't use the `send_logs_level` setting, you should not care. [\#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the `indexHint` function. [\#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the `findClusterIndex`, `findClusterValue` functions. This fixes [\#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com`. [\#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's not allowed to create columns or add columns with a `SELECT` subquery as the default expression. [\#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin))
+- Require aliases for subqueries in JOIN. [\#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved the `ALTER MODIFY/ADD` query logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of the type doesn't lose the default expression value. This fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### New Feature {#new-feature}
+
+- Add `Avro` and `AvroConfluent` input/output formats. [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add the query `ALTER ... MATERIALIZE TTL`. It runs a mutation that forces removal of expired data by TTL and recalculates TTL-related meta-information in all parts. [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ))
+- Switch from HashJoin to MergeJoin (on disk) if needed. [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `MOVE PARTITION` command for `ALTER TABLE`. [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Reload the storage configuration from the configuration file on the fly. [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allowed changing `storage_policy` to a no less rich one. [\#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added support for globs/wildcards for the S3 storage and table function. [\#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Implement `bitAnd`, `bitOr`, `bitXor`, `bitNot` for the `FixedString(N)` data type. [\#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added the function `bitCount`. This fixes [\#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). [\#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov))
+- Add the `generateRandom` table function to generate random rows with a given schema. Allows populating an arbitrary test table with data. [\#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega))
+- `JSONEachRowFormat`: support the special case when objects are enclosed in a top-level array. [\#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Kruglov Pavel](https://github.com/Avogar))
+- Now it's possible to create a column with a `DEFAULT` expression that depends on a column with a default `ALIAS` expression. [\#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin))
+- Allow specifying `--limit` larger than the source data size in `clickhouse-obfuscator`. The data will repeat itself with a different random seed. [\#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `groupArraySample` function (similar to `groupArray`) with a reservoir sampling algorithm. [\#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([Amos Bird](https://github.com/amosbird))
+- Now you can monitor the size of the update queue in `cache`/`complex_key_cache` dictionaries via system metrics. [\#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Allow using CRLF as the line separator in CSV output format, with the setting `output_format_csv_crlf_end_of_line` set to 1. [\#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [\#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [\#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb))
+- Implement more functions of the [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` and `stringToH3`. [\#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery))
+- New setting introduced: `max_parser_depth`, to control the maximum stack size and allow large complex queries. This fixes [\#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) and [\#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [\#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH))
+- Add the setting `force_optimize_skip_unused_shards` to throw if skipping of unused shards is not possible. [\#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat))
+- Allow configuring multiple disks/volumes for storing data to send in the `Distributed` engine. [\#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat))
+- Support a storage policy (``) for storing temporary data. [\#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat))
+- Added the `X-ClickHouse-Exception-Code` HTTP header that is set if an exception was thrown before sending data. This implements [\#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [\#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb))
+- Added the function `ifNotFinite` (see the sketch after this list). It is just syntactic sugar: `ifNotFinite(x, y) = isFinite(x) ? x : y`. [\#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `last_successful_update_time` column to the `system.dictionaries` table. [\#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add the `blockSerializedSize` function (size on disk without compression). [\#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat))
+- Add the function `moduloOrZero`. [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz))
+- Added the system tables `system.zeros` and `system.zeros_mt` as well as the table functions `zeros()` and `zeros_mt()`. The tables (and table functions) contain a single column named `zero` of type `UInt8`. This column contains zeros. It is needed for test purposes as the fastest method to generate many rows. This fixes [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604). [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
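+
+A minimal sketch exercising a few of the functions and tables introduced above; the values are illustrative:
+
+``` sql
+SELECT ifNotFinite(1 / 0, -1);      -- -1: syntactic sugar for isFinite(x) ? x : y
+SELECT bitCount(255);               -- 8
+SELECT count() FROM zeros(1000000); -- the fastest way to generate many rows for tests
+```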
+
+#### Experimental Feature {#experimental-feature}
+
+- Added a new compact format of parts in `MergeTree`-family tables in which all columns are stored in one file. It helps to increase the performance of small and frequent inserts. The old format (one file per column) is now called wide. The data storage format is controlled by the settings `min_bytes_for_wide_part` and `min_rows_for_wide_part`. [\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ))
+- Support S3 storage for `Log`, `TinyLog` and `StripeLog` tables. [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser))
+
+#### Bug Fix {#bug-fix-2}
+
+- Fixed inconsistent whitespace in log messages. [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug in which arrays of unnamed tuples were flattened as Nested structures on table creation. [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2))
+- Fixed the issue when a “Too many open files” error could happen if there are too many files matching a glob pattern in a `File` table or `file` table function. Files are now opened lazily. This fixes [\#8857](https://github.com/ClickHouse/ClickHouse/issues/8857). [\#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- DROP TEMPORARY TABLE now drops only the temporary table. [\#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar))
+- Remove an outdated partition when we shut down the server or detach/attach a table. [\#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed how the default disk calculates free space from the `data` subdirectory. Fixed the issue when the amount of free space was calculated incorrectly if the `data` directory is mounted on a separate device (a rare case). This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441). [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb))
+- Allow comma (cross) join with IN () inside. [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow rewriting CROSS to INNER JOIN if there is a \[NOT\] LIKE operator in the WHERE section. [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a possible incorrect result after `GROUP BY` with the setting `distributed_aggregation_memory_efficient` enabled. This fixes [\#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [\#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Found keys were counted as missed in the metrics of cache dictionaries. [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fixed a race condition on `queue_task_handle` at startup of `ReplicatedMergeTree` tables. [\#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The token `NOT` didn't work in the `SHOW TABLES NOT LIKE` query. [\#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [\#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a range check to the function `h3EdgeLengthM`. Without this check, buffer overflow was possible. [\#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in batched calculations of ternary logical operations on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz))
+- Fix an error in PREWHERE optimization, which could lead to segfaults or the `Inconsistent number of columns got from MergeTreeRangeReader` exception. [\#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ))
+- Fix an unexpected `Timeout exceeded while reading from socket` exception, which randomly happens on secure connections before the timeout is actually exceeded and when the query profiler is enabled. Also add the `connect_timeout_with_failover_secure_ms` setting (default 100 ms), which is similar to `connect_timeout_with_failover_ms`, but is used for secure connections (because the SSL handshake is slower than an ordinary TCP connection). [\#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix))
+- Fix a bug with mutation finalization, when a mutation may hang in a state with `parts_to_do=0` and `is_done=0`. [\#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin))
+- Use the new ANY JOIN logic with the `partial_merge_join` setting. It's now possible to make `ANY|ALL|SEMI LEFT` and `ALL INNER` joins with `partial_merge_join=1`. [\#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2))
+- A shard now clamps the settings received from the initiator to the shard's constraints instead of throwing an exception. This fix allows sending queries to a shard with different constraints. [\#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a memory management problem in `MergeTreeReadPool`. [\#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the `toDecimal*OrNull()` function family when called with the string `e`. This fixes [\#8312](https://github.com/ClickHouse/ClickHouse/issues/8312). [\#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2))
+- Make sure that `FORMAT Null` sends no data to the client. [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fix a bug where the timestamp in `LiveViewBlockInputStream` was not updated. `LIVE VIEW` is an experimental feature. [\#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [\#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider))
+- Fixed the wrong `ALTER MODIFY TTL` behavior that didn't allow deleting old TTL expressions. [\#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a UBSan report in MergeTreeIndexSet. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250). [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160). [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid throwing from the destructor in the Apache Avro third-party library. [\#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Don't commit a batch polled from `Kafka` partially, as it can lead to holes in data. [\#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov))
+- Fix `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird))
+- Fix data incompatibility when compressed with the `T64` codec. [\#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fix data type ids in the `T64` compression codec that led to wrong (de)compression in affected versions. [\#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the setting `enable_early_constant_folding` and disable it in some cases that led to errors. [\#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the pushdown predicate optimizer with VIEW and enable the test. [\#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a segfault in `Merge` tables, which can happen when reading from `File` storages. [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix))
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise it could make data of a part inaccessible after restart and prevent ClickHouse from starting. [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix alters if there is a TTL set for the table. [\#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionary is being modified/added/removed. [\#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar))
+- In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in the data directory of the database (e.g. `/var/lib/clickhouse/db_name`). [\#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix))
+- Fixed wrong log messages about a missing default disk or policy. [\#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Allow the first column(s) in a table with the `Log` engine to be an alias. [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7))
+- Fix the order of ranges while reading from a `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ))
+- Make `reinterpretAsFixedString` return `FixedString` instead of `String`. [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Avoid extremely rare cases when the user can get a wrong error message (`Success` instead of a detailed error description). [\#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not crash when using the `Template` format with an empty row template. [\#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Metadata files for system tables could be created in the wrong place. [\#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [\#8581](https://github.com/ClickHouse/ClickHouse/issues/8581).
+- Fix a data race on exception\_ptr in the cache dictionary [\#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [\#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Do not throw an exception for the query `ATTACH TABLE IF NOT EXISTS`. Previously it was thrown if the table already exists, despite the `IF NOT EXISTS` clause. [\#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a missing closing paren in an exception message. [\#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the message `Possible deadlock avoided` at startup of clickhouse-client in interactive mode. [\#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue where padding at the end of a base64-encoded value could be malformed. Update the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492). [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. This fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov))
+- Fixed an exception in `DROP TABLE IF EXISTS`. [\#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fix a crash when a user tries to `ALTER MODIFY SETTING` for the old-format `MergeTree` table engine family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Support UInt64 numbers that don't fit in Int64 in JSON-related functions. Update SIMDJSON to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209). [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed execution of inversed predicates when a non-strictly monotonic functional index is used. [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([Alexander Kazakov](https://github.com/Akazz))
+- Don't try to fold `IN` constants in `GROUP BY`. [\#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([Amos Bird](https://github.com/amosbird))
+- Fix a bug in `ALTER DELETE` mutations which led to index corruption. This fixes [\#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) and [\#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). Additionally fix extremely rare race conditions in `ReplicatedMergeTree` `ALTER` queries. [\#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([alesapin](https://github.com/alesapin))
+- When the setting `compile_expressions` is enabled, you could get `unexpected column` in `LLVMExecutableFunction` when using `Nullable` types. [\#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Multiple fixes for the `Kafka` engine: 1) fix duplicates that appeared during consumer group rebalance. 2) Fix rare ‘holes’ that appeared when data were polled from several partitions with one poll and committed partially (now we always process/commit the whole polled block of messages). 3) Fix flushes by block size (before that, only flushing by timeout worked properly). 4) Better subscription procedure (with assignment feedback). 5) Make tests work faster (with default intervals and timeouts). Because data was not flushed by block size before (as it should be, according to the documentation), this PR may lead to some performance degradation with default settings (due to more frequent and smaller flushes, which are less optimal). If you encounter a performance issue after this change, increase `kafka_max_block_size` in the table to a bigger value (for example, `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`; see the sketch after this list). This fixes [\#7259](https://github.com/ClickHouse/ClickHouse/issues/7259). [\#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov))
+- Fix the `Parameter out of bound` exception in some queries after PREWHERE optimizations. [\#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard))
+- Fixed the case of mixed constness of arguments of the function `arrayZip`. [\#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace the empty database name with the current database. This fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix))
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed a bug where a double move could corrupt the original part. This is relevant if you use `ALTER TABLE MOVE`. [\#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allow the `interval` identifier to be parsed correctly without backticks. Fixed the issue when a query could not be executed even if the `interval` identifier was enclosed in backticks or double quotes. This fixes [\#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [\#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed fuzz testing and incorrect behaviour of the `bitTestAll`/`bitTestAny` functions. [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are many rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fix mutations with parts written with `insert_quorum` enabled. [\#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([alesapin](https://github.com/alesapin))
+- Fix a data race at destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [\#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2))
+- Fixed the `Parameters are out of bound` exception in some rare cases when we have a constant in the `SELECT` clause together with an `ORDER BY` and a `LIMIT` clause. [\#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix mutation finalization, when an already-done mutation could have the status `is_done=0`. [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin))
+- Prevent executing `ALTER ADD INDEX` for MergeTree tables with the old syntax, because it doesn't work. [\#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb))
+- During server startup, do not access the tables that a `LIVE VIEW` depends on, so the server will be able to start. Also remove `LIVE VIEW` dependencies when detaching a `LIVE VIEW`. `LIVE VIEW` is an experimental feature. [\#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix))
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ))
+- Fix possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug where parts were not moved in the background by TTL rules when there is only one volume. [\#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the issue `Method createColumn() is not implemented for data type Set`. This fixes [\#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [\#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we will try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fix `intDiv` by a minus-one constant. [\#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz))
+- Fix a possible race condition in `BlockIO`. [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a bug leading to server termination when trying to use/drop a `Kafka` table created with wrong parameters. [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov))
+- Added a workaround for when the OS returns a wrong result for the `timer_create` function. [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error in usage of the `min_marks_for_seek` parameter. Fixed the error message when there is no sharding key in a Distributed table and we try to skip unused shards. [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat))
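+
+A minimal sketch of the `kafka_max_block_size` workaround from the Kafka entry above; the table, column, broker, topic and group names are hypothetical, only the setting value comes from the entry:
+
+``` sql
+CREATE TABLE queue
+(
+    ts DateTime,
+    msg String
+)
+ENGINE = Kafka
+SETTINGS kafka_broker_list = 'localhost:9092',
+         kafka_topic_list = 'events',
+         kafka_group_name = 'ch-consumer',
+         kafka_format = 'JSONEachRow',
+         kafka_max_block_size = 524288;
+```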
+
+#### Improvement {#improvement}
+
+- Implement `ALTER MODIFY/DROP` queries on top of mutations for the `ReplicatedMergeTree*` engine family. Now `ALTERS` block only at the metadata update stage, and don't block after that. [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin))
+- Add the ability to rewrite CROSS to INNER JOINs with a `WHERE` section containing unqualified names. [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2))
+- Make `SHOW TABLES` and `SHOW DATABASES` queries support `WHERE` expressions and `FROM`/`IN`. [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li))
+- Added a setting `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy))
+- After recent changes the MySQL client started to print binary strings in hex, thereby making them unreadable ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always, but usually, the case. [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy))
+- Add support for String and FixedString keys for `sumMap`. [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard))
+- Support string keys in SummingMergeTree maps. [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard))
+- Signal termination of a thread to the thread pool even if the thread has thrown an exception. [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([dingxiangfei2009](https://github.com/dingxiangfei2009))
+- Allow setting `query_id` in `clickhouse-benchmark`. [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ))
+- Don't allow strange expressions in `ALTER TABLE ... PARTITION partition` queries. This addresses [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192). [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober))
+- Enable `system.metric_log` by default. It will contain rows with values of ProfileEvents and CurrentMetrics collected at the “collect\_interval\_milliseconds” interval (one second by default). The table is very small (usually on the order of megabytes) and collecting this data by default is reasonable. [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [\#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [\#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7))
+- Now a temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...` (see the sketch after this list). [\#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix))
+- Add the text\_log.level configuration parameter to limit the entries that go into the `system.text_log` table. [\#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat))
+- Allow putting downloaded parts onto disks/volumes according to TTL rules. [\#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon))
+- For external MySQL dictionaries, allow mutualizing the MySQL connection pool to “share” it among dictionaries. This option significantly reduces the number of connections to MySQL servers. [\#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Show the nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [\#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Possibility to add a key and timestamp for the message when inserting data into Kafka. This fixes [\#7198](https://github.com/ClickHouse/ClickHouse/issues/7198). [\#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov))
+- If the server is run from a terminal, highlight the thread number, query id and log priority with colors. This improves the readability of correlated log messages for developers. [\#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message while loading tables for `Ordinary` databases. [\#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implement `arraySlice` for arrays with aggregate function states. This fixes [\#9388](https://github.com/ClickHouse/ClickHouse/issues/9388). [\#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow constant functions and constant arrays to be used on the right side of the IN operator. [\#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ))
+- If a ZooKeeper exception happened while fetching data for system.replicas, display it in a separate column. This implements [\#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [\#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Atomically remove MergeTree data parts on destroy. [\#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Support row-level security for Distributed tables. [\#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7))
+- Now we recognize suffixes (like KB, KiB…) in settings values. [\#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb))
+- Prevent out-of-memory while constructing the result of a large JOIN. [\#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2))
+- Added names of clusters to suggestions in interactive mode in `clickhouse-client`. [\#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [\#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7))
+- Added the column `exception_code` to the `system.query_log` table. [\#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb))
+- Enabled the MySQL compatibility server on port `9004` in the default server configuration file. Fixed the password generation command in the example in the configuration. [\#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy))
+- Prevent abort on shutdown if the filesystem is read-only. This fixes [\#9094](https://github.com/ClickHouse/ClickHouse/issues/9094). [\#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message when length is required in an HTTP POST query. [\#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add the `_path` and `_file` virtual columns to the `HDFS` and `File` engines and the `hdfs` and `file` table functions. [\#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix the error `Cannot find column` while inserting into a `MATERIALIZED VIEW` in the case when a new column was added to the view's internal table. [\#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [\#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [\#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [\#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix progress over the native client-server protocol by sending progress after the final update (like logs). This may be relevant only to some third-party tools that use the native protocol. [\#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat))
+- Add a system metric tracking the number of client connections using the MySQL protocol ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach))
+- From now on, HTTP responses will have the `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon))
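+
+A minimal sketch of the revised temporary `LIVE VIEW` syntax from the improvements list above; `LIVE VIEW` is experimental, the table name is hypothetical, and the opt-in flag is assumed to be named `allow_experimental_live_view`:
+
+``` sql
+SET allow_experimental_live_view = 1;
+CREATE LIVE VIEW lv WITH TIMEOUT 42 AS SELECT count() FROM events;
+```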
+
+#### Performance Improvement {#performance-improvement}
+
+- Improve performance of analysing indexes with IN. [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ))
+- Simpler and more efficient code in logical functions, plus code cleanups. A follow-up to [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718). [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz))
+- Overall performance improvement (in the range of 5%..200% for affected queries) by ensuring even stricter aliasing with C++20 features. [\#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird))
+- Stricter aliasing for the inner loops of comparison functions. [\#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Stricter aliasing for the inner loops of arithmetic functions. [\#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A ~3 times faster implementation of ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. It will also be useful in tests when materializing constants. [\#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz))
+- Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher-order functions), an even further improvement to [\#9293](https://github.com/ClickHouse/ClickHouse/issues/9293). [\#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz))
+- Improved performance of the `stochasticLinearRegression` aggregate function. This patch was contributed by Intel. [\#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improve performance of the `reinterpretAsFixedString` function. [\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not send blocks to the client for the `Null` format in the processors pipeline. [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+- Replace `readline` with `replxx` for interactive line editing in `clickhouse-client`. [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7))
+- Better build time and fewer template instantiations in FunctionsComparison. [\#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added integration with `clang-tidy` in CI. See also [\#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [\#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we link ClickHouse in CI using `lld` even for `gcc`. [\#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin))
+- Allow randomizing thread scheduling and inserting glitches when `THREAD_FUZZER_*` environment variables are set. This helps testing. [\#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable secure sockets in stateless tests. [\#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix))
+- Make SPLIT\_SHARED\_LIBRARIES=OFF more robust. [\#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat))
+- Make the “performance\_introspection\_and\_logging” test reliable against random server hangs. This may happen in the CI environment. See also [\#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [\#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Validate XML in the style check. [\#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition in the test `00738_lock_for_inner_table`. This test relied on sleep. [\#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (which is more reliable). [\#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for arithmetic functions. [\#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for the `sumMap` and `sumMapWithOverflow` aggregate functions. A follow-up to [\#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [\#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Ensure the style of ErrorCodes via the style check. [\#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a script for tests history. [\#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin))
+- Add the GCC warning `-Wsuggest-override` to locate and fix all places where the `override` keyword must be used. [\#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Ignore a weak symbol under Mac OS X because it must be defined. [\#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([Deleted user](https://github.com/ghost))
+- Normalize the running time of some queries in performance tests. This is done in preparation for running all the performance tests in comparison mode. [\#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix some tests to support pytest with query tests. [\#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7))
+- Enable SSL in builds with MSan, so the server will not fail at startup when running stateless tests. [\#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix))
+- Fix database substitution in test results. [\#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega))
+- Build fixes for miscellaneous platforms. [\#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [\#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [\#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller))
+- Added a disks section to the stateless-with-coverage test docker image. [\#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser))
+- Get rid of in-source-tree files when building with GRPC. [\#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird))
+- Slightly faster build time by removing SessionCleaner from Context. Make the code of SessionCleaner simpler. [\#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the check for hung queries in the clickhouse-test script. [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz))
+- Removed some useless files from the repository. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the type of math perftests from `once` to `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a docker image which allows building an interactive code browser HTML report for our codebase. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See the [Woboq Code Browser](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/index.html)
+- Suppress some test failures under MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Speed up the “exception while insert” test. This test often timed out in the debug-with-coverage build. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libcxx` and `libcxxabi` to master. In preparation for [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the flaky test `00910_zookeeper_test_alter_compression_codecs`. [\#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [\#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
+- Add the `clickhouse-odbc` driver into the test images. This allows testing the interaction of ClickHouse with ClickHouse via its own ODBC driver. [\#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
+- Fix several bugs in unit tests. [\#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
+- Enable the `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes, mostly a result of CMake scripting errors. [\#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Describe the reasons if the query profiler cannot work. This is intended for [\#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [\#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update OpenSSL to upstream master. Fixed the issue when TLS connections could fail with the messages `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update the Dockerfile for the server. [\#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray))
+- Minor fixes in the build-gcc-from-sources script. [\#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach))
+- Replace `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [\#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a stack overflow issue when using initializer\_list in Column constructors. [\#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([Deleted user](https://github.com/ghost))
+- Upgrade librdkafka to v1.3.0. Enable the bundled `rdkafka` and `gsasl` libraries on Mac OS X. [\#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Build fix on GCC 9.2.0. [\#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider))
+
+## ClickHouse Release V20.1 {#clickhouse-release-v20-1}
+
+### ClickHouse Release v20.1.8.41, 2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20}
+
+#### Bug Fix {#bug-fix-3}
+
+- Fix a possible permanent `Cannot schedule a task` error (due to an unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). This fixes [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat))
这修复 [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat)) +- 修复过多的内存消耗 `ALTER` 查询(突变)。 这修复 [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) 和 [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([阿利沙平](https://github.com/alesapin)) +- 修复外部字典DDL中反引用的错误。 这修复 [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([阿利沙平](https://github.com/alesapin)) + +### ClickHouse释放v20.1.7.38,2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18} + +#### 错误修复 {#bug-fix-4} + +- 修正了不正确的内部函数名称 `sumKahan` 和 `sumWithOverflow`. 在远程查询中使用此函数时,我会导致异常。 [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). 这个问题是在所有ClickHouse版本。 +- 允许 `ALTER ON CLUSTER` 的 `Distributed` 具有内部复制的表。 这修复 [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). 这个问题是在所有ClickHouse版本。 +- 修复可能的异常 `Size of filter doesn't match size of column` 和 `Invalid number of rows in Chunk` 在 `MergeTreeRangeReader`. 它们可能在执行时出现 `PREWHERE` 在某些情况下。 修复 [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复了这个问题:如果你编写一个简单的算术表达式,则不会保留时区 `time + 1` (与像这样的表达形成对比 `time + INTERVAL 1 SECOND`). 这修复 [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)). 这个问题是在所有ClickHouse版本。 +- 现在不可能创建或添加具有简单循环别名的列,如 `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([阿利沙平](https://github.com/alesapin)) +- 修复了base64编码值末尾填充格式错误的问题。 更新base64库。 这修复 [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491),关闭 [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复数据竞赛破坏 `Poco::HTTPServer`. 当服务器启动并立即关闭时,可能会发生这种情况。 [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复可能的崩溃/错误的行数 `LIMIT n WITH TIES` 当有很多行等于第n行时。 [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix)) +- 修复与列Ttl可能不匹配的校验和。 [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复当用户尝试崩溃 `ALTER MODIFY SETTING` 对于老格式化 `MergeTree` 表引擎家族. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([阿利沙平](https://github.com/alesapin)) +- 现在我们将尝试更频繁地完成突变。 [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([阿利沙平](https://github.com/alesapin)) +- 修复引入的复制协议不兼容 [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). 
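+ +上述时区修复可以用一个示意查询观察(非官方示例,仅作说明): + +    SELECT toDateTime('2020-01-01 00:00:00', 'Europe/Moscow') AS time, toTypeName(time + 1), toTypeName(time + INTERVAL 1 SECOND) + +修复后,两个表达式的返回类型都应为 `DateTime('Europe/Moscow')`,即时区被保留。 +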
[\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([阿利沙平](https://github.com/alesapin)) +- 修复数组类型的bloom\_filter索引的not(has())。 [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab)) +- 固定的行为 `match` 和 `extract` 当干草堆有零字节的函数。 当干草堆不变时,这种行为是错误的。 这修复 [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-1} + +- 异常处理现在可以在适用于Linux的Windows子系统上正常工作。 看https://github.com/ClickHouse-Extras/libunwind/pull/3 这修复 [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) + +### ClickHouse释放v20.1.6.30,2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05} + +#### 错误修复 {#bug-fix-5} + +- 修复压缩时的数据不兼容 `T64` 编解ec + [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7) +- 在一个线程中从MergeTree表中读取时修复范围顺序。 修复 [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964). + [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ))](https://github.com/CurtizJ) +- 修复可能的段错误 `MergeTreeRangeReader`,同时执行 `PREWHERE`. 修复 [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064). + [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ))](https://github.com/CurtizJ) +- 修复 `reinterpretAsFixedString` 返回 `FixedString` 而不是 `String`. + [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew) +- 修复 `joinGet` 使用可为空的返回类型。 修复 [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919) + [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird) +- 修复bittestall/bitTestAny函数的模糊测试和不正确的行为。 + [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 修复当干草堆有零字节时匹配和提取函数的行为。 当干草堆不变时,这种行为是错误的。 修复 [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) + [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 当使用非严格单调函数索引时,固定执行反转谓词。 修复 [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034) + [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz) +- 允许重写 `CROSS` 到 `INNER JOIN` 如果有 `[NOT] LIKE` 操作员在 `WHERE` 科。 修复 [\#9191](https://github.com/ClickHouse/ClickHouse/issues/9191) + [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2) +- 允许使用日志引擎的表中的第一列成为别名。 + [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7) +- 允许逗号加入 `IN()` 进去 修复 [\#7314](https://github.com/ClickHouse/ClickHouse/issues/7314). + [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2) +- 改进 `ALTER MODIFY/ADD` 查询逻辑。 现在你不能 `ADD` 不带类型的列, `MODIFY` 默认表达式不改变列的类型和 `MODIFY` type不会丢失默认表达式值。 修复 [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). 
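+ +上面 `ALTER` 逻辑的改进可以用示意语句说明(`t` 为假设的表名,非出自该修复本身): + +    ALTER TABLE t ADD COLUMN c DEFAULT 42 + +按上述规则,这条语句现在会因缺少类型而报错;正确写法是同时给出类型与默认表达式: + +    ALTER TABLE t ADD COLUMN c UInt8 DEFAULT 42 +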
+ [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin) +- 修复突变的最终确定逻辑:已经完成的突变可能带有 is\_done=0 的状态。 + [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin) +- 为 system.numbers 和 system.numbers\_mt 支持 “Processors” 管道。这也修复了 `max_execution_time` 不生效的错误。 + [\#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai) +- 修复 `DictCacheKeysRequestedFound` 指标的计数错误。 + [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov) +- 为 `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE` 添加了存储策略检查,否则可能使部分数据在重启后无法访问,并阻止 ClickHouse 启动。 + [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon) +- 修复 `MergeTreeIndexSet` 中的 UBSan 报告。这修复 [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) + [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 修复 BlockIO 中可能的数据竞争。 + [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai) +- 在 JSON 相关函数中支持超出 Int64 范围的 `UInt64` 数字。将 `SIMDJSON` 更新到 master。这修复 [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) + [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 修复将数据目录挂载到单独设备时可用空间量计算不正确的问题。对于默认磁盘,按数据子目录计算可用空间。这修复 [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) + [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(米尔布)](https://github.com/millb) +- 修复 TLS 连接可能失败并报出 `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` 的问题。将 OpenSSL 更新到上游 master。 + [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 执行 `CREATE` 查询时,折叠存储引擎参数中的常量表达式,并将空数据库名替换为当前数据库。修复 [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). 还修复了 ClickHouseDictionarySource 中对本地地址的检查。 + [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix) +- 修复 `StorageMerge` 从 StorageFile 读取时可能发生的段错误。 + [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tabplubix)](https://github.com/tavplubix) +- 防止 `Kafka` 在极少数情况下丢失数据:当异常发生在读取后缀之后、提交之前。修复 [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). 相关: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) + [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(菲利蒙诺夫)](https://github.com/filimonov) +- 修复使用或删除以错误参数创建的 `Kafka` 表时导致服务器终止的错误。修复 [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). 结合 [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+ [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(菲利蒙诺夫)](https://github.com/filimonov) + +#### 新功能 {#new-feature-1} + +- 添加 `deduplicate_blocks_in_dependent_materialized_views` 用于控制具有实例化视图的表中幂等插入的行为的选项。 这个新功能是由Altinity的特殊要求添加到错误修正版本中的。 + [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) + +### ClickHouse版本v20.1.2.4,2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22} + +#### 向后不兼容的更改 {#backward-incompatible-change-1} + +- 使设置 `merge_tree_uniform_read_distribution` 过时了 服务器仍可识别此设置,但无效。 [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更改函数的返回类型 `greatCircleDistance` 到 `Float32` 因为现在计算的结果是 `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在预计查询参数表示为 “escaped” 格式。 例如,要传递字符串 `ab` 你必须写 `a\tb` 或 `a\b` 并分别, `a%5Ctb` 或 `a%5C%09b` 在URL中。 这是需要添加传递NULL作为的可能性 `\N`. 这修复 [\#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [\#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 启用 `use_minimalistic_part_header_in_zookeeper` 设置 `ReplicatedMergeTree` 默认情况下。 这将显着减少存储在ZooKeeper中的数据量。 自19.1版本以来支持此设置,我们已经在多个服务的生产中使用它,半年以上没有任何问题。 如果您有机会降级到19.1以前的版本,请禁用此设置。 [\#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 数据跳过索引已准备就绪并默认启用。 设置 `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` 和 `allow_experimental_multiple_joins_emulation` 现在已经过时,什么也不做。 [\#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加新建 `ANY JOIN` 逻辑 `StorageJoin` 符合 `JOIN` 操作。 要在不改变行为的情况下进行升级,您需要添加 `SETTINGS any_join_distinct_right_table_keys = 1` 引擎联接表元数据或在升级后重新创建这些表。 [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2)) +- 要求重新启动服务器以应用日志记录配置中的更改。 这是一种临时解决方法,可以避免服务器将日志记录到已删除的日志文件中的错误(请参阅 [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) + +#### 新功能 {#new-feature-2} + +- 添加了有关部件路径的信息 `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 添加执行能力 `SYSTEM RELOAD DICTIONARY` 查询中 `ON CLUSTER` 模式 [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 添加执行能力 `CREATE DICTIONARY` 查询中 `ON CLUSTER` 模式 [\#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([阿利沙平](https://github.com/alesapin)) +- 现在用户的个人资料 `users.xml` 可以继承多个配置文件。 [\#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) +- 已添加 `system.stack_trace` 允许查看所有服务器线程的堆栈跟踪的表。 这对于开发人员反省服务器状态非常有用。 这修复 [\#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). 
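+ +新表 `system.stack_trace` 的示意用法(这里不假设具体列名,列集以实际版本为准): + +    SELECT * FROM system.stack_trace LIMIT 1 FORMAT Vertical +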
[\#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加 `DateTime64` 具有可配置子秒精度的数据类型。 [\#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 添加表函数 `clusterAllReplicas` 这允许查询集群中的所有节点。 [\#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari)) +- 添加聚合函数 `categoricalInformationValue` 其计算出离散特征的信息值。 [\#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz)) +- 加快数据文件的解析 `CSV`, `TSV` 和 `JSONEachRow` 通过并行进行格式化。 [\#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 添加功能 `bankerRound` 它执行银行家的四舍五入。 [\#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz)) +- 支持区域名称的嵌入式字典中的更多语言: ‘ru’, ‘en’, ‘ua’, ‘uk’, ‘by’, ‘kz’, ‘tr’, ‘de’, ‘uz’, ‘lv’, ‘lt’, ‘et’, ‘pt’, ‘he’, ‘vi’. [\#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的一致性 `ANY JOIN` 逻辑 现在 `t1 ANY LEFT JOIN t2` 等于 `t2 ANY RIGHT JOIN t1`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2)) +- 添加设置 `any_join_distinct_right_table_keys` 这使旧的行为 `ANY INNER JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2)) +- 添加新建 `SEMI` 和 `ANTI JOIN`. 老 `ANY INNER JOIN` 行为现在可作为 `SEMI LEFT JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2)) +- 已添加 `Distributed` 格式 `File` 发动机和 `file` 表函数,它允许从读 `.bin` 通过异步插入生成的文件 `Distributed` 桌子 [\#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 添加可选的重置列参数 `runningAccumulate` 这允许为每个新的键值重置聚合结果。 [\#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([谢尔盖\*科诺年科](https://github.com/kononencheg)) +- 添加使用ClickHouse作为普罗米修斯端点的能力。 [\#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([vdimir](https://github.com/Vdimir)) +- 添加部分 `` 在 `config.xml` 这将限制允许的主机用于远程表引擎和表函数 `URL`, `S3`, `HDFS`. [\#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 添加功能 `greatCircleAngle` 它计算球体上的距离(以度为单位)。 [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改变地球半径与h3库一致。 [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 已添加 `JSONCompactEachRow` 和 `JSONCompactEachRowWithNamesAndTypes` 输入和输出格式。 [\#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 增加了与文件相关的表引擎和表函数的功能 (`File`, `S3`, `URL`, `HDFS`)它允许读取和写入 `gzip` 基于附加引擎参数或文件扩展名的文件。 [\#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([安德烈\*博德罗夫](https://github.com/apbodrov)) +- 添加了 `randomASCII(length)` 函数,生成一个字符串与一个随机集 [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) 可打印字符。 [\#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([刺刀](https://github.com/BayoNet)) +- 添加功能 `JSONExtractArrayRaw` 它返回从未解析的json数组元素上的数组 `JSON` 字符串。 [\#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx)) +- 添加 `arrayZip` 函数允许将多个长度相等的数组合成一个元组数组。 [\#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([张冬](https://github.com/zhang2014)) +- 添加根据配置的磁盘之间移动数据的能力 `TTL`-表达式为 `*MergeTree` 表引擎家族. 
[\#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 增加了新的聚合功能 `avgWeighted` 其允许计算加权平均值。 [\#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([安德烈\*博德罗夫](https://github.com/apbodrov)) +- 现在并行解析默认启用 `TSV`, `TSKV`, `CSV` 和 `JSONEachRow` 格式。 [\#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 从添加几个地理功能 `H3` 图书馆: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` 和 `h3kRing`. [\#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit)) +- 增加了对brotli的支持 (`br`)压缩文件相关的存储和表函数。 这修复 [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加 `groupBit*` 功能的 `SimpleAggregationFunction` 类型。 [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) + +#### 错误修复 {#bug-fix-6} + +- 修复重命名表 `Distributed` 引擎 修复问题 [\#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) +- 现在字典支持 `EXPRESSION` 对于非ClickHouse SQL方言中任意字符串中的属性。 [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([阿利沙平](https://github.com/alesapin)) +- 修复损坏 `INSERT SELECT FROM mysql(...)` 查询。 这修复 [\#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) 和 [\#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) +- 修复错误 “Mismatch column sizes” 插入默认值时 `Tuple` 从 `JSONEachRow`. 这修复 [\#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [\#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix)) +- 现在将在使用的情况下抛出一个异常 `WITH TIES` 旁边的 `LIMIT BY`. 还增加了使用能力 `TOP` 与 `LIMIT BY`. 这修复 [\#7472](https://github.com/ClickHouse/ClickHouse/issues/7472). [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 从新鲜的glibc版本中修复unintendent依赖关系 `clickhouse-odbc-bridge` 二进制 [\#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([阿莫斯鸟](https://github.com/amosbird)) +- 修正错误的检查功能 `*MergeTree` 引擎家族. 现在,当我们在最后一个颗粒和最后一个标记(非最终)中有相同数量的行时,它不会失败。 [\#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([阿利沙平](https://github.com/alesapin)) +- 修复插入 `Enum*` 列后 `ALTER` 查询,当基础数值类型等于表指定类型时。 这修复 [\#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [\#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([安东\*波波夫](https://github.com/CurtizJ)) +- 允许非常数负 “size” 函数的参数 `substring`. 这是不允许的错误。 这修复 [\#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [\#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复当错误数量的参数传递到解析错误 `(O|J)DBC` 表引擎。 [\#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([阿利沙平](https://github.com/alesapin)) +- 将日志发送到syslog时使用正在运行的clickhouse进程的命令名。 在以前的版本中,使用空字符串而不是命令名称。 [\#8460](https://github.com/ClickHouse/ClickHouse/pull/8460) ([Michael Nacharov](https://github.com/mnach)) +- 修复检查允许的主机 `localhost`. 这个公关修复了在提供的解决方案 [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241). 
[\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复罕见的崩溃 `argMin` 和 `argMax` 长字符串参数的函数,当结果被用于 `runningAccumulate` 功能。 这修复 [\#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [\#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([恐龙](https://github.com/769344359)) +- 修复表的内存过度使用 `Buffer` 引擎 [\#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat)) +- 修正了可以采取的功能中的潜在错误 `NULL` 作为参数之一,并返回非NULL。 [\#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在线程池中更好地计算后台进程的指标 `MergeTree` 表引擎. [\#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复功能 `IN` 里面 `WHERE` 存在行级表筛选器时的语句。 修复 [\#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [\#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([伊万](https://github.com/abyss7)) +- 现在,如果整数值没有完全解析设置值,则会引发异常。 [\#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 修复当聚合函数用于查询具有两个以上本地分片的分布式表时出现的异常。 [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) +- 现在,bloom filter可以处理零长度数组,并且不执行冗余计算。 [\#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab)) +- 修正了通过匹配客户端主机来检查客户端主机是否允许 `host_regexp` 在指定 `users.xml`. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 放松不明确的列检查,导致多个误报 `JOIN ON` 科。 [\#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2)) +- 修正了可能的服务器崩溃 (`std::terminate`)当服务器不能发送或写入数据 `JSON` 或 `XML` 格式与值 `String` 数据类型(需要 `UTF-8` 验证)或使用Brotli算法或其他一些罕见情况下压缩结果数据时。 这修复 [\#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复竞争条件 `StorageDistributedDirectoryMonitor` 被线人发现 这修复 [\#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [\#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在背景合并 `*MergeTree` 表引擎家族更准确地保留存储策略卷顺序。 [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 现在表引擎 `Kafka` 与正常工作 `Native` 格式。 这修复 [\#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [\#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [\#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov)) +- 固定格式与标题(如 `CSVWithNames`)这是抛出关于EOF表引擎的异常 `Kafka`. [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov)) +- 修复了从子查询右侧部分制作set的错误 `IN` 科。 这修复 [\#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) 和 [\#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [\#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 从存储读取时修复可能的崩溃 `File`. [\#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 在固定的文件读取 `Parquet` 包含类型列的格式 `list`. [\#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([马苏兰](https://github.com/maxulan)) +- 修复错误 `Not found column` 对于分布式查询 `PREWHERE` 条件取决于采样键if `max_parallel_replicas > 1`. 
[\#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复错误 `Not found column` 如果使用查询 `PREWHERE` 依赖于表的别名,结果集由于主键条件而为空。 [\#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 函数的固定返回类型 `rand` 和 `randConstant` 在情况下 `Nullable` 争论。 现在函数总是返回 `UInt32` 而且从来没有 `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 禁用谓词下推 `WITH FILL` 表达。 这修复 [\#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [\#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([张冬](https://github.com/zhang2014)) +- 修正错误 `count()` 结果 `SummingMergeTree` 当 `FINAL` 部分被使用。 [\#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [\#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 修复来自远程服务器的常量函数可能不正确的结果。 它发生在具有以下功能的查询中 `version()`, `uptime()` 等。 它为不同的服务器返回不同的常量值。 这修复 [\#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [\#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复下推谓词优化中导致错误结果的复杂错误。 这解决了下推谓词优化的很多问题。 [\#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([张冬](https://github.com/zhang2014)) +- 修复崩溃 `CREATE TABLE .. AS dictionary` 查询。 [\#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat)) +- 一些改进ClickHouse语法 `.g4` 文件 [\#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([太阳里](https://github.com/taiyang-li)) +- 修复导致崩溃的错误 `JOIN`s与表与发动机 `Join`. 这修复 [\#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [\#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [\#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [\#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [\#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复冗余字典重新加载 `CREATE DATABASE`. [\#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat)) +- 限制从读取流的最大数量 `StorageFile` 和 `StorageHDFS`. 修复https://github.com/ClickHouse/ClickHouse/issues/7650. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([阿利沙平](https://github.com/alesapin)) +- 修复bug `ALTER ... MODIFY ... CODEC` 查询,当用户同时指定默认表达式和编解ec。 修复 [8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [\#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([阿利沙平](https://github.com/alesapin)) +- 修复列的后台合并错误 `SimpleAggregateFunction(LowCardinality)` 类型。 [\#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 固定类型签入功能 `toDateTime64`. [\#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 现在服务器不崩溃 `LEFT` 或 `FULL JOIN` 与和加入引擎和不支持 `join_use_nulls` 设置。 [\#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2)) +- 现在 `DROP DICTIONARY IF EXISTS db.dict` 查询不会抛出异常,如果 `db` 根本不存在 [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复表函数中可能出现的崩溃 (`file`, `mysql`, `remote`)引用删除引起的 `IStorage` 对象。 修复插入表函数时指定的列的不正确解析。 [\#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix)) +- 确保网络启动前 `clickhouse-server`. 这修复 [\#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). 
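+ +上文对插入查询中重复列名的禁止可示意如下(`tbl` 为假设表名): + +    INSERT INTO tbl (x, y, x) VALUES (1, 2, 3) + +这样的语句现在会抛出异常,而不是被默默接受。 +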
[\#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([余志昌](https://github.com/yuzhichang)) +- 修复安全连接的超时处理,因此查询不会无限挂起。 这修复 [\#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [\#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `clickhouse-copier`并发工人之间的冗余争用。 [\#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([丁香飞](https://github.com/dingxiangfei2009)) +- 现在突变不会跳过附加的部分,即使它们的突变版本比当前的突变版本大。 [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([余志昌](https://github.com/yuzhichang)) [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([阿利沙平](https://github.com/alesapin)) +- 忽略冗余副本 `*MergeTree` 数据部分移动到另一个磁盘和服务器重新启动后。 [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复崩溃 `FULL JOIN` 与 `LowCardinality` 在 `JOIN` 钥匙 [\#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2)) +- 禁止在插入查询中多次使用列名,如 `INSERT INTO tbl (x, y, x)`. 这修复 [\#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [\#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [\#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([阿利沙平](https://github.com/alesapin)) +- 增加了回退,用于检测未知Cpu的物理CPU内核数量(使用逻辑CPU内核数量)。 这修复 [\#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [\#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `There's no column` 实例化列和别名列出错。 [\#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2)) +- 固定切断崩溃时 `EXISTS` 查询没有使用 `TABLE` 或 `DICTIONARY` 预选赛 就像 `EXISTS t`. 这修复 [\#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). 此错误在版本19.17中引入。 [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复罕见错误 `"Sizes of columns doesn't match"` 使用时可能会出现 `SimpleAggregateFunction` 列。 [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) +- 修正错误,其中用户空 `allow_databases` 可以访问所有数据库(和相同的 `allow_dictionaries`). [\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod)) +- 修复客户端崩溃时,服务器已经从客户端断开连接。 [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) +- 修复 `ORDER BY` 在按主键前缀和非主键后缀排序的情况下的行为。 [\#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([安东\*波波夫](https://github.com/CurtizJ)) +- 检查表中是否存在合格列。 这修复 [\#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [\#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2)) +- 固定行为 `ALTER MOVE` 合并完成后立即运行移动指定的超部分。 修复 [\#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 使用时修复可能的服务器崩溃 `UNION` 具有不同数量的列。 修复 [\#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [\#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复函数结果子字符串的大小 `substr` 负大小。 [\#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在服务器不执行部分突变 `MergeTree` 如果后台池中没有足够的可用线程。 [\#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix)) +- 修复格式化时的小错字 `UNION ALL` AST. 
[\#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91)) +- 修正了负数不正确的布隆过滤结果。 这修复 [\#8317](https://github.com/ClickHouse/ClickHouse/issues/8317). [\#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([张冬](https://github.com/zhang2014)) +- 在解压缩固定潜在的缓冲区溢出。 恶意用户可以传递捏造的压缩数据,这将导致缓冲区后读取。 这个问题是由Yandex信息安全团队的Eldar Zaitov发现的。 [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复因整数溢出而导致的错误结果 `arrayIntersect`. [\#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在 `OPTIMIZE TABLE` query不会等待脱机副本执行该操作。 [\#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([javi santana](https://github.com/javisantana)) +- 固定 `ALTER TTL` 解析器 `Replicated*MergeTree` 桌子 [\#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复服务器和客户端之间的通信,以便服务器在查询失败后读取临时表信息。 [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) +- 修复 `bitmapAnd` 在聚合位图和标量位图相交时出现函数错误。 [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([黄月](https://github.com/moon03432)) +- 完善的定义 `ZXid` 根据动物园管理员的程序员指南,它修复了错误 `clickhouse-cluster-copier`. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([丁香飞](https://github.com/dingxiangfei2009)) +- `odbc` 表函数现在尊重 `external_table_functions_use_nulls` 设置。 [\#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 修正了导致罕见的数据竞赛的错误。 [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 现在 `SYSTEM RELOAD DICTIONARY` 完全重新加载字典,忽略 `update_field`. 这修复 [\#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). [\#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 添加检查字典是否存在于创建查询的能力。 [\#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([阿利沙平](https://github.com/alesapin)) +- 修复 `Float*` 解析中 `Values` 格式。 这修复 [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) +- 修复崩溃时,我们不能在一些后台操作保留空间 `*MergeTree` 表引擎家族. [\#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复表包含合并操作时的崩溃 `SimpleAggregateFunction(LowCardinality)` 列。 这修复 [\#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [\#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat)) +- 恢复对所有ICU区域设置的支持,并添加对常量表达式应用排序规则的功能。 还添加语言名称 `system.collations` 桌子 [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([阿利沙平](https://github.com/alesapin)) +- 修正错误时,外部字典与零最小寿命 (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)`)不要在后台更新。 [\#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([阿利沙平](https://github.com/alesapin)) +- 修复当clickhouse源外部字典在查询中有子查询时崩溃。 [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复文件扩展名不正确的解析表与引擎 `URL`. 这修复 [\#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [\#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([安德烈\*博德罗夫](https://github.com/apbodrov)) +- 修复 `CHECK TABLE` 查询为 `*MergeTree` 表没有关键. 修复 [\#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). 
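+ +上述 `CHECK TABLE` 修复针对没有主键的 `*MergeTree` 表,可用示意表复现: + +    CREATE TABLE t_nokey (x UInt8) ENGINE = MergeTree ORDER BY tuple() + +    CHECK TABLE t_nokey +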
[\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([阿利沙平](https://github.com/alesapin)) +- 固定转换 `Float64` 到MySQL类型。 [\#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 现在,如果表没有完全删除,因为服务器崩溃,服务器将尝试恢复并加载它。 [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) +- 修复了表函数中的崩溃 `file` 同时插入到不存在的文件。 现在在这种情况下,文件将被创建,然后插入将被处理。 [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) +- 修复罕见的死锁时,可能发生 `trace_log` 处于启用状态。 [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) +- 添加能力与不同类型的工作,除了 `Date` 在 `RangeHashed` 从DDL查询创建的外部字典。 修复 [7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([阿利沙平](https://github.com/alesapin)) +- 修复崩溃时 `now64()` 用另一个函数的结果调用。 [\#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 修正了通过mysql有线协议检测客户端IP连接的错误。 [\#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz)) +- 修复空阵列处理 `arraySplit` 功能。 这修复 [\#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). [\#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz)) +- 修复了以下问题 `pid-file` 另一个运行 `clickhouse-server` 可能会被删除。 [\#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([徐伟清](https://github.com/weiqxu)) +- 修复字典重新加载,如果它有 `invalidate_query`,停止更新,并在以前的更新尝试一些异常。 [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([阿利沙平](https://github.com/alesapin)) +- 修正了功能错误 `arrayReduce` 这可能会导致 “double free” 和聚合函数组合器中的错误 `Resample` 这可能会导致内存泄漏。 添加聚合功能 `aggThrow`. 此功能可用于测试目的。 [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 改进 {#improvement-1} + +- 改进了使用时的日志记录 `S3` 表引擎。 [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) +- 在调用时未传递任何参数时打印帮助消息 `clickhouse-local`. 这修复 [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([安德烈\*纳戈尔尼](https://github.com/Melancholic)) +- 添加设置 `mutations_sync` 这允许等待 `ALTER UPDATE/DELETE` 同步查询。 [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([阿利沙平](https://github.com/alesapin)) +- 允许设置相对 `user_files_path` 在 `config.xml` (在类似的方式 `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz)) +- 为转换函数添加非法类型的异常 `-OrZero` 后缀 [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([安德烈\*科尼亚耶夫](https://github.com/akonyaev90)) +- 简化在分布式查询中发送到分片的数据头的格式。 [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- `Live View` 表引擎重构。 [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov)) +- 为从DDL查询创建的外部字典添加额外的检查。 [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([阿利沙平](https://github.com/alesapin)) +- 修复错误 `Column ... already exists` 使用时 `FINAL` 和 `SAMPLE` together, e.g. `select count() from table final sample 1/2`. 修复 [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). 
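+ +正文中 `select count() from table final sample 1/2` 的场景可以用示意表复现(表名与结构均为假设): + +    CREATE TABLE t_fs (x UInt64) ENGINE = ReplacingMergeTree ORDER BY x SAMPLE BY x + +    SELECT count() FROM t_fs FINAL SAMPLE 1/2 +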
[\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在表的第一个参数 `joinGet` 函数可以是表标识符。 [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([阿莫斯鸟](https://github.com/amosbird)) +- 允许使用 `MaterializedView` 与上面的子查询 `Kafka` 桌子 [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov)) +- 现在后台在磁盘之间移动,运行它的seprate线程池。 [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon)) +- `SYSTEM RELOAD DICTIONARY` 现在同步执行。 [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 堆栈跟踪现在显示物理地址(对象文件中的偏移量),而不是虚拟内存地址(加载对象文件的位置)。 这允许使用 `addr2line` 当二进制独立于位置并且ASLR处于活动状态时。 这修复 [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 支持行级安全筛选器的新语法: `…
    `. 修复 [\#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [\#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([伊万](https://github.com/abyss7)) +- 现在 `cityHash` 函数可以处理 `Decimal` 和 `UUID` 类型。 修复 [\#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [\#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 从系统日志中删除了固定的索引粒度(之前是 1024),它在实现自适应粒度之后已经过时。 [\#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 当 ClickHouse 在没有 SSL 的情况下编译时,也启用 MySQL 兼容服务器。 [\#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 现在服务器会校验分布式批处理,在批处理数据损坏时给出更详细的错误。 [\#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat)) +- 为 `MySQL` 数据库引擎支持 `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` 和 `ATTACH TABLE`。 [\#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([张冬](https://github.com/zhang2014)) +- 在 S3 表函数和表引擎中添加身份验证。 [\#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 增加了对不同磁盘上 `MergeTree` 额外部件的检查,以免遗漏未定义磁盘上的数据部件。 [\#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 启用 Mac 客户端和服务器的 SSL 支持。 [\#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([伊万](https://github.com/abyss7)) +- 现在 ClickHouse 可以作为 MySQL 联合服务器使用(参见 https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html)。 [\#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov)) +- `clickhouse-client` 现在仅在多查询开启且多行关闭时启用 `bracketed-paste`。 这修复 [\#7757](https://github.com/ClickHouse/ClickHouse/issues/7757)。 [\#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([阿莫斯鸟](https://github.com/amosbird)) +- 在 `if` 函数中支持 `Array(Decimal)`。 [\#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2)) +- 在 `arrayDifference`, `arrayCumSum` 和 `arrayCumSumNegative` 函数中支持小数(Decimal)。 [\#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2)) +- 已添加 `lifetime` 列到 `system.dictionaries` 表。 [\#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [\#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule)) +- 改进了对 `*MergeTree` 表引擎在不同磁盘上现有部件的检查。 处理 [\#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [\#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 与 `AWS SDK` 集成以进行 `S3` 交互,可以开箱即用地使用所有 S3 功能。 [\#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([帕维尔\*科瓦连科](https://github.com/Jokser)) +- 增加了对 `Live View` 表中子查询的支持。 [\#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov)) +- 删除了 `TTL` 表达式必须使用 `Date` 或 `DateTime` 列的检查。 [\#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 有关磁盘的信息已添加到 `system.detached_parts` 表。 [\#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 现在设置 `max_(table|partition)_size_to_drop` 无需重启即可更改。 [\#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) +- 错误消息的可用性略有提升。 要求用户不要删除 `Stack trace:` 下面的行。
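+ +上文为 `arrayDifference`、`arrayCumSum` 等函数新增的 Decimal 支持,示意查询: + +    SELECT arrayCumSum([toDecimal32('1.1', 1), toDecimal32('2.2', 1)]) AS s, toTypeName(s) + +预期结果为 `[1.1,3.3]`(结果类型的精度与标度以实际实现为准)。 +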
[\#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更好地阅读消息 `Kafka` 引擎在各种格式后 [\#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [\#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([伊万](https://github.com/abyss7)) +- 与不支持MySQL客户端更好的兼容性 `sha2_password` 验证插件。 [\#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 支持MySQL兼容性服务器中的更多列类型。 [\#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 执行 `ORDER BY` 优化 `Merge`, `Buffer` 和 `Materilized View` 存储与底层 `MergeTree` 桌子 [\#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([安东\*波波夫](https://github.com/CurtizJ)) +- 现在我们总是使用POSIX实现 `getrandom` 与旧内核更好的兼容性(\<3.17)。 [\#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([阿莫斯鸟](https://github.com/amosbird)) +- 更好地检查移动ttl规则中的有效目标。 [\#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 更好地检查损坏的刀片批次 `Distributed` 表引擎。 [\#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat)) +- 添加带有部件名称数组的列,这些部件将来必须处理突变 `system.mutations` 桌子 [\#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([阿利沙平](https://github.com/alesapin)) +- 处理器的并行合并排序优化。 [\#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 设置 `mark_cache_min_lifetime` 现在已经过时了,什么也不做。 在以前的版本中,标记缓存可以在内存中增长大于 `mark_cache_size` 以容纳内的数据 `mark_cache_min_lifetime` 秒。 这导致了混乱和比预期更高的内存使用率,这在内存受限的系统上尤其糟糕。 如果您在安装此版本后会看到性能下降,则应增加 `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 准备使用 `tid` 到处都是 这是必要的 [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 性能改进 {#performance-improvement-1} + +- 处理器管道中的性能优化。 [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 缓存字典中过期密钥的非阻塞更新(具有读取旧密钥的权限)。 [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 没有编译ClickHouse `-fno-omit-frame-pointer` 在全球范围内多余一个寄存器。 [\#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([阿莫斯鸟](https://github.com/amosbird)) +- 加速 `greatCircleDistance` 功能,并为它添加性能测试。 [\#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia)) +- 改进的功能性能 `roundDown`. 
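+ +`roundDown` 将数值向下舍入到给定数组中的边界,其典型用法如下(示意): + +    SELECT roundDown(3.7, [1, 2, 5, 10]) + +预期返回 2;小于最小边界时返回最小边界。 +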
[\#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的性能 `max`, `min`, `argMin`, `argMax` 为 `DateTime64` 数据类型。 [\#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 改进了无限制或大限制和外部排序的排序性能。 [\#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的性能格式化浮点数高达6倍。 [\#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的性能 `modulo` 功能。 [\#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([阿莫斯鸟](https://github.com/amosbird)) +- 优化 `ORDER BY` 并与单列键合并。 [\#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更好地实施 `arrayReduce`, `-Array` 和 `-State` 组合子 [\#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([阿莫斯鸟](https://github.com/amosbird)) +- 现在 `PREWHERE` 应优化为至少一样高效 `WHERE`. [\#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([阿莫斯鸟](https://github.com/amosbird)) +- 改进方式 `round` 和 `roundBankers` 处理负数。 [\#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz)) +- 改进的解码性能 `DoubleDelta` 和 `Gorilla` 编解码器大约30-40%。 这修复 [\#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [\#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 改进的性能 `base64` 相关功能。 [\#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了一个功能 `geoDistance`. 它类似于 `greatCircleDistance` 但使用近似于WGS-84椭球模型。 两个功能的性能几乎相同。 [\#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更快 `min` 和 `max` 聚合函数 `Decimal` 数据类型。 [\#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2)) +- 矢量化处理 `arrayReduce`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([阿莫斯鸟](https://github.com/amosbird)) +- `if` 链现在优化为 `multiIf`. [\#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([kamalov-ruslan](https://github.com/kamalov-ruslan)) +- 修复性能回归 `Kafka` 表引擎在19.15中引入。 这修复 [\#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [\#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov)) +- 已删除 “pie” 代码生成 `gcc` 从Debian软件包偶尔带来默认情况下。 [\#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 并行解析数据格式 [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 启用优化的解析器 `Values` 默认使用表达式 (`input_format_values_deduce_templates_of_expressions=1`). 
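+ +上述 `Values` 解析优化针对的是带表达式的插入:重复的表达式结构会被推导为一个模板,从而解析更快。示意(`t` 为假设表名): + +    INSERT INTO t VALUES (1 + 2, lower('ABC')), (3 + 4, lower('DEF')) +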
[\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-2} + +- 构建修复 `ARM` 而在最小模式。 [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller)) +- 添加复盖文件刷新 `clickhouse-server` 当不调用std::atexit时。 还略微改进了无状态测试的复盖率日志记录。 [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([阿利沙平](https://github.com/alesapin)) +- 更新contrib中的LLVM库。 避免从操作系统包中使用LLVM。 [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使bund绑 `curl` 建立完全安静。 [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([帕维尔\*科瓦连科](https://github.com/Jokser)) +- 修复一些 `MemorySanitizer` 警告。 [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 使用 `add_warning` 和 `no_warning` 宏 `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([伊万](https://github.com/abyss7)) +- 添加对Minio S3兼容对象的支持(https://min.io/)为了更好的集成测试。 [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([帕维尔\*科瓦连科](https://github.com/Jokser)) +- 导入 `libc` 标题到contrib。 它允许在各种系统中使构建更加一致(仅适用于 `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除 `-fPIC` 从一些图书馆。 [\#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 清洁 `CMakeLists.txt` 对于卷曲。 看https://github.com/ClickHouse/ClickHouse/pull/8011\#issuecomment-569478910 [\#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 无声警告 `CapNProto` 图书馆. [\#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 为短字符串优化哈希表添加性能测试。 [\#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([阿莫斯鸟](https://github.com/amosbird)) +- 现在ClickHouse将建立在 `AArch64` 即使 `MADV_FREE` 不可用。 这修复 [\#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [\#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([阿莫斯鸟](https://github.com/amosbird)) +- 更新 `zlib-ng` 来解决记忆消毒的问题 [\#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [\#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 在非Linux系统上启用内部MySQL库,因为操作系统包的使用非常脆弱,通常根本不起作用。 这修复 [\#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [\#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了启用后在某些系统上构建的问题 `libc++`. 这取代了 [\#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [\#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 赂眉露\>\> `Field` 方法更类型安全,以找到更多的错误。 [\#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [\#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 添加丢失的文件到 `libc-headers` 子模块。 [\#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复错误 `JSON` 引用性能测试输出。 [\#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在堆栈跟踪显示 `std::exception` 和 `Poco::Exception`. 在以前的版本中,它仅适用于 `DB::Exception`. 
这改进了诊断。 [\#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 移植 `clock_gettime` 和 `clock_nanosleep` 对于新鲜的glibc版本。 [\#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([阿莫斯鸟](https://github.com/amosbird)) +- 启用 `part_log` 在示例配置开发人员。 [\#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复重新加载的异步性质 `01036_no_superfluous_dict_reload_on_create_database*`. [\#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat)) +- 固定编解码器性能测试。 [\#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 添加安装脚本 `.tgz` 为他们构建和文档。 [\#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [\#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([阿利沙平](https://github.com/alesapin)) +- 删除旧 `ZSTD` 测试(它是在2016年创建的,以重现zstd1.0版本之前的错误)。 这修复 [\#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [\#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 固定构建在Mac OS卡特琳娜。 [\#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob)) +- 增加编解码器性能测试中的行数,以使结果显着。 [\#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 在调试版本中,处理 `LOGICAL_ERROR` 异常作为断言失败,使得它们更容易被注意到。 [\#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 使与格式相关的性能测试更具确定性。 [\#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更新 `lz4` 来修复记忆消毒器的故障 [\#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 在异常处理中抑制已知MemorySanitizer误报。 [\#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 更新 `gcc` 和 `g++` 到版本9在 `build/docker/build.sh` [\#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([TLightSky](https://github.com/tlightsky)) +- 添加性能测试用例来测试 `PREWHERE` 比 `WHERE`. [\#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([阿莫斯鸟](https://github.com/amosbird)) +- 在修复一个笨拙的测试方面取得了进展。 [\#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 避免从MemorySanitizer报告数据 `libunwind`. [\#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更新 `libc++` 到最新版本。 [\#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从源头构建ICU库。 这修复 [\#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [\#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从切换 `libressl` 到 `openssl`. ClickHouse应在此更改后支持TLS1.3和SNI。 这修复 [\#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [\#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用时固定的UBSan报告 `chacha20_poly1305` 从SSL(发生在连接到https://yandex.ru/)。 [\#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复默认密码文件的模式 `.deb` linux发行版。 [\#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller)) +- 改进的表达式获取 `clickhouse-server` PID输入 `clickhouse-test`. 
[\#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 更新 contrib/googletest 到 v1.10.0。 [\#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak)) +- 修复了 `base64` 库的 ThreadSanitizer 报告。还将该库更新到最新版本,虽然这无关紧要。这修复 [\#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [\#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 为处理器(Processors)修复 `00600_replace_running_query`。 [\#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 删除对 `tcmalloc` 的支持,使 `CMakeLists.txt` 更简单。 [\#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 发布版的 GCC 构建现在使用 `libc++` 而不是 `libstdc++`。直到最近 `libc++` 只与 clang 一起使用。这将提高构建配置的一致性和可移植性。 [\#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用 MemorySanitizer 构建时启用 ICU 库。 [\#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 屏蔽来自 `CapNProto` 库的警告。 [\#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除针对 `tcmalloc` 的特殊代码,因为它已不再受支持。 [\#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在 CI coverage 任务中,优雅地终止服务器,使其能够保存 coverage 报告。这修复了我们最近看到的不完整的覆盖率报告。 [\#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([阿利沙平](https://github.com/alesapin)) +- 针对所有编解码器的 `Float64` 和 `UInt64` 值的性能测试。 [\#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- `termcap` 已被强烈不推荐使用,并导致各种问题(例如缺少 “up” 能力,以及回显 `^J` 而不是换行)。改用 `terminfo` 或捆绑的 `ncurses`。
[\#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([阿莫斯鸟](https://github.com/amosbird)) +- 修复 `test_storage_s3` 集成测试。 [\#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 支持 `StorageFile(, null)`:将块按给定格式插入文件而不实际写入磁盘。这是性能测试所必需的。 [\#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([阿莫斯鸟](https://github.com/amosbird)) +- 为功能测试添加参数 `--print-time`,打印每个测试的执行时间。 [\#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 在 `KeyCondition` 评估 RPN 时添加断言。这将修复来自 gcc-9 的警告。 [\#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在 CI 构建中转储 cmake 选项。 [\#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 不要为某些 fat 库生成调试信息。 [\#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使 `log_to_console.xml` 始终记录到 stderr,无论是否交互。 [\#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 从 `clickhouse-performance-test` 工具中删除了一些未使用的功能。 [\#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在我们也会为相应的 `clang-X` 版本搜索 `lld-X`。 [\#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([阿利沙平](https://github.com/alesapin)) +- Parquet 构建改进。 [\#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([马苏兰](https://github.com/maxulan)) +- 启用更多 GCC 警告。 [\#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +- Arch Linux 的软件包现在允许运行 ClickHouse 服务器,而不仅仅是客户端。 [\#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复处理器(Processors)相关的测试。小的性能修复。 [\#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 更新 contrib/protobuf。 [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V.Kornilov](https://github.com/matwey)) +- 为切换到 C++20 做准备,作为新年庆祝。 “May the C++ force be with ClickHouse.” [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([阿莫斯鸟](https://github.com/amosbird)) + +#### 实验功能 {#experimental-feature-1} + +- 增加了实验性设置 `min_bytes_to_use_mmap_io`。它允许在读取大文件时无需将数据从内核空间复制到用户空间。该设置默认禁用。建议的阈值大约是 64MB,因为 mmap/munmap 很慢。 [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 重做配额,作为访问控制系统的一部分。增加了新表 `system.quotas`、新函数 `currentQuota`, `currentQuotaKey`,以及新的 SQL 语法 `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 允许跳过未知设置并给出警告,而不是抛出异常。 [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 重新设计行策略,作为访问控制系统的一部分。增加了新表 `system.row_policies`、新函数 `currentRowPolicies()`,以及新的 SQL 语法 `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) + +#### 安全修复 {#security-fix} + +- 修复了使用 `File` 表引擎的表可能读取目录结构的问题。这修复 [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536).
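+ +上文实验性设置 `min_bytes_to_use_mmap_io` 按正文建议的约 64MB 阈值启用的示意写法(默认为 0,即关闭): + +    SET min_bytes_to_use_mmap_io = 67108864 +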
+
+#### 安全修复 {#security-fix}
+
+- 修正了利用 `File` 表引擎读取目录结构的可能性。 这修复 [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov))
+
+## [更新日志2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019}
diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md
deleted file mode 120000
index eb58e4a90be..00000000000
--- a/docs/zh/commercial/cloud.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/commercial/cloud.md \ No newline at end of file
diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md
new file mode 100644
index 00000000000..765c352d098
--- /dev/null
+++ b/docs/zh/commercial/cloud.md
@@ -0,0 +1,21 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+---
+
+# ClickHouse 云服务提供商 {#clickhouse-cloud-service-providers}
+
+!!! info "信息"
+    如果您已经推出了带有托管 ClickHouse 服务的公共云,请随时 [打开拉取请求](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) 将其添加到以下列表。
+
+## Yandex云 {#yandex-cloud}
+
+[Yandex的ClickHouse托管服务](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) 提供以下主要功能:
+
+- 为 [ClickHouse复制](../engines/table_engines/mergetree_family/replication.md) 提供全托管的 ZooKeeper 服务
+- 多种存储类型选择
+- 不同可用区中的副本
+- 加密和隔离
+- 自动化维护
+
+{## [原始文章](https://clickhouse.tech/docs/en/commercial/cloud/) ##}
diff --git a/docs/zh/commercial/index.md b/docs/zh/commercial/index.md
new file mode 100644
index 00000000000..ec704207201
--- /dev/null
+++ b/docs/zh/commercial/index.md
@@ -0,0 +1,9 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u5546\u4E1A"
+toc_priority: 70
+toc_title: "\u5546\u4E1A"
+---
+
+
diff --git a/docs/zh/data_types/array.md b/docs/zh/data_types/array.md
deleted file mode 100644
index 774210b0d29..00000000000
--- a/docs/zh/data_types/array.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Array(T) {#data-type-array}
-
-由 `T` 类型元素组成的数组。
-
-`T` 可以是任意类型,包含数组类型。 但不推荐使用多维数组,ClickHouse 对多维数组的支持有限。例如,不能在 `MergeTree` 表中存储多维数组。
-
-## 创建数组 {#chuang-jian-shu-zu}
-
-您可以使用array函数来创建数组:
-
-    array(T)
-
-您也可以使用方括号:
-
-    []
-
-创建数组示例:
-
-    :) SELECT array(1, 2) AS x, toTypeName(x)
-
-    SELECT
-        [1, 2] AS x,
-        toTypeName(x)
-
-    ┌─x─────┬─toTypeName(array(1, 2))─┐
-    │ [1,2] │ Array(UInt8)            │
-    └───────┴─────────────────────────┘
-
-    1 rows in set. Elapsed: 0.002 sec.
-
-    :) SELECT [1, 2] AS x, toTypeName(x)
-
-    SELECT
-        [1, 2] AS x,
-        toTypeName(x)
-
-    ┌─x─────┬─toTypeName([1, 2])─┐
-    │ [1,2] │ Array(UInt8)       │
-    └───────┴────────────────────┘
-
-    1 rows in set. Elapsed: 0.002 sec.
-
-## 使用数据类型 {#shi-yong-shu-ju-lei-xing}
-
-ClickHouse会自动检测数组元素,并根据元素计算出存储这些元素最小的数据类型。如果在元素中存在 [NULL](../query_language/syntax.md#null-literal) 或存在 [Nullable](nullable.md#data_type-nullable) 类型元素,那么数组的元素类型将会变成 [Nullable](nullable.md)。
-
-如果 ClickHouse 无法确定数据类型,它将产生异常。当尝试同时创建一个包含字符串和数字的数组时会发生这种情况 (`SELECT array(1, 'a')`)。
-
-自动数据类型检测示例:
-
-    :) SELECT array(1, 2, NULL) AS x, toTypeName(x)
-
-    SELECT
-        [1, 2, NULL] AS x,
-        toTypeName(x)
-
-    ┌─x──────────┬─toTypeName(array(1, 2, NULL))─┐
-    │ [1,2,NULL] │ Array(Nullable(UInt8))        │
-    └────────────┴───────────────────────────────┘
-
-    1 rows in set. Elapsed: 0.002 sec.
-
-如果您尝试创建不兼容的数据类型数组,ClickHouse 将引发异常:
-
-    :) SELECT array(1, 'a')
-
-    SELECT [1, 'a']
-
-    Received exception from server (version 1.1.54388):
-    Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1.
DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. - - 0 rows in set. Elapsed: 0.246 sec. diff --git a/docs/zh/data_types/boolean.md b/docs/zh/data_types/boolean.md deleted file mode 100644 index 1918bb1c56b..00000000000 --- a/docs/zh/data_types/boolean.md +++ /dev/null @@ -1,3 +0,0 @@ -# Boolean Values {#boolean-values} - -没有单独的类型来存储布尔值。可以使用 UInt8 类型,取值限制为 0 或 1。 diff --git a/docs/zh/data_types/date.md b/docs/zh/data_types/date.md deleted file mode 100644 index 96ee60d53a8..00000000000 --- a/docs/zh/data_types/date.md +++ /dev/null @@ -1,5 +0,0 @@ -# Date {#date} - -日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2106年,但最终完全支持的年份为2105)。最小值输出为0000-00-00。 - -日期中没有存储时区信息。 diff --git a/docs/zh/data_types/datetime.md b/docs/zh/data_types/datetime.md deleted file mode 100644 index 50c5964360c..00000000000 --- a/docs/zh/data_types/datetime.md +++ /dev/null @@ -1,11 +0,0 @@ -# DateTime {#data_type-datetime} - -时间戳类型。用四个字节(无符号的)存储 Unix 时间戳)。允许存储与日期类型相同的范围内的值。最小值为 0000-00-00 00:00:00。时间戳类型值精确到秒(没有闰秒)。 - -## 时区 {#shi-qu} - -使用启动客户端或服务器时的系统时区,时间戳是从文本(分解为组件)转换为二进制并返回。在文本格式中,有关夏令时的信息会丢失。 - -默认情况下,客户端连接到服务的时候会使用服务端时区。您可以通过启用客户端命令行选项 `--use_client_time_zone` 来设置使用客户端时间。 - -因此,在处理文本日期时(例如,在保存文本转储时),请记住在夏令时更改期间可能存在歧义,如果时区发生更改,则可能存在匹配数据的问题。 diff --git a/docs/zh/data_types/datetime64.md b/docs/zh/data_types/datetime64.md deleted file mode 120000 index e59b41ea1ae..00000000000 --- a/docs/zh/data_types/datetime64.md +++ /dev/null @@ -1 +0,0 @@ -../../en/data_types/datetime64.md \ No newline at end of file diff --git a/docs/zh/data_types/decimal.md b/docs/zh/data_types/decimal.md deleted file mode 100644 index 1503da90d89..00000000000 --- a/docs/zh/data_types/decimal.md +++ /dev/null @@ -1,80 +0,0 @@ -# Decimal(P, S), Decimal32(S), Decimal64(S), Decimal128(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} - -有符号的定点数,可在加、减和乘法运算过程中保持精度。对于除法,最低有效数字会被丢弃(不舍入)。 - -## 参数 {#can-shu} - -- P - 精度。有效范围:\[1:38\],决定可以有多少个十进制数字(包括分数)。 -- S - 规模。有效范围:\[0:P\],决定数字的小数部分中包含的小数位数。 - -对于不同的 P 参数值 Decimal 表示,以下例子都是同义的: -- P from \[ 1 : 9 \] - for Decimal32(S) -- P from \[ 10 : 18 \] - for Decimal64(S) -- P from \[ 19 : 38 \] - for Decimal128(S) - -## 十进制值范围 {#shi-jin-zhi-zhi-fan-wei} - -- Decimal32(S) - ( -1 \* 10^(9 - S), 1 \* 10^(9 - S) ) -- Decimal64(S) - ( -1 \* 10^(18 - S), 1 \* 10^(18 - S) ) -- Decimal128(S) - ( -1 \* 10^(38 - S), 1 \* 10^(38 - S) ) - -例如,Decimal32(4) 可以表示 -99999.9999 至 99999.9999 的数值,步长为0.0001。 - -## 内部表示方式 {#nei-bu-biao-shi-fang-shi} - -数据采用与自身位宽相同的有符号整数存储。这个数在内存中实际范围会高于上述范围,从 String 转换到十进制数的时候会做对应的检查。 - -由于现代CPU不支持128位数字,因此 Decimal128 上的操作由软件模拟。所以 Decimal128 的运算速度明显慢于 Decimal32/Decimal64。 - -## 运算和结果类型 {#yun-suan-he-jie-guo-lei-xing} - -对Decimal的二进制运算导致更宽的结果类型(无论参数的顺序如何)。 - -- Decimal64(S1) Decimal32(S2) -\> Decimal64(S) -- Decimal128(S1) Decimal32(S2) -\> Decimal128(S) -- Decimal128(S1) Decimal64(S2) -\> Decimal128(S) - -精度变化的规则: - -- 加法,减法:S = max(S1, S2)。 -- 乘法:S = S1 + S2。 -- 除法:S = S1。 - -对于 Decimal 和整数之间的类似操作,结果是与参数大小相同的十进制。 - -未定义Decimal和Float32/Float64之间的函数。要执行此类操作,您可以使用:toDecimal32、toDecimal64、toDecimal128 或 toFloat32,toFloat64,需要显式地转换其中一个参数。注意,结果将失去精度,类型转换是昂贵的操作。 - -Decimal上的一些函数返回结果为Float64(例如,var或stddev)。对于其中一些,中间计算发生在Decimal中。对于此类函数,尽管结果类型相同,但Float64和Decimal中相同数据的结果可能不同。 - -## 溢出检查 {#yi-chu-jian-cha} - -在对 Decimal 类型执行操作时,数值可能会发生溢出。分数中的过多数字被丢弃(不是舍入的)。整数中的过多数字将导致异常。 - - SELECT toDecimal32(2, 4) AS x, x / 3 - - ┌──────x─┬─divide(toDecimal32(2, 4), 3)─┐ - │ 2.0000 │ 0.6666 │ - 
└────────┴──────────────────────────────┘ - - SELECT toDecimal32(4.2, 8) AS x, x * x - - DB::Exception: Scale is out of bounds. - - SELECT toDecimal32(4.2, 8) AS x, 6 * x - - DB::Exception: Decimal math overflow. - -检查溢出会导致计算变慢。如果已知溢出不可能,则可以通过设置`decimal_check_overflow`来禁用溢出检查,在这种情况下,溢出将导致结果不正确: - - SET decimal_check_overflow = 0; - SELECT toDecimal32(4.2, 8) AS x, 6 * x - - ┌──────────x─┬─multiply(6, toDecimal32(4.2, 8))─┐ - │ 4.20000000 │ -17.74967296 │ - └────────────┴──────────────────────────────────┘ - -溢出检查不仅发生在算术运算上,还发生在比较运算上: - - SELECT toDecimal32(1, 8) < 100 - - DB::Exception: Can't compare. diff --git a/docs/zh/data_types/domains/ipv4.md b/docs/zh/data_types/domains/ipv4.md deleted file mode 100644 index 65c066fb487..00000000000 --- a/docs/zh/data_types/domains/ipv4.md +++ /dev/null @@ -1,68 +0,0 @@ -## IPv4 {#ipv4} - -`IPv4`是与`UInt32`类型保持二进制兼容的Domain类型,其用于存储IPv4地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 - -### 基本使用 {#ji-ben-shi-yong} - -``` sql -CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; - -DESCRIBE TABLE hits; -``` - - ┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ - │ url │ String │ │ │ │ │ - │ from │ IPv4 │ │ │ │ │ - └──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ - -同时您也可以使用`IPv4`类型的列作为主键: - -``` sql -CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; -``` - -在写入与查询时,`IPv4`类型能够识别可读性更加友好的输入输出格式: - -``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); - -SELECT * FROM hits; -``` - - ┌─url────────────────────────────────┬───────────from─┐ - │ https://clickhouse.tech/docs/en/ │ 116.106.34.242 │ - │ https://wikipedia.org │ 116.253.40.133 │ - │ https://clickhouse.tech │ 183.247.232.58 │ - └────────────────────────────────────┴────────────────┘ - -同时它提供更为紧凑的二进制存储格式: - -``` sql -SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; -``` - - ┌─toTypeName(from)─┬─hex(from)─┐ - │ IPv4 │ B7F7E83A │ - └──────────────────┴───────────┘ - -不可隐式转换为除`UInt32`以外的其他类型类型。如果要将`IPv4`类型的值转换成字符串,你可以使用`IPv4NumToString()`显示的进行转换: - -``` sql -SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; -``` - - ┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ - │ String │ 183.247.232.58 │ - └───────────────────────────────────┴────────────────┘ - -或可以使用`CAST`将它转换为`UInt32`类型: - -``` sql -SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; -``` - - ┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ - │ UInt32 │ 3086477370 │ - └──────────────────────────────────┴────────────┘ - -[来源文章](https://clickhouse.tech/docs/en/data_types/domains/ipv4) diff --git a/docs/zh/data_types/domains/ipv6.md b/docs/zh/data_types/domains/ipv6.md deleted file mode 100644 index bc0f95932aa..00000000000 --- a/docs/zh/data_types/domains/ipv6.md +++ /dev/null @@ -1,68 +0,0 @@ -## IPv6 {#ipv6} - -`IPv6`是与`FixedString(16)`类型保持二进制兼容的Domain类型,其用于存储IPv6地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 - -### 基本用法 {#ji-ben-yong-fa} - -``` sql -CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; - -DESCRIBE TABLE hits; -``` - - ┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ - │ url │ String │ │ │ │ │ - │ from │ IPv6 │ │ │ │ │ - └──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ - -同时您也可以使用`IPv6`类型的列作为主键: - -``` sql -CREATE TABLE hits (url String, from IPv6) ENGINE 
= MergeTree() ORDER BY from; -``` - -在写入与查询时,`IPv6`类型能够识别可读性更加友好的输入输出格式: - -``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); - -SELECT * FROM hits; -``` - - ┌─url────────────────────────────────┬─from──────────────────────────┐ - │ https://clickhouse.tech │ 2001:44c8:129:2632:33:0:252:2 │ - │ https://clickhouse.tech/docs/en/ │ 2a02:e980:1e::1 │ - │ https://wikipedia.org │ 2a02:aa08:e000:3100::2 │ - └────────────────────────────────────┴───────────────────────────────┘ - -同时它提供更为紧凑的二进制存储格式: - -``` sql -SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; -``` - - ┌─toTypeName(from)─┬─hex(from)────────────────────────┐ - │ IPv6 │ 200144C8012926320033000002520002 │ - └──────────────────┴──────────────────────────────────┘ - -不可隐式转换为除`FixedString(16)`以外的其他类型类型。如果要将`IPv6`类型的值转换成字符串,你可以使用`IPv6NumToString()`显示的进行转换: - -``` sql -SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; -``` - - ┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ - │ String │ 2001:44c8:129:2632:33:0:252:2 │ - └───────────────────────────────────┴───────────────────────────────┘ - -或使用`CAST`将其转换为`FixedString(16)`: - -``` sql -SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; -``` - - ┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ - │ FixedString(16) │ ��� │ - └───────────────────────────────────────────┴─────────┘ - -[来源文章](https://clickhouse.tech/docs/en/data_types/domains/ipv6) diff --git a/docs/zh/data_types/domains/overview.md b/docs/zh/data_types/domains/overview.md deleted file mode 100644 index 6c59860132e..00000000000 --- a/docs/zh/data_types/domains/overview.md +++ /dev/null @@ -1,26 +0,0 @@ -# Domains {#domains} - -Domain类型是特定实现的类型,它总是与某个现存的基础类型保持二进制兼容的同时添加一些额外的特性,以能够在维持磁盘数据不变的情况下使用这些额外的特性。目前ClickHouse暂不支持自定义domain类型。 - -如果你可以在一个地方使用与Domain类型二进制兼容的基础类型,那么在相同的地方您也可以使用Domain类型,例如: - -- 使用Domain类型作为表中列的类型 -- 对Domain类型的列进行读/写数据 -- 如果与Domain二进制兼容的基础类型可以作为索引,那么Domain类型也可以作为索引 -- 将Domain类型作为参数传递给函数使用 -- 其他 - -### Domains的额外特性 {#domainsde-e-wai-te-xing} - -- 在执行SHOW CREATE TABLE 或 DESCRIBE TABLE时,其对应的列总是展示为Domain类型的名称 -- 在INSERT INTO domain\_table(domain\_column) VALUES(…)中输入数据总是以更人性化的格式进行输入 -- 在SELECT domain\_column FROM domain\_table中数据总是以更人性化的格式输出 -- 在INSERT INTO domain\_table FORMAT CSV …中,实现外部源数据以更人性化的格式载入 - -### Domains类型的限制 {#domainslei-xing-de-xian-zhi} - -- 无法通过`ALTER TABLE`将基础类型的索引转换为Domain类型的索引。 -- 当从其他列或表插入数据时,无法将string类型的值隐式地转换为Domain类型的值。 -- 无法对存储为Domain类型的值添加约束。 - -[来源文章](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/zh/data_types/enum.md b/docs/zh/data_types/enum.md deleted file mode 100644 index 034406a303b..00000000000 --- a/docs/zh/data_types/enum.md +++ /dev/null @@ -1,100 +0,0 @@ -# Enum8, Enum16 {#enum8-enum16} - -包括 `Enum8` 和 `Enum16` 类型。`Enum` 保存 `'string'= integer` 的对应关系。在 ClickHouse 中,尽管用户使用的是字符串常量,但所有含有 `Enum` 数据类型的操作都是按照包含整数的值来执行。这在性能方面比使用 `String` 数据类型更有效。 - -- `Enum8` 用 `'String'= Int8` 对描述。 -- `Enum16` 用 `'String'= Int16` 对描述。 - -## 用法示例 {#yong-fa-shi-li} - -创建一个带有一个枚举 `Enum8('hello' = 1, 'world' = 2)` 类型的列: - - CREATE TABLE t_enum - ( - x Enum8('hello' = 1, 'world' = 2) - ) - ENGINE = TinyLog - -这个 `x` 列只能存储类型定义中列出的值:`'hello'`或`'world'`。如果您尝试保存任何其他值,ClickHouse 抛出异常。 - - :) INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello') - - INSERT INTO t_enum VALUES - - Ok. - - 3 rows in set. Elapsed: 0.002 sec. 
- - :) insert into t_enum values('a') - - INSERT INTO t_enum VALUES - - - Exception on client: - Code: 49. DB::Exception: Unknown element 'a' for type Enum8('hello' = 1, 'world' = 2) - -当您从表中查询数据时,ClickHouse 从 `Enum` 中输出字符串值。 - - SELECT * FROM t_enum - - ┌─x─────┐ - │ hello │ - │ world │ - │ hello │ - └───────┘ - -如果需要看到对应行的数值,则必须将 `Enum` 值转换为整数类型。 - - SELECT CAST(x, 'Int8') FROM t_enum - - ┌─CAST(x, 'Int8')─┐ - │ 1 │ - │ 2 │ - │ 1 │ - └─────────────────┘ - -在查询中创建枚举值,您还需要使用 `CAST`。 - - SELECT toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')) - - ┌─toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)'))─┐ - │ Enum8('a' = 1, 'b' = 2) │ - └──────────────────────────────────────────────────────┘ - -## 规则及用法 {#gui-ze-ji-yong-fa} - -`Enum8` 类型的每个值范围是 `-128 ... 127`,`Enum16` 类型的每个值范围是 `-32768 ... 32767`。所有的字符串或者数字都必须是不一样的。允许存在空字符串。如果某个 Enum 类型被指定了(在表定义的时候),数字可以是任意顺序。然而,顺序并不重要。 - -`Enum` 中的字符串和数值都不能是 [NULL](../query_language/syntax.md)。 - -`Enum` 包含在 [Nullable](nullable.md) 类型中。因此,如果您使用此查询创建一个表 - - CREATE TABLE t_enum_nullable - ( - x Nullable( Enum8('hello' = 1, 'world' = 2) ) - ) - ENGINE = TinyLog - -不仅可以存储 `'hello'` 和 `'world'` ,还可以存储 `NULL`。 - - INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) - -在内存中,`Enum` 列的存储方式与相应数值的 `Int8` 或 `Int16` 相同。 - -当以文本方式读取的时候,ClickHouse 将值解析成字符串然后去枚举值的集合中搜索对应字符串。如果没有找到,会抛出异常。当读取文本格式的时候,会根据读取到的字符串去找对应的数值。如果没有找到,会抛出异常。 - -当以文本形式写入时,ClickHouse 将值解析成字符串写入。如果列数据包含垃圾数据(不是来自有效集合的数字),则抛出异常。Enum 类型以二进制读取和写入的方式与 `Int8` 和 `Int16` 类型一样的。 - -隐式默认值是数值最小的值。 - -在 `ORDER BY`,`GROUP BY`,`IN`,`DISTINCT` 等等中,Enum 的行为与相应的数字相同。例如,按数字排序。对于等式运算符和比较运算符,Enum 的工作机制与它们在底层数值上的工作机制相同。 - -枚举值不能与数字进行比较。枚举可以与常量字符串进行比较。如果与之比较的字符串不是有效Enum值,则将引发异常。可以使用 IN 运算符来判断一个 Enum 是否存在于某个 Enum 集合中,其中集合中的 Enum 需要用字符串表示。 - -大多数具有数字和字符串的运算并不适用于Enums;例如,Enum 类型不能和一个数值相加。但是,Enum有一个原生的 `toString` 函数,它返回它的字符串值。 - -Enum 值使用 `toT` 函数可以转换成数值类型,其中 T 是一个数值类型。若 `T` 恰好对应 Enum 的底层数值类型,这个转换是零消耗的。 - -Enum 类型可以被 `ALTER` 无成本地修改对应集合的值。可以通过 `ALTER` 操作来增加或删除 Enum 的成员(只要表没有用到该值,删除都是安全的)。作为安全保障,改变之前使用过的 Enum 成员将抛出异常。 - -通过 `ALTER` 操作,可以将 `Enum8` 转成 `Enum16`,反之亦然,就像 `Int8` 转 `Int16`一样。 diff --git a/docs/zh/data_types/fixedstring.md b/docs/zh/data_types/fixedstring.md deleted file mode 100644 index 27945b74fc8..00000000000 --- a/docs/zh/data_types/fixedstring.md +++ /dev/null @@ -1,56 +0,0 @@ -# FixedString {#fixedstring} - -固定长度 N 的字符串(N 必须是严格的正自然数)。 - -您可以使用下面的语法对列声明为`FixedString`类型: - -``` sql - FixedString(N) -``` - -其中`N`表示自然数。 - -当数据的长度恰好为N个字节时,`FixedString`类型是高效的。 在其他情况下,这可能会降低效率。 - -可以有效存储在`FixedString`类型的列中的值的示例: - -- 二进制表示的IP地址(IPv6使用`FixedString(16)`) -- 语言代码(ru\_RU, en\_US … ) -- 货币代码(USD, RUB … ) -- 二进制表示的哈希值(MD5使用`FixedString(16)`,SHA256使用`FixedString(32)`) - -请使用[UUID](uuid.md)数据类型来存储UUID值,。 - -当向ClickHouse中插入数据时, - -- 如果字符串包含的字节数少于\`N’,将对字符串末尾进行空字节填充。 -- 如果字符串包含的字节数大于`N`,将抛出`Too large value for FixedString(N)`异常。 - -当做数据查询时,ClickHouse不会删除字符串末尾的空字节。 如果使用`WHERE`子句,则须要手动添加空字节以匹配`FixedString`的值。 以下示例阐明了如何将`WHERE`子句与`FixedString`一起使用。 - -考虑带有`FixedString(2)`列的表: - -``` text -┌─name──┐ -│ b │ -└───────┘ -``` - -查询语句`SELECT * FROM FixedStringTable WHERE a = 'b'` 不会返回任何结果。请使用空字节来填充筛选条件。 - -``` sql -SELECT * FROM FixedStringTable -WHERE a = 'b\0' -``` - -``` text -┌─a─┐ -│ b │ -└───┘ -``` - -这种方式与MySQL的`CHAR`类型的方式不同(MySQL中使用空格填充字符串,并在输出时删除空格)。 - -请注意,`FixedString(N)`的长度是个常量。仅由空字符组成的字符串,函数[length](../query_language/functions/array_functions.md#array_functions-length)返回值为`N`,而函数[empty](../query_language/functions/string_functions.md#string_functions-empty)的返回值为`1`。 - 
-[来源文章](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/zh/data_types/float.md b/docs/zh/data_types/float.md deleted file mode 100644 index f43000ffa35..00000000000 --- a/docs/zh/data_types/float.md +++ /dev/null @@ -1,70 +0,0 @@ -# Float32, Float64 {#float32-float64} - -[浮点数](https://en.wikipedia.org/wiki/IEEE_754)。 - -类型与以下 C 语言中类型是相同的: - -- `Float32` - `float` -- `Float64` - `double` - -我们建议您尽可能以整数形式存储数据。例如,将固定精度的数字转换为整数值,例如货币数量或页面加载时间用毫秒为单位表示 - -## 使用浮点数 {#shi-yong-fu-dian-shu} - -- 对浮点数进行计算可能引起四舍五入的误差。 - - - -``` sql -SELECT 1 - 0.9 -``` - - ┌───────minus(1, 0.9)─┐ - │ 0.09999999999999998 │ - └─────────────────────┘ - -- 计算的结果取决于计算方法(计算机系统的处理器类型和体系结构) - -- 浮点计算结果可能是诸如无穷大(`INF`)和«非数字»(`NaN`)。对浮点数计算的时候应该考虑到这点。 - -- 当一行行阅读浮点数的时候,浮点数的结果可能不是机器最近显示的数值。 - -## NaN and Inf {#data_type-float-nan-inf} - -与标准SQL相比,ClickHouse 支持以下类别的浮点数: - -- `Inf` – 正无穷 - - - -``` sql -SELECT 0.5 / 0 -``` - - ┌─divide(0.5, 0)─┐ - │ inf │ - └────────────────┘ - -- `-Inf` – 负无穷 - - - -``` sql -SELECT -0.5 / 0 -``` - - ┌─divide(-0.5, 0)─┐ - │ -inf │ - └─────────────────┘ - -- `NaN` – 非数字 - - - - SELECT 0 / 0 - - ┌─divide(0, 0)─┐ - │ nan │ - └──────────────┘ - -可以在 [ORDER BY 子句](../query_language/select.md) 查看更多关于 `NaN` 排序的规则。 diff --git a/docs/zh/data_types/index.md b/docs/zh/data_types/index.md deleted file mode 100644 index 70aa976cb11..00000000000 --- a/docs/zh/data_types/index.md +++ /dev/null @@ -1,5 +0,0 @@ -# 数据类型 {#data_types} - -ClickHouse 可以在数据表中存储多种数据类型。 - -本节描述 ClickHouse 支持的数据类型,以及使用或者实现它们时(如果有的话)的注意事项。 diff --git a/docs/zh/data_types/int_uint.md b/docs/zh/data_types/int_uint.md deleted file mode 100644 index 4e01ad017ca..00000000000 --- a/docs/zh/data_types/int_uint.md +++ /dev/null @@ -1,17 +0,0 @@ -# UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} - -固定长度的整型,包括有符号整型或无符号整型。 - -## 整型范围 {#zheng-xing-fan-wei} - -- Int8 - \[-128 : 127\] -- Int16 - \[-32768 : 32767\] -- Int32 - \[-2147483648 : 2147483647\] -- Int64 - \[-9223372036854775808 : 9223372036854775807\] - -## 无符号整型范围 {#wu-fu-hao-zheng-xing-fan-wei} - -- UInt8 - \[0 : 255\] -- UInt16 - \[0 : 65535\] -- UInt32 - \[0 : 4294967295\] -- UInt64 - \[0 : 18446744073709551615\] diff --git a/docs/zh/data_types/nested_data_structures/aggregatefunction.md b/docs/zh/data_types/nested_data_structures/aggregatefunction.md deleted file mode 100644 index 3153150d2bd..00000000000 --- a/docs/zh/data_types/nested_data_structures/aggregatefunction.md +++ /dev/null @@ -1,63 +0,0 @@ -# AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction} - -聚合函数的中间状态,可以通过聚合函数名称加`-State`后缀的形式得到它。与此同时,当您需要访问该类型的最终状态数据时,您需要以相同的聚合函数名加`-Merge`后缀的形式来得到最终状态数据。 - -`AggregateFunction` — 参数化的数据类型。 - -**参数** - -- 聚合函数名 - - 如果函数具备多个参数列表,请在此处指定其他参数列表中的值。 - -- 聚合函数参数的类型 - -**示例** - -``` sql -CREATE TABLE t -( - column1 AggregateFunction(uniq, UInt64), - column2 AggregateFunction(anyIf, String, UInt8), - column3 AggregateFunction(quantiles(0.5, 0.9), UInt64) -) ENGINE = ... 
-``` - -上述中的[uniq](../../query_language/agg_functions/reference.md#agg_function-uniq), anyIf ([any](../../query_language/agg_functions/reference.md#agg_function-any)+[If](../../query_language/agg_functions/combinators.md#agg-functions-combinator-if)) 以及 [quantiles](../../query_language/agg_functions/reference.md) 都为ClickHouse中支持的聚合函数。 - -## 使用指南 {#shi-yong-zhi-nan} - -### 数据写入 {#shu-ju-xie-ru} - -当需要写入数据时,您需要将数据包含在`INSERT SELECT`语句中,同时对于`AggregateFunction`类型的数据,您需要使用对应的以`-State`为后缀的函数进行处理。 - -**函数使用示例** - -``` sql -uniqState(UserID) -quantilesState(0.5, 0.9)(SendTiming) -``` - -不同于`uniq`和`quantiles`函数返回聚合结果的最终值,以`-State`后缀的函数总是返回`AggregateFunction`类型的数据的中间状态。 - -对于`SELECT`而言,`AggregateFunction`类型总是以特定的二进制形式展现在所有的输出格式中。例如,您可以使用`SELECT`语句将函数的状态数据转储为`TabSeparated`格式的同时使用`INSERT`语句将数据转储回去。 - -### 数据查询 {#shu-ju-cha-xun} - -当从`AggregatingMergeTree`表中查询数据时,对于`AggregateFunction`类型的字段,您需要使用以`-Merge`为后缀的相同聚合函数来聚合数据。对于非`AggregateFunction`类型的字段,请将它们包含在`GROUP BY`子句中。 - -以`-Merge`为后缀的聚合函数,可以将多个`AggregateFunction`类型的中间状态组合计算为最终的聚合结果。 - -例如,如下的两个查询返回的结果总是一致: - -``` sql -SELECT uniq(UserID) FROM table - -SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP BY RegionID) -``` - -## 使用示例 {#shi-yong-shi-li} - -请参阅 [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) 的说明 - -[来源文章](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/zh/data_types/nested_data_structures/index.md b/docs/zh/data_types/nested_data_structures/index.md deleted file mode 100644 index 3914064674e..00000000000 --- a/docs/zh/data_types/nested_data_structures/index.md +++ /dev/null @@ -1 +0,0 @@ -# 嵌套数据结构 {#qian-tao-shu-ju-jie-gou} diff --git a/docs/zh/data_types/nested_data_structures/nested.md b/docs/zh/data_types/nested_data_structures/nested.md deleted file mode 100644 index d2fd1e3a630..00000000000 --- a/docs/zh/data_types/nested_data_structures/nested.md +++ /dev/null @@ -1,97 +0,0 @@ -# Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} - -嵌套数据结构类似于嵌套表。嵌套数据结构的参数(列名和类型)与 CREATE 查询类似。每个表可以包含任意多行嵌套数据结构。 - -示例: - -``` sql -CREATE TABLE test.visits -( - CounterID UInt32, - StartDate Date, - Sign Int8, - IsNew UInt8, - VisitID UInt64, - UserID UInt64, - ... - Goals Nested - ( - ID UInt32, - Serial UInt32, - EventTime DateTime, - Price Int64, - OrderID String, - CurrencyID UInt32 - ), - ... 
-) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign) -``` - -上述示例声明了 `Goals` 这种嵌套数据结构,它包含访客转化相关的数据(访客达到的目标)。在 ‘visits’ 表中每一行都可以对应零个或者任意个转化数据。 - -只支持一级嵌套。嵌套结构的列中,若列的类型是数组类型,那么该列其实和多维数组是相同的,所以目前嵌套层级的支持很局限(MergeTree 引擎中不支持存储这样的列) - -大多数情况下,处理嵌套数据结构时,会指定一个单独的列。为了这样实现,列的名称会与点号连接起来。这些列构成了一组匹配类型。在同一条嵌套数据中,所有的列都具有相同的长度。 - -示例: - -``` sql -SELECT - Goals.ID, - Goals.EventTime -FROM test.visits -WHERE CounterID = 101500 AND length(Goals.ID) < 5 -LIMIT 10 -``` - -``` text -┌─Goals.ID───────────────────────┬─Goals.EventTime───────────────────────────────────────────────────────────────────────────┐ -│ [1073752,591325,591325] │ ['2014-03-17 16:38:10','2014-03-17 16:38:48','2014-03-17 16:42:27'] │ -│ [1073752] │ ['2014-03-17 00:28:25'] │ -│ [1073752] │ ['2014-03-17 10:46:20'] │ -│ [1073752,591325,591325,591325] │ ['2014-03-17 13:59:20','2014-03-17 22:17:55','2014-03-17 22:18:07','2014-03-17 22:18:51'] │ -│ [] │ [] │ -│ [1073752,591325,591325] │ ['2014-03-17 11:37:06','2014-03-17 14:07:47','2014-03-17 14:36:21'] │ -│ [] │ [] │ -│ [] │ [] │ -│ [591325,1073752] │ ['2014-03-17 00:46:05','2014-03-17 00:46:05'] │ -│ [1073752,591325,591325,591325] │ ['2014-03-17 13:28:33','2014-03-17 13:30:26','2014-03-17 18:51:21','2014-03-17 18:51:45'] │ -└────────────────────────────────┴───────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -所以可以简单地把嵌套数据结构当做是所有列都是相同长度的多列数组。 - -SELECT 查询只有在使用 ARRAY JOIN 的时候才可以指定整个嵌套数据结构的名称。更多信息,参考 «ARRAY JOIN 子句»。示例: - -``` sql -SELECT - Goal.ID, - Goal.EventTime -FROM test.visits -ARRAY JOIN Goals AS Goal -WHERE CounterID = 101500 AND length(Goals.ID) < 5 -LIMIT 10 -``` - -``` text -┌─Goal.ID─┬──────Goal.EventTime─┐ -│ 1073752 │ 2014-03-17 16:38:10 │ -│ 591325 │ 2014-03-17 16:38:48 │ -│ 591325 │ 2014-03-17 16:42:27 │ -│ 1073752 │ 2014-03-17 00:28:25 │ -│ 1073752 │ 2014-03-17 10:46:20 │ -│ 1073752 │ 2014-03-17 13:59:20 │ -│ 591325 │ 2014-03-17 22:17:55 │ -│ 591325 │ 2014-03-17 22:18:07 │ -│ 591325 │ 2014-03-17 22:18:51 │ -│ 1073752 │ 2014-03-17 11:37:06 │ -└─────────┴─────────────────────┘ -``` - -不能对整个嵌套数据结构执行 SELECT。只能明确列出属于它一部分列。 - -对于 INSERT 查询,可以单独地传入所有嵌套数据结构中的列数组(假如它们是单独的列数组)。在插入过程中,系统会检查它们是否有相同的长度。 - -对于 DESCRIBE 查询,嵌套数据结构中的列会以相同的方式分别列出来。 - -ALTER 查询对嵌套数据结构的操作非常有限。 diff --git a/docs/zh/data_types/nullable.md b/docs/zh/data_types/nullable.md deleted file mode 100644 index ae4a2066fd7..00000000000 --- a/docs/zh/data_types/nullable.md +++ /dev/null @@ -1,41 +0,0 @@ -# Nullable(TypeName) {#data_type-nullable} - -允许用特殊标记 ([NULL](../query_language/syntax.md)) 表示«缺失值»,可以与 `TypeName` 的正常值存放一起。例如,`Nullable(Int8)` 类型的列可以存储 `Int8` 类型值,而没有值的行将存储 `NULL`。 - -对于 `TypeName`,不能使用复合数据类型 [Array](array.md) 和 [Tuple](tuple.md)。复合数据类型可以包含 `Nullable` 类型值,例如`Array(Nullable(Int8))`。 - -`Nullable` 类型字段不能包含在表索引中。 - -除非在 ClickHouse 服务器配置中另有说明,否则 `NULL` 是任何 `Nullable` 类型的默认值。 - -## 存储特性 {#cun-chu-te-xing} - -要在表的列中存储 `Nullable` 类型值,ClickHouse 除了使用带有值的普通文件外,还使用带有 `NULL` 掩码的单独文件。 掩码文件中的条目允许 ClickHouse 区分每个表行的 `NULL` 和相应数据类型的默认值。 由于附加了新文件,`Nullable` 列与类似的普通文件相比消耗额外的存储空间。 - -!!! 
注意点 "注意点" - 使用 `Nullable` 几乎总是对性能产生负面影响,在设计数据库时请记住这一点 - -掩码文件中的条目允许ClickHouse区分每个表行的对应数据类型的«NULL»和默认值由于有额外的文件,«Nullable»列比普通列消耗更多的存储空间 - -## 用法示例 {#yong-fa-shi-li} - -``` sql -CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog -``` - -``` sql -INSERT INTO t_null VALUES (1, NULL), (2, 3) -``` - -``` sql -SELECT x + y FROM t_null -``` - -``` text -┌─plus(x, y)─┐ -│ ᴺᵁᴸᴸ │ -│ 5 │ -└────────────┘ -``` - -[来源文章](https://clickhouse.tech/docs/en/data_types/nullable/) diff --git a/docs/zh/data_types/special_data_types/expression.md b/docs/zh/data_types/special_data_types/expression.md deleted file mode 100644 index 86b4d5591c7..00000000000 --- a/docs/zh/data_types/special_data_types/expression.md +++ /dev/null @@ -1,3 +0,0 @@ -# Expression {#expression} - -用于表示高阶函数中的Lambd表达式。 diff --git a/docs/zh/data_types/special_data_types/index.md b/docs/zh/data_types/special_data_types/index.md deleted file mode 100644 index 5963c377f01..00000000000 --- a/docs/zh/data_types/special_data_types/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Special Data Types {#special-data-types} - -特殊数据类型的值既不能存在表中也不能在结果中输出,但可用于查询的中间结果。 diff --git a/docs/zh/data_types/special_data_types/interval.md b/docs/zh/data_types/special_data_types/interval.md deleted file mode 120000 index 6829f5ced00..00000000000 --- a/docs/zh/data_types/special_data_types/interval.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/data_types/special_data_types/interval.md \ No newline at end of file diff --git a/docs/zh/data_types/special_data_types/nothing.md b/docs/zh/data_types/special_data_types/nothing.md deleted file mode 100644 index 7a6bf0e035b..00000000000 --- a/docs/zh/data_types/special_data_types/nothing.md +++ /dev/null @@ -1,19 +0,0 @@ -# Nothing {#nothing} - -此数据类型的唯一目的是表示不是期望值的情况。 所以不能创建一个 `Nothing` 类型的值。 - -例如,文本 [NULL](../../query_language/syntax.md#null-literal) 的类型为 `Nullable(Nothing)`。详情请见 [Nullable](../../data_types/nullable.md)。 - -`Nothing` 类型也可以用来表示空数组: - -``` bash -:) SELECT toTypeName(array()) - -SELECT toTypeName([]) - -┌─toTypeName(array())─┐ -│ Array(Nothing) │ -└─────────────────────┘ - -1 rows in set. Elapsed: 0.062 sec. 
-``` diff --git a/docs/zh/data_types/special_data_types/set.md b/docs/zh/data_types/special_data_types/set.md deleted file mode 100644 index d1f2ad368ee..00000000000 --- a/docs/zh/data_types/special_data_types/set.md +++ /dev/null @@ -1,3 +0,0 @@ -# Set {#set} - -可以用在 IN 表达式的右半部分。 diff --git a/docs/zh/data_types/string.md b/docs/zh/data_types/string.md deleted file mode 100644 index 742452ee0bf..00000000000 --- a/docs/zh/data_types/string.md +++ /dev/null @@ -1,10 +0,0 @@ -# String {#string} - -字符串可以任意长度的。它可以包含任意的字节集,包含空字节。因此,字符串类型可以代替其他 DBMSs 中的 VARCHAR、BLOB、CLOB 等类型。 - -## 编码 {#bian-ma} - -ClickHouse 没有编码的概念。字符串可以是任意的字节集,按它们原本的方式进行存储和输出。 -若需存储文本,我们建议使用 UTF-8 编码。至少,如果你的终端使用UTF-8(推荐),这样读写就不需要进行任何的转换了。 -同样,对不同的编码文本 ClickHouse 会有不同处理字符串的函数。 -比如,`length` 函数可以计算字符串包含的字节数组的长度,然而 `lengthUTF8` 函数是假设字符串以 UTF-8 编码,计算的是字符串包含的 Unicode 字符的长度。 diff --git a/docs/zh/data_types/tuple.md b/docs/zh/data_types/tuple.md deleted file mode 100644 index 4efeb651e76..00000000000 --- a/docs/zh/data_types/tuple.md +++ /dev/null @@ -1,45 +0,0 @@ -# Tuple(T1, T2, …) {#tuplet1-t2} - -元组,其中每个元素都有单独的 [类型](index.md#data_types)。 - -不能在表中存储元组(除了内存表)。它们可以用于临时列分组。在查询中,IN 表达式和带特定参数的 lambda 函数可以来对临时列进行分组。更多信息,请参阅 [IN 操作符](../query_language/select.md) and [Higher order functions](../query_language/functions/higher_order_functions.md)。 - -元组可以是查询的结果。在这种情况下,对于JSON以外的文本格式,括号中的值是逗号分隔的。在JSON格式中,元组作为数组输出(在方括号中)。 - -## 创建元组 {#chuang-jian-yuan-zu} - -可以使用函数来创建元组: - - tuple(T1, T2, ...) - -创建元组的示例: - - :) SELECT tuple(1,'a') AS x, toTypeName(x) - - SELECT - (1, 'a') AS x, - toTypeName(x) - - ┌─x───────┬─toTypeName(tuple(1, 'a'))─┐ - │ (1,'a') │ Tuple(UInt8, String) │ - └─────────┴───────────────────────────┘ - - 1 rows in set. Elapsed: 0.021 sec. - -## 元组中的数据类型 {#yuan-zu-zhong-de-shu-ju-lei-xing} - -在动态创建元组时,ClickHouse 会自动为元组的每一个参数赋予最小可表达的类型。如果参数为 [NULL](../query_language/syntax.md#null-literal),那这个元组对应元素是 [Nullable](nullable.md)。 - -自动数据类型检测示例: - - SELECT tuple(1, NULL) AS x, toTypeName(x) - - SELECT - (1, NULL) AS x, - toTypeName(x) - - ┌─x────────┬─toTypeName(tuple(1, NULL))──────┐ - │ (1,NULL) │ Tuple(UInt8, Nullable(Nothing)) │ - └──────────┴─────────────────────────────────┘ - - 1 rows in set. Elapsed: 0.002 sec. diff --git a/docs/zh/data_types/uuid.md b/docs/zh/data_types/uuid.md deleted file mode 120000 index aba05e889ac..00000000000 --- a/docs/zh/data_types/uuid.md +++ /dev/null @@ -1 +0,0 @@ -../../en/data_types/uuid.md \ No newline at end of file diff --git a/docs/zh/database_engines/index.md b/docs/zh/database_engines/index.md deleted file mode 100644 index 95c7ea2c319..00000000000 --- a/docs/zh/database_engines/index.md +++ /dev/null @@ -1,11 +0,0 @@ -# 数据库引擎 {#shu-ju-ku-yin-qing} - -您使用的所有表都是由数据库引擎所提供的 - -默认情况下,ClickHouse使用自己的数据库引擎,该引擎提供可配置的[表引擎](../operations/table_engines/index.md)和[所有支持的SQL语法](../query_language/syntax.md). 
- -除此之外,您还可以选择使用以下的数据库引擎: - -- [MySQL](mysql.md) - -[来源文章](https://clickhouse.tech/docs/en/database_engines/) diff --git a/docs/zh/database_engines/lazy.md b/docs/zh/database_engines/lazy.md deleted file mode 120000 index 66830dcdb2f..00000000000 --- a/docs/zh/database_engines/lazy.md +++ /dev/null @@ -1 +0,0 @@ -../../en/database_engines/lazy.md \ No newline at end of file diff --git a/docs/zh/database_engines/mysql.md b/docs/zh/database_engines/mysql.md deleted file mode 100644 index 9467269a2cc..00000000000 --- a/docs/zh/database_engines/mysql.md +++ /dev/null @@ -1,128 +0,0 @@ -# MySQL {#mysql} - -MySQL引擎用于将远程的MySQL服务器中的表映射到ClickHouse中,并允许您对表进行`INSERT`和`SELECT`查询,以方便您在ClickHouse与MySQL之间进行数据交换。 - -`MySQL`数据库引擎会将对其的查询转换为MySQL语法并发送到MySQL服务器中,因此您可以执行诸如`SHOW TABLES`或`SHOW CREATE TABLE`之类的操作。 - -但您无法对其执行以下操作: - -- `ATTACH`/`DETACH` -- `DROP` -- `RENAME` -- `CREATE TABLE` -- `ALTER` - -## CREATE DATABASE {#create-database} - -``` sql -CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] -ENGINE = MySQL('host:port', 'database', 'user', 'password') -``` - -**MySQL数据库引擎参数** - -- `host:port` — 链接的MySQL地址。 -- `database` — 链接的MySQL数据库。 -- `user` — 链接的MySQL用户。 -- `password` — 链接的MySQL用户密码。 - -## 支持的类型对应 {#zhi-chi-de-lei-xing-dui-ying} - -| MySQL | ClickHouse | -|----------------------------------|---------------------------------------------| -| UNSIGNED TINYINT | [UInt8](../data_types/int_uint.md) | -| TINYINT | [Int8](../data_types/int_uint.md) | -| UNSIGNED SMALLINT | [UInt16](../data_types/int_uint.md) | -| SMALLINT | [Int16](../data_types/int_uint.md) | -| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../data_types/int_uint.md) | -| INT, MEDIUMINT | [Int32](../data_types/int_uint.md) | -| UNSIGNED BIGINT | [UInt64](../data_types/int_uint.md) | -| BIGINT | [Int64](../data_types/int_uint.md) | -| FLOAT | [Float32](../data_types/float.md) | -| DOUBLE | [Float64](../data_types/float.md) | -| DATE | [Date](../data_types/date.md) | -| DATETIME, TIMESTAMP | [DateTime](../data_types/datetime.md) | -| BINARY | [FixedString](../data_types/fixedstring.md) | - -其他的MySQL数据类型将全部都转换为[String](../data_types/string.md)。 - -同时以上的所有类型都支持[Nullable](../data_types/nullable.md)。 - -## 使用示例 {#shi-yong-shi-li} - -在MySQL中创建表: - - mysql> USE test; - Database changed - - mysql> CREATE TABLE `mysql_table` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `float` FLOAT NOT NULL, - -> PRIMARY KEY (`int_id`)); - Query OK, 0 rows affected (0,09 sec) - - mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2); - Query OK, 1 row affected (0,00 sec) - - mysql> select * from mysql_table; - +--------+-------+ - | int_id | value | - +--------+-------+ - | 1 | 2 | - +--------+-------+ - 1 row in set (0,00 sec) - -在ClickHouse中创建MySQL类型的数据库,同时与MySQL服务器交换数据: - -``` sql -CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') -``` - -``` sql -SHOW DATABASES -``` - -``` text -┌─name─────┐ -│ default │ -│ mysql_db │ -│ system │ -└──────────┘ -``` - -``` sql -SHOW TABLES FROM mysql_db -``` - -``` text -┌─name─────────┐ -│ mysql_table │ -└──────────────┘ -``` - -``` sql -SELECT * FROM mysql_db.mysql_table -``` - -``` text -┌─int_id─┬─value─┐ -│ 1 │ 2 │ -└────────┴───────┘ -``` - -``` sql -INSERT INTO mysql_db.mysql_table VALUES (3,4) -``` - -``` sql -SELECT * FROM mysql_db.mysql_table -``` - -``` text -┌─int_id─┬─value─┐ -│ 1 │ 2 │ -│ 3 │ 4 │ -└────────┴───────┘ -``` - -[来源文章](https://clickhouse.tech/docs/en/database_engines/mysql/) diff --git a/docs/zh/development/architecture.md 
b/docs/zh/development/architecture.md
index 22eaaf583d6..66d5cac13b5 100644
--- a/docs/zh/development/architecture.md
+++ b/docs/zh/development/architecture.md
@@ -1,3 +1,4 @@
+
 # ClickHouse 架构概述 {#clickhouse-jia-gou-gai-shu}

 ClickHouse 是一个真正的列式数据库管理系统(DBMS)。在 ClickHouse 中,数据始终是按列存储的,包括矢量(向量或列块)执行的过程。只要有可能,操作都是基于矢量进行分派的,而不是单个的值,这被称为«矢量化查询执行»,它有利于降低实际的数据处理开销。
@@ -12,7 +13,7 @@ ClickHouse 是一个真正的列式数据库管理系统(DBMS)。在 ClickHous

 不同的 `IColumn` 实现(`ColumnUInt8`、`ColumnString` 等)负责不同的列内存布局。内存布局通常是一个连续的数组。对于数据类型为整型的列,只是一个连续的数组,比如 `std::vector`。对于 `String` 列和 `Array` 列,则由两个向量组成:其中一个向量连续存储所有的 `String` 或数组元素,另一个存储每一个 `String` 或 `Array` 的起始元素在第一个向量中的偏移。而 `ColumnConst` 则仅在内存中存储一个值,但是看起来像一个列。

-## Field {#field}
+## 字段 {#field}

 尽管如此,有时候也可能需要处理单个值。表示单个值,可以使用 `Field`。`Field` 是 `UInt64`、`Int64`、`Float64`、`String` 和 `Array` 组成的联合。`IColumn` 拥有 `operator[]` 方法来获取第 `n` 个值成为一个 `Field`,同时也拥有 `insert` 方法将一个 `Field` 追加到一个列的末尾。这些方法并不高效,因为它们需要处理表示单一值的临时 `Field` 对象,但是有更高效的方法比如 `insertFrom` 和 `insertRangeFrom` 等。
@@ -115,7 +116,7 @@ ClickHouse 是一个真正的列式数据库管理系统(DBMS)。在 ClickHous

 普通函数不会改变行数 - 它们的执行看起来就像是独立地处理每一行数据。实际上,函数不会作用于一个单独的行上,而是作用在以 `Block` 为单位的数据上,以实现向量查询执行。

-还有一些杂项函数,比如 [blockSize](../query_language/functions/other_functions.md#function-blocksize)、[rowNumberInBlock](../query_language/functions/other_functions.md#function-rownumberinblock),以及 [runningAccumulate](../query_language/functions/other_functions.md#function-runningaccumulate),它们对块进行处理,并且不遵从行的独立性。
+还有一些杂项函数,比如 [blockSize](../sql_reference/functions/other_functions.md#function-blocksize)、[rowNumberInBlock](../sql_reference/functions/other_functions.md#function-rownumberinblock),以及 [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate),它们对块进行处理,并且不遵从行的独立性(参见下面的示例)。

 ClickHouse 具有强类型,因此隐式类型转换不会发生。如果函数不支持某个特定的类型组合,则会抛出异常。但函数可以通过重载以支持许多不同的类型组合。比如,`plus` 函数(用于实现 `+` 运算符)支持任意数字类型的组合:`UInt8` + `Float32`,`UInt16` + `Int8` 等。同时,一些可变参数的函数能够接收任意数目的参数,比如 `concat` 函数。
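作为补充,下面给出一个简短的 SQL 示意(仅作说明,非权威示例),演示这几个按块处理的杂项函数;输出中块的划分取决于具体的执行环境:

``` sql
-- blockSize() 与 rowNumberInBlock() 作用于数据块(Block),而非独立的行
SELECT number, blockSize() AS block_size, rowNumberInBlock() AS row_in_block
FROM numbers(5);

-- runningAccumulate 逐行累积聚合函数的中间状态
SELECT k, runningAccumulate(sum_state) AS running_sum
FROM (SELECT number AS k, sumState(number) AS sum_state FROM numbers(5) GROUP BY k ORDER BY k);
```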
@@ -159,7 +160,7 @@ ClickHouse 具有强类型,因此隐式类型转换不会发生。如果函数

 分布式查询执行没有全局查询计划。每个节点都有针对自己的工作部分的本地查询计划。我们仅有简单的一次性分布式查询执行:将查询发送给远程节点,然后合并结果。但是对于具有高基数的 `GROUP BY` 或具有大量临时数据的 `JOIN` 这样困难的查询来说,这是不可行的:在这种情况下,我们需要在服务器之间«改组»数据,这需要额外的协调。ClickHouse 不支持这类查询执行,我们需要在这方面进行努力。

-## Merge Tree {#merge-tree}
+## 合并树 {#merge-tree}

 `MergeTree` 是一系列支持按主键索引的存储引擎。主键可以是一个任意的列或表达式的元组。`MergeTree` 表中的数据存储于«分块»中。每一个分块以主键序存储数据(数据按主键元组的字典序排序)。表的所有列都存储在这些«分块»中分离的 `column.bin` 文件中。`column.bin` 文件由压缩块组成,每一个块通常是 64 KB 到 1 MB 大小的未压缩数据,具体取决于平均值大小。这些块由一个接一个连续放置的列值组成。每一列的列值顺序相同(顺序由主键定义),因此当你按多列进行迭代时,你能够得到相应列的值。
diff --git a/docs/zh/development/browse_code.md b/docs/zh/development/browse_code.md
deleted file mode 120000
index 8c08c622129..00000000000
--- a/docs/zh/development/browse_code.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/development/browse_code.md \ No newline at end of file
diff --git a/docs/zh/development/browse_code.md b/docs/zh/development/browse_code.md
new file mode 100644
index 00000000000..d098675b6a0
--- /dev/null
+++ b/docs/zh/development/browse_code.md
@@ -0,0 +1,14 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 63
+toc_title: "\u6D4F\u89C8ClickHouse\u6E90\u4EE3\u7801"
+---
+
+# 浏览ClickHouse源代码 {#browse-clickhouse-source-code}
+
+您可以使用 [这里](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/index.html) 提供的 **Woboq** 在线代码浏览器。它提供了代码导航和语义高亮、搜索和索引,代码快照每天更新。
+
+此外,您还可以像往常一样在 [GitHub](https://github.com/ClickHouse/ClickHouse) 上浏览源代码。
+
+如果你想知道选择哪种 IDE,我们推荐 CLion、QT Creator、VS Code 和 KDevelop(有注意事项)。您可以使用任何自己喜欢的 IDE,Vim 和 Emacs 也可以。
diff --git a/docs/zh/development/build.md b/docs/zh/development/build.md
index 3a81077fb0b..05581985a35 100644
--- a/docs/zh/development/build.md
+++ b/docs/zh/development/build.md
@@ -1,3 +1,4 @@
+
 # 如何构建 ClickHouse 发布包 {#ru-he-gou-jian-clickhouse-fa-bu-bao}

 ## 安装 Git 和 Pbuilder {#an-zhuang-git-he-pbuilder}
@@ -32,12 +33,12 @@ cd ClickHouse
 sudo apt-get install git cmake ninja-build
 ```

-Or cmake3 instead of cmake on older systems.
+或者在旧系统上用 cmake3 代替 cmake。
 或者在早期版本的系统中用 cmake3 替代 cmake

 ## 安装 GCC 9 {#an-zhuang-gcc-9}

-There are several ways to do this.
+有几种方法可以做到这一点。

 ### 安装 PPA 包 {#an-zhuang-ppa-bao}
@@ -79,6 +80,6 @@ cd ..
 ```

 若要创建一个执行文件, 执行 `ninja clickhouse`。
-这个命令会使得 `dbms/programs/clickhouse` 文件可执行,您可以使用 `client` or `server` 参数运行。
+这个命令会使得 `programs/clickhouse` 文件可执行,您可以使用 `client` 或 `server` 参数运行。

 [来源文章](https://clickhouse.tech/docs/en/development/build/)
diff --git a/docs/zh/development/build_cross_arm.md b/docs/zh/development/build_cross_arm.md
deleted file mode 120000
index 983a9872dc1..00000000000
--- a/docs/zh/development/build_cross_arm.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/development/build_cross_arm.md \ No newline at end of file
diff --git a/docs/zh/development/build_cross_arm.md b/docs/zh/development/build_cross_arm.md
new file mode 100644
index 00000000000..1061fddfacd
--- /dev/null
+++ b/docs/zh/development/build_cross_arm.md
@@ -0,0 +1,44 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 67
+toc_title: "\u5982\u4F55\u5728Linux\u4E0A\u6784\u5EFAClickHouse for AARCH64\uFF08\
+  ARM64)"
+---
+
+# 如何在Linux上为AARCH64(ARM64)架构构建ClickHouse {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
+
+本指南适用于这种情况:你有一台 Linux 机器,并希望用它来构建将在另一台 AARCH64 CPU 架构的 Linux 机器上运行的 `clickhouse` 二进制文件。这适用于在 Linux 服务器上运行的持续集成检查。
+
+AArch64 的交叉构建基于 [构建说明](build.md),请先按照其中的步骤操作。
+
+# 安装Clang-8 {#install-clang-8}
+
+请按照 https://apt.llvm.org/ 上的说明为您的 Ubuntu 或 Debian 进行设置。
+例如,在Ubuntu Bionic中,您可以使用以下命令:
+
+``` bash
+echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list
+sudo apt-get update
+sudo apt-get install clang-8
+```
+
+# 安装交叉编译工具集 {#install-cross-compilation-toolset}
+
+``` bash
+cd ClickHouse
+mkdir -p build-aarch64/cmake/toolchain/linux-aarch64
+wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
+tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1
+```
+
+# 构建ClickHouse {#build-clickhouse}
+
+``` bash
+cd ClickHouse
+mkdir build-arm64
+CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
+ninja -C build-arm64
+```
+
+生成的二进制文件将仅在具有 AARCH64 CPU 体系结构的 Linux 上运行。
diff --git a/docs/zh/development/build_cross_osx.md b/docs/zh/development/build_cross_osx.md
index 20577d1213a..c74ff934c0d 100644
--- a/docs/zh/development/build_cross_osx.md
+++ b/docs/zh/development/build_cross_osx.md
@@ -1,10 +1,11 @@
+
 # 如何在Linux中编译Mac OS X ClickHouse {#ru-he-zai-linuxzhong-bian-yi-mac-os-x-clickhouse}

 Linux机器也可以编译运行在OS X系统的`clickhouse`二进制包,这可以用于在Linux上跑持续集成测试。如果要在Mac OS X上直接构建ClickHouse,请参考另外一篇指南: https://clickhouse.tech/docs/zh/development/build\_osx/

 Mac OS X的交叉编译基于以下构建说明,请首先遵循它们。

-# Install Clang-8 {#install-clang-8}
+# 安装Clang-8 {#install-clang-8}

 按照https://apt.llvm.org/中的说明进行Ubuntu或Debian安装。
 例如,安装Bionic的命令如下:
diff --git a/docs/zh/development/build_osx.md b/docs/zh/development/build_osx.md
index e471b716a33..0c1c840912e 100644
--- a/docs/zh/development/build_osx.md
+++ b/docs/zh/development/build_osx.md
@@ -1,3 +1,4 @@
+
 # 在 Mac OS X 中编译 ClickHouse {#zai-mac-os-x-zhong-bian-yi-clickhouse}

 ClickHouse 支持在 Mac OS X 10.12 版本中编译。若您在用更早的操作系统版本,可以尝试在指令中使用 `Gentoo Prefix` 和 `clang sl`.
@@ -43,7 +44,7 @@ cd ..

 为此,请创建以下文件:

-/Library/LaunchDaemons/limit.maxfiles.plist:
+/Library/LaunchDaemons/limit.maxfiles.plist:

 ``` xml
diff --git a/docs/zh/development/contrib.md b/docs/zh/development/contrib.md
index 5491cc76f6f..e282856c0e8 100644
--- a/docs/zh/development/contrib.md
+++ b/docs/zh/development/contrib.md
@@ -1,34 +1,35 @@
+
 # 使用的三方库 {#shi-yong-de-san-fang-ku}

-| Library | License |
-|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
-| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) |
-| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
-| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) |
-| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) |
-| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) |
-| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) |
-| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) |
-| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) |
-| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) |
-| libbtrie | [BSD 2-Clause License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) |
-| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
-| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) |
-| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) |
-| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) |
-| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) |
-| libpcg-random | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
-| libressl | [OpenSSL License](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
-| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
-| libwidechar\_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
-| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
-| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
-| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
-| murmurhash | [Public Domain](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
-| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
-| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
-| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
-| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
-| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
-| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
-| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
+| 库 | 许可证 |
+|--------------------|-------------------------------------------------------------------------------------------------------------------------------------|
+| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) |
+| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
+| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) |
+| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) |
+| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) |
+| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) |
+| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) |
+| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) |
+| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) |
+| libbtrie | [BSD 2-Clause License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) |
+| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
+| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) |
+| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) |
+| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) |
+| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) |
+| libpcg-random | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
+| libressl | [OpenSSL License](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
+| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
+| libwidechar\_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
+| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
+| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
+| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
+| murmurhash | [Public Domain](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
+| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
+| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
+| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
+| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
+| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
+| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
+| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
diff --git a/docs/zh/development/developer_instruction.md b/docs/zh/development/developer_instruction.md
index 27f3c1ad8b2..f39ab665ee6 100644
--- a/docs/zh/development/developer_instruction.md
+++ b/docs/zh/development/developer_instruction.md
@@ -1,3 +1,4 @@
+
 ClickHouse支持Linux,FreeBSD 及 Mac OS X 系统。

 # Windows使用指引 {#windowsshi-yong-zhi-yin}
@@ -67,9 +68,9 @@

 命令执行成功后,可以通过执行`git pull upstream master`,从ClickHouse的主分支中拉取更新。

-## Working with submodules {#working-with-submodules}
+## 使用子模块 {#working-with-submodules}

-Working with submodules in git could be painful. Next commands will help to manage it:
+在git中使用子模块可能会很痛苦。接下来的命令将有助于管理它:

     # ! each command accepts --recursive
     # Update remote URLs for submodules. Barely rare case
@@ -81,7 +82,7 @@ Working with submodules in git could be painful. Next commands will help to mana
     # Two last commands could be merged together
     git submodule update --init

-The next commands would help you to reset all submodules to the initial state (!WARING! - any chenges inside will be deleted):
+接下来的命令将帮助您将所有子模块重置为初始状态(警告:
其中的任何更改都将被删除):

     # Synchronizes submodules' remote URL with .gitmodules
     git submodule sync --recursive
     # Update the existing submodules code
     git submodule update --recursive
     # Reset all changes done after HEAD
     git submodule foreach git reset --hard
     # Clean files from .gitignore
     git submodule foreach git clean -xfd
     # Repeat last 4 commands for all submodule
     git submodule foreach git submodule sync --recursive
     git submodule foreach git submodule update --init --recursive
     git submodule foreach git submodule foreach git reset --hard
     git submodule foreach git submodule foreach git clean -xfd

@@ -185,19 +186,19 @@ Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性

 在libhdfs2库中生成有关protobuf文件的消息时,可能会显示诸如`libprotobuf WARNING`。它们没有影响,可以忽略不计。

-成功构建后,会得到一个可执行文件`ClickHouse/dbms/programs/clickhouse`:
+成功构建后,会得到一个可执行文件`ClickHouse/programs/clickhouse`:

-    ls -l dbms/programs/clickhouse
+    ls -l programs/clickhouse

 # 运行ClickHouse可执行文件 {#yun-xing-clickhouseke-zhi-xing-wen-jian}

-要以当前的用户身份运行服务,请进入到`ClickHouse/dbms/programs/server/` 目录(在`build`文件夹外)并运行:

-    ../../../build/dbms/programs/clickhouse server
+要以当前的用户身份运行服务,请进入到`ClickHouse/programs/server/` 目录(在`build`文件夹外)并运行:

+    ../../../build/programs/clickhouse server

 在这种情况下,ClickHouse将使用位于当前目录中的配置文件。您可以从任何目录运行`clickhouse server`,并将配置文件`--config-file`的路径指定为命令行参数。

-在另外一个终端上连接ClickHouse的clickhouse-client客户端,请进入到`ClickHouse/build/dbms/programs/` 并运行`clickhouse client`。
+在另外一个终端上连接ClickHouse的clickhouse-client客户端,请进入到`ClickHouse/build/programs/` 并运行`clickhouse client`。

 如果您在Mac OS X 或者 FreeBSD上收到`Connection refused`的消息,请尝试指定主机地址为127.0.0.1:

@@ -206,7 +207,7 @@

 您可以使用自定义构建的ClickHouse二进制文件替换系统中安装的ClickHouse二进制文件的生成版本。为此,请参照官方网站上的说明在计算机上安装ClickHouse。 接下来,运行以下命令:

     sudo service clickhouse-server stop
-    sudo cp ClickHouse/build/dbms/programs/clickhouse /usr/bin/
+    sudo cp ClickHouse/build/programs/clickhouse /usr/bin/
     sudo service clickhouse-server start

 请注意,`clickhouse-client`,`clickhouse-server`和其他服务通常共享`clickhouse`二进制文件的符号链接。

@@ -214,7 +215,7 @@

 您还可以使用系统上安装的ClickHouse软件包中的配置文件运行自定义构建的ClickHouse二进制文件:

     sudo service clickhouse-server stop
-    sudo -u clickhouse ClickHouse/build/dbms/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml
+    sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml

 # IDE (集成开发环境) {#ide-ji-cheng-kai-fa-huan-jing}

 ClickHouse的架构描述可以在此处查看:https://clickhouse.tech/docs/en/development/architecture/

 编写测试用例:https://clickhouse.tech/docs/en/development/tests/

-任务列表:https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/instructions/easy\_tasks\_sorted\_en.md
+任务列表:https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md

 # 测试数据 {#ce-shi-shu-ju}
diff --git a/docs/zh/development/index.md b/docs/zh/development/index.md
index 187ee1b3e25..cf3b2fae1d9 100644
--- a/docs/zh/development/index.md
+++ b/docs/zh/development/index.md
@@ -1,3 +1,4 @@
+
 # ClickHouse 开发 {#clickhouse-kai-fa}

 [来源文章](https://clickhouse.tech/docs/en/development/)
diff --git a/docs/zh/development/style.md b/docs/zh/development/style.md
index 4d374f9b2e8..10c036fef3b 100644
--- a/docs/zh/development/style.md
+++ b/docs/zh/development/style.md
@@ -1,3 +1,4 @@
+
 # 如何编写 C++ 代码 {#ru-he-bian-xie-c-dai-ma}

 ## 一般建议 {#yi-ban-jian-yi}
@@ -200,7 +201,7 @@ std::cerr << static_cast(c) << std::endl;
 for (Names::const_iterator it = column_names.begin(); it != column_names.end(); ++it)
 ```

-## Comments {#comments}
+## 注释 {#comments}

 **1.** 请务必为所有非常重要的代码部分添加注释。

@@ -297,7 +298,7 @@ void executeQuery(
     /// for
 ```

-## Names {#names}
+## 命名 {#names}

 **1.** 在变量和类成员的名称中使用带下划线的小写字母。

@@ -623,7 +624,7 @@ Loader() {}

 **18.** 编码。

-在所有情况下使用 UTF-8 编码。使用 `std::string` and `char *`。不要使用 `std::wstring` 和 `wchar_t`。
+在所有情况下使用 UTF-8 编码。使用 `std::string` 和 `char *`。不要使用 `std::wstring` 和 `wchar_t`。

 **19.** 日志。
diff --git a/docs/zh/development/tests.md b/docs/zh/development/tests.md
index 0416daf307c..f54e273a77a 100644
---
a/docs/zh/development/tests.md +++ b/docs/zh/development/tests.md @@ -1,3 +1,4 @@ + # ClickHouse 测试 {#clickhouse-ce-shi} ## 功能性测试 {#gong-neng-xing-ce-shi} @@ -6,15 +7,15 @@ 每个功能测试会向正在运行的 ClickHouse服 务器发送一个或多个查询,并将结果与预期结果进行比较。 -测试用例在 `dbms/src/tests/queries` 目录中。这里有两个子目录:`stateless` 和 `stateful`目录。 无状态的测试无需预加载测试数据集 - 通常是在测试运行期间动态创建小量的数据集。有状态测试需要来自 Yandex.Metrica 的预加载测试数据,而不向一般公众提供。 我们倾向于仅使用«无状态»测试并避免添加新的«有状态»测试。 +测试用例在 `tests/queries` 目录中。这里有两个子目录:`stateless` 和 `stateful`目录。 无状态的测试无需预加载测试数据集 - 通常是在测试运行期间动态创建小量的数据集。有状态测试需要来自 Yandex.Metrica 的预加载测试数据,而不向一般公众提供。 我们倾向于仅使用«无状态»测试并避免添加新的«有状态»测试。 每个测试用例可以是两种类型之一:`.sql` 和 `.sh`。`.sql` 测试文件是用于管理`clickhouse-client --multiquery --testmode`的简单SQL脚本。`.sh` 测试文件是一个可以自己运行的脚本。 -要运行所有测试,请使用 `dbms/tests/clickhouse-test` 工具,用 `--help` 可以获取所有的选项列表。您可以简单地运行所有测试或运行测试名称中的子字符串过滤的测试子集:`./clickhouse-test substring`。 +要运行所有测试,请使用 `tests/clickhouse-test` 工具,用 `--help` 可以获取所有的选项列表。您可以简单地运行所有测试或运行测试名称中的子字符串过滤的测试子集:`./clickhouse-test substring`。 调用功能测试最简单的方法是将 `clickhouse-client` 复制到`/usr/bin/`,运行`clickhouse-server`,然后从自己的目录运行`./ clickhouse-test`。 -要添加新测试,请在 `dbms/src/tests/queries/0_stateless` 目录内添加新的 `.sql` 或 `.sh` 文件,手动检查,然后按以下方式生成 `.reference` 文件: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`。 +要添加新测试,请在 `tests/queries/0_stateless` 目录内添加新的 `.sql` 或 `.sh` 文件,手动检查,然后按以下方式生成 `.reference` 文件: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` 或 `./00000_test.sh > ./00000_test.reference`。 测试应该只使用(创建,删除等)`test` 数据库中的表,这些表假定是事先创建的; 测试也可以使用临时表。 @@ -24,13 +25,13 @@ ## 已知的bug {#yi-zhi-de-bug} -如果我们知道一些可以通过功能测试轻松复制的错误,我们将准备好的功能测试放在 `dbms/src/tests/queries/bugs` 目录中。当修复错误时,这些测试将被移动到 `dbms/src/tests/queries/0_stateless` 目录中。 +如果我们知道一些可以通过功能测试轻松复制的错误,我们将准备好的功能测试放在 `tests/queries/bugs` 目录中。当修复错误时,这些测试将被移动到 `tests/queries/0_stateless` 目录中。 ## 集成测试 {#ji-cheng-ce-shi} 集成测试允许在集群配置中测试 ClickHouse,并与其他服务器(如MySQL,Postgres,MongoDB)进行 ClickHouse 交互。它们可用于模拟网络拆分,数据包丢弃等。这些测试在Docker 下运行,并使用各种软件创建多个容器。 -参考 `dbms/tests/integration/README.md` 文档关于如何使用集成测试。 +参考 `tests/integration/README.md` 文档关于如何使用集成测试。 请注意,ClickHouse 与第三方驱动程序的集成未经过测试。此外,我们目前还没有与 JDBC 和ODBC 驱动程序进行集成测试。 @@ -42,7 +43,7 @@ ## 性能测试 {#xing-neng-ce-shi} -性能测试允许测量和比较综合查询中 ClickHouse 的某些独立部分的性能。测试位于`dbms/tests/performance` 目录中。每个测试都由 `.xml` 文件表示,并附有测试用例的描述。使用 `clickhouse performance-test` 工具(嵌入在 `clickhouse` 二进制文件中)运行测试。请参阅 `--help` 以进行调用。 +性能测试允许测量和比较综合查询中 ClickHouse 的某些独立部分的性能。测试位于`tests/performance` 目录中。每个测试都由 `.xml` 文件表示,并附有测试用例的描述。使用 `clickhouse performance-test` 工具(嵌入在 `clickhouse` 二进制文件中)运行测试。请参阅 `--help` 以进行调用。 每个测试在循环中运行一个或多个查询(可能带有参数组合),并具有一些停止条件(如«最大执行速度不会在三秒内更改»)并测量一些有关查询性能的指标(如«最大执行速度»))。某些测试可以包含预加载的测试数据集的前提条件。 @@ -52,13 +53,13 @@ ## 测试工具和脚本 {#ce-shi-gong-ju-he-jiao-ben} -`tests`目录中的一些程序不是准备测试,而是测试工具。例如,对于`Lexer`,有一个工具`dbms/src/Parsers/tests/lexer` 标准输出。您可以使用这些工具作为代码示例以及探索和手动测试。 +`tests`目录中的一些程序不是准备测试,而是测试工具。例如,对于`Lexer`,有一个工具`src/Parsers/tests/lexer` 标准输出。您可以使用这些工具作为代码示例以及探索和手动测试。 您还可以将一对文件 `.sh` 和 `.reference` 与工具放在一些预定义的输入上运行它 - 然后可以将脚本结果与 `.reference` 文件进行比较。这些测试不是自动化的。 ## 杂项测试 {#za-xiang-ce-shi} -有一些外部字典的测试位于 `dbms/tests/external_dictionaries`,机器学习模型在`dbms/tests/external_models`目录。这些测试未更新,必须转移到集成测试。 +有一些外部字典的测试位于 `tests/external_dictionaries`,机器学习模型在`tests/external_models`目录。这些测试未更新,必须转移到集成测试。 对于分布式数据的插入,有单独的测试。此测试在单独的服务器上运行 ClickHouse 集群并模拟各种故障情况:网络拆分,数据包丢弃(ClickHouse 节点之间,ClickHouse 和 ZooKeeper之间,ClickHouse 服务器和客户端之间等),进行 `kill -9`,`kill -STOP` 和`kill -CONT` 
等操作,类似[Jepsen](https://aphyr.com/tags/Jepsen)。然后,测试检查是否已写入所有已确认的插入,并且所有已拒绝的插入都未写入。 @@ -68,7 +69,7 @@ 当您开发了新的功能,做手动测试也是合理的。可以按照以下步骤来进行: -编译 ClickHouse。在命令行中运行 ClickHouse:进入 `dbms/src/programs/clickhouse-server` 目录并运行 `./clickhouse-server`。它会默认使用当前目录的配置文件 (`config.xml`, `users.xml` 以及在 `config.d` 和 `users.d` 目录的文件)。可以使用 `dbms/src/programs/clickhouse-client/clickhouse-client` 来连接数据库。 +编译 ClickHouse。在命令行中运行 ClickHouse:进入 `programs/clickhouse-server` 目录并运行 `./clickhouse-server`。它会默认使用当前目录的配置文件 (`config.xml`, `users.xml` 以及在 `config.d` 和 `users.d` 目录的文件)。可以使用 `programs/clickhouse-client/clickhouse-client` 来连接数据库。 或者,您可以安装 ClickHouse 软件包:从 Yandex 存储库中获得稳定版本,或者您可以在ClickHouse源根目录中使用 `./release` 构建自己的软件包。然后使用 `sudo service clickhouse-server start` 启动服务器(或停止服务器)。在 `/etc/clickhouse-server/clickhouse-server.log` 中查找日志。 @@ -152,27 +153,27 @@ Clang 有更多有用的警告 - 您可以使用 `-Weverything` 查找它们并 对于生产构建,使用 gcc(它仍然生成比 clang 稍高效的代码)。对于开发来说,clang 通常更方便使用。您可以使用调试模式在自己的机器上构建(以节省笔记本电脑的电量),但请注意,由于更好的控制流程和过程分析,编译器使用 `-O3` 会生成更多警告。 当使用 clang 构建时,使用 `libc++` 而不是 `libstdc++`,并且在使用调试模式构建时,使用调试版本的 `libc++`,它允许在运行时捕获更多错误。 -## Sanitizers {#sanitizers} +## Sanitizers(动态分析工具) {#sanitizers} -**Address sanitizer**. +**Address sanitizer**。 我们在每个提交的基础上在 ASan 下运行功能和集成测试。 -**Valgrind (Memcheck)**. +**Valgrind(Memcheck)**。 我们在 Valgrind 过夜进行功能测试。 这需要几个小时。 目前在 `re2` 库中有一个已知的误报,请参阅 [文章](https://research.swtch.com/sparse)。 -**Thread sanitizer**. +**Thread sanitizer**。 我们在 TSan 下进行功能测试。ClickHouse 必须通过所有测试。在 TSan 下运行不是自动化的,只是偶尔执行。 -**Memory sanitizer**. +**Memory sanitizer**。 目前我们不使用 MSan。 -**Undefined behaviour sanitizer.** +**Undefined behaviour sanitizer**。 我们仍然不会在每次提交的基础上使用 UBSan。 有一些地方需要解决。 -**Debug allocator.** +**调试分配器(Debug allocator)**。 您可以使用 `DEBUG_TCMALLOC` CMake 选项启用 `tcmalloc` 的调试版本。我们在每次提交的基础上使用调试分配器运行测试。 -更多请参阅 `dbms/tests/instructions/sanitizers.txt`。 +更多请参阅 `tests/instructions/sanitizers.txt`。 ## 模糊测试 {#mo-hu-ce-shi} @@ -186,7 +187,7 @@ Yandex Cloud 部门的人员从安全角度对 ClickHouse 功能进行了一些 ## 静态分析 {#jing-tai-fen-xi} -我们偶尔使用静态分析。我们已经评估过 `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`。您将在 `dbms/tests/instructions/` 目录中找到使用说明。你也可以阅读[俄文文章](https://habr.com/company/yandex/blog/342018/). +我们偶尔使用静态分析。我们已经评估过 `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`。您将在 `tests/instructions/` 目录中找到使用说明。你也可以阅读[俄文文章](https://habr.com/company/yandex/blog/342018/). 如果您使用 `CLion` 作为 IDE,您可以开箱即用一些 `clang-tidy` 检查。 diff --git a/docs/zh/engines/database_engines/index.md b/docs/zh/engines/database_engines/index.md new file mode 100644 index 00000000000..2431b96a43d --- /dev/null +++ b/docs/zh/engines/database_engines/index.md @@ -0,0 +1,12 @@ + +# 数据库引擎 {#shu-ju-ku-yin-qing} + +您使用的所有表都是由数据库引擎所提供的。 + +默认情况下,ClickHouse使用自己的数据库引擎,该引擎提供可配置的[表引擎](../../engines/database_engines/index.md)和[所有支持的SQL语法](../../engines/database_engines/index.md).
+ +除此之外,您还可以选择使用以下的数据库引擎: + +- [MySQL](mysql.md) + +[来源文章](https://clickhouse.tech/docs/en/database_engines/) diff --git a/docs/zh/engines/database_engines/lazy.md b/docs/zh/engines/database_engines/lazy.md new file mode 100644 index 00000000000..6b094c8793d --- /dev/null +++ b/docs/zh/engines/database_engines/lazy.md @@ -0,0 +1,18 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 31 +toc_title: "\u61D2\u60F0" +--- + +# 懒惰 {#lazy} + +仅将表保留在RAM中 `expiration_time_in_seconds` 上次访问后几秒钟。 只能与\*日志表一起使用。 + +它针对存储许多小\*日志表进行了优化,访问之间存在较长的时间间隔。 + +## 创建数据库 {#creating-a-database} + + CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds); + +[原始文章](https://clickhouse.tech/docs/en/database_engines/lazy/) diff --git a/docs/zh/engines/database_engines/mysql.md b/docs/zh/engines/database_engines/mysql.md new file mode 100644 index 00000000000..80ff82ec2d3 --- /dev/null +++ b/docs/zh/engines/database_engines/mysql.md @@ -0,0 +1,127 @@ + +# MySQL {#mysql} + +MySQL引擎用于将远程的MySQL服务器中的表映射到ClickHouse中,并允许您对表进行`INSERT`和`SELECT`查询,以方便您在ClickHouse与MySQL之间进行数据交换。 + +`MySQL`数据库引擎会将对其的查询转换为MySQL语法并发送到MySQL服务器中,因此您可以执行诸如`SHOW TABLES`或`SHOW CREATE TABLE`之类的操作。 + +但您无法对其执行以下操作: + +- `RENAME` +- `CREATE TABLE` +- `ALTER` + +## CREATE DATABASE {#create-database} + +``` sql +CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] +ENGINE = MySQL('host:port', ['database' | database], 'user', 'password') +``` + +**MySQL数据库引擎参数** + +- `host:port` — 链接的MySQL地址。 +- `database` — 链接的MySQL数据库。 +- `user` — 链接的MySQL用户。 +- `password` — 链接的MySQL用户密码。 + +## 支持的类型对应 {#zhi-chi-de-lei-xing-dui-ying} + +| MySQL | ClickHouse | +|----------------------------------|-------------------------------------------------------------| +| UNSIGNED TINYINT | [UInt8](../../sql_reference/data_types/int_uint.md) | +| TINYINT | [Int8](../../sql_reference/data_types/int_uint.md) | +| UNSIGNED SMALLINT | [UInt16](../../sql_reference/data_types/int_uint.md) | +| SMALLINT | [Int16](../../sql_reference/data_types/int_uint.md) | +| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql_reference/data_types/int_uint.md) | +| INT, MEDIUMINT | [Int32](../../sql_reference/data_types/int_uint.md) | +| UNSIGNED BIGINT | [UInt64](../../sql_reference/data_types/int_uint.md) | +| BIGINT | [Int64](../../sql_reference/data_types/int_uint.md) | +| FLOAT | [Float32](../../sql_reference/data_types/float.md) | +| DOUBLE | [Float64](../../sql_reference/data_types/float.md) | +| DATE | [日期](../../sql_reference/data_types/date.md) | +| DATETIME, TIMESTAMP | [日期时间](../../sql_reference/data_types/datetime.md) | +| BINARY | [固定字符串](../../sql_reference/data_types/fixedstring.md) | + +其他的MySQL数据类型将全部都转换为[字符串](../../sql_reference/data_types/string.md)。 + +同时以上的所有类型都支持[可为空](../../sql_reference/data_types/nullable.md)。 + +## 使用示例 {#shi-yong-shi-li} + +在MySQL中创建表: + + mysql> USE test; + Database changed + + mysql> CREATE TABLE `mysql_table` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `float` FLOAT NOT NULL, + -> PRIMARY KEY (`int_id`)); + Query OK, 0 rows affected (0,09 sec) + + mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2); + Query OK, 1 row affected (0,00 sec) + + mysql> select * from mysql_table; + +--------+-------+ + | int_id | value | + +--------+-------+ + | 1 | 2 | + +--------+-------+ + 1 row in set (0,00 sec) + +在ClickHouse中创建MySQL类型的数据库,同时与MySQL服务器交换数据: + +``` sql +CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') +``` + +``` sql 
+SHOW DATABASES +``` + +``` text +┌─name─────┐ +│ default │ +│ mysql_db │ +│ system │ +└──────────┘ +``` + +``` sql +SHOW TABLES FROM mysql_db +``` + +``` text +┌─name─────────┐ +│ mysql_table │ +└──────────────┘ +``` + +``` sql +SELECT * FROM mysql_db.mysql_table +``` + +``` text +┌─int_id─┬─value─┐ +│ 1 │ 2 │ +└────────┴───────┘ +``` + +``` sql +INSERT INTO mysql_db.mysql_table VALUES (3,4) +``` + +``` sql +SELECT * FROM mysql_db.mysql_table +``` + +``` text +┌─int_id─┬─value─┐ +│ 1 │ 2 │ +│ 3 │ 4 │ +└────────┴───────┘ +``` + +[来源文章](https://clickhouse.tech/docs/en/database_engines/mysql/) diff --git a/docs/zh/engines/index.md b/docs/zh/engines/index.md new file mode 100644 index 00000000000..41d2a7e3d8d --- /dev/null +++ b/docs/zh/engines/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u5F15\u64CE" +toc_priority: 25 +--- + + diff --git a/docs/zh/engines/table_engines/index.md b/docs/zh/engines/table_engines/index.md new file mode 100644 index 00000000000..9603ebe78c8 --- /dev/null +++ b/docs/zh/engines/table_engines/index.md @@ -0,0 +1,74 @@ + +# 表引擎 {#biao-yin-qing} + +表引擎(即表的类型)决定了: + +- 数据的存储方式和位置,写到哪里以及从哪里读取数据。 +- 支持哪些查询以及如何支持。 +- 并发数据访问。 +- 索引的使用(如果存在)。 +- 是否可以执行多线程请求。 +- 数据复制参数。 + +# 引擎类型 {#yin-qing-lei-xing} + +## MergeTree {#mergetree} + +适用于高负载任务的最通用和功能最强大的表引擎。这些引擎的共同特点是可以快速插入数据并进行后续的后台数据处理。 MergeTree系列引擎支持数据复制(使用[Replicated\*](mergetree_family/replication.md) 的引擎版本),分区和一些其他引擎不支持的其他功能。 + +该类型的引擎: +\* [MergeTree](mergetree_family/mergetree.md) +\* [ReplacingMergeTree](mergetree_family/replacingmergetree.md) +\* [SummingMergeTree](mergetree_family/summingmergetree.md) +\* [AggregatingMergeTree](mergetree_family/aggregatingmergetree.md) +\* [CollapsingMergeTree](mergetree_family/collapsingmergetree.md) +\* [VersionedCollapsingMergeTree](mergetree_family/versionedcollapsingmergetree.md) +\* [GraphiteMergeTree](mergetree_family/graphitemergetree.md) + +## 日志 {#log} + +具有最小功能的[轻量级引擎](log_family/index.md)。当您需要快速写入许多小表(最多约100万行)并在以后整体读取它们时,该类型的引擎是最有效的。 + +该类型的引擎: + +- [TinyLog](log_family/tinylog.md) +- [StripeLog](log_family/stripelog.md) +- [Log](log_family/log.md) + +## 集成引擎 {#integration-engines} + +用于与其他的数据存储与处理系统集成的引擎。 +该类型的引擎: + +- [Kafka](integrations/kafka.md) +- [MySQL](integrations/mysql.md) +- [ODBC](integrations/odbc.md) +- [JDBC](integrations/jdbc.md) +- [HDFS](integrations/hdfs.md) + +## 用于其他特定功能的引擎 {#yong-yu-qi-ta-te-ding-gong-neng-de-yin-qing} + +该类型的引擎: + +- [Distributed](special/distributed.md) +- [MaterializedView](special/materializedview.md) +- [Dictionary](special/dictionary.md) +- [Merge](special/merge.md) +- [File](special/file.md) +- [Null](special/null.md) +- [Set](special/set.md) +- [Join](special/join.md) +- [URL](special/url.md) +- [View](special/view.md) +- [Memory](special/memory.md) +- [Buffer](special/buffer.md) + +# 虚拟列 {#xu-ni-lie} + +虚拟列是表引擎组成的一部分,它在对应的表引擎的源代码中定义。 + +您不能在 `CREATE TABLE` 中指定虚拟列,并且虚拟列不会包含在 `SHOW CREATE TABLE` 和 `DESCRIBE TABLE` 的查询结果中。虚拟列是只读的,所以您不能向虚拟列中写入数据。 + +如果想要查询虚拟列中的数据,您必须在SELECT查询中包含虚拟列的名字。SELECT \* 不会返回虚拟列的内容。 + +若您创建的表中有一列与虚拟列的名字相同,那么虚拟列将不能再被访问。我们不建议您这样做。为了避免这种列名的冲突,虚拟列的名字一般都以下划线开头。 diff --git a/docs/zh/engines/table_engines/integrations/hdfs.md b/docs/zh/engines/table_engines/integrations/hdfs.md new file mode 100644 index 00000000000..5cd60a855bc --- /dev/null +++ b/docs/zh/engines/table_engines/integrations/hdfs.md @@ -0,0 +1,123 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 36 +toc_title: HDFS +--- + +# HDFS {#table_engines-hdfs} + +该引擎提供了与
[Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) 生态系统的集成,允许通过 ClickHouse 管理 [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) 上的数据。这个引擎与 +[File](../special/file.md) 和 [URL](../special/url.md) 引擎类似,但提供了 Hadoop 特有的功能。 + +## 用法 {#usage} + +``` sql +ENGINE = HDFS(URI, format) +``` + +`URI` 参数是 HDFS 中完整的文件 URI。 +`format` 参数指定一种可用的文件格式:执行 +`SELECT` 查询时,格式必须支持输入;执行 +`INSERT` 查询时,格式必须支持输出。可用的格式列在 +[格式](../../../interfaces/formats.md#formats) 一节中。 +`URI` 的路径部分可以包含通配符(glob),此时表将是只读的。 + +**示例:** + +**1.** 设置 `hdfs_engine_table` 表: + +``` sql +CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV') +``` + +**2.** 填充文件: + +``` sql +INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3) +``` + +**3.** 查询数据: + +``` sql +SELECT * FROM hdfs_engine_table LIMIT 2 +``` + +``` text +┌─name─┬─value─┐ +│ one │ 1 │ +│ two │ 2 │ +└──────┴───────┘ +``` + +## 实施细节 {#implementation-details} + +- 读取和写入可以并行。 +- 不支持: + - `ALTER` 和 `SELECT...SAMPLE` 操作。 + - 索引。 + - 复制。 + +**路径中的通配符** + +路径的多个部分都可以包含通配符。被处理的文件必须存在,并且与整个路径模式匹配。文件列表在 `SELECT` 时确定(而不是在 `CREATE` 时)。 + +- `*` — 匹配除 `/` 以外任意数量的任意字符,包括空字符串。 +- `?` — 匹配任意单个字符。 +- `{some_string,another_string,yet_another_one}` — 匹配 `'some_string', 'another_string', 'yet_another_one'` 中的任意一个字符串。 +- `{N..M}` — 匹配从 N 到 M(含两端)范围内的任意数字。 + +带 `{}` 的构造与 [remote](../../../sql_reference/table_functions/remote.md) 表函数中的类似。 + +**示例** + +1. 假设我们在HDFS上有几个TSV格式的文件,其中包含以下URI: + +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’ +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’ +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’ + +2. 有几种方法可以创建由所有六个文件组成的表: + + + +``` sql +CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV') +``` + +另一种方式: + +``` sql +CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV') +``` + +表由两个目录中的所有文件组成(所有文件都应满足查询中描述的格式和模式): + +``` sql +CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV') +``` + +!!! warning "警告" + 如果文件列表包含带有前导零的数字范围,请对每个数字单独使用带大括号的构造,或者使用 `?`。 + +**示例** + +创建一张表,包含名为 `file000`, `file001`, … , `file999` 的文件: + +``` sql +CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV') +``` + +## 虚拟列 {#virtual-columns} + +- `_path` — 文件路径。 +- `_file` — 文件名。
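+
+下面用一个示意性查询展示如何读取这两个虚拟列(沿用上文创建的 `hdfs_engine_table` 表,仅作示意):
+
+``` sql
+SELECT
+    name,
+    value,
+    _path,
+    _file
+FROM hdfs_engine_table
+LIMIT 2
+```
+
+结果中,`_path` 是文件在 HDFS 中的完整路径,`_file` 是不含路径的文件名。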
+ +**另请参阅** + +- [虚拟列](../index.md#table_engines-virtual_columns) + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) diff --git a/docs/zh/engines/table_engines/integrations/index.md b/docs/zh/engines/table_engines/integrations/index.md new file mode 100644 index 00000000000..b488c83d1bd --- /dev/null +++ b/docs/zh/engines/table_engines/integrations/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u96C6\u6210" +toc_priority: 30 +--- + + diff --git a/docs/zh/engines/table_engines/integrations/jdbc.md b/docs/zh/engines/table_engines/integrations/jdbc.md new file mode 100644 index 00000000000..00363bb988a --- /dev/null +++ b/docs/zh/engines/table_engines/integrations/jdbc.md @@ -0,0 +1,90 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 34 +toc_title: JDBC +--- + +# JDBC {#table-engine-jdbc} + +允许 ClickHouse 通过 [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity) 连接到外部数据库。 + +为了实现 JDBC 连接,ClickHouse 使用独立程序 [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge),它应该作为守护进程运行。 + +该引擎支持 [可为空](../../../sql_reference/data_types/nullable.md) 数据类型。 + +## 创建表 {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name +( + columns list... +) +ENGINE = JDBC(dbms_uri, external_database, external_table) +``` + +**引擎参数** + +- `dbms_uri` — 外部 DBMS 的 URI。 + + 格式: `jdbc:<driver_name>://<host_name>:<port>/?user=<username>&password=<password>`. + MySQL 示例: `jdbc:mysql://localhost:3306/?user=root&password=root`. + +- `external_database` — 外部 DBMS 中的数据库名。 + +- `external_table` — `external_database` 中的表名。 + +## 用法示例 {#usage-example} + +通过 MySQL 控制台客户端直接连接 MySQL 服务器并创建一张表: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +在ClickHouse服务器中创建表并从中选择数据: + +``` sql +CREATE TABLE jdbc_table +( + `int_id` Int32, + `int_nullable` Nullable(Int32), + `float` Float32, + `float_nullable` Nullable(Float32) +) +ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test') +``` + +``` sql +SELECT * +FROM jdbc_table +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [JDBC表函数](../../../sql_reference/table_functions/jdbc.md).
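+
+作为参考,下面的查询示意了上面提到的 `jdbc` 表函数的用法:无需预先建表即可直接查询外部表(连接参数沿用本页示例,仅作示意):
+
+``` sql
+SELECT *
+FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test')
+```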
+ +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) diff --git a/docs/zh/engines/table_engines/integrations/kafka.md b/docs/zh/engines/table_engines/integrations/kafka.md new file mode 100644 index 00000000000..53bde650dfc --- /dev/null +++ b/docs/zh/engines/table_engines/integrations/kafka.md @@ -0,0 +1,136 @@ + +# 卡夫卡 {#kafka} + +此引擎与 [Apache Kafka](http://kafka.apache.org/) 结合使用。 + +Kafka 特性: + +- 发布或者订阅数据流。 +- 容错存储机制。 +- 处理流数据。 + + + +老版格式: + + Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format + [, kafka_row_delimiter, kafka_schema, kafka_num_consumers]) + +新版格式: + + Kafka SETTINGS + kafka_broker_list = 'localhost:9092', + kafka_topic_list = 'topic1,topic2', + kafka_group_name = 'group1', + kafka_format = 'JSONEachRow', + kafka_row_delimiter = '\n', + kafka_schema = '', + kafka_num_consumers = 2 + +必要参数: + +- `kafka_broker_list` – 以逗号分隔的 brokers 列表 (`localhost:9092`)。 +- `kafka_topic_list` – topic 列表 (`my_topic`)。 +- `kafka_group_name` – Kafka 消费组名称 (`group1`)。如果不希望消息在集群中重复,请在每个分片中使用相同的组名。 +- `kafka_format` – 消息体格式。使用与 SQL 部分的 `FORMAT` 函数相同表示方法,例如 `JSONEachRow`。了解详细信息,请参考 `Formats` 部分。 + +可选参数: + +- `kafka_row_delimiter` - 每个消息体(记录)之间的分隔符。 +- `kafka_schema` – 如果解析格式需要一个 schema 时,此参数必填。例如,[普罗托船长](https://capnproto.org/) 需要 schema 文件路径以及根对象 `schema.capnp:Message` 的名字。 +- `kafka_num_consumers` – 单个表的消费者数量。默认值是:`1`,如果一个消费者的吞吐量不足,则指定更多的消费者。消费者的总数不应该超过 topic 中分区的数量,因为每个分区只能分配一个消费者。 + +示例: + +``` sql + CREATE TABLE queue ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); + + SELECT * FROM queue LIMIT 5; + + CREATE TABLE queue2 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092', + kafka_topic_list = 'topic', + kafka_group_name = 'group1', + kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; + + CREATE TABLE queue2 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1') + SETTINGS kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; +``` + +消费的消息会被自动追踪,因此每个消息在不同的消费组里只会记录一次。如果希望获得两次数据,则使用另一个组名创建副本。 + +消费组可以灵活配置并且在集群之间同步。例如,如果群集中有10个主题和5个表副本,则每个副本将获得2个主题。 如果副本数量发生变化,主题将自动在副本中重新分配。了解更多信息请访问 http://kafka.apache.org/intro。 + +`SELECT` 查询对于读取消息并不是很有用(调试除外),因为每条消息只能被读取一次。使用物化视图创建实时线程更实用。您可以这样做: + +1. 使用引擎创建一个 Kafka 消费者并作为一条数据流。 +2. 创建一个结构表。 +3. 
创建物化视图,该视图会在后台转换引擎中的数据并将其放入之前创建的表中。 + +当 `MATERIALIZED VIEW` 添加至引擎,它将会在后台收集数据。可以持续不断地从 Kafka 收集数据并通过 `SELECT` 将数据转换为所需要的格式。 + +示例: + +``` sql + CREATE TABLE queue ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); + + CREATE TABLE daily ( + day Date, + level String, + total UInt64 + ) ENGINE = SummingMergeTree(day, (day, level), 8192); + + CREATE MATERIALIZED VIEW consumer TO daily + AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total + FROM queue GROUP BY day, level; + + SELECT level, sum(total) FROM daily GROUP BY level; +``` + +为了提高性能,接受的消息被分组为 [max\_insert\_block\_size](../../../operations/settings/settings.md#settings-max_insert_block_size) 大小的块。如果未在 [stream\_flush\_interval\_ms](../../../operations/settings/settings.md) 毫秒内形成块,则不关心块的完整性,都会将数据刷新到表中。 + +停止接收主题数据或更改转换逻辑,请 detach 物化视图: + + DETACH TABLE consumer; + ATTACH MATERIALIZED VIEW consumer; + +如果使用 `ALTER` 更改目标表,为了避免目标表与视图中的数据之间存在差异,推荐停止物化视图。 + +## 配置 {#pei-zhi} + +与 `GraphiteMergeTree` 类似,Kafka 引擎支持使用ClickHouse配置文件进行扩展配置。可以使用两个配置键:全局 (`kafka`) 和 主题级别 (`kafka_*`)。首先应用全局配置,然后应用主题级配置(如果存在)。 + +``` xml + <!-- Kafka 引擎类型的所有表的全局配置选项 --> + <kafka> + <debug>cgrp</debug> + <auto_offset_reset>smallest</auto_offset_reset> + </kafka> + + <!-- 针对主题 "logs" 的专用配置 --> + <kafka_logs> + <retry_backoff_ms>250</retry_backoff_ms> + <fetch_min_bytes>100000</fetch_min_bytes> + </kafka_logs> +``` + +有关详细配置选项列表,请参阅 [librdkafka配置参考](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)。在 ClickHouse 配置中使用下划线 (`_`) 代替点 (`.`)。例如,`check.crcs=true` 应写作 `<check_crcs>true</check_crcs>`。 + +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/kafka/) diff --git a/docs/zh/engines/table_engines/integrations/mysql.md b/docs/zh/engines/table_engines/integrations/mysql.md new file mode 100644 index 00000000000..bfd3e6445a5 --- /dev/null +++ b/docs/zh/engines/table_engines/integrations/mysql.md @@ -0,0 +1,26 @@ + +# MySQL {#mysql} + +MySQL 引擎可以对存储在远程 MySQL 服务器上的数据执行 `SELECT` 查询。 + +调用格式: + + MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); + +**调用参数** + +- `host:port` — MySQL 服务器地址。 +- `database` — 数据库的名称。 +- `table` — 表名称。 +- `user` — 数据库用户。 +- `password` — 用户密码。 +- `replace_query` — 将 `INSERT INTO` 查询是否替换为 `REPLACE INTO` 的标志。如果 `replace_query=1`,则替换查询。 +- `'on_duplicate_clause'` — 将 `ON DUPLICATE KEY UPDATE 'on_duplicate_clause'` 表达式添加到 `INSERT` 查询语句中。例如:`impression = VALUES(impression) + impression`。如果需要指定 `'on_duplicate_clause'`,则需要设置 `replace_query=0`。如果同时设置 `replace_query = 1` 和 `'on_duplicate_clause'`,则会抛出异常。 + +目前,简单的 `WHERE` 子句(例如 `=, !=, >, >=, <, <=`)会在 MySQL 服务器上执行。 + +其余条件以及 `LIMIT` 采样约束语句仅在对MySQL的查询完成后才在ClickHouse中执行。 + +`MySQL` 引擎不支持 [可为空](../../../engines/table_engines/integrations/mysql.md) 数据类型,因此,当从MySQL表中读取数据时,`NULL` 将转换为指定列类型的默认值(通常为0或空字符串)。 + +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/mysql/) diff --git a/docs/zh/engines/table_engines/integrations/odbc.md b/docs/zh/engines/table_engines/integrations/odbc.md new file mode 100644 index 00000000000..1488ab0d856 --- /dev/null +++ b/docs/zh/engines/table_engines/integrations/odbc.md @@ -0,0 +1,132 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 35 +toc_title: ODBC +--- + +# ODBC {#table-engine-odbc} + +允许 ClickHouse 通过 [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity) 连接到外部数据库。 + +为了安全地实现 ODBC 连接,ClickHouse 使用独立程序 `clickhouse-odbc-bridge`。如果 ODBC 驱动程序直接从 `clickhouse-server` 中加载,驱动程序的问题可能会导致 ClickHouse 服务器崩溃。ClickHouse 会在需要时自动启动 `clickhouse-odbc-bridge`。ODBC 桥程序与 `clickhouse-server` 安装自同一个软件包。
+ +该引擎支持 [可为空](../../../sql_reference/data_types/nullable.md) 数据类型。 + +## 创建表 {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1], + name2 [type2], + ... +) +ENGINE = ODBC(connection_settings, external_database, external_table) +``` + +请参阅的详细说明 [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) 查询。 + +表结构可以与源表结构不同: + +- 列名应与源表中的列名相同,但您可以按任何顺序使用其中的一些列。 +- 列类型可能与源表中的列类型不同。 ClickHouse尝试 [投](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) ClickHouse数据类型的值。 + +**发动机参数** + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` 文件 +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. + +## 用法示例 {#usage-example} + +**通过ODBC从本地MySQL安装中检索数据** + +此示例检查Ubuntu Linux18.04和MySQL服务器5.7。 + +确保安装了unixODBC和MySQL连接器。 + +默认情况下(如果从软件包安装),ClickHouse以用户身份启动 `clickhouse`. 因此,您需要在MySQL服务器中创建和配置此用户。 + +``` bash +$ sudo mysql +``` + +``` sql +mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; +mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; +``` + +然后配置连接 `/etc/odbc.ini`. + +``` bash +$ cat /etc/odbc.ini +[mysqlconn] +DRIVER = /usr/local/lib/libmyodbc5w.so +SERVER = 127.0.0.1 +PORT = 3306 +DATABASE = test +USERNAME = clickhouse +PASSWORD = clickhouse +``` + +您可以使用 `isql` unixodbc安装中的实用程序。 + +``` bash +$ isql -v mysqlconn ++-------------------------+ +| Connected! | +| | +... +``` + +MySQL中的表: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +ClickHouse中的表,从MySQL表中检索数据: + +``` sql +CREATE TABLE odbc_t +( + `int_id` Int32, + `float_nullable` Nullable(Float32) +) +ENGINE = ODBC('DSN=mysqlconn', 'test', 'test') +``` + +``` sql +SELECT * FROM odbc_t +``` + +``` text +┌─int_id─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└────────┴────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [ODBC外部字典](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC表函数](../../../sql_reference/table_functions/odbc.md) + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/zh/engines/table_engines/log_family/index.md b/docs/zh/engines/table_engines/log_family/index.md new file mode 100644 index 00000000000..78557921c09 --- /dev/null +++ b/docs/zh/engines/table_engines/log_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u65E5\u5FD7\u7CFB\u5217" +toc_priority: 29 +--- + + diff --git a/docs/zh/engines/table_engines/log_family/log.md b/docs/zh/engines/table_engines/log_family/log.md new file mode 100644 index 00000000000..90f892615c9 --- /dev/null +++ b/docs/zh/engines/table_engines/log_family/log.md @@ -0,0 +1,6 @@ + +# 日志 {#log} + +日志与 TinyLog 的不同之处在于,«标记» 
的小文件与列文件存在一起。这些标记写在每个数据块上,并且包含偏移量,这些偏移量指示从哪里开始读取文件以便跳过指定的行数。这使得可以在多个线程中读取表数据。对于并发数据访问,可以同时执行读取操作,而写入操作则阻塞读取和其它写入。Log 引擎不支持索引。同样,如果写入表失败,则该表将被破坏,并且从该表读取将返回错误。Log 引擎适用于临时数据,write-once 表以及测试或演示目的。 + +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/log/) diff --git a/docs/zh/engines/table_engines/log_family/log_family.md b/docs/zh/engines/table_engines/log_family/log_family.md new file mode 100644 index 00000000000..60cecab5faf --- /dev/null +++ b/docs/zh/engines/table_engines/log_family/log_family.md @@ -0,0 +1,46 @@ + +# 日志引擎系列 {#table_engines-log-engine-family} + +这些引擎是为了需要写入许多小数据量(少于一百万行)的表的场景而开发的。 + +这系列的引擎有: + +- [StripeLog](stripelog.md) +- [Log](log.md) +- [TinyLog](tinylog.md) + +## 共同属性 {#table_engines-log-engine-family-common-properties} + +引擎: + +- 数据存储在磁盘上。 + +- 写入时将数据追加在文件末尾。 + +- 不支持[突变](../../../engines/table_engines/log_family/log_family.md#alter-mutations)操作。 + +- 不支持索引。 + + 这意味着 `SELECT` 在范围查询时效率不高。 + +- 非原子地写入数据。 + + 如果某些事情破坏了写操作,例如服务器的异常关闭,你将会得到一张包含了损坏数据的表。 + +## 差异 {#table_engines-log-engine-family-differences} + +`Log` 和 `StripeLog` 引擎支持: + +- 并发访问数据的锁。 + + `INSERT` 请求执行过程中表会被锁定,并且其他的读写数据的请求都会等待直到锁定被解除。如果没有写数据的请求,任意数量的读请求都可以并发执行。 + +- 并行读取数据。 + + 在读取数据时,ClickHouse 使用多线程。 每个线程处理不同的数据块。 + +`Log` 引擎为表中的每一列使用不同的文件。`StripeLog` 将所有的数据存储在一个文件中。因此 `StripeLog` 引擎在操作系统中使用更少的描述符,但是 `Log` 引擎提供更高的读性能。 + +`TinyLog` 引擎是该系列中最简单的引擎并且提供了最少的功能和最低的性能。`TinyLog` 引擎不支持并行读取和并发数据访问,并将每一列存储在不同的文件中。它比其余两种支持并行读取的引擎的读取速度更慢,并且使用了和 `Log` 引擎同样多的描述符。你可以在简单的低负载的情景下使用它。 + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) diff --git a/docs/zh/engines/table_engines/log_family/stripelog.md b/docs/zh/engines/table_engines/log_family/stripelog.md new file mode 100644 index 00000000000..ab4deb67ebb --- /dev/null +++ b/docs/zh/engines/table_engines/log_family/stripelog.md @@ -0,0 +1,83 @@ + +# StripeLog {#table_engines-stripelog} + +该引擎属于日志引擎系列。请在[日志引擎系列](log_family.md)文章中查看引擎的共同属性和差异。 + +在你需要写入许多小数据量(小于一百万行)的表的场景下使用这个引擎。 + +## 建表 {#table_engines-stripelog-creating-a-table} + + CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] + ( + column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ...
+ ) ENGINE = StripeLog + +查看[建表](../../../engines/table_engines/log_family/stripelog.md#create-table-query)请求的详细说明。 + +## 写数据 {#table_engines-stripelog-writing-the-data} + +`StripeLog` 引擎将所有列存储在一个文件中。对每一次 `Insert` 请求,ClickHouse 将数据块追加在表文件的末尾,逐列写入。 + +ClickHouse 为每张表写入以下文件: + +- `data.bin` — 数据文件。 +- `index.mrk` — 带标记的文件。标记包含了已插入的每个数据块中每列的偏移量。 + +`StripeLog` 引擎不支持 `ALTER UPDATE` 和 `ALTER DELETE` 操作。 + +## 读数据 {#table_engines-stripelog-reading-the-data} + +带标记的文件使得 ClickHouse 可以并行的读取数据。这意味着 `SELECT` 请求返回行的顺序是不可预测的。使用 `ORDER BY` 子句对行进行排序。 + +## 使用示例 {#table_engines-stripelog-example-of-use} + +建表: + +``` sql +CREATE TABLE stripe_log_table +( + timestamp DateTime, + message_type String, + message String +) +ENGINE = StripeLog +``` + +插入数据: + +``` sql +INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message') +INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message') +``` + +我们使用两次 `INSERT` 请求从而在 `data.bin` 文件中创建两个数据块。 + +ClickHouse 在查询数据时使用多线程。每个线程读取单独的数据块并在完成后独立的返回结果行。这样的结果是,大多数情况下,输出中块的顺序和输入时相应块的顺序是不同的。例如: + +``` sql +SELECT * FROM stripe_log_table +``` + + ┌───────────timestamp─┬─message_type─┬─message────────────────────┐ + │ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ + │ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ + └─────────────────────┴──────────────┴────────────────────────────┘ + ┌───────────timestamp─┬─message_type─┬─message───────────────────┐ + │ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ + └─────────────────────┴──────────────┴───────────────────────────┘ + +对结果排序(默认增序): + +``` sql +SELECT * FROM stripe_log_table ORDER BY timestamp +``` + + ┌───────────timestamp─┬─message_type─┬─message────────────────────┐ + │ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ + │ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ + │ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ + └─────────────────────┴──────────────┴────────────────────────────┘ + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/) diff --git a/docs/zh/engines/table_engines/log_family/tinylog.md b/docs/zh/engines/table_engines/log_family/tinylog.md new file mode 100644 index 00000000000..9a1b27fd418 --- /dev/null +++ b/docs/zh/engines/table_engines/log_family/tinylog.md @@ -0,0 +1,14 @@ + +# TinyLog {#tinylog} + +最简单的表引擎,用于将数据存储在磁盘上。每列都存储在单独的压缩文件中。写入时,数据将附加到文件末尾。 + +并发数据访问不受任何限制: +- 如果同时从表中读取并在不同的查询中写入,则读取操作将抛出异常 +- 如果同时写入多个查询中的表,则数据将被破坏。 + +这种表引擎的典型用法是 write-once:首先只写入一次数据,然后根据需要多次读取。查询在单个流中执行。换句话说,此引擎适用于相对较小的表(建议最多1,000,000行)。如果您有许多小表,则使用此表引擎是适合的,因为它比Log引擎更简单(需要打开的文件更少)。当您拥有大量小表时,可能会导致性能低下,但在可能已经在其它 DBMS 时使用过,则您可能会发现切换使用 TinyLog 类型的表更容易。**不支持索引**。 + +在 Yandex.Metrica 中,TinyLog 表用于小批量处理的中间数据。 + +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/tinylog/) diff --git a/docs/zh/engines/table_engines/mergetree_family/aggregatingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/aggregatingmergetree.md new file mode 100644 index 00000000000..2d898a5d168 --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/aggregatingmergetree.md @@ -0,0 +1,95 @@ + +# AggregatingMergeTree {#aggregatingmergetree} + +该引擎继承自 [MergeTree](mergetree.md),并改变了数据片段的合并逻辑。 ClickHouse 会将相同主键的所有行(在一个数据片段内)替换为单个存储一系列聚合函数状态的行。 + +可以使用 `AggregatingMergeTree` 表来做增量数据统计聚合,包括物化视图的数据聚合。 + +引擎需使用 [AggregateFunction](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md) 类型来处理所有列。 + +如果要按一组规则来合并减少行数,则使用 
`AggregatingMergeTree` 是合适的。 + +## 建表 {#jian-biao} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = AggregatingMergeTree() +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +语句参数的说明,请参阅 [语句描述](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md)。 + +**子句** + +创建 `AggregatingMergeTree` 表时,需用跟创建 `MergeTree` 表一样的[子句](mergetree.md)。 + +
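+
+作为补充,下面给出一个简化的建表示意(表名 `agg_visits` 及列名为本文假设,并非官方示例),展示 `AggregateFunction` 列的写法:
+
+``` sql
+CREATE TABLE agg_visits
+(
+    StartDate Date,
+    CounterID UInt64,
+    Visits AggregateFunction(sum, Int8),
+    Users AggregateFunction(uniq, UInt64)
+)
+ENGINE = AggregatingMergeTree()
+PARTITION BY toYYYYMM(StartDate)
+ORDER BY (CounterID, StartDate);
+```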
    + +已弃用的建表方法 + +!!! attention "注意" + 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity) +``` + +上面的所有参数跟 `MergeTree` 中的一样。 +
    + +## SELECT 和 INSERT {#select-he-insert} + +插入数据,需使用带有聚合 -State- 函数的 [INSERT SELECT](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md) 语句。 +从 `AggregatingMergeTree` 表中查询数据时,需使用 `GROUP BY` 子句并且要使用与插入时相同的聚合函数,但后缀要改为 `-Merge` 。 + +在 `SELECT` 查询的结果中,对于 ClickHouse 的所有输出格式 `AggregateFunction` 类型的值都实现了特定的二进制表示法。如果直接用 `SELECT` 导出这些数据,例如如用 `TabSeparated` 格式,那么这些导出数据也能直接用 `INSERT` 语句加载导入。 + +## 聚合物化视图的示例 {#ju-he-wu-hua-shi-tu-de-shi-li} + +创建一个跟踪 `test.visits` 表的 `AggregatingMergeTree` 物化视图: + +``` sql +CREATE MATERIALIZED VIEW test.basic +ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate) +AS SELECT + CounterID, + StartDate, + sumState(Sign) AS Visits, + uniqState(UserID) AS Users +FROM test.visits +GROUP BY CounterID, StartDate; +``` + +向 `test.visits` 表中插入数据。 + +``` sql +INSERT INTO test.visits ... +``` + +数据会同时插入到表和视图中,并且视图 `test.basic` 会将里面的数据聚合。 + +要获取聚合数据,我们需要在 `test.basic` 视图上执行类似 `SELECT ... GROUP BY ...` 这样的查询 : + +``` sql +SELECT + StartDate, + sumMerge(Visits) AS Visits, + uniqMerge(Users) AS Users +FROM test.basic +GROUP BY StartDate +ORDER BY StartDate; +``` + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/) diff --git a/docs/zh/engines/table_engines/mergetree_family/collapsingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/collapsingmergetree.md new file mode 100644 index 00000000000..85b5ce076e8 --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/collapsingmergetree.md @@ -0,0 +1,207 @@ + +# 折叠树 {#table_engine-collapsingmergetree} + +该引擎继承于 [MergeTree](mergetree.md),并在数据块合并算法中添加了折叠行的逻辑。 + +`CollapsingMergeTree` 会异步的删除(折叠)这些除了特定列 `Sign` 有 `1` 和 `-1` 的值以外,其余所有字段的值都相等的成对的行。没有成对的行会被保留。更多的细节请看本文的[折叠](#table_engine-collapsingmergetree-collapsing)部分。 + +因此,该引擎可以显著的降低存储量并提高 `SELECT` 查询效率。 + +## 建表 {#jian-biao} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = CollapsingMergeTree(sign) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +请求参数的描述,参考[请求参数](../../../engines/table_engines/mergetree_family/collapsingmergetree.md)。 + +**CollapsingMergeTree 参数** + +- `sign` — 类型列的名称: `1` 是«状态»行,`-1` 是«取消»行。 + + 列数据类型 — `Int8`。 + +**子句** + +创建 `CollapsingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md#table_engine-mergetree-creating-a-table)。 + +
    + +已弃用的建表方法 + +!!! attention "注意" + 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign) +``` + +除了 `sign` 的所有参数都与 `MergeTree` 中的含义相同。 + +- `sign` — 类型列的名称: `1` 是«状态»行,`-1` 是«取消»行。 + + 列数据类型 — `Int8`。 + +
    + +## 折叠 {#table_engine-collapsingmergetree-collapsing} + +### 数据 {#shu-ju} + +考虑你需要为某个对象保存不断变化的数据的情景。似乎为一个对象保存一行记录并在其发生任何变化时更新记录是合乎逻辑的,但是更新操作对 DBMS 来说是昂贵且缓慢的,因为它需要重写存储中的数据。如果你需要快速的写入数据,则更新操作是不可接受的,但是你可以按下面的描述顺序地更新一个对象的变化。 + +在写入行的时候使用特定的列 `Sign`。如果 `Sign = 1` 则表示这一行是对象的状态,我们称之为«状态»行。如果 `Sign = -1` 则表示是对具有相同属性的状态行的取消,我们称之为«取消»行。 + +例如,我们想要计算用户在某个站点访问的页面页面数以及他们在那里停留的时间。在某个时候,我们将用户的活动状态写入下面这样的行。 + + ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ + │ 4324182021466249494 │ 5 │ 146 │ 1 │ + └─────────────────────┴───────────┴──────────┴──────┘ + +一段时间后,我们写入下面的两行来记录用户活动的变化。 + + ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ + │ 4324182021466249494 │ 5 │ 146 │ -1 │ + │ 4324182021466249494 │ 6 │ 185 │ 1 │ + └─────────────────────┴───────────┴──────────┴──────┘ + +第一行取消了这个对象(用户)的状态。它需要复制被取消的状态行的所有除了 `Sign` 的属性。 + +第二行包含了当前的状态。 + +因为我们只需要用户活动的最后状态,这些行 + + ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ + │ 4324182021466249494 │ 5 │ 146 │ 1 │ + │ 4324182021466249494 │ 5 │ 146 │ -1 │ + └─────────────────────┴───────────┴──────────┴──────┘ + +可以在折叠对象的失效(老的)状态的时候被删除。`CollapsingMergeTree` 会在合并数据片段的时候做这件事。 + +为什么我们每次改变需要 2 行可以阅读[算法](#table_engine-collapsingmergetree-collapsing-algorithm)段。 + +**这种方法的特殊属性** + +1. 写入的程序应该记住对象的状态从而可以取消它。«取消»字符串应该是«状态»字符串的复制,除了相反的 `Sign`。它增加了存储的初始数据的大小,但使得写入数据更快速。 +2. 由于写入的负载,列中长的增长阵列会降低引擎的效率。数据越简单,效率越高。 +3. `SELECT` 的结果很大程度取决于对象变更历史的一致性。在准备插入数据时要准确。在不一致的数据中会得到不可预料的结果,例如,像会话深度这种非负指标的负值。 + +### 算法 {#table_engine-collapsingmergetree-collapsing-algorithm} + +当 ClickHouse 合并数据片段时,每组具有相同主键的连续行被减少到不超过两行,一行 `Sign = 1`(«状态»行),另一行 `Sign = -1` («取消»行),换句话说,数据项被折叠了。 + +对每个结果的数据部分 ClickHouse 保存: + + 1. 第一个«取消»和最后一个«状态»行,如果«状态»和«取消»行的数量匹配和最后一个行是«状态»行 + 2. 最后一个«状态»行,如果«状态»行比«取消»行多一个或一个以上。 + 3. 第一个«取消»行,如果«取消»行比«状态»行多一个或一个以上。 + 4. 
没有行,在其他所有情况下。 + + 合并会继续,但是 ClickHouse 会把此情况视为逻辑错误并将其记录在服务日志中。这个错误会在相同的数据被插入超过一次时出现。 + +因此,折叠不应该改变统计数据的结果。 +变化逐渐地被折叠,因此最终几乎每个对象都只剩下了最后的状态。 + +`Sign` 是必须的因为合并算法不保证所有有相同主键的行都会在同一个结果数据片段中,甚至是在同一台物理服务器上。ClickHouse 用多线程来处理 `SELECT` 请求,所以它不能预测结果中行的顺序。如果要从 `CollapsingMergeTree` 表中获取完全«折叠»后的数据,则需要聚合。 + +要完成折叠,请使用 `GROUP BY` 子句和用于处理符号的聚合函数编写请求。例如,要计算数量,使用 `sum(Sign)` 而不是 `count()`。要计算某物的总和,使用 `sum(Sign * x)` 而不是 `sum(x)`,并添加 `HAVING sum(Sign) > 0` 子句。 + +聚合体 `count`,`sum` 和 `avg` 可以用这种方式计算。如果一个对象至少有一个未被折叠的状态,则可以计算 `uniq` 聚合。`min` 和 `max` 聚合无法计算,因为 `CollaspingMergeTree` 不会保存折叠状态的值的历史记录。 + +如果你需要在不进行聚合的情况下获取数据(例如,要检查是否存在最新值与特定条件匹配的行),你可以在 `FROM` 从句中使用 `FINAL` 修饰符。这种方法显然是更低效的。 + +## 示例 {#shi-li} + +示例数据: + + ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ + │ 4324182021466249494 │ 5 │ 146 │ 1 │ + │ 4324182021466249494 │ 5 │ 146 │ -1 │ + │ 4324182021466249494 │ 6 │ 185 │ 1 │ + └─────────────────────┴───────────┴──────────┴──────┘ + +建表: + +``` sql +CREATE TABLE UAct +( + UserID UInt64, + PageViews UInt8, + Duration UInt8, + Sign Int8 +) +ENGINE = CollapsingMergeTree(Sign) +ORDER BY UserID +``` + +插入数据: + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1) +``` + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1) +``` + +我们使用两次 `INSERT` 请求来创建两个不同的数据片段。如果我们使用一个请求插入数据,ClickHouse 只会创建一个数据片段且不会执行任何合并操作。 + +获取数据: + + SELECT * FROM UAct + + ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ + │ 4324182021466249494 │ 5 │ 146 │ -1 │ + │ 4324182021466249494 │ 6 │ 185 │ 1 │ + └─────────────────────┴───────────┴──────────┴──────┘ + ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ + │ 4324182021466249494 │ 5 │ 146 │ 1 │ + └─────────────────────┴───────────┴──────────┴──────┘ + +我们看到了什么,哪里有折叠? + +通过两个 `INSERT` 请求,我们创建了两个数据片段。`SELECT` 请求在两个线程中被执行,我们得到了随机顺序的行。没有发生折叠是因为还没有合并数据片段。ClickHouse 在一个我们无法预料的未知时刻合并数据片段。 + +因此我们需要聚合: + +``` sql +SELECT + UserID, + sum(PageViews * Sign) AS PageViews, + sum(Duration * Sign) AS Duration +FROM UAct +GROUP BY UserID +HAVING sum(Sign) > 0 +``` + + ┌──────────────UserID─┬─PageViews─┬─Duration─┐ + │ 4324182021466249494 │ 6 │ 185 │ + └─────────────────────┴───────────┴──────────┘ + +如果我们不需要聚合并想要强制进行折叠,我们可以在 `FROM` 从句中使用 `FINAL` 修饰语。 + +``` sql +SELECT * FROM UAct FINAL +``` + + ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ + │ 4324182021466249494 │ 6 │ 185 │ 1 │ + └─────────────────────┴───────────┴──────────┴──────┘ + +这种查询数据的方法是非常低效的。不要在大表中使用它。 + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) diff --git a/docs/zh/engines/table_engines/mergetree_family/custom_partitioning_key.md b/docs/zh/engines/table_engines/mergetree_family/custom_partitioning_key.md new file mode 100644 index 00000000000..3844506c782 --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/custom_partitioning_key.md @@ -0,0 +1,117 @@ + +# 自定义分区键 {#zi-ding-yi-fen-qu-jian} + +[MergeTree](mergetree.md) 系列的表(包括 [可复制表](replication.md) )可以使用分区。基于 MergeTree 表的 [物化视图](../special/materializedview.md) 也支持分区。 + +一个分区是指按指定规则逻辑组合一起的表的记录集。可以按任意标准进行分区,如按月,按日或按事件类型。为了减少需要操作的数据,每个分区都是分开存储的。访问数据时,ClickHouse 尽量使用这些分区的最小子集。 + +分区是在 [建表](mergetree.md#table_engine-mergetree-creating-a-table) 的 `PARTITION BY expr` 子句中指定。分区键可以是关于列的任何表达式。例如,指定按月分区,表达式为 `toYYYYMM(date_column)`: + +``` sql +CREATE TABLE visits +( + VisitDate Date, + Hour UInt8, + ClientID UUID +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(VisitDate) +ORDER BY Hour; +``` + +分区键也可以是表达式元组(类似 
[主键](mergetree.md#primary-keys-and-indexes-in-queries) )。例如: + +``` sql +ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign) +PARTITION BY (toMonday(StartDate), EventType) +ORDER BY (CounterID, StartDate, intHash32(UserID)); +``` + +上例中,我们设置按一周内的事件类型分区。 + +新数据插入到表中时,这些数据会存储为按主键排序的新片段(块)。插入后 10-15 分钟,同一分区的各个片段会合并为一整个片段。 + +!!! attention "注意" + 那些有相同分区表达式值的数据片段才会合并。这意味着 **你不应该用太精细的分区方案**(超过一千个分区)。否则,会因为文件系统中的文件数量和需要找开的文件描述符过多,导致 `SELECT` 查询效率不佳。 + +可以通过 [系统。零件](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#system_tables-parts) 表查看表片段和分区信息。例如,假设我们有一个 `visits` 表,按月分区。对 `system.parts` 表执行 `SELECT`: + +``` sql +SELECT + partition, + name, + active +FROM system.parts +WHERE table = 'visits' +``` + + ┌─partition─┬─name───────────┬─active─┐ + │ 201901 │ 201901_1_3_1 │ 0 │ + │ 201901 │ 201901_1_9_2 │ 1 │ + │ 201901 │ 201901_8_8_0 │ 0 │ + │ 201901 │ 201901_9_9_0 │ 0 │ + │ 201902 │ 201902_4_6_1 │ 1 │ + │ 201902 │ 201902_10_10_0 │ 1 │ + │ 201902 │ 201902_11_11_0 │ 1 │ + └───────────┴────────────────┴────────┘ + +`partition` 列存储分区的名称。此示例中有两个分区:`201901` 和 `201902`。在 [ALTER … PARTITION](#alter_manipulations-with-partitions) 语句中你可以使用该列值来指定分区名称。 + +`name` 列为分区中数据片段的名称。在 [ALTER ATTACH PART](#alter_attach-partition) 语句中你可以使用此列值中来指定片段名称。 + +这里我们拆解下第一部分的名称:`201901_1_3_1`: + +- `201901` 是分区名称。 +- `1` 是数据块的最小编号。 +- `3` 是数据块的最大编号。 +- `1` 是块级别(即在由块组成的合并树中,该块在树中的深度)。 + +!!! attention "注意" + 旧类型表的片段名称为:`20190117_20190123_2_2_0`(最小日期 - 最大日期 - 最小块编号 - 最大块编号 - 块级别)。 + +`active` 列为片段状态。`1` 激活状态;`0` 非激活状态。非激活片段是那些在合并到较大片段之后剩余的源数据片段。损坏的数据片段也表示为非活动状态。 + +正如在示例中所看到的,同一分区中有几个独立的片段(例如,`201901_1_3_1`和`201901_1_9_2`)。这意味着这些片段尚未合并。ClickHouse 大约在插入后15分钟定期报告合并操作,合并插入的数据片段。此外,你也可以使用 [OPTIMIZE](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#misc_operations-optimize) 语句直接执行合并。例: + +``` sql +OPTIMIZE TABLE visits PARTITION 201902; +``` + + ┌─partition─┬─name───────────┬─active─┐ + │ 201901 │ 201901_1_3_1 │ 0 │ + │ 201901 │ 201901_1_9_2 │ 1 │ + │ 201901 │ 201901_8_8_0 │ 0 │ + │ 201901 │ 201901_9_9_0 │ 0 │ + │ 201902 │ 201902_4_6_1 │ 0 │ + │ 201902 │ 201902_4_11_2 │ 1 │ + │ 201902 │ 201902_10_10_0 │ 0 │ + │ 201902 │ 201902_11_11_0 │ 0 │ + └───────────┴────────────────┴────────┘ + +非激活片段会在合并后的10分钟左右删除。 + +查看片段和分区信息的另一种方法是进入表的目录:`/var/lib/clickhouse/data///`。例如: + +``` bash +dev:/var/lib/clickhouse/data/default/visits$ ls -l +total 40 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 201901_1_3_1 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201901_1_9_2 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_8_8_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_9_9_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_10_10_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_11_11_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:19 201902_4_11_2 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 12:09 201902_4_6_1 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached +``` + +文件夹 ‘201901\_1\_1\_0’,‘201901\_1\_7\_1’ 等是片段的目录。每个片段都与一个对应的分区相关,并且只包含这个月的数据(本例中的表按月分区)。 + +`detached` 目录存放着使用 [DETACH](#alter_detach-partition) 语句从表中分离的片段。损坏的片段也会移到该目录,而不是删除。服务器不使用`detached`目录中的片段。可以随时添加,删除或修改此目录中的数据 – 在运行 [ATTACH](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#alter_attach-partition) 语句前,服务器不会感知到。 + +注意,在操作服务器时,你不能手动更改文件系统上的片段集或其数据,因为服务器不会感知到这些修改。对于非复制表,可以在服务器停止时执行这些操作,但不建议这样做。对于复制表,在任何情况下都不要更改片段文件。 + +ClickHouse 支持对分区执行这些操作:删除分区,从一个表复制到另一个表,或创建备份。了解分区的所有操作,请参阅 
[分区和片段的操作](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#alter_manipulations-with-partitions) 一节。 + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/) diff --git a/docs/zh/engines/table_engines/mergetree_family/graphitemergetree.md b/docs/zh/engines/table_engines/mergetree_family/graphitemergetree.md new file mode 100644 index 00000000000..b578414a203 --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/graphitemergetree.md @@ -0,0 +1,174 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 38 +toc_title: GraphiteMergeTree +--- + +# GraphiteMergeTree {#graphitemergetree} + +此引擎专为对 [Graphite](http://graphite.readthedocs.io/en/latest/index.html) 数据进行细化和聚合/平均(rollup)而设计。对于想要使用 ClickHouse 作为 Graphite 数据存储的开发人员来说,它可能会有所帮助。 + +如果不需要汇总(rollup),您可以使用任何 ClickHouse 表引擎来存储 Graphite 数据;但如果需要汇总,请使用 `GraphiteMergeTree`。该引擎可以减少存储量,并提高 Graphite 查询的效率。 + +该引擎继承了 [MergeTree](mergetree.md) 的属性。 + +## 创建表 {#creating-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + Path String, + Time DateTime, + Value <value_type>, + Version <version_type> + ... +) ENGINE = GraphiteMergeTree(config_section) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +请参阅 [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) 查询的详细说明。 + +存储 Graphite 数据的表应具有包含以下数据的列: + +- 指标名称(Graphite 传感器)。数据类型:`String`。 + +- 指标的测量时间。数据类型:`DateTime`。 + +- 指标的值。数据类型:任意数值类型。 + +- 指标的版本。数据类型:任意数值类型。 + + 如果版本相同,ClickHouse会保存版本最高或最后写入的行。 其他行在数据部分合并期间被删除。 + +应在汇总配置中设置这些列的名称。 + +**GraphiteMergeTree参数** + +- `config_section` — 配置文件中定义汇总规则的小节的名称。 + +**查询子句** + +创建 `GraphiteMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md#table_engine-mergetree-creating-a-table)。
    + +不推荐使用的创建表的方法 + +!!! attention "注意" + 不要在新项目中使用此方法,如果可能的话,请将旧项目切换到上述方法。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + EventDate Date, + Path String, + Time DateTime, + Value <value_type>, + Version <version_type> + ... +) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section) +``` + +除 `config_section` 外,所有参数的含义都与 `MergeTree` 中相同。 + +- `config_section` — 配置文件中定义汇总规则的小节的名称。 + +
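+
+下面给出一个简化的建表示意(列类型为本文假设;`'graphite_rollup'` 对应下文«汇总配置»中的配置节名):
+
+``` sql
+CREATE TABLE graphite
+(
+    Path String,
+    Time DateTime,
+    Value Float64,
+    Version UInt32
+)
+ENGINE = GraphiteMergeTree('graphite_rollup')
+PARTITION BY toYYYYMM(Time)
+ORDER BY (Path, Time);
+```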
    + +## 汇总配置 {#rollup-configuration} + +汇总的设置由服务器配置中的 [graphite\_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) 参数定义。参数的名称可以是任意的。您可以创建多个配置并将它们用于不同的表。 + +汇总配置结构: + + required-columns + patterns + +### 必填列 {#required-columns} + +- `path_column_name` — 存储指标名称(Graphite 传感器)的列的名称。默认值:`Path`。 +- `time_column_name` — 存储指标测量时间的列的名称。默认值:`Time`。 +- `value_column_name` — 存储 `time_column_name` 所设时间点的指标值的列的名称。默认值:`Value`。 +- `version_column_name` — 存储指标版本的列的名称。默认值:`Timestamp`。 + +### 模式 {#patterns} + +`patterns` 小节的结构: + +``` text +pattern + regexp + function +pattern + regexp + age + precision + ... +pattern + regexp + function + age + precision + ... +pattern + ... +default + function + age + precision + ... +``` + +!!! warning "注意" + 模式必须严格排序: + + 1. 不带 `function` 或 `retention` 的模式。 + 1. 同时带有 `function` 和 `retention` 的模式。 + 1. `default` 模式。 + +在处理数据行时,ClickHouse 会按顺序检查 `pattern` 小节中的规则。每个 `pattern`(包括 `default`)小节都可以包含用于聚合的 `function` 参数、`retention` 参数,或两者兼有。如果指标名称与某个 `regexp` 匹配,则应用该 `pattern` 小节中的规则;否则,使用 `default` 小节中的规则。 + +`pattern` 和 `default` 小节中的字段: + +- `regexp` — 指标名称的匹配模式。 +- `age` — 数据的最小存在时间(秒)。 +- `precision` — 数据存在时间的精度(秒)。应为 86400(一天的秒数)的约数。 +- `function` — 对存在时间处于 `[age, age + precision]` 范围内的数据应用的聚合函数名称。 + +### 配置示例 {#configuration-example} + +``` xml +<graphite_rollup> + <version_column_name>Version</version_column_name> + <pattern> + <regexp>click_cost</regexp> + <function>any</function> + <retention> + <age>0</age> + <precision>5</precision> + </retention> + <retention> + <age>86400</age> + <precision>60</precision> + </retention> + </pattern> + <default> + <function>max</function> + <retention> + <age>0</age> + <precision>60</precision> + </retention> + <retention> + <age>3600</age> + <precision>300</precision> + </retention> + <retention> + <age>86400</age> + <precision>3600</precision> + </retention> + </default> +</graphite_rollup> +``` + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/) diff --git a/docs/zh/engines/table_engines/mergetree_family/index.md b/docs/zh/engines/table_engines/mergetree_family/index.md new file mode 100644 index 00000000000..1cbf6104dc3 --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u5408\u5E76\u6811\u5BB6\u65CF" +toc_priority: 28 +--- + + diff --git a/docs/zh/engines/table_engines/mergetree_family/mergetree.md b/docs/zh/engines/table_engines/mergetree_family/mergetree.md new file mode 100644 index 00000000000..0778ab2487d --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/mergetree.md @@ -0,0 +1,395 @@ + +# MergeTree {#table_engines-mergetree} + +ClickHouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该系列(`*MergeTree`)中的其他引擎。 + +`MergeTree` 引擎系列的基本理念如下。当你有巨量数据要插入到表中,你要高效地一批批写入数据片段,并希望这些数据片段在后台按照一定规则合并。相比在插入时不断修改(重写)存储中的数据,这种策略会高效很多。 + +主要特点: + +- 存储的数据按主键排序。 + + 这让你可以创建一个用于快速检索数据的小稀疏索引。 + +- 允许使用分区,如果指定了 [分区键](custom_partitioning_key.md) 的话。 + + 在相同数据集和相同结果集的情况下 ClickHouse 中某些带分区的操作会比普通操作更快。查询中指定了分区键时 ClickHouse 会自动截取分区数据。这也有效增加了查询性能。 + +- 支持数据副本。 + + `ReplicatedMergeTree` 系列的表便是用于此。更多信息,请参阅 [数据副本](replication.md) 一节。 + +- 支持数据采样。 + + 需要的话,你可以给表设置一个采样方法。 + +!!!
注意 "注意" + [合并](../special/merge.md) 引擎并不属于 `*MergeTree` 系列。 + +## 建表 {#table_engine-mergetree-creating-a-table} + + CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] + ( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... + INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1, + INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2 + ) ENGINE = MergeTree() + [PARTITION BY expr] + [ORDER BY expr] + [PRIMARY KEY expr] + [SAMPLE BY expr] + [SETTINGS name=value, ...] + +请求参数的描述,参考 [请求描述](../../../engines/table_engines/mergetree_family/mergetree.md) 。 + + + +**子句** + +- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`. `MergeTree` 引擎没有参数。 + +- `PARTITION BY` — [分区键](custom_partitioning_key.md) 。 + + 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../../engines/table_engines/mergetree_family/mergetree.md) 类型的列。这里该分区名格式会是 `"YYYYMM"` 这样。 + +- `ORDER BY` — 表的排序键。 + + 可以是一组列的元组或任意的表达式。 例如: `ORDER BY (CounterID, EventDate)` 。 + +- `PRIMARY KEY` - 主键,如果要设成 [跟排序键不相同](mergetree.md)。 + + 默认情况下主键跟排序键(由 `ORDER BY` 子句指定)相同。 + 因此,大部分情况下不需要再专门指定一个 `PRIMARY KEY` 子句。 + +- `SAMPLE BY` — 用于抽样的表达式。 + + 如果要用抽样表达式,主键中必须包含这个表达式。例如: + `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))` 。 + +- `SETTINGS` — 影响 `MergeTree` 性能的额外参数: + + - `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。默认值,8192 。该列表中所有可用的参数可以从这里查看 [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Storages/MergeTree/MergeTreeSettings.h) 。 + - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果仅按数据行数限制索引粒度, 请设置为0(不建议)。 + - `enable_mixed_granularity_parts` — 启用或禁用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从大表(数十或数百兆)中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果你的表内数据量很大,可以开启这项配置用以提升`SELECT` 查询的性能。 + - `use_minimalistic_part_header_in_zookeeper` — 数据片段头在 ZooKeeper 中的存储方式。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 + - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。 + + - `merge_with_ttl_timeout` — TTL合并频率的最小间隔时间。默认值: 86400 (1 天)。 + - `write_final_mark` — 启用或禁用在数据片段尾部写入最终索引标记。默认值: 1(不建议更改)。 + - `storage_policy` — 存储策略。 参见 [使用多个区块装置进行数据存储](#table_engine-mergetree-multiple-volumes). + +**示例配置** + + ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192 + +示例中,我们设为按月分区。 + +同时我们设置了一个按用户ID哈希的抽样表达式。这让你可以有该表中每个 `CounterID` 和 `EventDate` 下面的数据的伪随机分布。如果你在查询时指定了 [SAMPLE](../../../engines/table_engines/mergetree_family/mergetree.md#select-sample-clause) 子句。 ClickHouse会返回对于用户子集的一个均匀的伪随机数据采样。 + +`index_granularity` 可省略,默认值为 8192 。 + +
    + +已弃用的建表方法 + +!!! attention "注意" + 不要在新版项目中使用该方法,可能的话,请将旧项目切换到上述方法。 + + CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] + ( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... + ) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity) + +**MergeTree() 参数** + +- `date-column` — 类型为 [日期](../../../engines/table_engines/mergetree_family/mergetree.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。 +- `sampling_expression` — 采样表达式。 +- `(primary, key)` — 主键。类型 — [元组()](../../../engines/table_engines/mergetree_family/mergetree.md) +- `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。设为 8192 可以适用大部分场景。 + +**示例** + + MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192) + +对于主要的配置方法,这里 `MergeTree` 引擎跟前面的例子一样,可以以同样的方式配置。 +
    + +## 数据存储 {#mergetree-data-storage} + +表由按主键排序的数据 *片段* 组成。 + +当数据被插入到表中时,会分成数据片段并按主键的字典序排序。例如,主键是 `(CounterID, Date)` 时,片段中数据按 `CounterID` 排序,具有相同 `CounterID` 的部分按 `Date` 排序。 + +不同分区的数据会被分成不同的片段,ClickHouse 在后台合并数据片段以便更高效存储。不会合并来自不同分区的数据片段。这个合并机制并不保证相同主键的所有行都会合并到同一个数据片段中。 + +ClickHouse 会为每个数据片段创建一个索引文件,索引文件包含每个索引行(『标记』)的主键值。索引行号定义为 `n * index_granularity` 。最大的 `n` 等于总行数除以 `index_granularity` 的值的整数部分。对于每列,跟主键相同的索引行处也会写入『标记』。这些『标记』让你可以直接找到数据所在的列。 + +你可以只用一单一大表并不断地一块块往里面加入数据 – `MergeTree` 引擎的就是为了这样的场景。 + +## 主键和索引在查询中的表现 {#primary-keys-and-indexes-in-queries} + +我们以 `(CounterID, Date)` 以主键。排序好的索引的图示会是下面这样: + + 全部数据 : [-------------------------------------------------------------------------] + CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] + Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] + 标记: | | | | | | | | | | | + a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 + 标记号: 0 1 2 3 4 5 6 7 8 9 10 + +如果指定查询如下: + +- `CounterID in ('a', 'h')`,服务器会读取标记号在 `[0, 3)` 和 `[6, 8)` 区间中的数据。 +- `CounterID IN ('a', 'h') AND Date = 3`,服务器会读取标记号在 `[1, 3)` 和 `[7, 8)` 区间中的数据。 +- `Date = 3`,服务器会读取标记号在 `[1, 10]` 区间中的数据。 + +上面例子可以看出使用索引通常会比全表描述要高效。 + +稀疏索引会引起额外的数据读取。当读取主键单个区间范围的数据时,每个数据块中最多会多读 `index_granularity * 2` 行额外的数据。大部分情况下,当 `index_granularity = 8192` 时,ClickHouse的性能并不会降级。 + +稀疏索引让你能操作有巨量行的表。因为这些索引是常驻内存(RAM)的。 + +ClickHouse 不要求主键惟一。所以,你可以插入多条具有相同主键的行。 + +### 主键的选择 {#zhu-jian-de-xuan-ze} + +主键中列的数量并没有明确的限制。依据数据结构,你应该让主键包含多些或少些列。这样可以: + +- 改善索引的性能。 + + 如果当前主键是 `(a, b)` ,然后加入另一个 `c` 列,满足下面条件时,则可以改善性能: + - 有带有 `c` 列条件的查询。 + - 很长的数据范围( `index_granularity` 的数倍)里 `(a, b)` 都是相同的值,并且这种的情况很普遍。换言之,就是加入另一列后,可以让你的查询略过很长的数据范围。 + +- 改善数据压缩。 + + ClickHouse 以主键排序片段数据,所以,数据的一致性越高,压缩越好。 + +- [折叠树](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里,数据合并时,会有额外的处理逻辑。 + + 在这种情况下,指定一个跟主键不同的 *排序键* 也是有意义的。 + +长的主键会对插入性能和内存消耗有负面影响,但主键中额外的列并不影响 `SELECT` 查询的性能。 + +### 选择跟排序键不一样主键 {#xuan-ze-gen-pai-xu-jian-bu-yi-yang-zhu-jian} + +指定一个跟排序键(用于排序数据片段中行的表达式) +不一样的主键(用于计算写到索引文件的每个标记值的表达式)是可以的。 +这种情况下,主键表达式元组必须是排序键表达式元组的一个前缀。 + +当使用 [SummingMergeTree](summingmergetree.md) 和 +[AggregatingMergeTree](aggregatingmergetree.md) 引擎时,这个特性非常有用。 +通常,使用这类引擎时,表里列分两种:*维度* 和 *度量* 。 +典型的查询是在 `GROUP BY` 并过虑维度的情况下统计度量列的值。 +像 SummingMergeTree 和 AggregatingMergeTree ,用相同的排序键值统计行时, +通常会加上所有的维度。结果就是,这键的表达式会是一长串的列组成, +并且这组列还会因为新加维度必须频繁更新。 + +这种情况下,主键中仅预留少量列保证高效范围扫描, +剩下的维度列放到排序键元组里。这样是合理的。 + +[排序键的修改](../../../engines/table_engines/mergetree_family/mergetree.md) 是轻量级的操作,因为一个新列同时被加入到表里和排序键后时,已存在的数据片段并不需要修改。由于旧的排序键是新排序键的前缀,并且刚刚添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。 + +### 索引和分区在查询中的应用 {#suo-yin-he-fen-qu-zai-cha-xun-zhong-de-ying-yong} + +对于 `SELECT` 查询,ClickHouse 分析是否可以使用索引。如果 `WHERE/PREWHERE` 子句具有下面这些表达式(作为谓词链接一子项或整个)则可以使用索引:基于主键或分区键的列或表达式的部分的等式或比较运算表达式;基于主键或分区键的列或表达式的固定前缀的 `IN` 或 `LIKE` 表达式;基于主键或分区键的列的某些函数;基于主键或分区键的表达式的逻辑表达式。 + +因此,在索引键的一个或多个区间上快速地跑查询都是可能的。下面例子中,指定标签;指定标签和日期范围;指定标签和日期;指定多个标签和日期范围等运行查询,都会非常快。 + +当引擎配置如下时: + + ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 + +这种情况下,这些查询: + +``` sql +SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34 +SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42) +SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 
101500 OR EventDate != toDate('2014-05-01')) +``` + +ClickHouse 会依据主键索引剪掉不符合的数据,依据按月分区的分区键剪掉那些不包含符合数据的分区。 + +上文的查询显示,即使索引用于复杂表达式。因为读表操作是组织好的,所以,使用索引不会比完整扫描慢。 + +下面这个例子中,不会使用索引。 + +``` sql +SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' +``` + +要检查 ClickHouse 执行一个查询时能否使用索引,可设置 [force\_index\_by\_date](../../../operations/settings/settings.md#settings-force_index_by_date) 和 [force\_primary\_key](../../../operations/settings/settings.md) 。 + +按月分区的分区键是只能读取包含适当范围日期的数据块。这种情况下,数据块会包含很多天(最多整月)的数据。在块中,数据按主键排序,主键第一列可能不包含日期。因此,仅使用日期而没有带主键前缀条件的查询将会导致读取超过这个日期范围。 + +### 跳数索引(分段汇总索引,实验性的) {#tiao-shu-suo-yin-fen-duan-hui-zong-suo-yin-shi-yan-xing-de} + +需要设置 `allow_experimental_data_skipping_indices` 为 1 才能使用此索引。(执行 `SET allow_experimental_data_skipping_indices = 1`)。 + +此索引在 `CREATE` 语句的列部分里定义。 + +``` sql +INDEX index_name expr TYPE type(...) GRANULARITY granularity_value +``` + +`*MergeTree` 系列的表都能指定跳数索引。 + +这些索引是由数据块按粒度分割后的每部分在指定表达式上汇总信息 `granularity_value` 组成(粒度大小用表引擎里 `index_granularity` 的指定)。 +这些汇总信息有助于用 `where` 语句跳过大片不满足的数据,从而减少 `SELECT` 查询从磁盘读取的数据量, + +示例 + +``` sql +CREATE TABLE table_name +( + u64 UInt64, + i32 Int32, + s String, + ... + INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3, + INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4 +) ENGINE = MergeTree() +... +``` + +上例中的索引能让 ClickHouse 执行下面这些查询时减少读取数据量。 + +``` sql +SELECT count() FROM table WHERE s < 'z' +SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 +``` + +#### 索引的可用类型 {#table_engine-mergetree-data_skipping-indexes} + +- `minmax` + 存储指定表达式的极值(如果表达式是 `tuple` ,则存储 `tuple` 中每个元素的极值),这些信息用于跳过数据块,类似主键。 + +- `set(max_rows)` + 存储指定表达式的惟一值(不超过 `max_rows` 个,`max_rows=0` 则表示『无限制』)。这些信息可用于检查 `WHERE` 表达式是否满足某个数据块。 + +- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + 存储包含数据块中所有 n 元短语的 [布隆过滤器](https://en.wikipedia.org/wiki/Bloom_filter) 。只可用在字符串上。 + 可用于优化 `equals` , `like` 和 `in` 表达式的性能。 + `n` – 短语长度。 + `size_of_bloom_filter_in_bytes` – 布隆过滤器大小,单位字节。(因为压缩得好,可以指定比较大的值,如256或512)。 + `number_of_hash_functions` – 布隆过滤器中使用的 hash 函数的个数。 + `random_seed` – hash 函数的随机种子。 + +- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + 跟 `ngrambf_v1` 类似,不同于 ngrams 存储字符串指定长度的所有片段。它只存储被非字母数据字符分割的片段。 + + + +``` sql +INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 +INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4 +INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 +``` + +## 并发数据访问 {#bing-fa-shu-ju-fang-wen} + +应对表的并发访问,我们使用多版本机制。换言之,当同时读和更新表时,数据从当前查询到的一组片段中读取。没有冗长的的锁。插入不会阻碍读取。 + +对表的读操作是自动并行的。 + +## 列和表的TTL {#table_engine-mergetree-ttl} + +TTL可以设置值的生命周期,它既可以为整张表设置,也可以为每个列字段单独设置。如果`TTL`同时作用于表和字段,ClickHouse会使用先到期的那个。 + +被设置TTL的表,必须拥有[日期](../../../engines/table_engines/mergetree_family/mergetree.md) 或 [日期时间](../../../engines/table_engines/mergetree_family/mergetree.md) 类型的字段。要定义数据的生命周期,需要在这个日期字段上使用操作符,例如: + +``` sql +TTL time_column +TTL time_column + interval +``` + +要定义`interval`, 需要使用 [时间间隔](../../../engines/table_engines/mergetree_family/mergetree.md#operators-datetime) 操作符。 + +``` sql +TTL date_time + INTERVAL 1 MONTH +TTL date_time + INTERVAL 15 HOUR +``` + +### 列字段 TTL {#mergetree-column-ttl} + +当列字段中的值过期时, ClickHouse会将它们替换成数据类型的默认值。如果分区内,某一列的所有值均已过期,则ClickHouse会从文件系统中删除这个分区目录下的列文件。 + +`TTL`子句不能被用于主键字段。 + +示例说明: + +创建一张包含 `TTL` 的表 + +``` sql +CREATE TABLE example_table +( + d DateTime, + a Int TTL d + INTERVAL 1 MONTH, + b Int TTL d + INTERVAL 
1 MONTH,
+    c String
+)
+ENGINE = MergeTree
+PARTITION BY toYYYYMM(d)
+ORDER BY d;
+```
+
+为表中已存在的列字段添加 `TTL`
+
+``` sql
+ALTER TABLE example_table
+    MODIFY COLUMN
+    c String TTL d + INTERVAL 1 DAY;
+```
+
+修改列字段的 `TTL`
+
+``` sql
+ALTER TABLE example_table
+    MODIFY COLUMN
+    c String TTL d + INTERVAL 1 MONTH;
+```
+
+### 表 TTL {#mergetree-table-ttl}
+
+当表内的数据过期时, ClickHouse会删除所有对应的行。
+
+举例说明:
+
+创建一张包含 `TTL` 的表
+
+``` sql
+CREATE TABLE example_table
+(
+    d DateTime,
+    a Int
+)
+ENGINE = MergeTree
+PARTITION BY toYYYYMM(d)
+ORDER BY d
+TTL d + INTERVAL 1 MONTH;
+```
+
+修改表的 `TTL`
+
+``` sql
+ALTER TABLE example_table
+    MODIFY TTL d + INTERVAL 1 DAY;
+```
+
+**删除数据**
+
+当ClickHouse合并数据分区时, 会删除TTL过期的数据。
+
+当ClickHouse发现数据过期时, 它将会执行一个计划外的合并。要控制这类合并的频率, 你可以设置 [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout)。如果该值被设置得太低, 将导致执行许多计划外的合并,这可能会消耗大量资源。
+
+如果在合并的时候执行 `SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在 `SELECT` 之前使用 [OPTIMIZE](../../../engines/table_engines/mergetree_family/mergetree.md#misc_operations-optimize) 查询。
+
+## 使用多个块设备进行数据存储 {#table_engine-mergetree-multiple-volumes}
+
+### 配置 {#table_engine-mergetree-multiple-volumes-configure}
+
+[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/mergetree/)
diff --git a/docs/zh/engines/table_engines/mergetree_family/replacingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/replacingmergetree.md
new file mode 100644
index 00000000000..720560bf1a4
--- /dev/null
+++ b/docs/zh/engines/table_engines/mergetree_family/replacingmergetree.md
@@ -0,0 +1,61 @@
+
+# ReplacingMergeTree {#replacingmergetree}
+
+该引擎和 [MergeTree](mergetree.md) 的不同之处在于它会删除具有相同主键的重复项。
+
+数据的去重只会在合并的过程中出现。合并会在后台一个不确定的时间进行,因此你无法预先作出计划。有一些数据可能仍未被处理。尽管你可以调用 `OPTIMIZE` 语句发起计划外的合并,但请不要依赖它,因为 `OPTIMIZE` 语句会引发对大量数据的读和写。
+
+因此,`ReplacingMergeTree` 适用于在后台清除重复的数据以节省空间,但是它不保证没有重复的数据出现。
+
+## 建表 {#jian-biao}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = ReplacingMergeTree([ver])
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+请求参数的描述,参考[请求参数](../../../engines/table_engines/mergetree_family/replacingmergetree.md)。
+
+**ReplacingMergeTree 参数**
+
+- `ver` — 版本列。类型为 `UInt*`, `Date` 或 `DateTime`。可选参数。
+
+    合并的时候,`ReplacingMergeTree` 从所有具有相同主键的行中选择一行留下:
+    - 如果 `ver` 列未指定,选择最后一条。
+    - 如果 `ver` 列已指定,选择 `ver` 值最大的版本。
+
+**子句**
+
+创建 `ReplacingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md)。
+
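+下面是一个简单的用法示例(表名 `replace_demo` 与其中的数据均为演示用的假设,并非官方示例):
+
+``` sql
+-- 指定 ver 列后,合并时会保留 ver 值最大的那一行
+CREATE TABLE replace_demo
+(
+    key UInt32,
+    value String,
+    ver UInt32
+) ENGINE = ReplacingMergeTree(ver)
+ORDER BY key;
+
+INSERT INTO replace_demo VALUES (1, 'first', 1), (1, 'second', 2);
+
+-- 合并发生前,查询仍可能返回两行;OPTIMIZE 可强制合并,但开销较大,不应依赖
+OPTIMIZE TABLE replace_demo FINAL;
+
+SELECT * FROM replace_demo; -- 预期只剩下 (1, 'second', 2)
+```
+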
    + +已弃用的建表方法 + +!!! attention "注意" + 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver]) +``` + +除了 `ver` 的所有参数都与 `MergeTree` 中的含义相同。 + +- `ver` - 版本列。可选参数,有关说明,请参阅上文。 + +
+
+[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/)
diff --git a/docs/zh/engines/table_engines/mergetree_family/replication.md b/docs/zh/engines/table_engines/mergetree_family/replication.md
new file mode 100644
index 00000000000..e518eb805c4
--- /dev/null
+++ b/docs/zh/engines/table_engines/mergetree_family/replication.md
@@ -0,0 +1,203 @@
+
+# 数据副本 {#table_engines-replication}
+
+只有 MergeTree 系列里的表可支持副本:
+
+- ReplicatedMergeTree
+- ReplicatedSummingMergeTree
+- ReplicatedReplacingMergeTree
+- ReplicatedAggregatingMergeTree
+- ReplicatedCollapsingMergeTree
+- ReplicatedVersionedCollapsingMergeTree
+- ReplicatedGraphiteMergeTree
+
+副本是表级别的,不是整个服务器级的。所以,服务器里可以同时有复制表和非复制表。
+
+副本不依赖分片。每个分片有它自己的独立副本。
+
+对于 `INSERT` 和 `ALTER` 语句,操作的数据会在压缩的情况下被复制(更多信息,看 [ALTER](../../../engines/table_engines/mergetree_family/replication.md#query_language_queries_alter) )。
+
+而 `CREATE`,`DROP`,`ATTACH`,`DETACH` 和 `RENAME` 语句只会在单个服务器上执行,不会被复制。
+
+- `CREATE TABLE` 在运行此语句的服务器上创建一个新的可复制表。如果此表已存在其他服务器上,则给该表添加新副本。
+- `DROP TABLE` 删除运行此查询的服务器上的副本。
+- `RENAME` 重命名一个副本。换句话说,可复制表不同的副本可以有不同的名称。
+
+要使用副本,需在配置文件中设置 ZooKeeper 集群的地址。例如:
+
+``` xml
+<zookeeper>
+    <node index="1">
+        <host>example1</host>
+        <port>2181</port>
+    </node>
+    <node index="2">
+        <host>example2</host>
+        <port>2181</port>
+    </node>
+    <node index="3">
+        <host>example3</host>
+        <port>2181</port>
+    </node>
+</zookeeper>
+```
+
+需要 ZooKeeper 3.4.5 或更高版本。
+
+你可以配置任何现有的 ZooKeeper 集群,系统会使用里面的目录来存取元数据(该目录在创建可复制表时指定)。
+
+如果配置文件中没有设置 ZooKeeper ,则无法创建复制表,并且任何现有的复制表都将变为只读。
+
+`SELECT` 查询并不需要借助 ZooKeeper ,副本并不影响 `SELECT` 的性能,查询复制表与非复制表速度是一样的。查询分布式表时,ClickHouse的处理方式可通过设置 [max\_replica\_delay\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) 和 [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../../../operations/settings/settings.md) 修改。
+
+对于每个 `INSERT` 语句,会通过几个事务将十来个记录添加到 ZooKeeper。(确切地说,这是针对每个插入的数据块; 每个 INSERT 语句的每 `max_insert_block_size = 1048576` 行和最后剩余的都各算作一个块。)相比非复制表,写 ZooKeeper 会导致 `INSERT` 的延迟略长一些。但只要你按照建议每秒不超过一个 `INSERT` 地批量插入数据,就不会有任何问题。一个 ZooKeeper 集群能给整个 ClickHouse 集群支撑协调每秒几百个 `INSERT`。数据插入的吞吐量(每秒的行数)可以跟不用复制的数据一样高。
+
+对于非常大的集群,你可以把不同的 ZooKeeper 集群用于不同的分片。然而,即使 Yandex.Metrica 集群(大约300台服务器)也证明还不需要这么做。
+
+复制是多主异步的。 `INSERT` 语句(以及 `ALTER` )可以发给任意可用的服务器。数据会先插入到执行该语句的服务器上,然后被复制到其他服务器。由于它是异步的,在其他副本上最近插入的数据会有一些延迟。如果部分副本不可用,则数据在其可用时再写入。副本可用的情况下,延迟时长是通过网络传输压缩数据块所需的时间。
+
+默认情况下,INSERT 语句仅等待一个副本写入成功后返回。如果数据只成功写入一个副本后该副本所在的服务器不再存在,则存储的数据会丢失。要启用数据写入多个副本才确认返回,使用 `insert_quorum` 选项。
+
+单个数据块的写入是原子的。 INSERT 的数据按每块最多 `max_insert_block_size = 1048576` 行进行分块,换句话说,如果 `INSERT` 插入的行少于 1048576,则该 INSERT 是原子的。
+
+数据块会去重。对于被多次写的相同数据块(大小相同且具有相同顺序的相同行的数据块),该块仅会写入一次。这样设计的原因是万一在网络故障时客户端应用程序不知道数据是否成功写入DB,此时可以简单地重复 `INSERT` 。把相同的数据发送给多个副本 INSERT 并不会有问题,因为这些 `INSERT` 是完全相同的(会被去重)。去重参数参看服务器设置 [merge\_tree](../../../operations/server_configuration_parameters/settings.md) 。(注意:只有 Replicated\*MergeTree 才会去重,不需要 ZooKeeper 的非复制 MergeTree 不会去重)
+
+在复制期间,只有要插入的源数据通过网络传输。进一步的数据转换(合并)会在所有副本上以相同的方式进行处理执行。这样可以最大限度地减少网络使用,这意味着即使副本在不同的数据中心,数据同步也能工作良好。(能在不同数据中心中同步数据是副本机制的主要目标。)
+
+你可以给数据做任意多的副本。Yandex.Metrica 在生产中使用双副本。某些情况下,给每台服务器都使用 RAID-5 或 RAID-6 和 RAID-10。这是一种相对可靠和方便的解决方案。
+
+系统会监视副本数据同步情况,并能在发生故障后恢复。故障转移是自动的(对于小的数据差异)或半自动的(当数据差异很大时,这可能意味着有配置错误)。
+
+## 创建复制表 {#creating-replicated-tables}
+
+在表引擎名称上加上 `Replicated` 前缀。例如:`ReplicatedMergeTree`。
+
+**Replicated\*MergeTree 参数**
+
+- `zoo_path` — ZooKeeper 中该表的路径。
+- `replica_name` — ZooKeeper 中的该表的副本名称。
+
+示例:
+
+``` sql
+CREATE TABLE table_name
+(
+    EventDate DateTime,
+    CounterID UInt32,
+    UserID UInt32
+) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}')
+PARTITION BY 
toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +``` + +已弃用的建表语法示例: + +``` sql +CREATE TABLE table_name +( + EventDate DateTime, + CounterID UInt32, + UserID UInt32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192) +``` + +如上例所示,这些参数可以包含宏替换的占位符,即大括号的部分。它们会被替换为配置文件里 ‘macros’ 那部分配置的值。示例: + +``` xml + + 05 + 02 + example05-02-1.yandex.ru + +``` + +«ZooKeeper 中该表的路径»对每个可复制表都要是唯一的。不同分片上的表要有不同的路径。 +这种情况下,路径包含下面这些部分: + +`/clickhouse/tables/` 是公共前缀,我们推荐使用这个。 + +`{layer}-{shard}` 是分片标识部分。在此示例中,由于 Yandex.Metrica 集群使用了两级分片,所以它是由两部分组成的。但对于大多数情况来说,你只需保留 {shard} 占位符即可,它会替换展开为分片标识。 + +`table_name` 是该表在 ZooKeeper 中的名称。使其与 ClickHouse 中的表名相同比较好。 这里它被明确定义,跟 ClickHouse 表名不一样,它并不会被 RENAME 语句修改。 +*HINT*:你可以在前面添加一个数据库名称 `table_name` 也是 例如。 `db_name.table_name` + +副本名称用于标识同一个表分片的不同副本。你可以使用服务器名称,如上例所示。同个分片中不同副本的副本名称要唯一。 + +你也可以显式指定这些参数,而不是使用宏替换。对于测试和配置小型集群这可能会很方便。但是,这种情况下,则不能使用分布式 DDL 语句(`ON CLUSTER`)。 + +使用大型集群时,我们建议使用宏替换,因为它可以降低出错的可能性。 + +在每个副本服务器上运行 `CREATE TABLE` 查询。将创建新的复制表,或给现有表添加新副本。 + +如果其他副本上已包含了某些数据,在表上添加新副本,则在运行语句后,数据会从其他副本复制到新副本。换句话说,新副本会与其他副本同步。 + +要删除副本,使用 `DROP TABLE`。但它只删除那个 – 位于运行该语句的服务器上的副本。 + +## 故障恢复 {#gu-zhang-hui-fu} + +如果服务器启动时 ZooKeeper 不可用,则复制表会切换为只读模式。系统会定期尝试去连接 ZooKeeper。 + +如果在 `INSERT` 期间 ZooKeeper 不可用,或者在与 ZooKeeper 交互时发生错误,则抛出异常。 + +连接到 ZooKeeper 后,系统会检查本地文件系统中的数据集是否与预期的数据集( ZooKeeper 存储此信息)一致。如果存在轻微的不一致,系统会通过与副本同步数据来解决。 + +如果系统检测到损坏的数据片段(文件大小错误)或无法识别的片段(写入文件系统但未记录在 ZooKeeper 中的部分),则会把它们移动到 ‘detached’ 子目录(不会删除)。而副本中其他任何缺少的但正常数据片段都会被复制同步。 + +注意,ClickHouse 不会执行任何破坏性操作,例如自动删除大量数据。 + +当服务器启动(或与 ZooKeeper 建立新会话)时,它只检查所有文件的数量和大小。 如果文件大小一致但中间某处已有字节被修改过,不会立即被检测到,只有在尝试读取 `SELECT` 查询的数据时才会检测到。该查询会引发校验和不匹配或压缩块大小不一致的异常。这种情况下,数据片段会添加到验证队列中,并在必要时从其他副本中复制。 + +如果本地数据集与预期数据的差异太大,则会触发安全机制。服务器在日志中记录此内容并拒绝启动。这种情况很可能是配置错误,例如,一个分片上的副本意外配置为别的分片上的副本。然而,此机制的阈值设置得相当低,在正常故障恢复期间可能会出现这种情况。在这种情况下,数据恢复则是半自动模式,通过用户主动操作触发。 + +要触发启动恢复,可在 ZooKeeper 中创建节点 `/path_to_table/replica_name/flags/force_restore_data`,节点值可以是任何内容,或运行命令来恢复所有的可复制表: + +``` bash +sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data +``` + +然后重启服务器。启动时,服务器会删除这些标志并开始恢复。 + +## 在数据完全丢失后的恢复 {#zai-shu-ju-wan-quan-diu-shi-hou-de-hui-fu} + +如果其中一个服务器的所有数据和元数据都消失了,请按照以下步骤进行恢复: + +1. 在服务器上安装 ClickHouse。在包含分片标识符和副本的配置文件中正确定义宏配置,如果有用到的话, +2. 如果服务器上有非复制表则必须手动复制,可以从副本服务器上(在 `/var/lib/clickhouse/data/db_name/table_name/` 目录中)复制它们的数据。 +3. 
从副本服务器上中复制位于 `/var/lib/clickhouse/metadata/` 中的表定义信息。如果在表定义信息中显式指定了分片或副本标识符,请更正它以使其对应于该副本。(另外,启动服务器,然后会在 `/var/lib/clickhouse/metadata/` 中的.sql文件中生成所有的 `ATTACH TABLE` 语句。) + 4.要开始恢复,ZooKeeper 中创建节点 `/path_to_table/replica_name/flags/force_restore_data`,节点内容不限,或运行命令来恢复所有复制的表:`sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data` + +然后启动服务器(如果它已运行则重启)。数据会从副本中下载。 + +另一种恢复方式是从 ZooKeeper(`/path_to_table/replica_name`)中删除有数据丢的副本的所有元信息,然后再按照«[创建可复制表](#creating-replicated-tables)»中的描述重新创建副本。 + +恢复期间的网络带宽没有限制。特别注意这一点,尤其是要一次恢复很多副本。 + +## MergeTree 转换为 ReplicatedMergeTree {#mergetree-zhuan-huan-wei-replicatedmergetree} + +我们使用 `MergeTree` 来表示 `MergeTree系列` 中的所有表引擎,`ReplicatedMergeTree` 同理。 + +如果你有一个手动同步的 `MergeTree` 表,您可以将其转换为可复制表。如果你已经在 `MergeTree` 表中收集了大量数据,并且现在要启用复制,则可以执行这些操作。 + +如果各个副本上的数据不一致,则首先对其进行同步,或者除保留的一个副本外,删除其他所有副本上的数据。 + +重命名现有的 MergeTree 表,然后使用旧名称创建 `ReplicatedMergeTree` 表。 +将数据从旧表移动到新表(`/var/lib/clickhouse/data/db_name/table_name/`)目录内的 ‘detached’ 目录中。 +然后在其中一个副本上运行`ALTER TABLE ATTACH PARTITION`,将这些数据片段添加到工作集中。 + +## ReplicatedMergeTree 转换为 MergeTree {#replicatedmergetree-zhuan-huan-wei-mergetree} + +使用其他名称创建 MergeTree 表。将具有`ReplicatedMergeTree`表数据的目录中的所有数据移动到新表的数据目录中。然后删除`ReplicatedMergeTree`表并重新启动服务器。 + +如果你想在不启动服务器的情况下清除 `ReplicatedMergeTree` 表: + +- 删除元数据目录中的相应 `.sql` 文件(`/var/lib/clickhouse/metadata/`)。 +- 删除 ZooKeeper 中的相应路径(`/path_to_table/replica_name`)。 + +之后,你可以启动服务器,创建一个 `MergeTree` 表,将数据移动到其目录,然后重新启动服务器。 + +## 当 ZooKeeper 集群中的元数据丢失或损坏时恢复方法 {#dang-zookeeper-ji-qun-zhong-de-yuan-shu-ju-diu-shi-huo-sun-pi-shi-hui-fu-fang-fa} + +如果 ZooKeeper 中的数据丢失或损坏,如上所述,你可以通过将数据转移到非复制表来保存数据。 + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/zh/engines/table_engines/mergetree_family/summingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/summingmergetree.md new file mode 100644 index 00000000000..73576b00346 --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/summingmergetree.md @@ -0,0 +1,125 @@ + +# SummingMergeTree {#summingmergetree} + +该引擎继承自 [MergeTree](mergetree.md)。区别在于,当合并 `SummingMergeTree` 表的数据片段时,ClickHouse 会把所有具有相同主键的行合并为一行,该行包含了被合并的行中具有数值数据类型的列的汇总值。如果主键的组合方式使得单个键值对应于大量的行,则可以显著的减少存储空间并加快数据查询的速度。 + +我们推荐将该引擎和 `MergeTree` 一起使用。例如,在准备做报告的时候,将完整的数据存储在 `MergeTree` 表中,并且使用 `SummingMergeTree` 来存储聚合数据。这种方法可以使你避免因为使用不正确的主键组合方式而丢失有价值的数据。 + +## 建表 {#jian-biao} + + CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] + ( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... + ) ENGINE = SummingMergeTree([columns]) + [PARTITION BY expr] + [ORDER BY expr] + [SAMPLE BY expr] + [SETTINGS name=value, ...] + +请求参数的描述,参考 [请求描述](../../../engines/table_engines/mergetree_family/summingmergetree.md)。 + +**SummingMergeTree 的参数** + +- `columns` - 包含了将要被汇总的列的列名的元组。可选参数。 + 所选的列必须是数值类型,并且不可位于主键中。 + + 如果没有指定 `columns`,ClickHouse 会把所有不在主键中的数值类型的列都进行汇总。 + +**子句** + +创建 `SummingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md)。 + +
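+下面的草图演示了如何通过 `columns` 参数只汇总部分数值列(表名和列名均为演示用的假设):
+
+``` sql
+-- 只有 Temperature 和 Humidity 会在合并时被汇总;
+-- Flag 不在 columns 元组中,合并时会从现有值中任选一个保留
+CREATE TABLE summing_demo
+(
+    key UInt32,
+    Temperature Int32,
+    Humidity Int32,
+    Flag UInt8
+) ENGINE = SummingMergeTree((Temperature, Humidity))
+ORDER BY key;
+```
+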
    + +已弃用的建表方法 + +!!! attention "注意" + 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 + + CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] + ( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... + ) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns]) + +除 `columns` 外的所有参数都与 `MergeTree` 中的含义相同。 + +- `columns` — 包含将要被汇总的列的列名的元组。可选参数。有关说明,请参阅上文。 + +
+
+## 用法示例 {#yong-fa-shi-li}
+
+考虑如下的表:
+
+``` sql
+CREATE TABLE summtt
+(
+    key UInt32,
+    value UInt32
+)
+ENGINE = SummingMergeTree()
+ORDER BY key
+```
+
+向其中插入数据:
+
+    :) INSERT INTO summtt Values(1,1),(1,2),(2,1)
+
+ClickHouse可能不会完整地汇总所有行([见下文](#data-processing)),因此我们在查询中使用了聚合函数 `sum` 和 `GROUP BY` 子句。
+
+``` sql
+SELECT key, sum(value) FROM summtt GROUP BY key
+```
+
+    ┌─key─┬─sum(value)─┐
+    │   2 │          1 │
+    │   1 │          3 │
+    └─────┴────────────┘
+
+## 数据处理 {#data-processing}
+
+当数据被插入到表中时,它们将被原样保存。ClickHouse 定期合并插入的数据片段,并在这个时候对所有具有相同主键的行中的列进行汇总,将这些行替换为包含汇总数据的一行记录。
+
+ClickHouse 会按片段合并数据,因此不同的数据片段中仍会包含具有相同主键的行,即单个汇总片段可能是不完整的。因此,聚合函数 [sum()](../../../engines/table_engines/mergetree_family/summingmergetree.md#agg_function-sum) 和 `GROUP BY` 子句应该在(`SELECT`)查询语句中被使用,如上文中的例子所述。
+
+### 汇总的通用规则 {#hui-zong-de-tong-yong-gui-ze}
+
+列中数值类型的值会被汇总。这些列的集合在参数 `columns` 中被定义。
+
+如果用于汇总的所有列中的值均为0,则该行会被删除。
+
+如果列不在主键中且无法被汇总,则会在现有的值中任选一个。
+
+主键所在的列中的值不会被汇总。
+
+### AggregateFunction 列中的汇总 {#aggregatefunction-lie-zhong-de-hui-zong}
+
+对于 [AggregateFunction 类型](../../../engines/table_engines/mergetree_family/summingmergetree.md)的列,ClickHouse 根据对应函数表现为 [AggregatingMergeTree](aggregatingmergetree.md) 引擎的聚合。
+
+### 嵌套结构 {#qian-tao-jie-gou}
+
+表中可以具有以特殊方式处理的嵌套数据结构。
+
+如果嵌套表的名称以 `Map` 结尾,并且包含至少两个符合以下条件的列:
+
+- 第一列是数值类型 `(*Int*, Date, DateTime)`,我们称之为 `key`,
+- 其他的列是可计算的 `(*Int*, Float32/64)`,我们称之为 `(values...)`,
+
+然后这个嵌套表会被解释为一个 `key => (values...)` 的映射,当合并它们的行时,两个数据集中的元素会被根据 `key` 合并为相应的 `(values...)` 的汇总值。
+
+示例:
+
+    [(1, 100)] + [(2, 150)] -> [(1, 100), (2, 150)]
+    [(1, 100)] + [(1, 150)] -> [(1, 250)]
+    [(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)]
+    [(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
+
+请求数据时,使用 [sumMap(key,value)](../../../engines/table_engines/mergetree_family/summingmergetree.md) 函数来对 `Map` 进行聚合。
+
+对于嵌套数据结构,你无需在用于汇总的列元组中指定其中的列。
+
+[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/)
diff --git a/docs/zh/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
new file mode 100644
index 00000000000..37f11bc21ad
--- /dev/null
+++ b/docs/zh/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
@@ -0,0 +1,238 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 37
+toc_title: VersionedCollapsingMergeTree
+---
+
+# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}
+
+这个引擎:
+
+- 允许快速写入不断变化的对象状态。
+- 在后台删除旧的对象状态。这显著降低了存储体积。
+
+有关详细信息,请参阅 [折叠](#table_engines-versionedcollapsingmergetree) 部分。
+
+引擎继承自 [MergeTree](mergetree.md#table_engines-mergetree),并将折叠行的逻辑添加到合并数据片段的算法中。 `VersionedCollapsingMergeTree` 与 [CollapsingMergeTree](collapsingmergetree.md) 的用途相同,但使用了不同的折叠算法,允许多个线程以任意顺序插入数据。特别是, `Version` 列有助于正确折叠行,即使它们以错误的顺序插入。相比之下, `CollapsingMergeTree` 只允许严格连续的插入。
+
+## 创建表 {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = VersionedCollapsingMergeTree(sign, version)
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+有关查询参数的说明,请参阅 [查询说明](../../../sql_reference/statements/create.md)。
+
+**引擎参数**
+
+``` sql
+VersionedCollapsingMergeTree(sign, version)
+```
+
+- `sign` — 指定行类型的列名:`1` 表示 "state" 行,`-1` 表示 "cancel" 行。
+
+    列的数据类型应为 `Int8`。
+
+- `version` — 指定对象状态版本的列名。
+
+    列的数据类型应为 `UInt*`。
+
+**查询子句**
+
+创建 `VersionedCollapsingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md)。
+
+
+
+不推荐使用的创建表的方法
+
+!!! attention "注意"
+    不要在新项目中使用此方法。如果可能,请将旧项目切换到上述方法。
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version)
+```
+
+除了 `sign` 和 `version` 之外的所有参数都与 `MergeTree` 中的含义相同。
+
+- `sign` — 指定行类型的列名:`1` 表示 "state" 行,`-1` 表示 "cancel" 行。
+
+    列的数据类型 — `Int8`。
+
+- `version` — 指定对象状态版本的列名。
+
+    列的数据类型应为 `UInt*`。
+
+
+
+## 折叠 {#table_engines-versionedcollapsingmergetree}
+
+### 数据 {#data}
+
+考虑一种情况,您需要为某个对象保存不断变化的数据。对于一个对象保留一行,并在发生更改时更新该行是合理的。但是,对于数据库管理系统来说,更新操作非常昂贵且缓慢,因为它需要重写存储中的数据。如果需要快速写入数据,则不能接受更新,但可以按如下方式将对象的更改按顺序写入。
+
+写入行时使用 `Sign` 列。如果 `Sign = 1`,这意味着该行是一个对象的状态(我们称之为 "state" 行)。如果 `Sign = -1`,它表示取消具有相同属性的对象状态(我们称之为 "cancel" 行)。同时使用 `Version` 列,它用单独的数字标识对象的每个状态。
+
+例如,我们要计算用户在某个网站上访问了多少页面以及他们在那里停留了多长时间。在某个时间点,我们用用户活动的状态写下面的行:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+在稍后的某个时候,我们记录了用户活动的变化,并写入以下两行。
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+│ 4324182021466249494 │         6 │      185 │    1 │       2 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+第一行取消对象(用户)的先前状态。除 `Sign` 之外,它应该复制被取消状态的所有字段。
+
+第二行包含当前状态。
+
+因为我们只需要用户活动的最后一个状态,下面这些行
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+可以被删除,从而折叠掉对象的无效(旧)状态。 `VersionedCollapsingMergeTree` 在合并数据片段时执行此操作。
+
+要了解为什么每次更改都需要两行,请参阅 [算法](#table_engines-versionedcollapsingmergetree-algorithm)。
+
+**使用注意事项**
+
+1. 写入数据的程序应该记住对象的状态以便取消它。 "cancel" 行应该是 "state" 行的副本,但 `Sign` 相反。这增加了存储的初始大小,但允许快速写入数据。
+2. 列中长时间增长的数组会由于写入负载而降低引擎的效率。数据越简单,效率就越高。
+3. `SELECT` 结果很大程度上取决于对象变化历史的一致性。准备插入数据时要准确。不一致的数据会导致不可预测的结果,例如会话深度等非负指标出现负值。
+
+### 算法 {#table_engines-versionedcollapsingmergetree-algorithm}
+
+当 ClickHouse 合并数据片段时,它会删除每对具有相同主键和版本、但 `Sign` 值不同的行。行的顺序并不重要。
+
+当 ClickHouse 插入数据时,它会按主键对行进行排序。如果 `Version` 列不在主键中,ClickHouse 会将其隐式添加到主键作为最后一个字段并用于排序。
+
+## 选择数据 {#selecting-data}
+
+ClickHouse 不保证具有相同主键的所有行都位于同一个结果数据片段中,甚至不保证位于同一台物理服务器上。对于写入数据和随后的数据片段合并都是如此。此外,ClickHouse 使用多个线程处理 `SELECT` 查询,无法预测结果中的行顺序。这意味着,如果需要从 `VersionedCollapsingMergeTree` 表中得到完全 "折叠" 的数据,就必须进行聚合。
+
+要完成折叠,请在查询中使用 `GROUP BY` 子句和考虑符号的聚合函数。例如,要计算数量,请使用 `sum(Sign)` 而不是 `count()`。要计算某列的总和,请使用 `sum(Sign * x)` 而不是 `sum(x)`,并添加 `HAVING sum(Sign) > 0`。
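+
+下面的草图演示这些替换(表名 `UAct` 与下文使用示例中的表一致,仅作示意):
+
+``` sql
+SELECT
+    UserID,
+    sum(Sign) AS rows,                          -- 代替 count()
+    sum(PageViews * Sign) AS PageViews,         -- 代替 sum(PageViews)
+    sum(Duration * Sign) / sum(Sign) AS AvgDur  -- 代替 avg(Duration)
+FROM UAct
+GROUP BY UserID
+HAVING sum(Sign) > 0
+```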
+ +聚合 `count`, `sum` 和 `avg` 可以这样计算。 聚合 `uniq` 如果对象至少具有一个非折叠状态,则可以计算。 聚合 `min` 和 `max` 无法计算是因为 `VersionedCollapsingMergeTree` 不保存折叠状态值的历史记录。 + +如果您需要提取数据 “collapsing” 但是,如果没有聚合(例如,要检查是否存在其最新值与某些条件匹配的行),则可以使用 `FINAL` 修饰符 `FROM` 条款 这种方法效率低下,不应与大型表一起使用。 + +## 使用示例 {#example-of-use} + +示例数据: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +创建表: + +``` sql +CREATE TABLE UAct +( + UserID UInt64, + PageViews UInt8, + Duration UInt8, + Sign Int8, + Version UInt8 +) +ENGINE = VersionedCollapsingMergeTree(Sign, Version) +ORDER BY UserID +``` + +插入数据: + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1) +``` + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2) +``` + +我们用两个 `INSERT` 查询以创建两个不同的数据部分。 如果我们使用单个查询插入数据,ClickHouse将创建一个数据部分,并且永远不会执行任何合并。 + +获取数据: + +``` sql +SELECT * FROM UAct +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +我们在这里看到了什么,折叠的部分在哪里? +我们使用两个创建了两个数据部分 `INSERT` 查询。 该 `SELECT` 查询是在两个线程中执行的,结果是行的随机顺序。 +由于数据部分尚未合并,因此未发生折叠。 ClickHouse在我们无法预测的未知时间点合并数据部分。 + +这就是为什么我们需要聚合: + +``` sql +SELECT + UserID, + sum(PageViews * Sign) AS PageViews, + sum(Duration * Sign) AS Duration, + Version +FROM UAct +GROUP BY UserID, Version +HAVING sum(Sign) > 0 +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 2 │ +└─────────────────────┴───────────┴──────────┴─────────┘ +``` + +如果我们不需要聚合,并希望强制折叠,我们可以使用 `FINAL` 修饰符 `FROM` 条款 + +``` sql +SELECT * FROM UAct FINAL +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +这是一个非常低效的方式来选择数据。 不要把它用于大桌子。 + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/zh/engines/table_engines/special/buffer.md b/docs/zh/engines/table_engines/special/buffer.md new file mode 100644 index 00000000000..6b53883be7b --- /dev/null +++ b/docs/zh/engines/table_engines/special/buffer.md @@ -0,0 +1,54 @@ + +# 缓冲区 {#buffer} + +缓冲数据写入 RAM 中,周期性地将数据刷新到另一个表。在读取操作时,同时从缓冲区和另一个表读取数据。 + + Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes) + +引擎的参数:database,table - 要刷新数据的表。可以使用返回字符串的常量表达式而不是数据库名称。 num\_layers - 并行层数。在物理上,该表将表示为 num\_layers 个独立缓冲区。建议值为16。min\_time,max\_time,min\_rows,max\_rows,min\_bytes,max\_bytes - 从缓冲区刷新数据的条件。 + +如果满足所有 «min» 条件或至少一个 «max» 条件,则从缓冲区刷新数据并将其写入目标表。min\_time,max\_time — 从第一次写入缓冲区时起以秒为单位的时间条件。min\_rows,max\_rows - 缓冲区中行数的条件。min\_bytes,max\_bytes - 缓冲区中字节数的条件。 + +写入时,数据从 num\_layers 个缓冲区中随机插入。或者,如果插入数据的大小足够大(大于 max\_rows 或 max\_bytes ),则会绕过缓冲区将其写入目标表。 + +每个 «num\_layers» 缓冲区刷新数据的条件是分别计算。例如,如果 num\_layers = 16 且 max\_bytes = 100000000,则最大RAM消耗将为1.6 GB。 + +示例: + +``` sql +CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000) +``` + +创建一个 
«merge.hits\_buffer» 表,其结构与 «merge.hits» 相同,并使用 Buffer 引擎。写入此表时,数据缓冲在 RAM 中,然后写入 «merge.hits» 表。创建了16个缓冲区。如果已经过了100秒,或者已写入100万行,或者已写入100 MB数据,则刷新每个缓冲区的数据;或者如果同时已经过了10秒并且已经写入了10,000行和10 MB的数据。例如,如果只写了一行,那么在100秒之后,都会被刷新。但是如果写了很多行,数据将会更快地刷新。 + +当服务器停止时,使用 DROP TABLE 或 DETACH TABLE,缓冲区数据也会刷新到目标表。 + +可以为数据库和表名在单个引号中设置空字符串。这表示没有目的地表。在这种情况下,当达到数据刷新条件时,缓冲器被简单地清除。这可能对于保持数据窗口在内存中是有用的。 + +从 Buffer 表读取时,将从缓冲区和目标表(如果有)处理数据。 +请注意,Buffer 表不支持索引。换句话说,缓冲区中的数据被完全扫描,对于大缓冲区来说可能很慢。(对于目标表中的数据,将使用它支持的索引。) + +如果 Buffer 表中的列集与目标表中的列集不匹配,则会插入两个表中存在的列的子集。 + +如果类型与 Buffer 表和目标表中的某列不匹配,则会在服务器日志中输入错误消息并清除缓冲区。 +如果在刷新缓冲区时目标表不存在,则会发生同样的情况。 + +如果需要为目标表和 Buffer 表运行 ALTER,我们建议先删除 Buffer 表,为目标表运行 ALTER,然后再次创建 Buffer 表。 + +如果服务器异常重启,缓冲区中的数据将丢失。 + +PREWHERE,FINAL 和 SAMPLE 对缓冲表不起作用。这些条件将传递到目标表,但不用于处理缓冲区中的数据。因此,我们建议只使用Buffer表进行写入,同时从目标表进行读取。 + +将数据添加到缓冲区时,其中一个缓冲区被锁定。如果同时从表执行读操作,则会导致延迟。 + +插入到 Buffer 表中的数据可能以不同的顺序和不同的块写入目标表中。因此,Buffer 表很难用于正确写入 CollapsingMergeTree。为避免出现问题,您可以将 «num\_layers» 设置为1。 + +如果目标表是复制表,则在写入 Buffer 表时会丢失复制表的某些预期特征。数据部分的行次序和大小的随机变化导致数据不能去重,这意味着无法对复制表进行可靠的 «exactly once» 写入。 + +由于这些缺点,我们只建议在极少数情况下使用 Buffer 表。 + +当在单位时间内从大量服务器接收到太多 INSERTs 并且在插入之前无法缓冲数据时使用 Buffer 表,这意味着这些 INSERTs 不能足够快地执行。 + +请注意,一次插入一行数据是没有意义的,即使对于 Buffer 表也是如此。这将只产生每秒几千行的速度,而插入更大的数据块每秒可以产生超过一百万行(参见 «性能» 部分)。 + +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/buffer/) diff --git a/docs/zh/engines/table_engines/special/dictionary.md b/docs/zh/engines/table_engines/special/dictionary.md new file mode 100644 index 00000000000..27da9b40e52 --- /dev/null +++ b/docs/zh/engines/table_engines/special/dictionary.md @@ -0,0 +1,102 @@ + +# 字典 {#dictionary} + +`Dictionary` 引擎将字典数据展示为一个ClickHouse的表。 + +例如,考虑使用一个具有以下配置的 `products` 字典: + +``` xml + + + products + + +
    products
    + DSN=some-db-server + + + + 300 + 360 + + + + + + + product_id + + + title + String + + + + + +``` + +查询字典中的数据: + +``` sql +select name, type, key, attribute.names, attribute.types, bytes_allocated, element_count,source from system.dictionaries where name = 'products'; + +SELECT + name, + type, + key, + attribute.names, + attribute.types, + bytes_allocated, + element_count, + source +FROM system.dictionaries +WHERE name = 'products' +``` + + ┌─name─────┬─type─┬─key────┬─attribute.names─┬─attribute.types─┬─bytes_allocated─┬─element_count─┬─source──────────┐ + │ products │ Flat │ UInt64 │ ['title'] │ ['String'] │ 23065376 │ 175032 │ ODBC: .products │ + └──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ + +你可以使用 [dictGet\*](../../../engines/table_engines/special/dictionary.md) 函数来获取这种格式的字典数据。 + +当你需要获取原始数据,或者是想要使用 `JOIN` 操作的时候,这种视图并没有什么帮助。对于这些情况,你可以使用 `Dictionary` 引擎,它可以将字典数据展示在表中。 + +语法: + + CREATE TABLE %table_name% (%fields%) engine = Dictionary(%dictionary_name%)` + +示例: + +``` sql +create table products (product_id UInt64, title String) Engine = Dictionary(products); + +CREATE TABLE products +( + product_id UInt64, + title String, +) +ENGINE = Dictionary(products) +``` + + Ok. + + 0 rows in set. Elapsed: 0.004 sec. + +看一看表中的内容。 + +``` sql +select * from products limit 1; + +SELECT * +FROM products +LIMIT 1 +``` + + ┌────product_id─┬─title───────────┐ + │ 152689 │ Some item │ + └───────────────┴─────────────────┘ + + 1 rows in set. Elapsed: 0.006 sec. + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/) diff --git a/docs/zh/engines/table_engines/special/distributed.md b/docs/zh/engines/table_engines/special/distributed.md new file mode 100644 index 00000000000..f31dae7c1ef --- /dev/null +++ b/docs/zh/engines/table_engines/special/distributed.md @@ -0,0 +1,121 @@ + +# 分布 {#distributed} + +**分布式引擎本身不存储数据**, 但可以在多个服务器上进行分布式查询。 +读是自动并行的。读取时,远程服务器表的索引(如果有的话)会被使用。 +分布式引擎参数:服务器配置文件中的集群名,远程数据库名,远程表名,数据分片键(可选)。 +示例: + + Distributed(logs, default, hits[, sharding_key]) + +将会从位于«logs»集群中 default.hits 表所有服务器上读取数据。 +远程服务器不仅用于读取数据,还会对尽可能数据做部分处理。 +例如,对于使用 GROUP BY 的查询,数据首先在远程服务器聚合,之后返回聚合函数的中间状态给查询请求的服务器。再在请求的服务器上进一步汇总数据。 + +数据库名参数除了用数据库名之外,也可用返回字符串的常量表达式。例如:currentDatabase()。 + +logs – 服务器配置文件中的集群名称。 + +集群示例配置如下: + +``` xml + + + + + 1 + + false + + example01-01-1 + 9000 + + + example01-01-2 + 9000 + + + + 2 + false + + example01-02-1 + 9000 + + + example01-02-2 + 1 + 9440 + + + + +``` + +这里定义了一个名为'logs'的集群,它由两个分片组成,每个分片包含两个副本。 +分片是指包含数据不同部分的服务器(要读取所有数据,必须访问所有分片)。 +副本是存储复制数据的服务器(要读取所有数据,访问任一副本上的数据即可)。 + +集群名称不能包含点号。 + +每个服务器需要指定 `host`,`port`,和可选的 `user`,`password`,`secure`,`compression` 的参数: +- `host` – 远程服务器地址。可以域名、IPv4或IPv6。如果指定域名,则服务在启动时发起一个 DNS 请求,并且请求结果会在服务器运行期间一直被记录。如果 DNS 请求失败,则服务不会启动。如果你修改了 DNS 记录,则需要重启服务。 +- `port` – 消息传递的 TCP 端口(「tcp\_port」配置通常设为 9000)。不要跟 http\_port 混淆。 +- `user` – 用于连接远程服务器的用户名。默认值:default。该用户必须有权限访问该远程服务器。访问权限配置在 users.xml 文件中。更多信息,请查看«访问权限»部分。 +- `password` – 用于连接远程服务器的密码。默认值:空字符串。 +- `secure` – 是否使用ssl进行连接,设为true时,通常也应该设置 `port` = 9440。服务器也要监听 9440 并有正确的证书。 +- `compression` - 是否使用数据压缩。默认值:true。 + +配置了副本,读取操作会从每个分片里选择一个可用的副本。可配置负载平衡算法(挑选副本的方式) - 请参阅«load\_balancing»设置。 +如果跟服务器的连接不可用,则在尝试短超时的重连。如果重连失败,则选择下一个副本,依此类推。如果跟所有副本的连接尝试都失败,则尝试用相同的方式再重复几次。 +该机制有利于系统可用性,但不保证完全容错:如有远程服务器能够接受连接,但无法正常工作或状况不佳。 + +你可以配置一个(这种情况下,查询操作更应该称为远程查询,而不是分布式查询)或任意多个分片。在每个分片中,可以配置一个或任意多个副本。不同分片可配置不同数量的副本。 + +可以在配置中配置任意数量的集群。 + +要查看集群,可使用«system.clusters»表。 + 
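+例如,下面的草图演示如何查看集群并基于上文的 `logs` 集群创建一个分布式表(本地表 `default.hits` 为演示用的假设):
+
+``` sql
+-- 查看配置文件中定义的集群
+SELECT cluster, shard_num, replica_num, host_name
+FROM system.clusters;
+
+-- 基于 logs 集群创建分布式表,按 rand() 分片
+CREATE TABLE hits_all AS default.hits
+ENGINE = Distributed(logs, default, hits, rand());
+```
+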
+通过分布式引擎可以像使用本地服务器一样使用集群。但是,集群不是自动扩展的:你必须编写集群配置到服务器配置文件中(最好,给所有集群的服务器写上完整配置)。 + +不支持用分布式表查询别的分布式表(除非该表只有一个分片)。或者说,要用分布表查查询«最终»的数据表。 + +分布式引擎需要将集群信息写入配置文件。配置文件中的集群信息会即时更新,无需重启服务器。如果你每次是要向不确定的一组分片和副本发送查询,则不适合创建分布式表 - 而应该使用«远程»表函数。 请参阅«表函数»部分。 + +向集群写数据的方法有两种: + +一,自已指定要将哪些数据写入哪些服务器,并直接在每个分片上执行写入。换句话说,在分布式表上«查询»,在数据表上 INSERT。 +这是最灵活的解决方案 – 你可以使用任何分片方案,对于复杂业务特性的需求,这可能是非常重要的。 +这也是最佳解决方案,因为数据可以完全独立地写入不同的分片。 + +二,在分布式表上执行 INSERT。在这种情况下,分布式表会跨服务器分发插入数据。 +为了写入分布式表,必须要配置分片键(最后一个参数)。当然,如果只有一个分片,则写操作在没有分片键的情况下也能工作,因为这种情况下分片键没有意义。 + +每个分片都可以在配置文件中定义权重。默认情况下,权重等于1。数据依据分片权重按比例分发到分片上。例如,如果有两个分片,第一个分片的权重是9,而第二个分片的权重是10,则发送 9 / 19 的行到第一个分片, 10 / 19 的行到第二个分片。 + +分片可在配置文件中定义 ‘internal\_replication’ 参数。 + +此参数设置为«true»时,写操作只选一个正常的副本写入数据。如果分布式表的子表是复制表(\*ReplicaMergeTree),请使用此方案。换句话说,这其实是把数据的复制工作交给实际需要写入数据的表本身而不是分布式表。 + +若此参数设置为«false»(默认值),写操作会将数据写入所有副本。实质上,这意味着要分布式表本身来复制数据。这种方式不如使用复制表的好,因为不会检查副本的一致性,并且随着时间的推移,副本数据可能会有些不一样。 + +选择将一行数据发送到哪个分片的方法是,首先计算分片表达式,然后将这个计算结果除以所有分片的权重总和得到余数。该行会发送到那个包含该余数的从'prev\_weight'到'prev\_weights + weight'的半闭半开区间对应的分片上,其中 ‘prev\_weights’ 是该分片前面的所有分片的权重和,‘weight’ 是该分片的权重。例如,如果有两个分片,第一个分片权重为9,而第二个分片权重为10,则余数在 \[0,9) 中的行发给第一个分片,余数在 \[9,19) 中的行发给第二个分片。 + +分片表达式可以是由常量和表列组成的任何返回整数表达式。例如,您可以使用表达式 ‘rand()’ 来随机分配数据,或者使用 ‘UserID’ 来按用户 ID 的余数分布(相同用户的数据将分配到单个分片上,这可降低带有用户信息的 IN 和 JOIN 的语句运行的复杂度)。如果该列数据分布不够均匀,可以将其包装在散列函数中:intHash64(UserID)。 + +这种简单的用余数来选择分片的方案是有局限的,并不总适用。它适用于中型和大型数据(数十台服务器)的场景,但不适用于巨量数据(数百台或更多服务器)的场景。后一种情况下,应根据业务特性需求考虑的分片方案,而不是直接用分布式表的多分片。 + +SELECT 查询会被发送到所有分片,并且无论数据在分片中如何分布(即使数据完全随机分布)都可正常工作。添加新分片时,不必将旧数据传输到该分片。你可以给新分片分配大权重然后写新数据 - 数据可能会稍分布不均,但查询会正确高效地运行。 + +下面的情况,你需要关注分片方案: + +- 使用需要特定键连接数据( IN 或 JOIN )的查询。如果数据是用该键进行分片,则应使用本地 IN 或 JOIN 而不是 GLOBAL IN 或 GLOBAL JOIN,这样效率更高。 +- 使用大量服务器(上百或更多),但有大量小查询(个别客户的查询 - 网站,广告商或合作伙伴)。为了使小查询不影响整个集群,让单个客户的数据处于单个分片上是有意义的。或者,正如我们在 Yandex.Metrica 中所做的那样,你可以配置两级分片:将整个集群划分为«层»,一个层可以包含多个分片。单个客户的数据位于单个层上,根据需要将分片添加到层中,层中的数据随机分布。然后给每层创建分布式表,再创建一个全局的分布式表用于全局的查询。 + +数据是异步写入的。对于分布式表的 INSERT,数据块只写本地文件系统。之后会尽快地在后台发送到远程服务器。你可以通过查看表目录中的文件列表(等待发送的数据)来检查数据是否成功发送:/var/lib/clickhouse/data/database/table/ 。 + +如果在 INSERT 到分布式表时服务器节点丢失或重启(如,设备故障),则插入的数据可能会丢失。如果在表目录中检测到损坏的数据分片,则会将其转移到«broken»子目录,并不再使用。 + +启用 max\_parallel\_replicas 选项后,会在分表的所有副本上并行查询处理。更多信息,请参阅«设置,max\_parallel\_replicas»部分。 + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/zh/engines/table_engines/special/external_data.md b/docs/zh/engines/table_engines/special/external_data.md new file mode 100644 index 00000000000..399ffd8c0f3 --- /dev/null +++ b/docs/zh/engines/table_engines/special/external_data.md @@ -0,0 +1,62 @@ + +# 用于查询处理的外部数据 {#external-data-for-query-processing} + +ClickHouse 允许向服务器发送处理查询所需的数据以及 SELECT 查询。这些数据放在一个临时表中(请参阅 «临时表» 一节),可以在查询中使用(例如,在 IN 操作符中)。 + +例如,如果您有一个包含重要用户标识符的文本文件,则可以将其与使用此列表过滤的查询一起上传到服务器。 + +如果需要使用大量外部数据运行多个查询,请不要使用该特性。最好提前把数据上传到数据库。 + +可以使用命令行客户端(在非交互模式下)或使用 HTTP 接口上传外部数据。 + +在命令行客户端中,您可以指定格式的参数部分 + +``` bash +--external --file=... [--name=...] [--format=...] [--types=...|--structure=...] 
+``` + +对于传输的表的数量,可能有多个这样的部分。 + +**–external** – 标记子句的开始。 +**–file** – 带有表存储的文件的路径,或者,它指的是STDIN。 +只能从 stdin 中检索单个表。 + +以下的参数是可选的:**–name** – 表的名称,如果省略,则采用 \_data。 +**–format** – 文件中的数据格式。 如果省略,则使用 TabSeparated。 + +以下的参数必选一个:**–types** – 逗号分隔列类型的列表。例如:`UInt64,String`。列将被命名为 \_1,\_2,… +**–structure**– 表结构的格式 `UserID UInt64`,`URL String`。定义列的名字以及类型。 + +在 «file» 中指定的文件将由 «format» 中指定的格式解析,使用在 «types» 或 «structure» 中指定的数据类型。该表将被上传到服务器,并在作为名称为 «name»临时表。 + +示例: + +``` bash +echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8 +849897 +cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String' +/bin/sh 20 +/bin/false 5 +/bin/bash 4 +/usr/sbin/nologin 1 +/bin/sync 1 +``` + +当使用HTTP接口时,外部数据以 multipart/form-data 格式传递。每个表作为一个单独的文件传输。表名取自文件名。«query\_string» 传递参数 «name\_format»、«name\_types»和«name\_structure»,其中 «name» 是这些参数对应的表的名称。参数的含义与使用命令行客户端时的含义相同。 + +示例: + +``` bash +cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv + +curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String' +/bin/sh 20 +/bin/false 5 +/bin/bash 4 +/usr/sbin/nologin 1 +/bin/sync 1 +``` + +对于分布式查询,将临时表发送到所有远程服务器。 + +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/external_data/) diff --git a/docs/zh/engines/table_engines/special/file.md b/docs/zh/engines/table_engines/special/file.md new file mode 100644 index 00000000000..71c96f8ab43 --- /dev/null +++ b/docs/zh/engines/table_engines/special/file.md @@ -0,0 +1,74 @@ + +# 文件(输入格式) {#table_engines-file} + +数据源是以 Clickhouse 支持的一种输入格式(TabSeparated,Native等)存储数据的文件。 + +用法示例: + +- 从 ClickHouse 导出数据到文件。 +- 将数据从一种格式转换为另一种格式。 +- 通过编辑磁盘上的文件来更新 ClickHouse 中的数据。 + +## 在 ClickHouse 服务器中的使用 {#zai-clickhouse-fu-wu-qi-zhong-de-shi-yong} + + File(Format) + +选用的 `Format` 需要支持 `INSERT` 或 `SELECT` 。有关支持格式的完整列表,请参阅 [格式](../../../interfaces/formats.md#formats)。 + +ClickHouse 不支持给 `File` 指定文件系统路径。它使用服务器配置中 [路径](../../../operations/server_configuration_parameters/settings.md) 设定的文件夹。 + +使用 `File(Format)` 创建表时,它会在该文件夹中创建空的子目录。当数据写入该表时,它会写到该子目录中的 `data.Format` 文件中。 + +你也可以在服务器文件系统中手动创建这些子文件夹和文件,然后通过 [ATTACH](../../../engines/table_engines/special/file.md) 将其创建为具有对应名称的表,这样你就可以从该文件中查询数据了。 + +!!! 
warning "注意"
+    使用这个功能时要当心,因为 ClickHouse 不会跟踪这些文件在外部的更改。在 ClickHouse 中和 ClickHouse 外部同时写入会造成结果不确定。
+
+**示例:**
+
+**1.** 创建 `file_engine_table` 表:
+
+``` sql
+CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated)
+```
+
+默认情况下,Clickhouse 会创建目录 `/var/lib/clickhouse/data/default/file_engine_table` 。
+
+**2.** 手动创建 `/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated` 文件,并且包含内容:
+
+``` bash
+$ cat data.TabSeparated
+one 1
+two 2
+```
+
+**3.** 查询这些数据:
+
+``` sql
+SELECT * FROM file_engine_table
+```
+
+    ┌─name─┬─value─┐
+    │ one  │     1 │
+    │ two  │     2 │
+    └──────┴───────┘
+
+## 在 Clickhouse-local 中的使用 {#zai-clickhouse-local-zhong-de-shi-yong}
+
+使用 [clickhouse-local](../../../engines/table_engines/special/file.md) 时,File 引擎除了 `Format` 之外,还可以接受文件路径参数。可以使用数字或人类可读的名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。
+
+**例如:**
+
+``` bash
+$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
+```
+
+## 功能实现 {#gong-neng-shi-xian}
+
+- 读操作可支持并发,但写操作不支持
+- 不支持:
+    - `ALTER`
+    - `SELECT ... SAMPLE`
+    - 索引
+    - 副本
+
+[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/file/)
diff --git a/docs/zh/engines/table_engines/special/generate.md b/docs/zh/engines/table_engines/special/generate.md
new file mode 100644
index 00000000000..6a31e270066
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/generate.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 46
+toc_title: GenerateRandom
+---
+
+# GenerateRandom {#table_engines-generate}
+
+GenerateRandom 表引擎为给定的表结构生成随机数据。
+
+使用示例:
+
+- 在测试中填充可复现的大表。
+- 为模糊测试生成随机输入。
+
+## 在ClickHouse服务器中的使用 {#usage-in-clickhouse-server}
+
+``` sql
+ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
+```
+
+`max_array_length` 和 `max_string_length` 参数分别指定生成数据中数组列和字符串列的最大长度。
+
+GenerateRandom 表引擎仅支持 `SELECT` 查询。
+
+除 `LowCardinality` 和 `AggregateFunction` 之外,它支持所有可以存储在表中的[数据类型](../../../sql_reference/data_types/index.md)。
+
+**示例:**
+
+**1.** 创建 `generate_engine_table` 表:
+
+``` sql
+CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
+```
+
+**2.** 查询数据:
+
+``` sql
+SELECT * FROM generate_engine_table LIMIT 3
+```
+
+``` text
+┌─name─┬──────value─┐
+│ c4xJ │ 1412771199 │
+│ r    │ 1791099446 │
+│ 7#$  │  124312908 │
+└──────┴────────────┘
+```
+
+## 实现细节 {#details-of-implementation}
+
+- 不支持:
+    - `ALTER`
+    - `SELECT ... 
SAMPLE`
+    - `INSERT`
+    - 索引
+    - 副本
+
+[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/zh/engines/table_engines/special/index.md b/docs/zh/engines/table_engines/special/index.md
new file mode 100644
index 00000000000..7be40b75fb5
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u7279\u522B"
+toc_priority: 31
+---
+
+
diff --git a/docs/zh/engines/table_engines/special/join.md b/docs/zh/engines/table_engines/special/join.md
new file mode 100644
index 00000000000..33cc0685a52
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/join.md
@@ -0,0 +1,29 @@
+
+# Join {#join}
+
+加载好的 JOIN 表数据会常驻内存中。
+
+    Join(ANY|ALL, LEFT|INNER, k1[, k2, ...])
+
+引擎参数:`ANY|ALL` – 连接修饰符;`LEFT|INNER` – 连接类型。更多信息可参考 [JOIN子句](../../../engines/table_engines/special/join.md#select-join)。
+这些参数设置不用带引号,但必须与要 JOIN 的表匹配。 k1,k2,……是 USING 子句中要用于连接的关键列。
+
+此引擎表不能用于 GLOBAL JOIN 。
+
+类似于 Set 引擎,可以使用 INSERT 向表中添加数据。设置为 ANY 时,重复键的数据会被忽略(仅一条用于连接)。设置为 ALL 时,重复键的数据都会用于连接。不能直接对 JOIN 表进行 SELECT。检索其数据的唯一方法是将其作为 JOIN 语句右边的表。
+
+跟 Set 引擎类似,Join 引擎把数据存储在磁盘中。
+
+### 限制和设置 {#join-limitations-and-settings}
+
+创建表时,将应用以下设置:
+
+- join\_use\_nulls
+- max\_rows\_in\_join
+- max\_bytes\_in\_join
+- join\_overflow\_mode
+- join\_any\_take\_last\_row
+
+`Join` 引擎表不能用于 `GLOBAL JOIN` 操作。
+
+[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/join/)
diff --git a/docs/zh/engines/table_engines/special/materializedview.md b/docs/zh/engines/table_engines/special/materializedview.md
new file mode 100644
index 00000000000..5dc4e261fbd
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/materializedview.md
@@ -0,0 +1,6 @@
+
+# 物化视图 {#wu-hua-shi-tu}
+
+物化视图的使用(更多信息请参阅 [CREATE TABLE](../../../engines/table_engines/special/materializedview.md) )。它需要使用一个不同的引擎来存储数据,这个引擎要在创建物化视图时指定。当从表中读取时,它就会使用该引擎。
+
+[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/)
diff --git a/docs/zh/engines/table_engines/special/memory.md b/docs/zh/engines/table_engines/special/memory.md
new file mode 100644
index 00000000000..3fd29813d00
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/memory.md
@@ -0,0 +1,8 @@
+
+# Memory {#memory}
+
+Memory 引擎以未压缩的形式将数据存储在 RAM 中。数据完全以读取时获得的形式存储。换句话说,从这张表中读取数据没有任何开销。并发数据访问是同步的。锁范围小:读写操作不会相互阻塞。不支持索引。读取是并行化的。由于没有磁盘读取,也不需要解压缩或反序列化数据,简单查询可以达到最高的处理性能(超过10 GB/秒)。(值得注意的是,在许多情况下,与 MergeTree 引擎的性能几乎一样高。)重新启动服务器时,表中的数据会消失,表将变为空。通常,使用此表引擎是不合理的。但是,它可用于测试,以及在相对较少的行(最多约100,000,000)上需要最高性能的查询。
+
+Memory 引擎是由系统用于临时表进行外部数据的查询(请参阅 «外部数据用于请求处理» 部分),以及用于实现 `GLOBAL IN`(请参见 «IN 运算符» 部分)。
+
+[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/memory/)
diff --git a/docs/zh/engines/table_engines/special/merge.md b/docs/zh/engines/table_engines/special/merge.md
new file mode 100644
index 00000000000..e4ee3fe92a5
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/merge.md
@@ -0,0 +1,64 @@
+
+# 合并 {#merge}
+
+`Merge` 引擎 (不要跟 `MergeTree` 引擎混淆) 本身不存储数据,但可用于同时从任意多个其他的表中读取数据。
+读是自动并行的,不支持写入。读取时,那些被真正读取到数据的表的索引(如果有的话)会被使用。
+`Merge` 引擎的参数:一个数据库名和一个用于匹配表名的正则表达式。
+
+示例:
+
+    Merge(hits, '^WatchLog')
+
+数据会从 `hits` 数据库中表名匹配正则 '`^WatchLog`' 的表中读取。
+
+除了数据库名,你也可以用一个返回字符串的常量表达式。例如, `currentDatabase()` 。
+
+正则表达式 — [re2](https://github.com/google/re2) (支持 PCRE 一个子集的功能),大小写敏感。
+了解关于正则表达式中转义字符的说明可参看 «match» 一节。
+
+当选择需要读取的表时,`Merge` 表本身会被排除,即使它匹配上了该正则。这样设计是为了避免循环。
+当然,是能够创建两个相互无限递归读取对方数据的 `Merge` 表的,但这并没有什么意义。
+
+`Merge` 
引擎的一个典型应用是可以像使用一张表一样使用大量的 `TinyLog` 表。 + +示例 2 : + +我们假定你有一个旧表(WatchLog\_old),你想改变数据分区了,但又不想把旧数据转移到新表(WatchLog\_new)里,并且你需要同时能看到这两个表的数据。 + + CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) + ENGINE=MergeTree(date, (UserId, EventType), 8192); + INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3); + + CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) + ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; + INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3); + + CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog'); + + SELECT * + FROM WatchLog + + ┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ + │ 2018-01-01 │ 1 │ hit │ 3 │ + └────────────┴────────┴───────────┴─────┘ + ┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ + │ 2018-01-02 │ 2 │ hit │ 3 │ + └────────────┴────────┴───────────┴─────┘ + +## 虚拟列 {#xu-ni-lie} + +虚拟列是一种由表引擎提供而不是在表定义中的列。换种说法就是,这些列并没有在 `CREATE TABLE` 中指定,但可以在 `SELECT` 中使用。 + +下面列出虚拟列跟普通列的不同点: + +- 虚拟列不在表结构定义里指定。 +- 不能用 `INSERT` 向虚拟列写数据。 +- 使用不指定列名的 `INSERT` 语句时,虚拟列要会被忽略掉。 +- 使用星号通配符( `SELECT *` )时虚拟列不会包含在里面。 +- 虚拟列不会出现在 `SHOW CREATE TABLE` 和 `DESC TABLE` 的查询结果里。 + +`Merge` 类型的表包括一个 `String` 类型的 `_table` 虚拟列。(如果该表本来已有了一个 `_table` 的列,那这个虚拟列会命名为 `_table1` ;如果 `_table1` 也本就存在了,那这个虚拟列会被命名为 `_table2` ,依此类推)该列包含被读数据的表名。 + +如果 `WHERE/PREWHERE` 子句包含了带 `_table` 的条件,并且没有依赖其他的列(如作为表达式谓词链接的一个子项或作为整个的表达式),这些条件的作用会像索引一样。这些条件会在那些可能被读数据的表的表名上执行,并且读操作只会在那些满足了该条件的表上去执行。 + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/merge/) diff --git a/docs/zh/engines/table_engines/special/null.md b/docs/zh/engines/table_engines/special/null.md new file mode 100644 index 00000000000..3fd891db393 --- /dev/null +++ b/docs/zh/engines/table_engines/special/null.md @@ -0,0 +1,8 @@ + +# Null {#null} + +当写入 Null 类型的表时,将忽略数据。从 Null 类型的表中读取时,返回空。 + +但是,可以在 Null 类型的表上创建物化视图。写入表的数据将转发到视图中。 + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/null/) diff --git a/docs/zh/engines/table_engines/special/set.md b/docs/zh/engines/table_engines/special/set.md new file mode 100644 index 00000000000..b6ef859b85a --- /dev/null +++ b/docs/zh/engines/table_engines/special/set.md @@ -0,0 +1,12 @@ + +# 设置 {#set} + +始终存在于 RAM 中的数据集。它适用于IN运算符的右侧(请参见 «IN运算符» 部分)。 + +可以使用 INSERT 向表中插入数据。新元素将添加到数据集中,而重复项将被忽略。但是不能对此类型表执行 SELECT 语句。检索数据的唯一方法是在 IN 运算符的右半部分使用它。 + +数据始终存在于 RAM 中。对于 INSERT,插入数据块也会写入磁盘上的表目录。启动服务器时,此数据将加载到 RAM。也就是说,重新启动后,数据仍然存在。 + +对于强制服务器重启,磁盘上的数据块可能会丢失或损坏。在数据块损坏的情况下,可能需要手动删除包含损坏数据的文件。 + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/set/) diff --git a/docs/zh/engines/table_engines/special/url.md b/docs/zh/engines/table_engines/special/url.md new file mode 100644 index 00000000000..0e02693aea0 --- /dev/null +++ b/docs/zh/engines/table_engines/special/url.md @@ -0,0 +1,72 @@ + +# URL(URL,格式) {#table_engines-url} + +用于管理远程 HTTP/HTTPS 服务器上的数据。该引擎类似 +[文件](file.md) 引擎。 + +## 在 ClickHouse 服务器中使用引擎 {#zai-clickhouse-fu-wu-qi-zhong-shi-yong-yin-qing} + +`Format` 必须是 ClickHouse 可以用于 +`SELECT` 查询的一种格式,若有必要,还要可用于 `INSERT` 。有关支持格式的完整列表,请查看 +[格式](../../../interfaces/formats.md#formats)。 + +`URL` 必须符合统一资源定位符的结构。指定的URL必须指向一个 +HTTP 或 HTTPS 服务器。对于服务端响应, +不需要任何额外的 HTTP 头标记。 + +`INSERT` 和 `SELECT` 查询会分别转换为 `POST` 和 `GET` 请求。 +对于 `POST` 请求的处理,远程服务器必须支持 +[分块传输编码](https://en.wikipedia.org/wiki/Chunked_transfer_encoding)。 + +**示例:** + +**1.** 在 Clickhouse 服务上创建一个 `url_engine_table` 表: + +``` sql +CREATE TABLE url_engine_table (word String, value UInt64) 
+ENGINE=URL('http://127.0.0.1:12345/', CSV) +``` + +**2.** 用标准的 Python 3 工具库创建一个基本的 HTTP 服务并 +启动它: + +``` python3 +from http.server import BaseHTTPRequestHandler, HTTPServer + +class CSVHTTPServer(BaseHTTPRequestHandler): + def do_GET(self): + self.send_response(200) + self.send_header('Content-type', 'text/csv') + self.end_headers() + + self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8")) + +if __name__ == "__main__": + server_address = ('127.0.0.1', 12345) + HTTPServer(server_address, CSVHTTPServer).serve_forever() +``` + +``` bash +python3 server.py +``` + +**3.** 查询请求: + +``` sql +SELECT * FROM url_engine_table +``` + + ┌─word──┬─value─┐ + │ Hello │ 1 │ + │ World │ 2 │ + └───────┴───────┘ + +## 功能实现 {#gong-neng-shi-xian} + +- 读写操作都支持并发 +- 不支持: + - `ALTER` 和 `SELECT...SAMPLE` 操作。 + - 索引。 + - 副本。 + +[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/url/) diff --git a/docs/zh/engines/table_engines/special/view.md b/docs/zh/engines/table_engines/special/view.md new file mode 100644 index 00000000000..a17dab21ce2 --- /dev/null +++ b/docs/zh/engines/table_engines/special/view.md @@ -0,0 +1,6 @@ + +# 查看 {#view} + +用于构建视图(有关更多信息,请参阅 `CREATE VIEW 查询`)。 它不存储数据,仅存储指定的 `SELECT` 查询。 从表中读取时,它会运行此查询(并从查询中删除所有不必要的列)。 + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/view/) diff --git a/docs/zh/faq/general.md b/docs/zh/faq/general.md index 17f4fe9b11b..b81d521fa80 100644 --- a/docs/zh/faq/general.md +++ b/docs/zh/faq/general.md @@ -1,3 +1,4 @@ + # 常见问题 {#chang-jian-wen-ti} ## 为什么不使用MapReduce之类的产品呢? {#wei-shi-yao-bu-shi-yong-mapreducezhi-lei-de-chan-pin-ni} @@ -8,11 +9,11 @@ 大多数MapReduce系统允许您在集群上执行任意代码。但是,声明性查询语言更适合OLAP,以便快速运行实验。例如,Hadoop包含Hive和Pig,Cloudera Impala或Shark(过时)for Spark,以及Spark SQL、Presto和Apache Drill。与专业系统相比,运行此类任务时的性能非常不理想,所以将这些系统用作Web接口的后端服务是不现实的,因为延迟相对较高。 -## What to do if I have a problem with encodings when using Oracle through ODBC? {#oracle-odbc-encodings} +## 如果我在通过ODBC使用Oracle时遇到编码问题,该怎么办? {#oracle-odbc-encodings} -If you use Oracle through ODBC driver as a source of external dictionaries, you need to set up correctly value for the `NLS_LANG` variable in the `/etc/default/clickhouse`. For more details see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html). +如果您通过ODBC驱动程序使用Oracle作为外部字典的源,则需要为 `NLS_LANG` 在变量 `/etc/default/clickhouse`. 欲了解更多详情,请参阅 [Oracle NLS\_常见问题](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html). -**Example** +**示例** NLS_LANG=CHINESE_CHINA.ZHS16GBK diff --git a/docs/zh/faq/index.md b/docs/zh/faq/index.md new file mode 100644 index 00000000000..7c0b25dbec0 --- /dev/null +++ b/docs/zh/faq/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: F.A.Q. 
+toc_priority: 76 +--- + + diff --git a/docs/zh/getting_started/example_datasets/amplab_benchmark.md b/docs/zh/getting_started/example_datasets/amplab_benchmark.md index fc78daa6a46..30d55f8b28d 100644 --- a/docs/zh/getting_started/example_datasets/amplab_benchmark.md +++ b/docs/zh/getting_started/example_datasets/amplab_benchmark.md @@ -1,4 +1,5 @@ -# AMPLab 大数据基准测试 {#amplab-da-shu-ju-ji-zhun-ce-shi} + +# AMPLab大数据基准测试 {#amplab-da-shu-ju-ji-zhun-ce-shi} 参考 https://amplab.cs.berkeley.edu/benchmark/ @@ -119,4 +120,4 @@ ORDER BY totalRevenue DESC LIMIT 1 ``` -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/) diff --git a/docs/zh/getting_started/example_datasets/criteo.md b/docs/zh/getting_started/example_datasets/criteo.md index 6083566113a..0ae2650b390 100644 --- a/docs/zh/getting_started/example_datasets/criteo.md +++ b/docs/zh/getting_started/example_datasets/criteo.md @@ -1,3 +1,4 @@ + # Criteo TB级别点击日志 {#criteo-tbji-bie-dian-ji-ri-zhi} 可以从http://labs.criteo.com/downloads/download-terabyte-click-logs/上下载数据 @@ -71,4 +72,4 @@ INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int DROP TABLE criteo_log; ``` -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/) diff --git a/docs/zh/getting_started/example_datasets/index.md b/docs/zh/getting_started/example_datasets/index.md deleted file mode 120000 index c891314f915..00000000000 --- a/docs/zh/getting_started/example_datasets/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/index.md \ No newline at end of file diff --git a/docs/zh/getting_started/example_datasets/index.md b/docs/zh/getting_started/example_datasets/index.md new file mode 100644 index 00000000000..c610af8a269 --- /dev/null +++ b/docs/zh/getting_started/example_datasets/index.md @@ -0,0 +1,20 @@ +--- +toc_folder_title: "\u793A\u4F8B\u6570\u636E\u96C6" +toc_priority: 12 +toc_title: "\u5BFC\u8A00" +--- + +# 示例数据集 {#example-datasets} + +本节介绍如何获取示例数据集并将其导入ClickHouse。 +对于某些数据集示例查询也可用。 + +- [脱敏的Yandex.Metrica数据集](metrica.md) +- [星型基准测试](star_schema.md) +- [维基访问数据](wikistat.md) +- [Criteo TB级别点击日志](criteo.md) +- [AMPLab大数据基准测试](amplab_benchmark.md) +- [纽约出租车数据](nyc_taxi.md) +- [航班飞行数据](ontime.md) + +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets) diff --git a/docs/zh/getting_started/example_datasets/metrica.md b/docs/zh/getting_started/example_datasets/metrica.md deleted file mode 120000 index 984023973eb..00000000000 --- a/docs/zh/getting_started/example_datasets/metrica.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/metrica.md \ No newline at end of file diff --git a/docs/zh/getting_started/example_datasets/metrica.md b/docs/zh/getting_started/example_datasets/metrica.md new file mode 100644 index 00000000000..6e349a1135d --- /dev/null +++ b/docs/zh/getting_started/example_datasets/metrica.md @@ -0,0 +1,68 @@ +--- +toc_priority: 21 +toc_title: "Yandex\u6885\u7279\u91CC\u5361\u6570\u636E" +--- + +# 脱敏的Yandex.Metrica数据集 {#anonymized-yandex-metrica-data} + +Dataset由两个表组成,其中包含有关命中的匿名数据 (`hits_v1`)和访问 (`visits_v1`)的Yandex的。梅特里卡 你可以阅读更多关于Yandex的。梅特里卡 [ClickHouse历史](../../introduction/history.md) 科。 + +数据集由两个表组成,其中任何一个都可以作为压缩表下载 `tsv.xz` 文件或作为准备的分区。 除此之外,该扩展版本 `hits` 
包含1亿行的表可作为TSV在https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz 并作为准备的分区在https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz. + +## 从准备好的分区获取表 {#obtaining-tables-from-prepared-partitions} + +下载和导入点击表: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar +tar xvf hits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +``` + +下载和导入访问: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/visits/partitions/visits_v1.tar +tar xvf visits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" +``` + +## 从压缩TSV文件获取表 {#obtaining-tables-from-compressed-tsv-file} + +从压缩的TSV文件下载并导入命中: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, 
GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" +# import data +cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +``` + +从压缩tsv文件下载和导入访问: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, 
ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192"
+# import data
+cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000
+# optionally you can optimize table
+clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL"
+clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
+```
+
+## 查询示例 {#example-queries}
+
+[ClickHouse教程](../../getting_started/tutorial.md) 是基于Yandex.Metrica数据集编写的,开始使用该数据集的推荐方式就是学习这个教程。
+
+查询这些表的其他示例可以在ClickHouse的 [有状态测试](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) 中找到(它们在那里被命名为 `test.hits` 和 `test.visits`)。
diff --git a/docs/zh/getting_started/example_datasets/nyc_taxi.md b/docs/zh/getting_started/example_datasets/nyc_taxi.md
index 50dcbed0988..e486dbef9a7 100644
--- a/docs/zh/getting_started/example_datasets/nyc_taxi.md
+++ b/docs/zh/getting_started/example_datasets/nyc_taxi.md
@@ -1,3 +1,4 @@
+
 # 纽约市出租车数据 {#niu-yue-shi-chu-zu-che-shu-ju}

 纽约市出租车数据有以下两个方式获取:
@@ -259,7 +260,7 @@ FROM trips
 ```

 这需要3030秒,速度约为每秒428,000行。
-要加快速度,可以使用`Log`引擎替换’MergeTree\`引擎来创建表。 在这种情况下,下载速度超过200秒。
+要加快速度,可以使用`Log`引擎替换`MergeTree`引擎来创建表。 在这种情况下,下载耗时不到200秒。

 这个表需要使用126GB的磁盘空间。

@@ -285,7 +286,7 @@ $ sudo service clickhouse-server restart
 $ clickhouse-client --query "select count(*) from datasets.trips_mergetree"
 ```

-!!! info "Info"
+!!! info "信息"

     如果要运行下面的SQL查询,必须使用完整的表名, `datasets.trips_mergetree`。

@@ -297,7 +298,7 @@ Q1:

``` sql
SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type
```

-0.490 seconds.
+0.490秒。

Q2:

@@ -305,7 +306,7 @@ Q2:
``` sql
SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count
```

-1.224 seconds.
+1.224秒。

Q3:

@@ -313,7 +314,7 @@ Q3:
``` sql
SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year
```

-2.104 seconds.
+2.104秒。

Q4:

@@ -324,11 +325,11 @@ GROUP BY passenger_count, year, distance
ORDER BY year, count(*) DESC
```

-3.593 seconds.
+3.593秒。

我们使用的是如下配置的服务器:

-Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical kernels total,128 GiB RAM,8x6 TB HD on hardware RAID-5
+两个Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz,总共16个物理核心,128GiB RAM,硬件RAID-5上的8×6TB HD

执行时间是取三次运行中最好的值,但是从第二次查询开始,查询就讲从文件系统的缓存中读取数据。同时在每次读取和处理后不在进行缓存。

@@ -356,29 +357,29 @@ INSERT INTO trips_mergetree_x3 SELECT * FROM trips_mergetree

在三台服务器集群中运行的结果:

-Q1: 0.212 seconds.
-Q2: 0.438 seconds.
-Q3: 0.733 seconds.
-Q4: 1.241 seconds.
+Q1:0.212秒。
+Q2:0.438秒。
+Q3:0.733秒。
+Q4:1.241秒。

不出意料,查询是线性扩展的。

我们同时在140台服务器的集群中运行的结果:

-Q1: 0.028 sec.
-Q2: 0.043 sec.
-Q3: 0.051 sec.
-Q4: 0.072 sec.
+Q1:0.028秒。
+Q2:0.043秒。
+Q3:0.051秒。
+Q4:0.072秒。

在这种情况下,查询处理时间首先由网络延迟确定。 我们使用位于芬兰的Yandex数据中心中的客户端去位于俄罗斯的集群上运行查询,这增加了大约20毫秒的延迟。

## 总结 {#zong-jie}

-| servers | Q1 | Q2 | Q3 | Q4 |
-|---------|-------|-------|-------|-------|
-| 1 | 0.490 | 1.224 | 2.104 | 3.593 |
-| 3 | 0.212 | 0.438 | 0.733 | 1.241 |
-| 140 | 0.028 | 0.043 | 0.051 | 0.072 |
+| 服务器 | Q1 | Q2 | Q3 | Q4 |
+|--------|-------|-------|-------|-------|
+| 1 | 0.490 | 1.224 | 2.104 | 3.593 |
+| 3 | 0.212 | 0.438 | 0.733 | 1.241 |
+| 140 | 0.028 | 0.043 | 0.051 | 0.072 |

-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/)
+[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/)
diff --git a/docs/zh/getting_started/example_datasets/ontime.md b/docs/zh/getting_started/example_datasets/ontime.md
index 6db294b12fb..31684129bb7 100644
--- a/docs/zh/getting_started/example_datasets/ontime.md
+++ b/docs/zh/getting_started/example_datasets/ontime.md
@@ -1,3 +1,4 @@
+
 # 航班飞行数据 {#hang-ban-fei-xing-shu-ju}

 航班飞行数据有以下两个方式获取:
@@ -156,7 +157,7 @@ $ sudo service clickhouse-server restart
 $ clickhouse-client --query "select count(*) from datasets.ontime"
 ```

-!!! info "Info"
+!!! info "信息"
info "信息" 如果要运行下面的SQL查询,必须使用完整的表名, `datasets.ontime`。 @@ -356,7 +357,7 @@ ORDER by rate DESC LIMIT 1000; ``` -Bonus: +奖金: ``` sql SELECT avg(cnt) diff --git a/docs/zh/getting_started/example_datasets/star_schema.md b/docs/zh/getting_started/example_datasets/star_schema.md index 4680fe652b2..b575abe63fa 100644 --- a/docs/zh/getting_started/example_datasets/star_schema.md +++ b/docs/zh/getting_started/example_datasets/star_schema.md @@ -1,4 +1,5 @@ -# Star Schema Benchmark {#star-schema-benchmark} + +# 星型基准测试 {#star-schema-benchmark} 编译 dbgen: @@ -110,7 +111,7 @@ FROM lineorder l ALTER TABLE lineorder_flat DROP COLUMN C_CUSTKEY, DROP COLUMN S_SUPPKEY, DROP COLUMN P_PARTKEY; ``` -Running the queries: +运行查询: Q1.1 @@ -190,4 +191,4 @@ Q4.3 SELECT toYear(LO_ORDERDATE) AS year, S_CITY, P_BRAND, sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14' GROUP BY year, S_CITY, P_BRAND ORDER BY year, S_CITY, P_BRAND; ``` -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/) diff --git a/docs/zh/getting_started/example_datasets/wikistat.md b/docs/zh/getting_started/example_datasets/wikistat.md index aacbdbf37f0..c2681a912e2 100644 --- a/docs/zh/getting_started/example_datasets/wikistat.md +++ b/docs/zh/getting_started/example_datasets/wikistat.md @@ -1,3 +1,4 @@ + # 维基访问数据 {#wei-ji-fang-wen-shu-ju} 参考: http://dumps.wikimedia.org/other/pagecounts-raw/ @@ -25,4 +26,4 @@ $ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/page $ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done ``` -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/) diff --git a/docs/zh/getting_started/index.md b/docs/zh/getting_started/index.md index d6830aa6c84..35ae08bde7b 100644 --- a/docs/zh/getting_started/index.md +++ b/docs/zh/getting_started/index.md @@ -1,3 +1,4 @@ + # 入门 {#ru-men} 如果您是ClickHouse的新手,并希望亲身体验它的性能,首先您需要通过 [安装过程](install.md). 
diff --git a/docs/zh/getting_started/install.md b/docs/zh/getting_started/install.md
index bf0ace6824f..111c362caf7 100644
--- a/docs/zh/getting_started/install.md
+++ b/docs/zh/getting_started/install.md
@@ -1,3 +1,5 @@
+# 安装 {#clickhouse-an-zhuang}
+
 ## 系统要求 {#xi-tong-yao-qiu}

 ClickHouse可以在任何具有x86\_64,AArch64或PowerPC64LE CPU架构的Linux,FreeBSD或Mac OS X上运行。
@@ -21,7 +23,7 @@ $ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not
 deb http://repo.yandex.ru/clickhouse/deb/stable/ main/
 ```

-如果你想使用最新的测试版本,请使用’testing’替换’stable’。
+如果你想使用最新的测试版本,请使用'testing'替换'stable'。

 然后运行:

@@ -34,8 +36,8 @@ sudo apt-get install clickhouse-client clickhouse-server

 你也可以从这里手动下载安装包:https://repo.yandex.ru/clickhouse/deb/stable/main/。

-ClickHouse包含访问控制配置,它们位于`users.xml`文件中(与’config.xml’同目录)。
-默认情况下,允许从任何地方使用默认的‘default’用户无密码的访问ClickHouse。参考‘user/default/networks’。
+ClickHouse包含访问控制配置,它们位于`users.xml`文件中(与'config.xml'同目录)。
+默认情况下,允许从任何地方使用默认的'default'用户无密码地访问ClickHouse。参考'user/default/networks'。
 有关更多信息,请参考«Configuration files»部分。

 ### 来自RPM包 {#from-rpm-packages}
@@ -62,7 +64,7 @@ sudo yum install clickhouse-server clickhouse-client

 ### 来自Docker {#from-docker-image}

-要在Docker中运行ClickHouse,请遵循[Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/)上的指南。那些图像使用官方的`deb`包。
+要在Docker中运行ClickHouse,请遵循[Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/)上的指南。这些镜像使用官方的`deb`包。

 ### 使用源码安装 {#from-sources}

@@ -72,8 +74,8 @@ sudo yum install clickhouse-server clickhouse-client

 你也可以直接使用而不进行安装。

``` text
-Client: dbms/programs/clickhouse-client
-Server: dbms/programs/clickhouse-server
+Client: programs/clickhouse-client
+Server: programs/clickhouse-server
```

 在服务器中为数据创建如下目录:

@@ -84,9 +86,9 @@ Server: dbms/programs/clickhouse-server
 ```

 (它们可以在server config中配置。)
-为需要的用户运行‘chown’
+为需要的用户运行'chown'

-日志的路径可以在server config (src/dbms/programs/server/config.xml)中配置。
+日志的路径可以在server config (src/programs/server/config.xml)中配置。

 ## 启动 {#qi-dong}

@@ -107,7 +109,7 @@ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
 ```

 在这种情况下,日志将被打印到控制台中,这在开发过程中很方便。
-如果配置文件在当前目录中,你可以不指定‘–config-file’参数。它默认使用‘./config.xml’。
+如果配置文件在当前目录中,你可以不指定'--config-file'参数。它默认使用'./config.xml'。

 你可以使用命令行客户端连接到服务:

@@ -115,7 +117,7 @@ clickhouse-server --config-file=/etc/clickhouse-server/config.xml
 clickhouse-client
 ```

-默认情况下它使用‘default’用户无密码的与localhost:9000服务建立连接。
+默认情况下它使用'default'用户无密码地与localhost:9000服务建立连接。
 客户端也可以用于连接远程服务,例如:

``` bash
@@ -127,7 +129,7 @@ clickhouse-client --host=example.com

 检查系统是否工作:

``` bash
-milovidov@hostname:~/work/metrica/src/dbms/src/Client$ ./clickhouse-client
+milovidov@hostname:~/work/metrica/src/src/Client$ ./clickhouse-client
ClickHouse client version 0.0.18749.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.18749.
@@ -149,4 +151,4 @@ SELECT 1

 为了继续进行实验,你可以尝试下载测试数据集。

-[Original article](https://clickhouse.tech/docs/en/getting_started/install/)
+[原始文章](https://clickhouse.tech/docs/en/getting_started/install/)
diff --git a/docs/zh/getting_started/playground.md b/docs/zh/getting_started/playground.md
deleted file mode 120000
index de5b41f453e..00000000000
--- a/docs/zh/getting_started/playground.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/getting_started/playground.md
\ No newline at end of file
diff --git a/docs/zh/getting_started/playground.md b/docs/zh/getting_started/playground.md
new file mode 100644
index 00000000000..252b2eae611
--- /dev/null
+++ b/docs/zh/getting_started/playground.md
@@ -0,0 +1,47 @@
+---
+toc_priority: 14
+toc_title: "体验平台"
+---
+
+# ClickHouse体验平台 {#clickhouse-playground}
+
+[ClickHouse体验平台](https://play.clickhouse.tech?file=welcome) 允许人们通过即时运行查询来尝试ClickHouse,而无需搭建自己的服务器或集群。
+体验平台中提供了几个示例数据集,以及展示ClickHouse特性的示例查询。
+
+查询以只读用户身份执行。 这意味着存在一些限制:
+
+- 不允许DDL查询
+- 不允许INSERT查询
+
+还强制执行以下设置:
+- [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes)
+- [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows)
+- [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode)
+- [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time)
+
+ClickHouse体验平台是一个托管在 [Yandex云](https://cloud.yandex.com/) 上的 [ClickHouse管理服务](https://cloud.yandex.com/services/managed-clickhouse) 实例。
+更多信息请参阅 [云提供商](../commercial/cloud.md)。
+
+ClickHouse体验平台界面实际上是通过ClickHouse [HTTP API](../interfaces/http.md) 接口实现的。
+体验平台后端只是一个ClickHouse集群,没有任何额外的服务器端应用程序。
+体验平台也同样提供了ClickHouse HTTPS服务端口。
+
+您可以使用任何HTTP客户端向体验平台发起查询,例如 [curl](https://curl.haxx.se) 或 [wget](https://www.gnu.org/software/wget/),也可以使用 [JDBC](../interfaces/jdbc.md) 或 [ODBC](../interfaces/odbc.md) 驱动建立连接。
+有关支持ClickHouse的软件产品的更多信息,请访问 [这里](../interfaces/index.md)。
+
+| 参数 | 值 |
+|:-----|:--------------------------------------|
+| 服务端口 | https://play-api.clickhouse.tech:8443 |
+| 用户 | `playground` |
+| 密码 | `clickhouse` |
+
+请注意,此服务端口需要安全连接。
+
+示例:
+
+``` bash
+curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets"
+```
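+
+作为补充,下面是一个与上述 curl 等价的 wget 示例(服务端口与账号均沿用上表,仅作演示):
+
+``` bash
+# 通过 HTTPS 端口以只读用户身份执行一条查询
+wget -O- -q 'https://play-api.clickhouse.tech:8443/?query=SELECT+1&user=playground&password=clickhouse'
+```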
diff --git a/docs/zh/getting_started/tutorial.md b/docs/zh/getting_started/tutorial.md
deleted file mode 120000
index 8bc40816ab2..00000000000
--- a/docs/zh/getting_started/tutorial.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/getting_started/tutorial.md
\ No newline at end of file
diff --git a/docs/zh/getting_started/tutorial.md b/docs/zh/getting_started/tutorial.md
new file mode 100644
index 00000000000..bd86e19a037
--- /dev/null
+++ b/docs/zh/getting_started/tutorial.md
@@ -0,0 +1,671 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 12
+toc_title: "\u6559\u7A0B"
+---
+
+# 教程 {#clickhouse-tutorial}
+
+## 从本教程中可以期待什么? {#what-to-expect-from-this-tutorial}
+
+通过本教程,您将学习如何设置一个简单的ClickHouse集群。 它会很小,但具有容错性和可扩展性。 然后,我们将使用其中一个示例数据集填充数据,并执行一些演示查询。
+
+## 单节点设置 {#single-node-setup}
+
+为了推迟分布式环境的复杂性,我们将首先在单个服务器或虚拟机上部署ClickHouse。 ClickHouse通常是从 [deb](index.md#install-from-deb-packages) 或 [rpm](index.md#from-rpm-packages) 包安装的;对于不支持它们的操作系统,也有 [替代方法](index.md#from-docker-image)。
+
+例如,您选择了 `deb` 包并执行:
+
+``` bash
+sudo apt-get install dirmngr
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
+
+echo "deb http://repo.clickhouse.tech/deb/stable/ main/" | sudo tee /etc/apt/sources.list.d/clickhouse.list
+sudo apt-get update
+
+sudo apt-get install -y clickhouse-server clickhouse-client
+```
+
+我们安装的软件包中包含:
+
+- `clickhouse-client` 包包含 [clickhouse-client](../interfaces/cli.md) 应用程序,即交互式ClickHouse控制台客户端。
+- `clickhouse-common` 包包含ClickHouse可执行文件。
+- `clickhouse-server` 包包含要将ClickHouse作为服务器运行所需的配置文件。
+
+服务器配置文件位于 `/etc/clickhouse-server/`。 在进一步讨论之前,请注意 `config.xml` 中的 `<path>` 元素。 它决定了数据存储的位置,因此应该位于磁盘容量较大的卷上;默认值为 `/var/lib/clickhouse/`。 如果你想调整配置,直接编辑 `config.xml` 文件并不方便,因为它可能会在未来的软件包更新中被重写。 覆盖配置元素的推荐方法是在 [配置文件的config.d目录](../operations/configuration_files.md) 中创建文件,作为对config.xml的 “patches”。
+
+你可能已经注意到了, `clickhouse-server` 安装后不会自动启动,它也不会在更新后自动重新启动。 您启动服务器的方式取决于您的init系统,通常情况下是:
+
+``` bash
+sudo service clickhouse-server start
+```
+
+或
+
+``` bash
+sudo /etc/init.d/clickhouse-server start
+```
+
+服务器日志的默认位置是 `/var/log/clickhouse-server/`。 一旦服务器记录了 `Ready for connections` 消息,它就已准备好处理客户端连接。
+
+一旦 `clickhouse-server` 运行起来,我们就可以利用 `clickhouse-client` 连接到服务器并运行一些测试查询,如 `SELECT "Hello, world!";`。
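+
+例如(假设服务器已按上文在本机启动,仅作演示):
+
+``` bash
+clickhouse-client --query "SELECT 'Hello, world!'"
+```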
+
+<details markdown="1">
+
+<summary>clickhouse-client 的快速提示</summary>
+
+交互模式:
+
+``` bash
+clickhouse-client
+clickhouse-client --host=... --port=... --user=... --password=...
+```
+
+启用多行查询:
+
+``` bash
+clickhouse-client -m
+clickhouse-client --multiline
+```
+
+以批处理模式运行查询:
+
+``` bash
+clickhouse-client --query='SELECT 1'
+echo 'SELECT 1' | clickhouse-client
+clickhouse-client <<< 'SELECT 1'
+```
+
+从指定格式的文件中插入数据:
+
+``` bash
+clickhouse-client --query='INSERT INTO table VALUES' < data.txt
+clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
+```
+
+</details>
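+
+下面再补充一个小示例,演示批处理模式与输出格式的组合(查询内容为任意演示值):
+
+``` bash
+# 查看服务器版本,确认客户端可以连通
+clickhouse-client --query "SELECT version()"
+# 以 TSV 格式把查询结果重定向到文件
+clickhouse-client --query "SELECT 1 AS x, 'hello' AS s FORMAT TSV" > result.tsv
+```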
+
+## 导入示例数据集 {#import-sample-dataset}
+
+现在是时候用一些示例数据填充我们的ClickHouse服务器了。 在本教程中,我们将使用Yandex.Metrica的匿名数据,它是在开源之前以生产方式运行ClickHouse的第一个服务(更多信息请参阅 [历史部分](../introduction/history.md))。 有 [多种方式可以导入Yandex.Metrica数据集](example_datasets/metrica.md),在本教程中,我们将使用最现实的一种。
+
+### 下载并提取表数据 {#download-and-extract-table-data}
+
+``` bash
+curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
+curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
+```
+
+提取的文件大小约为10GB。
+
+### 创建表 {#create-tables}
+
+与大多数数据库管理系统一样,ClickHouse在逻辑上将表分组为 “databases”。 其中有一个 `default` 数据库,但我们将创建一个名为 `tutorial` 的新数据库:
+
+``` bash
+clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
+```
+
+与创建数据库相比,创建表的语法要复杂得多(请参阅 [参考资料](../sql_reference/statements/create.md))。 一般来说, `CREATE TABLE` 语句必须指定三个关键的事情:
+
+1. 要创建的表的名称。
+2. 表结构,即列的列表及其 [数据类型](../sql_reference/data_types/index.md)。
+3. [表引擎](../engines/table_engines/index.md) 及其设置,这决定了如何物理执行对此表的查询的所有细节。
+
+Yandex.Metrica是一个网络分析服务,示例数据集没有覆盖其全部功能,因此只需创建两个表:
+
+- `hits` 是一个表格,其中包含所有用户在服务所涵盖的所有网站上完成的每个操作。
+- `visits` 是一个包含预先构建的会话而不是单个操作的表。
+
+让我们看看并执行这些表的实际创建表查询:
+
+``` sql
+CREATE TABLE tutorial.hits_v1
+(
+    `WatchID` UInt64,
+    `JavaEnable` UInt8,
+    `Title` String,
+    `GoodEvent` Int16,
+    `EventTime` DateTime,
+    `EventDate` Date,
+    `CounterID` UInt32,
+    `ClientIP` UInt32,
+    `ClientIP6` FixedString(16),
+    `RegionID` UInt32,
+    `UserID` UInt64,
+    `CounterClass` Int8,
+    `OS` UInt8,
+    `UserAgent` UInt8,
+    `URL` String,
+    `Referer` String,
+    `URLDomain` String,
+    `RefererDomain` String,
+    `Refresh` UInt8,
+    `IsRobot` UInt8,
+    `RefererCategories` Array(UInt16),
+    `URLCategories` Array(UInt16),
+    `URLRegions` Array(UInt32),
+    `RefererRegions` Array(UInt32),
+    `ResolutionWidth` UInt16,
+    `ResolutionHeight` UInt16,
+    `ResolutionDepth` UInt8,
+    `FlashMajor` UInt8,
+    `FlashMinor` UInt8,
+    `FlashMinor2` String,
+    `NetMajor` UInt8,
+    `NetMinor` UInt8,
+    `UserAgentMajor` UInt16,
+    `UserAgentMinor` FixedString(2),
+    `CookieEnable` UInt8,
+    `JavascriptEnable` UInt8,
+    `IsMobile` UInt8,
+    `MobilePhone` UInt8,
+    `MobilePhoneModel` String,
+    `Params` String,
+    `IPNetworkID` UInt32,
+    `TraficSourceID` Int8,
+    `SearchEngineID` UInt16,
+    `SearchPhrase` String,
+    `AdvEngineID` UInt8,
+    `IsArtifical` UInt8,
+    `WindowClientWidth` UInt16,
+    `WindowClientHeight` UInt16,
+    `ClientTimeZone` Int16,
+    `ClientEventTime` DateTime,
+    `SilverlightVersion1` UInt8,
+    `SilverlightVersion2` UInt8,
+    `SilverlightVersion3` UInt32,
+    `SilverlightVersion4` UInt16,
+    `PageCharset` String,
+    `CodeVersion` UInt32,
+    `IsLink` UInt8,
+    `IsDownload` UInt8,
+    `IsNotBounce` UInt8,
+    `FUniqID` UInt64,
+    `HID` UInt32,
+    `IsOldCounter` UInt8,
+    `IsEvent` UInt8,
+    `IsParameter` UInt8,
+    `DontCountHits` UInt8,
+    `WithHash` UInt8,
+    `HitColor` FixedString(1),
+    `UTCEventTime` DateTime,
+    `Age` UInt8,
+    `Sex` UInt8,
+    `Income` UInt8,
+    `Interests` UInt16,
+    `Robotness` UInt8,
+    `GeneralInterests` Array(UInt16),
+    `RemoteIP` UInt32,
+    `RemoteIP6` FixedString(16),
+    `WindowName` Int32,
+    `OpenerName` Int32,
+    `HistoryLength` Int16,
+    `BrowserLanguage` FixedString(2),
+    `BrowserCountry` FixedString(2),
+    `SocialNetwork` String,
+    `SocialAction` String,
+    `HTTPError` UInt16,
+    `SendTiming` Int32,
+    `DNSTiming` Int32,
+    `ConnectTiming` Int32,
+    `ResponseStartTiming` Int32,
+    `ResponseEndTiming` Int32,
+    `FetchTiming` Int32,
+    `RedirectTiming` Int32,
+    `DOMInteractiveTiming` Int32,
+
`DOMContentLoadedTiming` Int32, + `DOMCompleteTiming` Int32, + `LoadEventStartTiming` Int32, + `LoadEventEndTiming` Int32, + `NSToDOMContentLoadedTiming` Int32, + `FirstPaintTiming` Int32, + `RedirectCount` Int8, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `ParamPrice` Int64, + `ParamOrderID` String, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `GoalsReached` Array(UInt32), + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `RefererHash` UInt64, + `URLHash` UInt64, + `CLID` UInt32, + `YCLID` UInt64, + `ShareService` String, + `ShareURL` String, + `ShareTitle` String, + `ParsedParams` Nested( + Key1 String, + Key2 String, + Key3 String, + Key4 String, + Key5 String, + ValueDouble Float64), + `IslandID` FixedString(16), + `RequestNum` UInt32, + `RequestTry` UInt8 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS index_granularity = 8192 +``` + +``` sql +CREATE TABLE tutorial.visits_v1 +( + `CounterID` UInt32, + `StartDate` Date, + `Sign` Int8, + `IsNew` UInt8, + `VisitID` UInt64, + `UserID` UInt64, + `StartTime` DateTime, + `Duration` UInt32, + `UTCStartTime` DateTime, + `PageViews` Int32, + `Hits` Int32, + `IsBounce` UInt8, + `Referer` String, + `StartURL` String, + `RefererDomain` String, + `StartURLDomain` String, + `EndURL` String, + `LinkURL` String, + `IsDownload` UInt8, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `PlaceID` Int32, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `IsYandex` UInt8, + `GoalReachesDepth` Int32, + `GoalReachesURL` Int32, + `GoalReachesAny` Int32, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `MobilePhoneModel` String, + `ClientEventTime` DateTime, + `RegionID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `IPNetworkID` UInt32, + `SilverlightVersion3` UInt32, + `CodeVersion` UInt32, + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `UserAgentMajor` UInt16, + `UserAgentMinor` UInt16, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `SilverlightVersion2` UInt8, + `SilverlightVersion4` UInt16, + `FlashVersion3` UInt16, + `FlashVersion4` UInt16, + `ClientTimeZone` Int16, + `OS` UInt8, + `UserAgent` UInt8, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `NetMajor` UInt8, + `NetMinor` UInt8, + `MobilePhone` UInt8, + `SilverlightVersion1` UInt8, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `JavaEnable` UInt8, + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `BrowserLanguage` UInt16, + `BrowserCountry` UInt16, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `Params` Array(String), + `Goals` Nested( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32), + `WatchIDs` Array(UInt64), + `ParamSumPrice` Int64, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `ClickLogID` UInt64, + `ClickEventID` Int32, + `ClickGoodEvent` Int32, + `ClickEventTime` DateTime, + `ClickPriorityID` Int32, + `ClickPhraseID` Int32, + 
`ClickPageID` Int32,
+    `ClickPlaceID` Int32,
+    `ClickTypeID` Int32,
+    `ClickResourceID` Int32,
+    `ClickCost` UInt32,
+    `ClickClientIP` UInt32,
+    `ClickDomainID` UInt32,
+    `ClickURL` String,
+    `ClickAttempt` UInt8,
+    `ClickOrderID` UInt32,
+    `ClickBannerID` UInt32,
+    `ClickMarketCategoryID` UInt32,
+    `ClickMarketPP` UInt32,
+    `ClickMarketCategoryName` String,
+    `ClickMarketPPName` String,
+    `ClickAWAPSCampaignName` String,
+    `ClickPageName` String,
+    `ClickTargetType` UInt16,
+    `ClickTargetPhraseID` UInt64,
+    `ClickContextType` UInt8,
+    `ClickSelectType` Int8,
+    `ClickOptions` String,
+    `ClickGroupBannerID` Int32,
+    `OpenstatServiceName` String,
+    `OpenstatCampaignID` String,
+    `OpenstatAdID` String,
+    `OpenstatSourceID` String,
+    `UTMSource` String,
+    `UTMMedium` String,
+    `UTMCampaign` String,
+    `UTMContent` String,
+    `UTMTerm` String,
+    `FromTag` String,
+    `HasGCLID` UInt8,
+    `FirstVisit` DateTime,
+    `PredLastVisit` Date,
+    `LastVisit` Date,
+    `TotalVisits` UInt32,
+    `TraficSource` Nested(
+        ID Int8,
+        SearchEngineID UInt16,
+        AdvEngineID UInt8,
+        PlaceID UInt16,
+        SocialSourceNetworkID UInt8,
+        Domain String,
+        SearchPhrase String,
+        SocialSourcePage String),
+    `Attendance` FixedString(16),
+    `CLID` UInt32,
+    `YCLID` UInt64,
+    `NormalizedRefererHash` UInt64,
+    `SearchPhraseHash` UInt64,
+    `RefererDomainHash` UInt64,
+    `NormalizedStartURLHash` UInt64,
+    `StartURLDomainHash` UInt64,
+    `NormalizedEndURLHash` UInt64,
+    `TopLevelDomain` UInt64,
+    `URLScheme` UInt64,
+    `OpenstatServiceNameHash` UInt64,
+    `OpenstatCampaignIDHash` UInt64,
+    `OpenstatAdIDHash` UInt64,
+    `OpenstatSourceIDHash` UInt64,
+    `UTMSourceHash` UInt64,
+    `UTMMediumHash` UInt64,
+    `UTMCampaignHash` UInt64,
+    `UTMContentHash` UInt64,
+    `UTMTermHash` UInt64,
+    `FromHash` UInt64,
+    `WebVisorEnabled` UInt8,
+    `WebVisorActivity` UInt32,
+    `ParsedParams` Nested(
+        Key1 String,
+        Key2 String,
+        Key3 String,
+        Key4 String,
+        Key5 String,
+        ValueDouble Float64),
+    `Market` Nested(
+        Type UInt8,
+        GoalID UInt32,
+        OrderID String,
+        OrderPrice Int64,
+        PP UInt32,
+        DirectPlaceID UInt32,
+        DirectOrderID UInt32,
+        DirectBannerID UInt32,
+        GoodID String,
+        GoodName String,
+        GoodQuantity Int32,
+        GoodPrice Int64),
+    `IslandID` FixedString(16)
+)
+ENGINE = CollapsingMergeTree(Sign)
+PARTITION BY toYYYYMM(StartDate)
+ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
+SAMPLE BY intHash32(UserID)
+SETTINGS index_granularity = 8192
+```
+
+您可以在 `clickhouse-client` 的交互模式下执行这些查询(只需在终端中启动它,而不需要提前指定查询);如果你愿意,也可以尝试一些 [替代接口](../interfaces/index.md)。
+
+正如我们所看到的, `hits_v1` 使用了 [基本的MergeTree引擎](../engines/table_engines/mergetree_family/mergetree.md),而 `visits_v1` 使用了 [CollapsingMergeTree](../engines/table_engines/mergetree_family/collapsingmergetree.md) 变体。
+
+### 导入数据 {#import-data}
+
+与许多其他SQL数据库一样,数据是通过 [INSERT INTO](../sql_reference/statements/insert_into.md) 查询导入到ClickHouse的。 然而,数据通常以某种 [支持的序列化格式](../interfaces/formats.md) 提供,而不是使用 `VALUES` 子句(虽然它也被支持)。
+
+我们之前下载的文件是以制表符分隔的格式,所以这里演示如何通过控制台客户端导入它们:
+
+``` bash
+clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv
+clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv
+```
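+
+除了控制台客户端,也可以通过 [HTTP接口](../interfaces/http.md) 导入数据。下面是一个示意性的例子(假设服务器监听本机默认的8123端口,且上面的表已创建;仅作演示):
+
+``` bash
+# 通过 HTTP 接口以 TSV 格式插入前1000行数据(示意)
+head -n 1000 hits_v1.tsv | curl 'http://localhost:8123/?query=INSERT%20INTO%20tutorial.hits_v1%20FORMAT%20TSV' --data-binary @-
+```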
+
+ClickHouse有很多 [可调整的设置](../operations/settings/index.md),在控制台客户端中指定它们的一种方法是通过参数,如上面的 `--max_insert_block_size`。 找出有哪些可用的设置、它们的含义以及默认值的最简单方法是查询 `system.settings` 表:
+
+``` sql
+SELECT name, value, changed, description
+FROM system.settings
+WHERE name LIKE '%max_insert_b%'
+FORMAT TSV
+
+max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
+```
+
+你也可以在导入后对表执行 [OPTIMIZE](../sql_reference/statements/misc.md#misc_operations-optimize)。 使用MergeTree-family引擎配置的表总是在后台合并数据部分以优化数据存储(或至少检查是否有意义)。 下面的查询强制表引擎立即进行存储优化,而不是稍后再进行:
+
+``` bash
+clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
+clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL"
+```
+
+这些查询会启动I/O和CPU密集型操作,所以如果表一直有新数据写入,最好不要管它,让合并在后台运行。
+
+现在我们可以检查表导入是否成功:
+
+``` bash
+clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1"
+clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1"
+```
+
+## 查询示例 {#example-queries}
+
+``` sql
+SELECT
+    StartURL AS URL,
+    AVG(Duration) AS AvgDuration
+FROM tutorial.visits_v1
+WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30'
+GROUP BY URL
+ORDER BY AvgDuration DESC
+LIMIT 10
+```
+
+``` sql
+SELECT
+    sum(Sign) AS visits,
+    sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits,
+    (100. * goal_visits) / visits AS goal_percent
+FROM tutorial.visits_v1
+WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru')
+```
+
+## 集群部署 {#cluster-deployment}
+
+ClickHouse集群是一个同质集群。 设置步骤:
+
+1. 在集群的所有机器上安装ClickHouse服务器
+2. 在配置文件中设置集群配置
+3. 在每个实例上创建本地表
+4. 创建一个 [分布式表](../engines/table_engines/special/distributed.md)
+
+[分布式表](../engines/table_engines/special/distributed.md) 实际上是一种到ClickHouse集群本地表的 “view”。 从分布式表中SELECT会使用集群所有分片的资源来执行查询。 您可以为多个集群指定配置,并创建多个分布式表,为不同的集群提供视图。
+
+具有三个分片的集群的示例配置,每个分片一个副本:
+
+``` xml
+<remote_servers>
+    <perftest_3shards_1replicas>
+        <shard>
+            <replica>
+                <host>example-perftest01j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <replica>
+                <host>example-perftest02j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <replica>
+                <host>example-perftest03j.yandex.ru</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </perftest_3shards_1replicas>
+</remote_servers>
+```
+
+为了进一步演示,让我们使用与创建 `hits_v1` 时相同的 `CREATE TABLE` 查询创建一个新的本地表,但表名不同:
+
+``` sql
+CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
+```
+
+创建提供集群本地表视图的分布式表:
+
+``` sql
+CREATE TABLE tutorial.hits_all AS tutorial.hits_local
+ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
+```
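+
+创建完成后,就可以像查询本地表一样查询分布式表,查询会在所有分片上执行(下面的例子假设各机器上已建好 `hits_local` 并导入了数据,仅作演示):
+
+``` bash
+clickhouse-client --query "SELECT count() FROM tutorial.hits_all"
+```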
note "注" + ZooKeeper不是一个严格的requirement:在某些简单的情况下,您可以通过将数据写入应用程序代码中的所有副本来复制数据。 这种方法是 **不** 建议,在这种情况下,ClickHouse将无法保证所有副本上的数据一致性。 因此,它成为您的应用程序的责任。 + +ZooKeeper位置在配置文件中指定: + +``` xml + + + zoo01.yandex.ru + 2181 + + + zoo02.yandex.ru + 2181 + + + zoo03.yandex.ru + 2181 + + +``` + +此外,我们需要设置宏来识别每个用于创建表的分片和副本: + +``` xml + + 01 + 01 + +``` + +如果在创建复制表时没有副本,则会实例化新的第一个副本。 如果已有实时副本,则新副本将克隆现有副本中的数据。 您可以选择首先创建所有复制的表,然后向其中插入数据。 另一种选择是创建一些副本,并在数据插入之后或期间添加其他副本。 + +``` sql +CREATE TABLE tutorial.hits_replica (...) +ENGINE = ReplcatedMergeTree( + '/clickhouse_perftest/tables/{shard}/hits', + '{replica}' +) +... +``` + +在这里,我们使用 [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md) 表引擎。 在参数中,我们指定包含分片和副本标识符的ZooKeeper路径。 + +``` sql +INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local; +``` + +复制在多主机模式下运行。 数据可以加载到任何副本中,然后系统会自动将其与其他实例同步。 复制是异步的,因此在给定时刻,并非所有副本都可能包含最近插入的数据。 至少应有一个副本允许数据摄取。 其他人将同步数据和修复一致性,一旦他们将再次变得活跃。 请注意,这种方法允许最近插入的数据丢失的可能性很低。 + +[原始文章](https://clickhouse.tech/docs/en/getting_started/tutorial/) diff --git a/docs/zh/guides/apply_catboost_model.md b/docs/zh/guides/apply_catboost_model.md deleted file mode 120000 index dd36e885974..00000000000 --- a/docs/zh/guides/apply_catboost_model.md +++ /dev/null @@ -1 +0,0 @@ -../../en/guides/apply_catboost_model.md \ No newline at end of file diff --git a/docs/zh/guides/apply_catboost_model.md b/docs/zh/guides/apply_catboost_model.md new file mode 100644 index 00000000000..4ac7d926961 --- /dev/null +++ b/docs/zh/guides/apply_catboost_model.md @@ -0,0 +1,239 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 41 +toc_title: "\u5E94\u7528CatBoost\u6A21\u578B" +--- + +# 在ClickHouse中应用Catboost模型 {#applying-catboost-model-in-clickhouse} + +[CatBoost](https://catboost.ai) 是一个自由和开源的梯度提升库开发 [Yandex](https://yandex.com/company/) 用于机器学习。 + +通过此指令,您将学习如何通过从SQL运行模型推理在ClickHouse中应用预先训练好的模型。 + +在ClickHouse中应用CatBoost模型: + +1. [创建表](#create-table). +2. [将数据插入到表中](#insert-data-to-table). +3. [碌莽禄into拢Integrate010-68520682\](#integrate-catboost-into-clickhouse) (可选步骤)。 +4. [从SQL运行模型推理](#run-model-inference). + +有关训练CatBoost模型的详细信息,请参阅 [培训和应用模型](https://catboost.ai/docs/features/training.html#training). + +## 先决条件 {#prerequisites} + +如果你没有 [Docker](https://docs.docker.com/install/) 然而,安装它。 + +!!! note "注" + [Docker](https://www.docker.com) 是一个软件平台,允许您创建容器,将CatBoost和ClickHouse安装与系统的其余部分隔离。 + +在应用CatBoost模型之前: + +**1.** 拉 [码头窗口映像](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) 从注册表: + +``` bash +$ docker pull yandex/tutorial-catboost-clickhouse +``` + +此Docker映像包含运行CatBoost和ClickHouse所需的所有内容:代码、运行时、库、环境变量和配置文件。 + +**2.** 确保已成功拉取Docker映像: + +``` bash +$ docker image ls +REPOSITORY TAG IMAGE ID CREATED SIZE +yandex/tutorial-catboost-clickhouse latest 622e4d17945b 22 hours ago 1.37GB +``` + +**3.** 基于此映像启动一个Docker容器: + +``` bash +$ docker run -it -p 8888:8888 yandex/tutorial-catboost-clickhouse +``` + +## 1. 创建表 {#create-table} + +为训练样本创建ClickHouse表: + +**1.** 在交互模式下启动ClickHouse控制台客户端: + +``` bash +$ clickhouse client +``` + +!!! 
note "注" + ClickHouse服务器已经在Docker容器内运行。 + +**2.** 使用以下命令创建表: + +``` sql +:) CREATE TABLE amazon_train +( + date Date MATERIALIZED today(), + ACTION UInt8, + RESOURCE UInt32, + MGR_ID UInt32, + ROLE_ROLLUP_1 UInt32, + ROLE_ROLLUP_2 UInt32, + ROLE_DEPTNAME UInt32, + ROLE_TITLE UInt32, + ROLE_FAMILY_DESC UInt32, + ROLE_FAMILY UInt32, + ROLE_CODE UInt32 +) +ENGINE = MergeTree ORDER BY date +``` + +**3.** 从ClickHouse控制台客户端退出: + +``` sql +:) exit +``` + +## 2. 将数据插入到表中 {#insert-data-to-table} + +插入数据: + +**1.** 运行以下命令: + +``` bash +$ clickhouse client --host 127.0.0.1 --query 'INSERT INTO amazon_train FORMAT CSVWithNames' < ~/amazon/train.csv +``` + +**2.** 在交互模式下启动ClickHouse控制台客户端: + +``` bash +$ clickhouse client +``` + +**3.** 确保数据已上传: + +``` sql +:) SELECT count() FROM amazon_train + +SELECT count() +FROM amazon_train + ++-count()-+ +| 65538 | ++-------+ +``` + +## 3. 碌莽禄into拢Integrate010-68520682\ {#integrate-catboost-into-clickhouse} + +!!! note "注" + **可选步骤。** Docker映像包含运行CatBoost和ClickHouse所需的所有内容。 + +碌莽禄to拢integrate010-68520682\: + +**1.** 构建评估库。 + +评估CatBoost模型的最快方法是编译 `libcatboostmodel.` 图书馆. 有关如何构建库的详细信息,请参阅 [CatBoost文件](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html). + +**2.** 例如,在任何地方和任何名称创建一个新目录, `data` 并将创建的库放入其中。 Docker映像已经包含了库 `data/libcatboostmodel.so`. + +**3.** 例如,在任何地方和任何名称为config model创建一个新目录, `models`. + +**4.** 创建具有任意名称的模型配置文件,例如, `models/amazon_model.xml`. + +**5.** 描述模型配置: + +``` xml + + + + catboost + + amazon + + /home/catboost/tutorial/catboost_model.bin + + 0 + + +``` + +**6.** 将CatBoost的路径和模型配置添加到ClickHouse配置: + +``` xml + +/home/catboost/data/libcatboostmodel.so +/home/catboost/models/*_model.xml +``` + +## 4. 从SQL运行模型推理 {#run-model-inference} + +对于测试模型,运行ClickHouse客户端 `$ clickhouse client`. + +让我们确保模型正常工作: + +``` sql +:) SELECT + modelEvaluate('amazon', + RESOURCE, + MGR_ID, + ROLE_ROLLUP_1, + ROLE_ROLLUP_2, + ROLE_DEPTNAME, + ROLE_TITLE, + ROLE_FAMILY_DESC, + ROLE_FAMILY, + ROLE_CODE) > 0 AS prediction, + ACTION AS target +FROM amazon_train +LIMIT 10 +``` + +!!! note "注" + 功能 [模型值](../sql_reference/functions/other_functions.md#function-modelevaluate) 返回带有多类模型的每类原始预测的元组。 + +让我们预测一下: + +``` sql +:) SELECT + modelEvaluate('amazon', + RESOURCE, + MGR_ID, + ROLE_ROLLUP_1, + ROLE_ROLLUP_2, + ROLE_DEPTNAME, + ROLE_TITLE, + ROLE_FAMILY_DESC, + ROLE_FAMILY, + ROLE_CODE) AS prediction, + 1. / (1 + exp(-prediction)) AS probability, + ACTION AS target +FROM amazon_train +LIMIT 10 +``` + +!!! note "注" + 更多信息 [exp()](../sql_reference/functions/math_functions.md) 功能。 + +让我们计算样本的LogLoss: + +``` sql +:) SELECT -avg(tg * log(prob) + (1 - tg) * log(1 - prob)) AS logloss +FROM +( + SELECT + modelEvaluate('amazon', + RESOURCE, + MGR_ID, + ROLE_ROLLUP_1, + ROLE_ROLLUP_2, + ROLE_DEPTNAME, + ROLE_TITLE, + ROLE_FAMILY_DESC, + ROLE_FAMILY, + ROLE_CODE) AS prediction, + 1. / (1. + exp(-prediction)) AS prob, + ACTION AS tg + FROM amazon_train +) +``` + +!!! 
note "注" + 更多信息 [avg()](../sql_reference/aggregate_functions/reference.md#agg_function-avg) 和 [日志()](../sql_reference/functions/math_functions.md) 功能。 + +[原始文章](https://clickhouse.tech/docs/en/guides/apply_catboost_model/) diff --git a/docs/zh/guides/index.md b/docs/zh/guides/index.md deleted file mode 120000 index 162dcbc3b8f..00000000000 --- a/docs/zh/guides/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/guides/index.md \ No newline at end of file diff --git a/docs/zh/guides/index.md b/docs/zh/guides/index.md new file mode 100644 index 00000000000..00fe071434d --- /dev/null +++ b/docs/zh/guides/index.md @@ -0,0 +1,16 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u6307\u5357" +toc_priority: 38 +toc_title: "\u6982\u8FF0" +--- + +# ClickHouse指南 {#clickhouse-guides} + +详细的一步一步的说明,帮助解决使用ClickHouse的各种任务列表: + +- [简单集群设置教程](../getting_started/tutorial.md) +- [在ClickHouse中应用CatBoost模型](apply_catboost_model.md) + +[原始文章](https://clickhouse.tech/docs/en/guides/) diff --git a/docs/zh/index.md b/docs/zh/index.md index b10fafebe93..455a578456b 100644 --- a/docs/zh/index.md +++ b/docs/zh/index.md @@ -1,36 +1,41 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +--- + # 什么是ClickHouse? {#shi-yao-shi-clickhouse} ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)。 在传统的行式数据库系统中,数据按如下顺序存储: -| Row | WatchID | JavaEnable | Title | GoodEvent | EventTime | -|-----|-------------|------------|--------------------|-----------|---------------------| -| \#0 | 89354350662 | 1 | Investor Relations | 1 | 2016-05-18 05:19:20 | -| \#1 | 90329509958 | 0 | Contact us | 1 | 2016-05-18 08:10:20 | -| \#2 | 89953706054 | 1 | Mission | 1 | 2016-05-18 07:38:00 | -| \#N | … | … | … | … | … | +| 行 | 小心点 | JavaEnable | 标题 | GoodEvent | 活动时间 | +|-----|-------------|------------|------------|-----------|---------------------| +| \#0 | 89354350662 | 1 | 投资者关系 | 1 | 2016-05-18 05:19:20 | +| \#1 | 90329509958 | 0 | 联系我们 | 1 | 2016-05-18 08:10:20 | +| \#2 | 89953706054 | 1 | 任务 | 1 | 2016-05-18 07:38:00 | +| \#N | … | … | … | … | … | 处于同一行中的数据总是被物理的存储在一起。 常见的行式数据库系统有: MySQL、Postgres和MS SQL Server。 -{: .grey } +{: .灰色 } 在列式数据库系统中,数据按如下的顺序存储: -| Row: | \#0 | \#1 | \#2 | \#N | +| 行: | \#0 | \#1 | \#2 | \#N | |-------------|---------------------|---------------------|---------------------|-----| -| WatchID: | 89354350662 | 90329509958 | 89953706054 | … | +| 小心点: | 89354350662 | 90329509958 | 89953706054 | … | | JavaEnable: | 1 | 0 | 1 | … | -| Title: | Investor Relations | Contact us | Mission | … | +| 标题: | 投资者关系 | 联系我们 | 任务 | … | | GoodEvent: | 1 | 1 | 1 | … | -| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … | +| 活动时间: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … | 该示例中只展示了数据在列式数据库中数据的排列顺序。 对于存储而言,列式数据库总是将同一列的数据存储在一起,不同列的数据也总是分开存储。 常见的列式数据库有: Vertica、 Paraccel (Actian Matrix,Amazon Redshift)、 Sybase IQ、 Exasol、 Infobright、 InfiniDB、 MonetDB (VectorWise, Actian Vector)、 LucidDB、 SAP HANA、 Google Dremel、 Google PowerDrill、 Druid、 kdb+。 -{: .grey } +{: .灰色 } 不同的存储方式适合不同的场景,这里的查询场景包括: 进行了哪些查询,多久查询一次以及各类查询的比例; 每种查询读取多少数据————行、列和字节;读取数据和写入数据之间的关系;使用的数据集大小以及如何使用本地的数据集;是否使用事务,以及它们是如何进行隔离的;数据的复制机制与数据的完整性要求;每种类型的查询要求的延迟与吞吐量等等。 @@ -68,7 +73,7 @@ ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS) 看到差别了么?下面将详细介绍为什么会发生这种情况。 -### Input/output {#inputoutput} +### 输入/输出 {#inputoutput} 1. 针对分析类查询,通常只需要读取表的一小部分列。在列式数据库中你可以只读取你需要的数据。例如,如果只需要读取100列中的5列,这将帮助你最少减少20倍的I/O消耗。 2. 
由于数据总是打包成批量读取的,所以压缩是非常容易的。同时数据按列分别存储这也更容易压缩。这进一步降低了I/O的体积。 @@ -76,54 +81,6 @@ ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS) 例如,查询«统计每个广告平台的记录数量»需要读取«广告平台ID»这一列,它在未压缩的情况下需要1个字节进行存储。如果大部分流量不是来自广告平台,那么这一列至少可以以十倍的压缩率被压缩。当采用快速压缩算法,它的解压速度最少在十亿字节(未压缩数据)每秒。换句话说,这个查询可以在单个服务器上以每秒大约几十亿行的速度进行处理。这实际上是当前实现的速度。 -
    - -示例 - - $ clickhouse-client - ClickHouse client version 0.0.52053. - Connecting to localhost:9000. - Connected to ClickHouse server version 0.0.52053. - - :) SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 - - SELECT - CounterID, - count() - FROM hits - GROUP BY CounterID - ORDER BY count() DESC - LIMIT 20 - - ┌─CounterID─┬──count()─┐ - │ 114208 │ 56057344 │ - │ 115080 │ 51619590 │ - │ 3228 │ 44658301 │ - │ 38230 │ 42045932 │ - │ 145263 │ 42042158 │ - │ 91244 │ 38297270 │ - │ 154139 │ 26647572 │ - │ 150748 │ 24112755 │ - │ 242232 │ 21302571 │ - │ 338158 │ 13507087 │ - │ 62180 │ 12229491 │ - │ 82264 │ 12187441 │ - │ 232261 │ 12148031 │ - │ 146272 │ 11438516 │ - │ 168777 │ 11403636 │ - │ 4120072 │ 11227824 │ - │ 10938808 │ 10519739 │ - │ 74088 │ 9047015 │ - │ 115079 │ 8837972 │ - │ 337234 │ 8205961 │ - └───────────┴──────────┘ - - 20 rows in set. Elapsed: 0.153 sec. Processed 1.00 billion rows, 4.00 GB (6.53 billion rows/s., 26.10 GB/s.) - - :) - -
-
-
### CPU {#cpu}

由于执行一个查询需要处理大量的行,因此在整个向量上执行所有操作将比在每一行上执行所有操作更加高效。同时这将有助于实现一个几乎没有调用成本的查询引擎。如果你不这样做,使用任何一个机械硬盘,查询引擎都不可避免的停止CPU进行等待。所以,在数据按列存储并且按列执行是很有意义的。
diff --git a/docs/zh/interfaces/cli.md b/docs/zh/interfaces/cli.md
index 7e858ce458b..fef8e404aef 100644
--- a/docs/zh/interfaces/cli.md
+++ b/docs/zh/interfaces/cli.md
@@ -1,3 +1,4 @@
+
 # 命令行客户端 {#ming-ling-xing-ke-hu-duan}

 通过命令行来访问 ClickHouse,您可以使用 `clickhouse-client`
@@ -48,7 +49,7 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA
 命令行客户端是基于 `replxx`。换句话说,它可以使用我们熟悉的快捷键方式来操作以及保留历史命令。
 历史命令会写入在 `~/.clickhouse-client-history` 中。

-默认情况下,输出的格式是 `PrettyCompact`。您可以通过 FORMAT 设置根据不同查询来修改格式,或者通过在查询末尾指定 `\G` 字符,或通过在命令行中使用 `--format` or `--vertical` 参数,或使用客户端的配置文件。
+默认情况下,输出的格式是 `PrettyCompact`。您可以通过 FORMAT 设置根据不同查询来修改格式,或者通过在查询末尾指定 `\G` 字符,或通过在命令行中使用 `--format` 或 `--vertical` 参数,或使用客户端的配置文件。

 若要退出客户端,使用 Ctrl+D (或 Ctrl+C),或者输入以下其中一个命令:`exit`, `quit`, `logout`, `учше`, `йгше`, `дщпщге`, `exit;`, `quit;`, `logout;`, `учшеж`, `йгшеж`, `дщпщгеж`, `q`, `й`, `q`, `Q`, `:q`, `й`, `Й`, `Жй`
@@ -61,7 +62,7 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA

 您可以通过 Ctrl+C 来取消一个长时间的查询。然而,您依然需要等待服务端来中止请求。在某个阶段去取消查询是不可能的。如果您不等待并再次按下 Ctrl + C,客户端将会退出。

-命令行客户端允许通过外部数据 (外部临时表) 来查询。更多相关信息,请参考 «[外部数据查询处理](../operations/table_engines/external_data.md)».
+命令行客户端允许通过外部数据 (外部临时表) 来查询。更多相关信息,请参考 «[外部数据查询处理](../engines/table_engines/special/external_data.md)».

 ## 配置 {#interfaces_cli_configuration}
diff --git a/docs/zh/interfaces/cpp.md b/docs/zh/interfaces/cpp.md
index 6f162036e01..4aa4f15a456 100644
--- a/docs/zh/interfaces/cpp.md
+++ b/docs/zh/interfaces/cpp.md
@@ -1,5 +1,6 @@
+
 # C ++客户端库 {#c-ke-hu-duan-ku}

-请参阅以下网站的自述文件[clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp)资料库。
+请参阅 [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) 资料库的README文件。

-[Original article](https://clickhouse.tech/docs/zh/interfaces/cpp/)
+[原始文章](https://clickhouse.tech/docs/zh/interfaces/cpp/)
diff --git a/docs/zh/interfaces/formats.md b/docs/zh/interfaces/formats.md
index 80ca5fdf221..64c1940df86 100644
--- a/docs/zh/interfaces/formats.md
+++ b/docs/zh/interfaces/formats.md
@@ -1,3 +1,4 @@
+
 # 输入输出格式 {#formats}

 ClickHouse 可以接受多种数据格式,可以在 (`INSERT`) 以及 (`SELECT`) 请求中使用。
@@ -10,19 +11,19 @@ ClickHouse 可以接受多种数据格式,可以在 (`INSERT`) 以及 (`SELECT
 | [TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ |
 | [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
 | [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
-| [Template](#format-template) | ✔ | ✔ |
+| [Template](#format-template) | ✔ | ✔ |
 | [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
 | [CSV](#csv) | ✔ | ✔ |
 | [CSVWithNames](#csvwithnames) | ✔ | ✔ |
-| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
-| [Values](#data-format-values) | ✔ | ✔ |
-| [Vertical](#vertical) | ✗ | ✔ |
+| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
+| [Values](#data-format-values) | ✔ | ✔ |
+| [Vertical](#vertical) | ✗ | ✔ |
 | VerticalRaw | ✗ | ✔ |
 | [JSON](#json) | ✗ | ✔ |
 | [JSONCompact](#jsoncompact) | ✗ | ✔ |
 | [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
 | [TSKV](#tskv) | ✔ | ✔ |
-| [Pretty](#pretty) | ✗ | ✔ |
+| [Pretty](#pretty) | ✗ | ✔ |
 | [PrettyCompact](#prettycompact) | ✗ | ✔ |
 | [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
 | [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
@@ -30,11 +31,11 @@
 | [Protobuf](#protobuf) | ✔ | ✔ |
 | [Avro](#data-format-avro) | ✔ | ✔ |
 | [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
-| [Parquet](#data-format-parquet) | ✔ | ✔ |
+| [Parquet](#data-format-parquet) | ✔ | ✔ |
 | [ORC](#data-format-orc) | ✔ | ✗ |
 | [RowBinary](#rowbinary) | ✔ | ✔ |
 | [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
-| [Native](#native) | ✔ | ✔ |
+| [Native](#native) | ✔ | ✔ |
 | [Null](#null) | ✗ | ✔ |
 | [XML](#xml) | ✗ | ✔ |
 | [CapnProto](#capnproto) | ✔ | ✔ |
@@ -70,7 +71,7 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD

 整数以十进制形式写入。数字在开头可以包含额外的 `+` 字符(解析时忽略,格式化时不记录)。非负数不能包含负号。 读取时,允许将空字符串解析为零,或者(对于带符号的类型)将仅包含负号的字符串解析为零。 不符合相应数据类型的数字可能会被解析为不同的数字,而不会显示错误消息。

-浮点数以十进制形式写入。点号用作小数点分隔符。支持指数等符号,如’inf’,‘+ inf’,‘-inf’和’nan’。 浮点数的输入可以以小数点开始或结束。
+浮点数以十进制形式写入。点号用作小数点分隔符。支持指数等符号,如 'inf'、'+inf'、'-inf' 和 'nan'。 浮点数的输入可以以小数点开始或结束。
 格式化的时候,浮点数的精确度可能会丢失。 解析的时候,没有严格需要去读取与机器可以表示的最接近的数值。

@@ -96,7 +97,7 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD

 数组写在方括号内的逗号分隔值列表中。 通常情况下,数组中的数字项目会被拼凑,但日期,带时间的日期以及字符串将使用与上面相同的转义规则用单引号引起来。

-[NULL](../query_language/syntax.md) 将输出为 `\N`。
+[NULL](../sql_reference/syntax.md) 将输出为 `\N`。

 ## TabSeparatedRaw {#tabseparatedraw}

@@ -120,13 +121,13 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD

 这种格式也可以使用名称 `TSVWithNamesAndTypes` 来表示。

-## Template {#format-template}
+## Template {#format-template}

-This format allows to specify a custom format string with placeholders for values with specified escaping rule.
+此格式允许为具有指定转义规则的值指定带有占位符的自定义格式字符串。

-It uses settings `format_schema`, `format_schema_rows`, `format_schema_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` when using `JSON` escaping, see further)
+它使用设置 `format_schema`、`format_schema_rows`、`format_schema_rows_between_delimiter`,以及其他格式的一些设置(例如使用 `JSON` 转义时的 `output_format_json_quote_64bit_integers`,详见下文)。

-Format string `format_schema_rows` specifies rows format with the following syntax:
+格式字符串 `format_schema_rows` 使用以下语法指定行格式:

 `delimiter_1${column_1:serializeAs_1}delimiter_2${column_2:serializeAs_2} ... delimiter_N`,

@@ -150,25 +151,25 @@ Format string `format_schema_rows` specifies rows format with the following synt

 `Search phrase: 'bathroom interior design', count: 2166, ad price: $3;`

-The `format_schema_rows_between_delimiter` setting specifies delimiter between rows, which is printed (or expected) after every row except the last one (`\n` by default)
+`format_schema_rows_between_delimiter` 设置指定行之间的分隔符,该分隔符在除最后一行之外的每一行之后打印(或预期出现)(默认为 `\n`)。

-Format string `format_schema` has the same syntax as `format_schema_rows` and allows to specify a prefix, a suffix and a way to print some additional information. It contains the following placeholders instead of column names:
+格式字符串 `format_schema` 与 `format_schema_rows` 具有相同的语法,并允许指定前缀、后缀以及打印一些附加信息的方式。 它包含以下占位符而不是列名:

-- `data` is the rows with data in `format_schema_rows` format, separated by `format_schema_rows_between_delimiter`. This placeholder must be the first placeholder in the format string.
-- `totals` is the row with total values in `format_schema_rows` format (when using WITH TOTALS)
-- `min` is the row with minimum values in `format_schema_rows` format (when extremes is set to 1)
-- `max` is the row with maximum values in `format_schema_rows` format (when extremes is set to 1)
-- `rows` is the total number of output rows
-- `rows_before_limit` is the minimal number of rows there would have been without LIMIT. Output only if the query contains LIMIT. If the query contains GROUP BY, rows\_before\_limit\_at\_least is the exact number of rows there would have been without a LIMIT.
-- `time` is the request execution time in seconds
-- `rows_read` is the number of rows have been read
-- `bytes_read` is the number of bytes (uncompressed) have been read
+- `data` 是包含数据的行,格式为 `format_schema_rows`,由 `format_schema_rows_between_delimiter` 分隔。 此占位符必须是格式字符串中的第一个占位符。
+- `totals` 是包含总计值的行,格式为 `format_schema_rows`(与 WITH TOTALS 一起使用时)
+- `min` 是包含最小值的行,格式为 `format_schema_rows`(当 extremes 设置为1时)
+- `max` 是包含最大值的行,格式为 `format_schema_rows`(当 extremes 设置为1时)
+- `rows` 是输出行的总数
+- `rows_before_limit` 是没有LIMIT时本应有的最小行数。 仅当查询包含LIMIT时输出。 如果查询包含GROUP BY,则 rows\_before\_limit\_at\_least 是没有LIMIT时本应有的确切行数。
+- `time` 是请求执行时间,以秒为单位
+- `rows_read` 是已读取的行数
+- `bytes_read` 是已读取的(未压缩)字节数

-The placeholders `data`, `totals`, `min` and `max` must not have escaping rule specified (or `None` must be specified explicitly). The remaining placeholders may have any escaping rule specified.
-If the `format_schema` setting is an empty string, `${data}` is used as default value.
-For insert queries format allows to skip some columns or some fields if prefix or suffix (see example).
+占位符 `data`、`totals`、`min` 和 `max` 必须没有指定转义规则(或必须显式指定 `None`)。 其余的占位符可以指定任何转义规则。
+如果 `format_schema` 设置为空字符串,则使用 `${data}` 作为默认值。
+对于插入查询,如果有前缀或后缀,此格式允许跳过某些列或字段(见示例)。

-`Select` example:
+`Select` 示例:

``` sql
SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5
@@ -210,7 +211,7 @@ format_schema_rows_between_delimiter = '\n    '
```

-`Insert` example:
+`Insert` 示例:

    Some header
    Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1

@@ -223,14 +224,14 @@
format_schema = 'Some header\n${data}\nTotal rows: ${:CSV}\n',
format_schema_rows = 'Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV}'
```

-`PageViews`, `UserID`, `Duration` and `Sign` inside placeholders are names of columns in the table. Values after `Useless field` in rows and after `\nTotal rows:` in suffix will be ignored.
-All delimiters in the input data must be strictly equal to delimiters in specified format strings.
+占位符内的 `PageViews`、`UserID`、`Duration` 和 `Sign` 是表中列的名称。 行中 `Useless field` 之后以及后缀中 `\nTotal rows:` 之后的值将被忽略。
+输入数据中的所有分隔符必须严格等于指定格式字符串中的分隔符。
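+
+下面是一个最小的可运行示例,把上面的占位符语法串起来(设置名沿用本节文档描述的 `format_schema_rows`;仅作演示):
+
+``` bash
+# 输出:Page views: 1, Duration: 17
+clickhouse-client --query "SELECT 1 AS PageViews, 17 AS Duration FORMAT Template SETTINGS format_schema_rows = 'Page views: \${PageViews:CSV}, Duration: \${Duration:CSV}'"
+```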

 ## TemplateIgnoreSpaces {#templateignorespaces}

-This format is suitable only for input.
-Similar to `Template`, but skips whitespace characters between delimiters and values in the input stream. However, if format strings contain whitespace characters, these characters will be expected in the input stream. Also allows to specify empty placeholders (`${}` or `${:None}`) to split some delimiter into separate parts to ignore spaces between them. Such placeholders are used only for skipping whitespace characters.
-It’s possible to read `JSON` using this format, if values of columns have the same order in all rows. For example, the following request can be used for inserting data from output example of format [JSON](#json):
+此格式仅适用于输入。
+类似于 `Template`,但跳过输入流中分隔符和值之间的空白字符。 但是,如果格式字符串包含空白字符,则输入流中也需要这些字符。 还允许指定空占位符(`${}` 或 `${:None}`),将某些分隔符拆分为单独的部分,以忽略它们之间的空格。 此类占位符仅用于跳过空白字符。
+如果列的值在所有行中具有相同的顺序,则可以使用此格式读取 `JSON`。 例如,以下请求可用于插入 [JSON](#json) 格式输出示例中的数据:

``` sql
INSERT INTO table_name FORMAT TemplateIgnoreSpaces SETTINGS
@@ -254,7 +255,7 @@ format_schema_rows_between_delimiter = ','

    SearchPhrase=curtain designs count()=1064
    SearchPhrase=baku count()=1000

-[NULL](../query_language/syntax.md) 输出为 `\N`。
+[NULL](../sql_reference/syntax.md) 输出为 `\N`。

``` sql
SELECT * FROM t_null FORMAT TSKV
```

@@ -288,10 +289,10 @@ CSV 格式是和 TabSeparated 一样的方式输出总数和极值。

 会输出带头部行,和 `TabSeparatedWithNames` 一样。

-## CustomSeparated {#format-customseparated}
+## CustomSeparated {#format-customseparated}

-Similar to [Template](#format-template), but it prints or reads all columns and uses escaping rule from setting `format_custom_escaping_rule` and delimiters from settings `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` and `format_custom_result_after_delimiter`, not from format strings.
-There is also `CustomSeparatedIgnoreSpaces` format, which is similar to `TemplateIgnoreSpaces`.
+类似于 [Template](#format-template),但它打印或读取所有列;转义规则取自设置 `format_custom_escaping_rule`,分隔符取自设置 `format_custom_field_delimiter`、`format_custom_row_before_delimiter`、`format_custom_row_after_delimiter`、`format_custom_row_between_delimiter`、`format_custom_result_before_delimiter` 和 `format_custom_result_after_delimiter`,而不是取自格式字符串。
+还有 `CustomSeparatedIgnoreSpaces` 格式,它类似于 `TemplateIgnoreSpaces`。

 ## JSON {#json}

@@ -378,7 +379,7 @@ JSON 与 JavaScript 兼容。为了确保这一点,一些字符被另外转义

 该格式仅适用于输出查询结果,但不适用于解析输入(将数据插入到表中)。

-ClickHouse 支持 [NULL](../query_language/syntax.md), 在 JSON 格式中以 `null` 输出来表示.
+ClickHouse 支持 [NULL](../sql_reference/syntax.md),在 JSON 格式中以 `null` 输出来表示。

 参考 JSONEachRow 格式。

@@ -449,23 +450,23 @@ ClickHouse 支持 [NULL](../query_language/syntax.md), 在 JSON 格式中以 `nu

 对于解析,任何顺序都支持不同列的值。可以省略某些值 - 它们被视为等于它们的默认值。在这种情况下,零和空行被用作默认值。 作为默认值,不支持表中指定的复杂值。元素之间的空白字符被忽略。如果在对象之后放置逗号,它将被忽略。对象不一定必须用新行分隔。

-### Usage of Nested Structures {#jsoneachrow-nested}
+### 嵌套结构的使用 {#jsoneachrow-nested}

-If you have a table with the [Nested](../data_types/nested_data_structures/nested.md) data type columns, you can insert JSON data having the same structure. Enable this functionality with the [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) setting.
+如果表中有 [Nested](../sql_reference/data_types/nested_data_structures/nested.md) 数据类型的列,你可以插入具有相同结构的JSON数据。 使用 [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) 设置启用此功能。

-For example, consider the following table:
+例如,请考虑下表:

``` sql
CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory
```

-As you can find in the `Nested` data type description, ClickHouse treats each component of the nested structure as a separate column, `n.s` and `n.i` for our table. So you can insert the data the following way:
+正如你在 `Nested` 数据类型说明中所看到的,ClickHouse将嵌套结构的每个组件视为单独的列(对于我们的表是 `n.s` 和 `n.i`)。 所以你可以通过以下方式插入数据:

``` sql
INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]}
```

-To insert data as hierarchical JSON object set [input\_format\_import\_nested\_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json).
+要将数据作为分层JSON对象插入,请设置 [input\_format\_import\_nested\_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json)。

``` json
{
    "n": {
        "s": ["abc", "def"],
        "i": [1, 23]
    }
}
```

-Without this setting ClickHouse throws the exception.
+如果没有此设置,ClickHouse将引发异常。

``` sql
SELECT name, value FROM system.settings WHERE name = 'input_format_import_nested_json'
@@ -508,7 +509,7 @@ SELECT * FROM json_each_row_nested
└───────────────┴────────┘
```

-## Native {#native}
+## Native {#native}

 最高性能的格式。 据通过二进制格式的块进行写入和读取。对于每个块,该块中的行数,列数,列名称和类型以及列的部分将被相继记录。 换句话说,这种格式是 «列式»的 - 它不会将列转换为行。 这是用于在服务器之间进行交互的本地界面中使用的格式,用于使用命令行客户端和 C++ 客户端。

 没有输出。但是,查询已处理完毕,并且在使用命令行客户端时,数据将传输到客户端。这仅用于测试,包括生产力测试。
 显然,这种格式只适用于输出,不适用于解析。

-## Pretty {#pretty}
+## Pretty {#pretty}

 将数据以表格形式输出,也可以使用 ANSI 转义字符在终端中设置颜色。
 它会绘制一个完整的表格,每行数据在终端中占用两行。
 每一个结果块都会以单独的表格输出。这是很有必要的,以便结果块不用缓冲结果输出(缓冲在可以预见结果集宽度的时候是很有必要的)。

-[NULL](../query_language/syntax.md) 输出为 `ᴺᵁᴸᴸ`。
+[NULL](../sql_reference/syntax.md) 输出为 `ᴺᵁᴸᴸ`。

``` sql
SELECT * FROM t_null
```

@@ -610,29 +611,29 @@ FixedString 被简单地表示为一个字节序列。

 数组表示为 varint 长度(无符号 [LEB128](https://en.wikipedia.org/wiki/LEB128)),后跟有序的数组元素。

-对于 [NULL](../query_language/syntax.md#null-literal) 的支持, 一个为 1 或 0 的字节会加在每个 [Nullable](../data_types/nullable.md) 值前面。如果为 1, 那么该值就是 `NULL`。 如果为 0,则不为 `NULL`。
+对于 [NULL](../sql_reference/syntax.md#null-literal) 的支持, 一个为 1 或 0 的字节会加在每个 [Nullable](../sql_reference/data_types/nullable.md) 值前面。如果为 1, 那么该值就是 `NULL`。 如果为 0,则不为 `NULL`。

 ## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}

-Similar to [RowBinary](#rowbinary), but with added header:
+类似于 [RowBinary](#rowbinary),但添加了头部:

-- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
-- N `String`s specifying column names
-- N `String`s specifying column types
+- [LEB128](https://en.wikipedia.org/wiki/LEB128) 编码的列数(N)
+- N个 `String`,指定列名
+- N个 `String`,指定列类型

-## Values {#data-format-values}
+## Values {#data-format-values}

-在括号中打印每一行。行由逗号分隔。最后一行之后没有逗号。括号内的值也用逗号分隔。数字以十进制格式输出,不含引号。 数组以方括号输出。带有时间的字符串,日期和时间用引号包围输出。转义字符的解析规则与 [TabSeparated](#tabseparated) 格式类似。 在格式化过程中,不插入额外的空格,但在解析过程中,空格是被允许并跳过的(除了数组值之外的空格,这是不允许的)。[NULL](../query_language/syntax.md) 为 `NULL`。
+在括号中打印每一行。行由逗号分隔。最后一行之后没有逗号。括号内的值也用逗号分隔。数字以十进制格式输出,不含引号。 数组以方括号输出。带有时间的字符串,日期和时间用引号包围输出。转义字符的解析规则与 [TabSeparated](#tabseparated) 格式类似。 在格式化过程中,不插入额外的空格,但在解析过程中,空格是被允许并跳过的(除了数组值之外的空格,这是不允许的)。[NULL](../sql_reference/syntax.md) 为 `NULL`。

 以 Values 格式传递数据时需要转义的最小字符集是:单引号和反斜线。

 这是 `INSERT INTO t VALUES ...` 中可以使用的格式,但您也可以将其用于查询结果。

-## Vertical {#vertical}
+## Vertical {#vertical}

 使用指定的列名在单独的行上打印每个值。如果每行都包含大量列,则此格式便于打印一行或几行。

-[NULL](../query_language/syntax.md) 输出为 `ᴺᵁᴸᴸ`。
+[NULL](../sql_reference/syntax.md) 输出为 `ᴺᵁᴸᴸ`。

 示例:

@@ -747,9 +748,9 @@ SELECT * FROM t_null FORMAT Vertical

 ## CapnProto {#capnproto}

-Cap’n Proto 是一种二进制消息格式,类似 Protocol Buffers 和 Thriftis,但与 JSON 或 MessagePack 格式不一样。
+Cap'n Proto 是一种二进制消息格式,类似 Protocol Buffers 和 Thrift,但与 JSON 或 MessagePack 格式不一样。

-Cap’n Proto 消息格式是严格类型的,而不是自我描述,这意味着它们不需要外部的描述。这种格式可以实时地应用,并针对每个查询进行缓存。
+Cap'n Proto 消息格式是严格类型的,而不是自我描述的,这意味着它们需要外部的结构描述。这种结构描述可以实时地应用,并针对每个查询进行缓存。
-## Native {#native}
+## Native {#native}

 最高性能的格式。 据通过二进制格式的块进行写入和读取。对于每个块,该块中的行数,列数,列名称和类型以及列的部分将被相继记录。 换句话说,这种格式是 «列式»的 - 它不会将列转换为行。 这是用于在服务器之间进行交互的本地界面中使用的格式,用于使用命令行客户端和 C++ 客户端。

@@ -519,13 +520,13 @@
 没有输出。但是,查询已处理完毕,并且在使用命令行客户端时,数据将传输到客户端。这仅用于测试,包括生产力测试。 显然,这种格式只适用于输出,不适用于解析。

-## Pretty {#pretty}
+## Pretty {#pretty}

 将数据以表格形式输出,也可以使用 ANSI 转义字符在终端中设置颜色。 它会绘制一个完整的表格,每行数据在终端中占用两行。 每一个结果块都会以单独的表格输出。这是很有必要的,以便结果块不用缓冲结果输出(缓冲在可以预见结果集宽度的时候是很有必要的)。

-[NULL](../query_language/syntax.md) 输出为 `ᴺᵁᴸᴸ`。
+[NULL](../sql_reference/syntax.md) 输出为 `ᴺᵁᴸᴸ`。

``` sql
SELECT * FROM t_null
@@ -610,29 +611,29 @@ FixedString 被简单地表示为一个字节序列。

 数组表示为 varint 长度(无符号 [LEB128](https://en.wikipedia.org/wiki/LEB128)),后跟有序的数组元素。

-对于 [NULL](../query_language/syntax.md#null-literal) 的支持, 一个为 1 或 0 的字节会加在每个 [Nullable](../data_types/nullable.md) 值前面。如果为 1, 那么该值就是 `NULL`。 如果为 0,则不为 `NULL`。
+对于 [NULL](../sql_reference/syntax.md#null-literal) 的支持,一个为 1 或 0 的字节会加在每个 [Nullable](../sql_reference/data_types/nullable.md) 值前面。如果为 1,那么该值就是 `NULL`;如果为 0,则不为 `NULL`。

 ## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}

-Similar to [RowBinary](#rowbinary), but with added header:
+类似于 [RowBinary](#rowbinary),但添加了头部:

-- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
-- N `String`s specifying column names
-- N `String`s specifying column types
+- [LEB128](https://en.wikipedia.org/wiki/LEB128) 编码的列数(N)
+- N 个指定列名的 `String`
+- N 个指定列类型的 `String`

-## Values {#data-format-values}
+## Values {#data-format-values}

-在括号中打印每一行。行由逗号分隔。最后一行之后没有逗号。括号内的值也用逗号分隔。数字以十进制格式输出,不含引号。 数组以方括号输出。带有时间的字符串,日期和时间用引号包围输出。转义字符的解析规则与 [TabSeparated](#tabseparated) 格式类似。 在格式化过程中,不插入额外的空格,但在解析过程中,空格是被允许并跳过的(除了数组值之外的空格,这是不允许的)。[NULL](../query_language/syntax.md) 为 `NULL`。
+在括号中打印每一行。行由逗号分隔。最后一行之后没有逗号。括号内的值也用逗号分隔。数字以十进制格式输出,不含引号。 数组以方括号输出。带有时间的字符串,日期和时间用引号包围输出。转义字符的解析规则与 [TabSeparated](#tabseparated) 格式类似。 在格式化过程中,不插入额外的空格,但在解析过程中,空格是被允许并跳过的(除了数组值之外的空格,这是不允许的)。[NULL](../sql_reference/syntax.md) 为 `NULL`。

 以 Values 格式传递数据时需要转义的最小字符集是:单引号和反斜线。

 这是 `INSERT INTO t VALUES ...` 中可以使用的格式,但您也可以将其用于查询结果。

-## Vertical {#vertical}
+## Vertical {#vertical}

 使用指定的列名在单独的行上打印每个值。如果每行都包含大量列,则此格式便于打印一行或几行。

-[NULL](../query_language/syntax.md) 输出为 `ᴺᵁᴸᴸ`。
+[NULL](../sql_reference/syntax.md) 输出为 `ᴺᵁᴸᴸ`。

 示例:

@@ -747,9 +748,9 @@ SELECT * FROM t_null FORMAT Vertical
 ## CapnProto {#capnproto}

-Cap’n Proto 是一种二进制消息格式,类似 Protocol Buffers 和 Thriftis,但与 JSON 或 MessagePack 格式不一样。
+Cap'n Proto 是一种二进制消息格式,类似 Protocol Buffers 和 Thrift,但与 JSON 或 MessagePack 格式不一样。

-Cap’n Proto 消息格式是严格类型的,而不是自我描述,这意味着它们不需要外部的描述。这种格式可以实时地应用,并针对每个查询进行缓存。
+Cap'n Proto 消息格式是严格类型的,而不是自我描述的,这意味着它们需要外部的模式描述。模式会即时应用,并针对每个查询进行缓存。

``` sql
SELECT SearchPhrase, count() AS c FROM test.hits
@@ -763,18 +764,18 @@ SELECT SearchPhrase, count() AS c FROM test.hits

   c @1 :Uint64;
 }

-格式文件存储的目录可以在服务配置中的 [format\_schema\_path](../operations/server_settings/settings.md) 指定。
+格式文件存储的目录可以在服务配置中的 [format\_schema\_path](../operations/server_configuration_parameters/settings.md) 指定。

-Cap’n Proto 反序列化是很高效的,通常不会增加系统的负载。
+Cap'n Proto 反序列化是很高效的,通常不会增加系统的负载。

 ## Protobuf {#protobuf}

-Protobuf - is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.
+Protobuf 是一种 [Protocol Buffers](https://developers.google.com/protocol-buffers/) 格式。

-This format requires an external format schema. The schema is cached between queries.
-ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.
+此格式需要外部的格式模式,模式会在查询之间缓存。
+ClickHouse 支持 `proto2` 和 `proto3` 语法,并支持 repeated/optional/required 字段。

-Usage examples:
+使用示例:

``` sql
SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType'
@@ -784,7 +785,7 @@
cat protobuf_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema='schemafile:MessageType'"
```

-where the file `schemafile.proto` looks like this:
+其中文件 `schemafile.proto` 内容如下:

``` protobuf
syntax = "proto3";
@@ -797,11 +798,11 @@
message MessageType {
};
```

-To find the correspondence between table columns and fields of Protocol Buffers’ message type ClickHouse compares their names.
-This comparison is case-insensitive and the characters `_` (underscore) and `.` (dot) are considered as equal.
-If types of a column and a field of Protocol Buffers’ message are different the necessary conversion is applied.
+为了找到表列与 Protocol Buffers 消息字段之间的对应关系,ClickHouse 会比较它们的名称。
+比较不区分大小写,且字符 `_`(下划线)和 `.`(点)被视为相等。
+如果列与 Protocol Buffers 消息字段的类型不同,则会进行必要的转换。

-Nested messages are supported. For example, for the field `z` in the following message type
+支持嵌套消息。 例如,对于下面消息类型中的字段 `z`:

``` protobuf
message MessageType {
@@ -815,10 +816,10 @@
};
```

-ClickHouse tries to find a column named `x.y.z` (or `x_y_z` or `X.y_Z` and so on).
-Nested messages are suitable to input or output a [nested data structures](../data_types/nested_data_structures/nested.md).
+ClickHouse 会尝试找到名为 `x.y.z`(或 `x_y_z`、`X.y_Z` 等)的列。
+嵌套消息适合用来输入或输出 [嵌套数据结构](../sql_reference/data_types/nested_data_structures/nested.md)。

-Default values defined in a protobuf schema like this
+protobuf 模式中定义的默认值,例如

``` protobuf
syntax = "proto2";
@@ -828,91 +829,91 @@ message MessageType {
}
```

-are not applied; the [table defaults](../query_language/create.md#create-default-values) are used instead of them.
+不会被应用;而是使用 [表默认值](../sql_reference/statements/create.md#create-default-values) 来代替。

-ClickHouse inputs and outputs protobuf messages in the `length-delimited` format.
-It means before every message should be written its length as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
-See also [how to read/write length-delimited protobuf messages in popular languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).
+ClickHouse 以 `length-delimited` 格式输入和输出 protobuf 消息。
+这意味着每条消息之前都应以 [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) 形式写入其长度。
+另请参阅 [如何在流行语言中读写长度分隔的 protobuf 消息](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages)。
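+
+下面是一个简化的示意(假设 proto 模式中定义了类似 `optional int32 result_per_page = 10;` 的带默认值字段):若表列定义了自己的 `DEFAULT` 表达式,则插入时缺失的字段取表默认值,而不是 proto 中声明的默认值:
+
+``` sql
+-- 插入时缺失的 result_per_page 字段将取表默认值 100,而不是 proto 中的 10
+CREATE TABLE test.table_with_defaults
+(
+    result_per_page Int32 DEFAULT 100
+) ENGINE = Memory
+```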
## Avro {#data-format-avro}

-[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
+[Apache Avro](http://avro.apache.org/) 是在 Apache Hadoop 项目中开发的面向行的数据序列化框架。

-ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files).
+ClickHouse 的 Avro 格式支持读取和写入 [Avro 数据文件](http://avro.apache.org/docs/current/spec.html#Object+Container+Files)。

-### Data Types Matching {#data_types-matching}
+### 数据类型匹配 {#data_types-matching}

-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` and `SELECT` queries.
+下表显示了支持的数据类型,以及它们在 `INSERT` 和 `SELECT` 查询中如何与 ClickHouse 的 [数据类型](../sql_reference/data_types/index.md) 匹配。

-| Avro data type `INSERT` | ClickHouse data type | Avro data type `SELECT` |
-|---|---|---|
-| `boolean`, `int`, `long`, `float`, `double` | [Int(8\|16\|32)](../data_types/int_uint.md), [UInt(8\|16\|32)](../data_types/int_uint.md) | `int` |
-| `boolean`, `int`, `long`, `float`, `double` | [Int64](../data_types/int_uint.md), [UInt64](../data_types/int_uint.md) | `long` |
-| `boolean`, `int`, `long`, `float`, `double` | [Float32](../data_types/float.md) | `float` |
-| `boolean`, `int`, `long`, `float`, `double` | [Float64](../data_types/float.md) | `double` |
-| `bytes`, `string`, `fixed`, `enum` | [String](../data_types/string.md) | `bytes` |
-| `bytes`, `string`, `fixed` | [FixedString(N)](../data_types/fixedstring.md) | `fixed(N)` |
-| `enum` | [Enum(8\|16)](../data_types/enum.md) | `enum` |
-| `array(T)` | [Array(T)](../data_types/array.md) | `array(T)` |
-| `union(null, T)`, `union(T, null)` | [Nullable(T)](../data_types/date.md) | `union(null, T)` |
-| `null` | [Nullable(Nothing)](../data_types/special_data_types/nothing.md) | `null` |
-| `int (date)` \* | [Date](../data_types/date.md) | `int (date)` \* |
-| `long (timestamp-millis)` \* | [DateTime64(3)](../data_types/datetime.md) | `long (timestamp-millis)` \* |
-| `long (timestamp-micros)` \* | [DateTime64(6)](../data_types/datetime.md) | `long (timestamp-micros)` \* |
+| Avro 数据类型 `INSERT` | ClickHouse 数据类型 | Avro 数据类型 `SELECT` |
+|---|---|---|
+| `boolean`, `int`, `long`, `float`, `double` | [Int(8/16/32)](../sql_reference/data_types/int_uint.md), [UInt(8/16/32)](../sql_reference/data_types/int_uint.md) | `int` |
+| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md) | `long` |
+| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` |
+| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` |
+| `bytes`, `string`, `fixed`, `enum` | [String](../sql_reference/data_types/string.md) | `bytes` |
+| `bytes`, `string`, `fixed` | [FixedString(N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` |
+| `enum` | [Enum(8/16)](../sql_reference/data_types/enum.md) | `enum` |
+| `array(T)` | [Array(T)](../sql_reference/data_types/array.md) | `array(T)` |
+| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql_reference/data_types/nullable.md) | `union(null, T)` |
+| `null` | [Nullable(Nothing)](../sql_reference/data_types/special_data_types/nothing.md) | `null` |
+| `int (date)` \* | [Date](../sql_reference/data_types/date.md) | `int (date)` \* |
+| `long (timestamp-millis)` \* | [DateTime64(3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* |
+| `long (timestamp-micros)` \* | [DateTime64(6)](../sql_reference/data_types/datetime.md) | `long (timestamp-micros)` \* |
-\* [Avro logical types](http://avro.apache.org/docs/current/spec.html#Logical+Types)
+\* [Avro 逻辑类型](http://avro.apache.org/docs/current/spec.html#Logical+Types)

-Unsupported Avro data types: `record` (non-root), `map`
+不支持的 Avro 数据类型:`record`(非根)、`map`

-Unsupported Avro logical data types: `uuid`, `time-millis`, `time-micros`, `duration`
+不支持的 Avro 逻辑数据类型:`uuid`、`time-millis`、`time-micros`、`duration`

-### Inserting Data {#inserting-data}
+### 插入数据 {#inserting-data}

-To insert data from an Avro file into ClickHouse table:
+要将 Avro 文件中的数据插入 ClickHouse 表:

``` bash
$ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avro"
```

-The root schema of input Avro file must be of `record` type.
+输入 Avro 文件的根模式必须是 `record` 类型。

-To find the correspondence between table columns and fields of Avro schema ClickHouse compares their names. This comparison is case-sensitive.
-Unused fields are skipped.
+为了找到表列与 Avro 模式字段之间的对应关系,ClickHouse 会比较它们的名称。 此比较区分大小写。
+未使用的字段会被跳过。

-Data types of a ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to corresponding column type.
+ClickHouse 表列的数据类型可能与插入的 Avro 数据的相应字段不同。 插入数据时,ClickHouse 根据上表解释数据类型,然后将数据 [转换](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) 为相应的列类型。

-### Selecting Data {#selecting-data}
+### 选择数据 {#selecting-data}

-To select data from ClickHouse table into an Avro file:
+要从 ClickHouse 表中选择数据并写入 Avro 文件:

``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro
```

-Column names must:
+列名必须:

-- start with `[A-Za-z_]`
-- subsequently contain only `[A-Za-z0-9_]`
+- 以 `[A-Za-z_]` 开头
+- 后续字符只包含 `[A-Za-z0-9_]`

-Output Avro file compression and sync interval can be configured with [output\_format\_avro\_codec](../operations/settings/settings.md#settings-output_format_avro_codec) and [output\_format\_avro\_sync\_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) respectively.
+输出 Avro 文件的压缩方式和同步间隔可以分别通过 [output\_format\_avro\_codec](../operations/settings/settings.md#settings-output_format_avro_codec) 和 [output\_format\_avro\_sync\_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) 配置。
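+
+例如(仅作示意,假设存在表 `some_table`):
+
+``` sql
+-- 'null'、'deflate'(以及编译时启用的 'snappy')均为可选的编解码器
+SET output_format_avro_codec = 'deflate';
+SET output_format_avro_sync_interval = 16384;
+SELECT * FROM some_table FORMAT Avro;
+```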
## AvroConfluent {#data-format-avro-confluent}

-AvroConfluent supports decoding single-object Avro messages commonly used with [Kafka](https://kafka.apache.org/) and [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html).
+AvroConfluent 支持解码常与 [Kafka](https://kafka.apache.org/) 和 [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) 搭配使用的单对象 Avro 消息。

-Each Avro message embeds a schema id that can be resolved to the actual schema with help of the Schema Registry.
+每条 Avro 消息都嵌入一个模式 id,借助模式注册表可以将其解析为实际的模式。

-Schemas are cached once resolved.
+模式解析后会进行缓存。

-Schema Registry URL is configured with [format\_avro\_schema\_registry\_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url)
+模式注册表的 URL 通过 [format\_avro\_schema\_registry\_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url) 配置。

-### Data Types Matching {#data_types-matching-1}
+### 数据类型匹配 {#data_types-matching-1}

-Same as [Avro](#data-format-avro)
+与 [Avro](#data-format-avro) 相同。

-### Usage {#usage}
+### 用法 {#usage}

-To quickly verify schema resolution you can use [kafkacat](https://github.com/edenhill/kafkacat) with [clickhouse-local](../operations/utils/clickhouse-local.md):
+要快速验证模式解析,可以将 [kafkacat](https://github.com/edenhill/kafkacat) 与 [clickhouse-local](../operations/utilities/clickhouse-local.md) 结合使用:

``` bash
$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table'
@@ -921,7 +922,7 @@ $ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-
 3 c
```

-To use `AvroConfluent` with [Kafka](../operations/table_engines/kafka.md):
+要结合 [Kafka](../engines/table_engines/integrations/kafka.md) 使用 `AvroConfluent`:

``` sql
CREATE TABLE topic1_stream
@@ -941,123 +942,123 @@
SET format_avro_schema_registry_url = 'http://schema-registry';
SELECT * FROM topic1_stream;
```

-!!! note "Warning"
-    Setting `format_avro_schema_registry_url` needs to be configured in `users.xml` to maintain it’s value after a restart.
+!!! note "警告"
+    设置 `format_avro_schema_registry_url` 需要在 `users.xml` 中配置,以便在重启后保留其值。

-## Parquet {#data-format-parquet}
+## Parquet {#data-format-parquet}

-[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
+[Apache Parquet](http://parquet.apache.org/) 是 Hadoop 生态系统中广泛使用的列式存储格式。 ClickHouse 支持该格式的读写操作。

-### Data Types Matching {#data_types-matching-2}
+### 数据类型匹配 {#data_types-matching-2}

-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` and `SELECT` queries.
+下表显示了支持的数据类型,以及它们在 `INSERT` 和 `SELECT` 查询中如何与 ClickHouse 的 [数据类型](../sql_reference/data_types/index.md) 匹配。

-| Parquet data type (`INSERT`) | ClickHouse data type | Parquet data type (`SELECT`) |
-|---|---|---|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | `UINT8` |
-| `INT8` | [Int8](../data_types/int_uint.md) | `INT8` |
-| `UINT16` | [UInt16](../data_types/int_uint.md) | `UINT16` |
-| `INT16` | [Int16](../data_types/int_uint.md) | `INT16` |
-| `UINT32` | [UInt32](../data_types/int_uint.md) | `UINT32` |
-| `INT32` | [Int32](../data_types/int_uint.md) | `INT32` |
-| `UINT64` | [UInt64](../data_types/int_uint.md) | `UINT64` |
-| `INT64` | [Int64](../data_types/int_uint.md) | `INT64` |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | `FLOAT` |
-| `DOUBLE` | [Float64](../data_types/float.md) | `DOUBLE` |
-| `DATE32` | [Date](../data_types/date.md) | `UINT16` |
-| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) | `UINT32` |
-| `STRING`, `BINARY` | [String](../data_types/string.md) | `STRING` |
-| — | [FixedString](../data_types/fixedstring.md) | `STRING` |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) | `DECIMAL` |
+| Parquet 数据类型 (`INSERT`) | ClickHouse 数据类型 | Parquet 数据类型 (`SELECT`) |
+|---|---|---|
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | `INT16` |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | `UINT32` |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | `INT32` |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | `UINT64` |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | `INT64` |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | `FLOAT` |
+| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | `DOUBLE` |
+| `DATE32` | [Date](../sql_reference/data_types/date.md) | `UINT16` |
+| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) | `UINT32` |
+| `STRING`, `BINARY` | [String](../sql_reference/data_types/string.md) | `STRING` |
+| — | [FixedString](../sql_reference/data_types/fixedstring.md) | `STRING` |
+| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) | `DECIMAL` |

-ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
+ClickHouse 支持精度可配置的 `Decimal` 类型。 `INSERT` 查询将 Parquet 的 `DECIMAL` 类型视为 ClickHouse 的 `Decimal128` 类型。

-Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+不支持的 Parquet 数据类型:`DATE32`、`TIME32`、`FIXED_SIZE_BINARY`、`JSON`、`UUID`、`ENUM`。

-Data types of a ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [cast](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column.
+ClickHouse 表列的数据类型可能与插入的 Parquet 数据的相应字段不同。 插入数据时,ClickHouse 根据上表解释数据类型,然后将数据 [转换](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) 为 ClickHouse 表列所设置的数据类型。

-### Inserting and Selecting Data {#inserting-and-selecting-data}
+### 插入和选择数据 {#inserting-and-selecting-data}

-You can insert Parquet data from a file into ClickHouse table by the following command:
+您可以通过以下命令将文件中的 Parquet 数据插入到 ClickHouse 表中:

``` bash
$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
```

-You can select data from a ClickHouse table and save them into some file in the Parquet format by the following command:
+您可以通过以下命令从 ClickHouse 表中选择数据,并将其保存为 Parquet 格式的文件:

``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq}
```

-To exchange data with Hadoop, you can use [HDFS table engine](../operations/table_engines/hdfs.md).
+要与 Hadoop 交换数据,您可以使用 [HDFS 表引擎](../engines/table_engines/integrations/hdfs.md)。
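+
+除了通过管道导入,也可以借助 `file` 表函数直接读取 Parquet 文件(示意,假设文件位于服务器的 `user_files` 目录下,且结构与下述声明一致):
+
+``` sql
+-- 路径相对于服务器配置中的 user_files_path
+SELECT * FROM file('data.parquet', 'Parquet', 'x Int64, s String') LIMIT 10;
+```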
## ORC {#data-format-orc}

-[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.
+[Apache ORC](https://orc.apache.org/) 是 Hadoop 生态系统中广泛使用的列式存储格式。 此格式的数据只能插入到 ClickHouse 中。

-### Data Types Matching {#data_types-matching-3}
+### 数据类型匹配 {#data_types-matching-3}

-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` queries.
+下表显示了支持的数据类型,以及它们在 `INSERT` 查询中如何与 ClickHouse 的 [数据类型](../sql_reference/data_types/index.md) 匹配。

-| ORC data type (`INSERT`) | ClickHouse data type |
-|---|---|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) |
-| `INT8` | [Int8](../data_types/int_uint.md) |
-| `UINT16` | [UInt16](../data_types/int_uint.md) |
-| `INT16` | [Int16](../data_types/int_uint.md) |
-| `UINT32` | [UInt32](../data_types/int_uint.md) |
-| `INT32` | [Int32](../data_types/int_uint.md) |
-| `UINT64` | [UInt64](../data_types/int_uint.md) |
-| `INT64` | [Int64](../data_types/int_uint.md) |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) |
-| `DOUBLE` | [Float64](../data_types/float.md) |
-| `DATE32` | [Date](../data_types/date.md) |
-| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) |
-| `STRING`, `BINARY` | [String](../data_types/string.md) |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) |
+| ORC 数据类型 (`INSERT`) | ClickHouse 数据类型 |
+|---|---|
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) |
+| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) |
+| `DATE32` | [Date](../sql_reference/data_types/date.md) |
+| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) |
+| `STRING`, `BINARY` | [String](../sql_reference/data_types/string.md) |
+| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) |

-ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
+ClickHouse 支持精度可配置的 `Decimal` 类型。 `INSERT` 查询将 ORC 的 `DECIMAL` 类型视为 ClickHouse 的 `Decimal128` 类型。

-Unsupported ORC data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+不支持的 ORC 数据类型:`DATE32`、`TIME32`、`FIXED_SIZE_BINARY`、`JSON`、`UUID`、`ENUM`。

-The data types of ClickHouse table columns don’t have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
+ClickHouse 表列的数据类型不必与相应的 ORC 数据字段匹配。 插入数据时,ClickHouse 根据上表解释数据类型,然后将数据 [转换](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) 为 ClickHouse 表列所设置的数据类型。

-### Inserting Data {#inserting-data-1}
+### 插入数据 {#inserting-data-1}

-You can insert ORC data from a file into ClickHouse table by the following command:
+您可以通过以下命令将文件中的 ORC 数据插入到 ClickHouse 表中:

``` bash
$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
```

-To exchange data with Hadoop, you can use [HDFS table engine](../operations/table_engines/hdfs.md).
+要与 Hadoop 交换数据,您可以使用 [HDFS 表引擎](../engines/table_engines/integrations/hdfs.md)。
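+
+同样地,也可以借助 `file` 表函数读取 ORC 文件后写入目标表(示意,假设文件结构与表结构匹配):
+
+``` sql
+-- 先从 ORC 文件读取,再插入到已有的表中
+INSERT INTO some_table
+SELECT * FROM file('data.orc', 'ORC', 'x Int64, s String');
+```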
-## Format Schema {#formatschema}
+## 格式模式 {#formatschema}

-The file name containing the format schema is set by the setting `format_schema`.
-It’s required to set this setting when it is used one of the formats `Cap'n Proto` and `Protobuf`.
-The format schema is a combination of a file name and the name of a message type in this file, delimited by colon,
-If the file has the standard extension for the format (for example, `.proto` for `Protobuf`),
-it can be omitted and in this case the format schema looks like `schemafile:MessageType`.
+包含格式模式的文件名由设置 `format_schema` 指定。
+当使用 `Cap'n Proto` 或 `Protobuf` 格式时,必须设置该选项。
+格式模式是文件名与该文件中消息类型名称的组合,用冒号分隔,例如 `schemafile.proto:MessageType`。
+如果文件具有该格式的标准扩展名(例如 `Protobuf` 对应 `.proto`),
+则扩展名可以省略,此时格式模式形如 `schemafile:MessageType`。

-If you input or output data via the [client](../interfaces/cli.md) in the interactive mode, the file name specified in the format schema
-can contain an absolute path or a path relative to the current directory on the client.
-If you use the client in the batch mode, the path to the schema must be relative due to security reasons.
+如果通过 [客户端](../interfaces/cli.md) 在交互模式下输入或输出数据,格式模式中指定的文件名
+可以是绝对路径,也可以是相对于客户端当前目录的路径。
+如果在批处理模式下使用客户端,出于安全原因,模式的路径必须是相对路径。

-If you input or output data via the [HTTP interface](../interfaces/http.md) the file name specified in the format schema
-should be located in the directory specified in [format\_schema\_path](../operations/server_settings/settings.md#server_settings-format_schema_path)
-in the server configuration.
+如果通过 [HTTP 接口](../interfaces/http.md) 输入或输出数据,格式模式中指定的文件名
+应位于服务器配置中 [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path)
+所指定的目录下。

-[Original article](https://clickhouse.tech/docs/en/interfaces/formats/)
+[原始文章](https://clickhouse.tech/docs/en/interfaces/formats/)

-## Skipping Errors {#skippingerrors}
+## 跳过错误 {#skippingerrors}

-Some formats such as `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` and `Protobuf` can skip broken row if parsing error occurred and continue parsing from the beginning of next row. See [input\_format\_allow\_errors\_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) and
-[input\_format\_allow\_errors\_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) settings.
-Limitations:
-- In case of parsing error `JSONEachRow` skips all data until the new line (or EOF), so rows must be delimited by `\n` to count errors correctly.
-- `Template` and `CustomSeparated` use delimiter after the last column and delimiter between rows to find the beginning of next row, so skipping errors works only if at least one of them is not empty.
+`CSV`、`TabSeparated`、`TSKV`、`JSONEachRow`、`Template`、`CustomSeparated` 和 `Protobuf` 等格式在发生解析错误时可以跳过损坏的行,并从下一行的开头继续解析。 参见 [input\_format\_allow\_errors\_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) 和
+[input\_format\_allow\_errors\_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) 设置。
+限制:
+- 发生解析错误时,`JSONEachRow` 会跳过直到换行符(或 EOF)为止的所有数据,因此各行必须以 `\n` 分隔才能正确统计错误。
+- `Template` 和 `CustomSeparated` 使用最后一列之后的分隔符和行间分隔符来查找下一行的开头,因此只有当两者至少有一个不为空时,跳过错误才有效。
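+
+例如(示意):在导入前设置允许的错误数量或比例,解析到损坏的行时就不会中断导入:
+
+``` sql
+-- 最多容忍 10 条错误行,或全部行数的 10%
+SET input_format_allow_errors_num = 10;
+SET input_format_allow_errors_ratio = 0.1;
+```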
[来源文章](https://clickhouse.tech/docs/zh/interfaces/formats/)

diff --git a/docs/zh/interfaces/http.md b/docs/zh/interfaces/http.md
index 1dfbe87b7e0..ca8a9076fba 100644
--- a/docs/zh/interfaces/http.md
+++ b/docs/zh/interfaces/http.md
@@ -1,3 +1,4 @@
+
 # HTTP 客户端 {#http-ke-hu-duan}

 HTTP 接口可以让你通过任何平台和编程语言来使用 ClickHouse。我们用 Java 和 Perl 以及 shell 脚本来访问它。在其他的部门中,HTTP 接口会用在 Perl,Python 以及 Go 中。HTTP 接口比 TCP 原生接口更为局限,但是却有更好的兼容性。

@@ -17,7 +18,7 @@ Ok.
 当使用 GET 方法请求时,`readonly` 会被设置。换句话说,若要作修改数据的查询,只能发送 POST 方法的请求。可以将查询通过 POST 主体发送,也可以通过 URL 参数发送。

-Examples:
+示例:

``` bash
$ curl 'http://localhost:8123/?query=SELECT%201'
@@ -200,7 +201,7 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812
 可选的 `quota_key` 参数可能当做 quota key 传入(或者任何字符串)。更多信息,参见 «[配额](../operations/quotas.md#quotas)» 部分。

-HTTP 接口允许传入额外的数据(外部临时表)来查询。更多信息,参见 «[外部数据查询处理](../operations/table_engines/external_data.md)» 部分。
+HTTP 接口允许传入额外的数据(外部临时表)来查询。更多信息,参见 «[外部数据查询处理](../engines/table_engines/special/external_data.md)» 部分。

 ## 响应缓冲 {#xiang-ying-huan-chong}

diff --git a/docs/zh/interfaces/index.md b/docs/zh/interfaces/index.md
index df0313cc3d2..a4131e833e7 100644
--- a/docs/zh/interfaces/index.md
+++ b/docs/zh/interfaces/index.md
@@ -1,3 +1,4 @@
+
 # 客户端 {#interfaces}

 ClickHouse提供了两个网络接口(两者都可以选择包装在TLS中以提高安全性):

diff --git a/docs/zh/interfaces/jdbc.md b/docs/zh/interfaces/jdbc.md
index a2aac229cca..932ab53b9af 100644
--- a/docs/zh/interfaces/jdbc.md
+++ b/docs/zh/interfaces/jdbc.md
@@ -1,8 +1,9 @@
+
 # JDBC 驱动 {#jdbc-qu-dong}

 - **[官方JDBC 的驱动](https://github.com/ClickHouse/clickhouse-jdbc)**
 - 三方提供的 JDBC 驱动:
-    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
+    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
     - [clickhouse4j](https://github.com/blynkkk/clickhouse4j)

 [来源文章](https://clickhouse.tech/docs/zh/interfaces/jdbc/)

diff --git a/docs/zh/interfaces/mysql.md b/docs/zh/interfaces/mysql.md
deleted file mode 120000
index df728b35f80..00000000000
--- a/docs/zh/interfaces/mysql.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/interfaces/mysql.md
\ No newline at end of file
diff --git a/docs/zh/interfaces/mysql.md b/docs/zh/interfaces/mysql.md
new file mode 100644
index 00000000000..8996ad6ae6f
--- /dev/null
+++ b/docs/zh/interfaces/mysql.md
@@ -0,0 +1,49 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 20
+toc_title: "MySQL\u63A5\u53E3"
+---
+
+# MySQL接口 {#mysql-interface}
+
+ClickHouse 支持 MySQL 线路协议。可以通过在配置文件中启用 [mysql\_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) 设置来开启:
+
+``` xml
+<mysql_port>9004</mysql_port>
+```
+
+使用命令行工具 `mysql` 进行连接的示例:
+
+``` bash
+$ mysql --protocol tcp -u default -P 9004
+```
+
+如果连接成功,则输出:
+
+``` text
+Welcome to the MySQL monitor.  Commands end with ; or \g.
+Your MySQL connection id is 4
+Server version: 20.2.1.1-ClickHouse
+
+Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+
+Oracle is a registered trademark of Oracle Corporation and/or its
+affiliates. Other names may be trademarks of their respective
+owners.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+mysql>
+```
+
+为了与所有 MySQL 客户端兼容,建议在配置文件中使用 [双 SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) 指定用户密码。
+如果使用 [SHA256](../operations/settings/settings_users.md#password_sha256_hex) 指定用户密码,部分客户端将无法完成身份验证(mysqljs 以及旧版本的命令行工具 mysql)。
+
+限制:
+
+- 不支持预处理查询(prepared statements)
+
+- 某些数据类型以字符串形式发送
+
+[原始文章](https://clickhouse.tech/docs/en/interfaces/mysql/)

diff --git a/docs/zh/interfaces/odbc.md b/docs/zh/interfaces/odbc.md
index b45c54f8507..5cba3a499f1 100644
--- a/docs/zh/interfaces/odbc.md
+++ b/docs/zh/interfaces/odbc.md
@@ -1,3 +1,4 @@
+
 # ODBC 驱动 {#odbc-qu-dong}

 - ClickHouse官方有 ODBC 的驱动。 见 [这里](https://github.com/ClickHouse/clickhouse-odbc)。

diff --git a/docs/zh/interfaces/tcp.md b/docs/zh/interfaces/tcp.md
index 9fd4e6b108f..b926a63c476 100644
--- a/docs/zh/interfaces/tcp.md
+++ b/docs/zh/interfaces/tcp.md
@@ -1,5 +1,6 @@
+
 # 原生客户端接口(TCP) {#yuan-sheng-ke-hu-duan-jie-kou-tcp}

-本机协议用于 [命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C ++程序。 不幸的是,本机ClickHouse协议还没有正式的规范,但它可以从ClickHouse源代码进行逆向工程 [从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/src/Client))和/或拦截和分析TCP流量。
+原生协议用于 [命令行客户端](cli.md)、分布式查询处理期间的服务器间通信,以及其他 C++ 程序。 遗憾的是,原生 ClickHouse 协议还没有正式的规范,但可以通过 ClickHouse 源代码(从 [这里](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client) 开始)和/或拦截并分析 TCP 流量来对其进行逆向工程。

 [来源文章](https://clickhouse.tech/docs/zh/interfaces/tcp/)

diff --git a/docs/zh/interfaces/third-party/client_libraries.md b/docs/zh/interfaces/third-party/client_libraries.md
index bf74c490092..8e48bb8735e 100644
--- a/docs/zh/interfaces/third-party/client_libraries.md
+++ b/docs/zh/interfaces/third-party/client_libraries.md
@@ -1,3 +1,4 @@
+
 # 第三方开发的库 {#di-san-fang-kai-fa-de-ku}

 !!! warning "放弃"
@@ -5,45 +6,46 @@
 - Python
     - [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm)
-    - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver)
-    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
+    - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver)
+    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
 - PHP
     - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse)
-    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
-    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
-    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
+    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
+    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
+    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
     - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client)
-    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
-- Go
+    - [SeasClick C++ 客户端](https://github.com/SeasX/SeasClick)
+- Go
     - [clickhouse](https://github.com/kshvakov/clickhouse/)
-    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
-    - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse)
+    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
+    - [mailru/go-clickhouse](https://github.com/mailru/go-clickhouse)
     - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse)
 - NodeJs
-    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
-    - [node-clickhouse](https://github.com/apla/node-clickhouse)
+    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
+    - [node-clickhouse](https://github.com/apla/node-clickhouse)
 - Perl
    - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
    - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
-    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
+    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
-    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
     - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
     - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
 - Java
     - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
-- Scala
-    - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
+- Scala
+    - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
 - Kotlin
     - [AORM](https://github.com/TanVD/AORM)
 - C\#
-    - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+    - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
     - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
-    - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
-- Elixir
+    - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
+- Elixir
     - [clickhousex](https://github.com/appodeal/clickhousex/)
-- Nim
+- Nim
     - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)

 [来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/client_libraries/)

diff --git a/docs/zh/interfaces/third-party/gui.md b/docs/zh/interfaces/third-party/gui.md
index 83656d18858..bbbd78f650e 100644
--- a/docs/zh/interfaces/third-party/gui.md
+++ b/docs/zh/interfaces/third-party/gui.md
@@ -1,3 +1,4 @@
+
 # 第三方开发的可视化界面 {#di-san-fang-kai-fa-de-ke-shi-hua-jie-mian}

 ## 开源 {#kai-yuan}

@@ -37,9 +38,9 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
 - 集群管理
 - 监控副本情况以及 Kafka 引擎表

-### LightHouse {#lighthouse}
+### LightHouse {#lighthouse}

-[LightHouse](https://github.com/VKCOM/lighthouse) 是ClickHouse的轻量级Web界面。
+[LightHouse](https://github.com/VKCOM/lighthouse) 是ClickHouse的轻量级Web界面。

 特征:

@@ -57,9 +58,9 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
 - 表格预览。
 - 自动完成。

-### clickhouse-cli {#clickhouse-cli}
+### clickhouse-cli {#clickhouse-cli}

-[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端,用Python 3编写。
+[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端,用Python 3编写。

 特征:

@@ -68,15 +69,15 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
 - 寻呼机支持数据输出。
 - 自定义PostgreSQL类命令。

-### clickhouse-flamegraph {#clickhouse-flamegraph}
+### clickhouse-flamegraph {#clickhouse-flamegraph}

 [clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) 是一个将 `system.trace_log` 可视化为 [flamegraph](http://www.brendangregg.com/flamegraphs.html) 的专用工具。
 ## 商业 {#shang-ye}

-### Holistics Software {#holistics-software}
+### Holistics Software {#holistics-software}

-[Holistics](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具,用于设置您的分析流程。
+[Holistics](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具,用于设置您的分析流程。

 特征:

diff --git a/docs/zh/interfaces/third-party/index.md b/docs/zh/interfaces/third-party/index.md
new file mode 100644
index 00000000000..fab8cb364e8
--- /dev/null
+++ b/docs/zh/interfaces/third-party/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u7B2C\u4E09\u65B9"
+toc_priority: 24
+---
+
+

diff --git a/docs/zh/interfaces/third-party/integrations.md b/docs/zh/interfaces/third-party/integrations.md
index 4bfe367e1f1..128a4060c2d 100644
--- a/docs/zh/interfaces/third-party/integrations.md
+++ b/docs/zh/interfaces/third-party/integrations.md
@@ -7,80 +7,86 @@
 - 关系数据库管理系统
     - [MySQL](https://www.mysql.com)
-        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
-        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
-        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
+        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
+        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
+        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
     - [PostgreSQL](https://www.postgresql.org)
-        - [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
-        - [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
-        - [pg2ch](https://github.com/mkabilov/pg2ch)
+        - [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
+        - [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
+        - [pg2ch](https://github.com/mkabilov/pg2ch)
     - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server)
-        - [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator)
+        - [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator)
 - 消息队列
-    - [Kafka](https://kafka.apache.org)
-        - [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [Go client](https://github.com/kshvakov/clickhouse/))
+    - [Kafka](https://kafka.apache.org)
+        - [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [Go 客户端](https://github.com/ClickHouse/clickhouse-go/))
+- 流处理
+    - [Flink](https://flink.apache.org)
+        - [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)
 - 对象存储
     - [S3](https://en.wikipedia.org/wiki/Amazon_S3)
-        - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
+        - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
 - 容器编排
     - [Kubernetes](https://kubernetes.io)
-        - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
+        - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
 - 配置管理
-    - [puppet](https://puppet.com)
-        - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
+    - [Puppet](https://puppet.com)
+        - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
         - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
 - 监控
-    - [Graphite](https://graphiteapp.org)
-        - [graphouse](https://github.com/yandex/graphouse)
-        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
-        - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
-        - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../operations/table_engines/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../operations/table_engines/graphitemergetree.md#rollup-configuration) could be applied
+    - [Graphite](https://graphiteapp.org)
+        - [graphouse](https://github.com/yandex/graphouse)
+        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
+
+        - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
+        - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - 如果可以应用 [汇总配置](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) 中的规则,则优化 [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) 中的陈旧分区
     - [Grafana](https://grafana.com/)
-        - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
-    - [Prometheus](https://prometheus.io/)
-        - [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
-        - [PromHouse](https://github.com/Percona-Lab/PromHouse)
-        - [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (uses [Go client](https://github.com/kshvakov/clickhouse/))
+        - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
+    - [Prometheus](https://prometheus.io/)
+        - [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
+        - [PromHouse](https://github.com/Percona-Lab/PromHouse)
+        - [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (使用 [Go 客户端](https://github.com/kshvakov/clickhouse/))
     - [Nagios](https://www.nagios.org/)
-        - [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
-        - [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
+        - [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
+        - [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
     - [Zabbix](https://www.zabbix.com)
-        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
+        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
     - [Sematext](https://sematext.com/)
-        - [clickhouse积分](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
+        - [ClickHouse 集成](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
 - 记录
     - [rsyslog](https://www.rsyslog.com/)
-        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
+        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
     - [fluentd](https://www.fluentd.org)
-        - [loghouse](https://github.com/flant/loghouse) (对于 [Kubernetes](https://kubernetes.io))
-        - [logagent](https://www.sematext.com/logagent)
-        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
+        - [loghouse](https://github.com/flant/loghouse) (对于 [Kubernetes](https://kubernetes.io))
+        - [logagent](https://www.sematext.com/logagent)
+        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
 - 地理
     - [MaxMind](https://dev.maxmind.com/geoip/)
-        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
+        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)

 ## 编程语言生态系统 {#bian-cheng-yu-yan-sheng-tai-xi-tong}

 - Python
     - [SQLAlchemy](https://www.sqlalchemy.org)
-        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
-    - [pandas](https://pandas.pydata.org)
-        - [pandahouse](https://github.com/kszucs/pandahouse)
+        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
+    - [pandas](https://pandas.pydata.org)
+        - [pandahouse](https://github.com/kszucs/pandahouse)
+- PHP
+    - [Doctrine](https://www.doctrine-project.org/)
+        - [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
 - R
     - [dplyr](https://db.rstudio.com/dplyr/)
-        - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (使用 [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
+        - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (使用 [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
 - Java
     - [Hadoop](http://hadoop.apache.org)
-        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (使用 [JDBC](../../query_language/table_functions/jdbc.md))
-- Scala
+        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (使用 [JDBC](../../sql_reference/table_functions/jdbc.md))
+- Scala
     - [Akka](https://akka.io)
-        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
+        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
 - C\#
     - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview)
-        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
         - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
         - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations)
-- Elixir
+- Elixir
     - [Ecto](https://github.com/elixir-ecto/ecto)
         - [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)

diff --git a/docs/zh/interfaces/third-party/proxy.md b/docs/zh/interfaces/third-party/proxy.md
index 727bff00cbb..e954444c46f 100644
--- a/docs/zh/interfaces/third-party/proxy.md
+++ b/docs/zh/interfaces/third-party/proxy.md
@@ -1,3 +1,4 @@
+
 # 来自第三方开发人员的代理服务器 {#lai-zi-di-san-fang-kai-fa-ren-yuan-de-dai-li-fu-wu-qi}

 [chproxy](https://github.com/Vertamedia/chproxy) 是ClickHouse数据库的http代理和负载均衡器。

@@ -22,9 +23,9 @@
 在Go中实现。

-## ClickHouse-Bulk {#clickhouse-bulk}
+## ClickHouse-Bulk {#clickhouse-bulk}

-[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) 是一个简单的ClickHouse插入收集器。
+[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) 是一个简单的ClickHouse插入收集器。

 特征:

diff --git a/docs/zh/introduction/adopters.md b/docs/zh/introduction/adopters.md
deleted file mode 120000
index 659153d5f6c..00000000000
--- a/docs/zh/introduction/adopters.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/introduction/adopters.md
\ No newline at end of file
diff --git a/docs/zh/introduction/adopters.md b/docs/zh/introduction/adopters.md
new file mode 100644
index 00000000000..f550fd6e473
--- /dev/null
+++ b/docs/zh/introduction/adopters.md
@@ -0,0 +1,80 @@
+---
+toc_priority: 8
+toc_title: "\u91C7\u7528\u8005"
+---
+
+# ClickHouse用户 {#clickhouse-adopters}
+
+!!!
warning "免责声明" + 如下使用ClickHouse的公司和他们的成功案例来源于公开资源,因此和实际情况可能有所出入。如果您分享您公司使用ClickHouse的故事,我们将不胜感激 [将其添加到列表](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md),但请确保你这样做不会有任何保密协议的问题。也欢迎提供来自其他公司的出版物的更新。 + +| 公司简介 | 行业 | 用例 | 群集大小 | (Un)压缩数据大小\* | 参考资料 | +|-----------------------------------------------------------------|-------------------|----------------|---------------------------------------------------|----------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [2gis](https://2gis.ru) | 地图 | 监测 | — | — | [讲俄语,2019年7月](https://youtu.be/58sPkXfq6nw) | +| [阿罗哈浏览器](https://alohabrowser.com/) | 移动应用程序 | 浏览器后端 | — | — | [俄罗斯幻灯片,2019年5月](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | +| [阿玛迪斯](https://amadeus.com/) | 旅费 | 分析 | — | — | [新闻稿,四月2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | +| [Appsflyer](https://www.appsflyer.com) | 移动分析 | 主要产品 | — | — | [讲俄语,2019年7月](https://www.youtube.com/watch?v=M3wbRlcpBbY) | +| [ArenaData](https://arenadata.tech/) | 数据平台 | 主要产品 | — | — | [幻灯片在俄罗斯,十二月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | +| [Badoo](https://badoo.com) | 约会 | 时间序列 | — | — | [幻灯片在俄罗斯,十二月2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | +| [Benocs](https://www.benocs.com/) | 网络遥测和分析 | 主要产品 | — | — | [幻灯片英文,2017年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | +| [彭博](https://www.bloomberg.com/) | 金融、媒体 | 监测 | 102个服务器 | — | [幻灯片,2018年5月](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | +| [Bloxy](https://bloxy.info) | 区块链 | 分析 | — | — | [幻灯片在俄罗斯,八月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | +| `Dataliance/UltraPower` | 电信 | 分析 | — | — | [中文幻灯片,2018年1月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | +| [CARTO](https://carto.com/) | 商业智能 | 地理分析 | — | — | [地理空间处理与ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | +| [CERN](http://public.web.cern.ch/public/) | 研究 | 实验 | — | — | [新闻稿,四月2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | +| [Cisco](http://cisco.com/) | 碌莽禄Networking: | 流量分析 | — | — | [闪电对话,十月2019](https://youtu.be/-hI1vDR2oPY?t=5057) | +| [城堡证券](https://www.citadelsecurities.com/) | 财政 | — | — | — | [贡献,2019年3月](https://github.com/ClickHouse/ClickHouse/pull/4774) | +| [Citymobil](https://city-mobil.ru) | 出租车 | 分析 | — | — | [博客文章在俄罗斯,三月2020](https://habr.com/en/company/citymobil/blog/490660/) | +| [内容广场](https://contentsquare.com) | 网站分析 | 主要产品 | — | — | [博客文章在法国,十一月2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| [Cloudflare](https://cloudflare.com) | CDN | 流量分析 | 36服务器 | — | [博客文章,五月2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [博客文章,三月2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | +| [Corunet](https://coru.net/) | 分析 | 主要产品 | — | — | 
[英文幻灯片,2019年4月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | +| [CraiditX 氪信](https://creditx.com) | 金融AI | 分析 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | +| [ツ环板/ョツ嘉ッツ偲](https://www.criteo.com/) | 零售 | 主要产品 | — | — | [幻灯片中的英文,十月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | +| [德意志银行](https://db.com) | 财政 | 商业智能分析 | — | — | [幻灯片中的英文,十月2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | +| [Diva-e](https://www.diva-e.com) | 数字咨询 | 主要产品 | — | — | [英文幻灯片,2019年9月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | +| [Exness](https://www.exness.com) | 交易 | 指标,日志记录 | — | — | [俄语交谈,2019年5月](https://youtu.be/_rpU-TvSfZ8?t=3215) | +| [精灵](https://geniee.co.jp) | 广告网络 | 主要产品 | — | — | [日文博客,2017年7月](https://tech.geniee.co.jp/entry/2017/07/20/160100) | +| [HUYA](https://www.huya.com/) | 视频流 | 分析 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | +| [Idealista](https://www.idealista.com) | 房地产 | 分析 | — | — | [博客文章英文,四月2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| [Infovista](https://www.infovista.com/) | 网络 | 分析 | — | — | [幻灯片中的英文,十月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | +| [InnoGames](https://www.innogames.com) | 游戏 | 指标,日志记录 | — | — | [俄罗斯幻灯片,2019年9月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | +| [Integros](https://integros.com) | 视频服务平台 | 分析 | — | — | [俄罗斯幻灯片,2019年5月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [科迪亚克数据](https://www.kodiakdata.com/) | 云 | 主要产品 | — | — | [虏茅驴麓卤戮碌禄路戮鲁拢](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | +| [Kontur](https://kontur.ru) | 软件开发 | 指标 | — | — | [俄语交谈,2018年11月](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | +| [LifeStreet](https://lifestreet.com/) | 广告网络 | 主要产品 | 75台服务器(3个副本) | 5.27PiB | [博客文章在俄罗斯,2017年2月](https://habr.com/en/post/322620/) | +| [Mail.ru 云解决方案](https://mcs.mail.ru/) | 云服务 | 主要产品 | — | — | [运行ClickHouse实例,俄语](https://mcs.mail.ru/help/db-create/clickhouse#) | +| [MessageBird](https://www.messagebird.com) | 电信 | 统计 | — | — | [英文幻灯片,2018年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | +| [MGID](https://www.mgid.com/) | 广告网络 | 网络分析 | — | — | [我们在实施分析DBMS ClickHouse的经验,在俄罗斯](http://gs-studio.com/news-about-it/32777----clickhouse---c) | +| [OneAPM](https://www.oneapm.com/) | 监测和数据分析 | 主要产品 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | +| [ツ环板Innovationョツ嘉ッ](http://www.pragma-innovation.fr/) | 遥测和大数据分析 | 主要产品 | — | — | [幻灯片中的英文,十月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | +| [QINGCLOUD](https://www.qingcloud.com/) | 云服务 | 主要产品 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | +| [Qrator](https://qrator.net) | DDoS保护 | 主要产品 | — | — | 
[博客文章,三月2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | +| [北京百分之信息技术有限公司,Ltd.](https://www.percent.cn/) | 分析 | 主要产品 | — | — | [中文幻灯片,2019年6月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | +| [漫步者](https://rambler.ru) | 互联网服务 | 分析 | — | — | [俄语交谈,2018年4月](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | +| [腾讯](https://www.tencent.com) | 消息传递 | 日志记录 | — | — | [中文讲座,2019年11月](https://youtu.be/T-iVQRuw-QY?t=5050) | +| [交通明星](https://trafficstars.com/) | 广告网络 | — | — | — | [幻灯片在俄罗斯,2018年5月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | +| [S7航空公司](https://www.s7.ru) | 航空公司 | 指标,日志记录 | — | — | [讲俄语,2019年3月](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | +| [SEMrush](https://www.semrush.com/) | 碌莽禄Marketing: | 主要产品 | — | — | [幻灯片在俄罗斯,八月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | +| [scireum GmbH](https://www.scireum.de/) | 电子商务 | 主要产品 | — | — | [德语讲座,2020年2月](https://www.youtube.com/watch?v=7QWAn5RbyR4) | +| [哨兵](https://sentry.io/) | 软件开发人员 | 产品后端 | — | — | [博客文章英文,五月2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | +| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | 政府社会保障 | 分析 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | +| [seo.do](https://seo.do/) | 分析 | 主要产品 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | +| [新浪](http://english.sina.com/index.html) | 新闻 | — | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | +| [SMI2](https://smi2.ru/) | 新闻 | 分析 | — | — | [博客文章在俄罗斯,2017年11月](https://habr.com/ru/company/smi2/blog/314558/) | +| [Splunk](https://www.splunk.com/) | 业务分析 | 主要产品 | — | — | [英文幻灯片,2018年1月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | +| [Spotify的](https://www.spotify.com) | 音乐 | 实验 | — | — | [幻灯片,七月2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | +| [腾讯](https://www.tencent.com) | 大数据 | 数据处理 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | +| [优步](https://www.uber.com) | 出租车 | 日志记录 | — | — | [幻灯片,二月2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | +| [VKontakte](https://vk.com) | 社交网络 | 统计,日志记录 | — | — | [幻灯片在俄罗斯,八月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | +| [Wisebits](https://wisebits.com/) | IT解决方案 | 分析 | — | — | [俄罗斯幻灯片,2019年5月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [小新科技](https://www.xiaoheiban.cn/) | 教育 | 共同目的 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | +| [西马拉亚](https://www.ximalaya.com/) | 音频共享 | OLAP | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | +| [Yandex云](https://cloud.yandex.ru/services/managed-clickhouse) | 公有云 | 主要产品 | — | — | [讲俄语,2019年12月](https://www.youtube.com/watch?v=pgnak9e_E0o) | +| [Yandex 
DataLens](https://cloud.yandex.ru/services/datalens) | 商业智能 | 主要产品 | — | — | [幻灯片在俄罗斯,十二月2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | +| [Yandex市场](https://market.yandex.ru/) | 电子商务 | 指标,日志记录 | — | — | [讲俄语,2019年1月](https://youtu.be/_l1qP0DyBcA?t=478) | +| [Yandex Metrica](https://metrica.yandex.com) | 网站分析 | 主要产品 | 一个集群中的360台服务器,一个部门中的1862台服务器 | 66.41PiB/5.68PiB | [幻灯片,二月2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | +| [ЦВТ](https://htc-cs.ru/) | 软件开发 | 指标,日志记录 | — | — | [博客文章,三月2019,在俄罗斯](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | +| [МКБ](https://mkb.ru/) | 银行 | 网络系统监控 | — | — | [俄罗斯幻灯片,2019年9月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | +| [金数据](https://jinshuju.net) | 商业智能分析 | 主要产品 | — | — | [中文幻灯片,2019年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | + +[原始文章](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/zh/introduction/distinctive_features.md b/docs/zh/introduction/distinctive_features.md index 250a1a20e87..e27f3317bf0 100644 --- a/docs/zh/introduction/distinctive_features.md +++ b/docs/zh/introduction/distinctive_features.md @@ -1,4 +1,5 @@ -# ClickHouse的独特功能 {#clickhousede-du-te-gong-neng} + +# ClickHouse的特性 {#clickhouse-de-te-xing} ## 真正的列式数据库管理系统 {#zhen-zheng-de-lie-shi-shu-ju-ku-guan-li-xi-tong} @@ -59,6 +60,12 @@ ClickHouse提供各种各样在允许牺牲数据精度的情况下对查询进 ClickHouse使用异步的多主复制技术。当数据被写入任何一个可用副本后,系统会在后台将数据分发给其他副本,以保证系统在不同副本上保持相同的数据。在大多数情况下ClickHouse能在故障后自动恢复,在一些少数的复杂情况下需要手动恢复。 -更多信息,参见 [数据复制](../operations/table_engines/replication.md)。 +更多信息,参见 [数据复制](../engines/table_engines/mergetree_family/replication.md)。 + +# 限制 {#clickhouseke-xian-zhi} + +1. 没有完整的事务支持。 +2. 缺少高频率,低延迟的修改或删除已存在数据的能力。仅能用于批量删除或修改数据,但这符合 [GDPR](https://gdpr-info.eu)。 +3. 稀疏索引使得ClickHouse不适合通过其键检索单行的点查询。 [来源文章](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/zh/introduction/features_considered_disadvantages.md b/docs/zh/introduction/features_considered_disadvantages.md deleted file mode 100644 index 04cd34c6ffc..00000000000 --- a/docs/zh/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,7 +0,0 @@ -# ClickHouse的限制 {#clickhouseke-yi-ren-wei-shi-que-dian-de-gong-neng} - -1. 没有完整的事务支持。 -2. 缺少高频率,低延迟的修改或删除已存在数据的能力。仅能用于批量删除或修改数据,但这符合 [GDPR](https://gdpr-info.eu)。 -3. 
稀疏索引使得ClickHouse不适合通过其键检索单行的点查询。
-
-[来源文章](https://clickhouse.tech/docs/zh/introduction/features_considered_disadvantages/)
diff --git a/docs/zh/introduction/history.md b/docs/zh/introduction/history.md
index 7c1a058ea76..673e070addb 100644
--- a/docs/zh/introduction/history.md
+++ b/docs/zh/introduction/history.md
@@ -1,6 +1,7 @@
+
# ClickHouse历史 {#clickhouseli-shi}
-ClickHouse最初是为 [Yandex.Metrica](https://metrica.yandex.com/) [世界第二大Web分析平台](http://w3techs.com/technologies/overview/traffic_analysis/all) 而开发的。多年来一直作为该系统的核心组件被该系统持续使用着。目前为止,该系统在ClickHouse中有超过13万亿条记录,并且每天超过200多亿个事件被处理。它允许直接从原始数据中动态查询并生成报告。本文简要介绍了ClickHouse在其早期发展阶段的目标。
+ClickHouse最初是为 [Yandex.Metrica](https://metrica.yandex.com/)([世界第二大Web分析平台](http://w3techs.com/technologies/overview/traffic_analysis/all))而开发的,多年来一直作为该系统的核心组件被持续使用。目前为止,该系统在ClickHouse中存有超过13万亿条记录,每天处理超过200亿个事件。它允许直接从原始数据中动态查询并生成报告。本文简要介绍了ClickHouse在其早期发展阶段的目标。
Yandex.Metrica基于用户定义的字段,对实时访问、连接会话,生成实时的统计报表。这种需求往往需要复杂聚合方式,比如对访问用户进行去重。构建报表的数据,是实时接收存储的新数据。
diff --git a/docs/zh/introduction/index.md b/docs/zh/introduction/index.md
new file mode 100644
index 00000000000..4bc6a76857a
--- /dev/null
+++ b/docs/zh/introduction/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u5BFC\u8A00"
+toc_priority: 1
+---
+
+
diff --git a/docs/zh/introduction/performance.md b/docs/zh/introduction/performance.md
index ed44ec760bb..9c5ce29df6f 100644
--- a/docs/zh/introduction/performance.md
+++ b/docs/zh/introduction/performance.md
@@ -1,4 +1,5 @@
-# Performance {#performance}
+
+# 性能 {#performance}
根据Yandex的内部测试结果,ClickHouse表现出了比同类可比较产品更优的性能。你可以在 [这里](https://clickhouse.tech/benchmark.html) 查看具体的测试结果。
diff --git a/docs/zh/operations/access_rights.md b/docs/zh/operations/access_rights.md
index 1c648a29f26..0178001e74f 100644
--- a/docs/zh/operations/access_rights.md
+++ b/docs/zh/operations/access_rights.md
@@ -1,8 +1,9 @@
-# Access Rights {#access-rights}
-Users and access rights are set up in the user config. This is usually `users.xml`.
+# 访问权限 {#access-rights}
-Users are recorded in the `users` section. Here is a fragment of the `users.xml` file:
+用户和访问权限在用户配置中设置,通常是 `users.xml`。
+
+用户记录在 `users` 部分中。下面是 `users.xml` 文件的一个片段:
``` xml
@@ -59,15 +60,15 @@ Users are recorded in the `users` section. Here is a fragment of the `users.xml`
```
-You can see a declaration from two users: `default`and`web`. We added the `web` user separately.
+您可以看到两个用户的声明:`default` 和 `web`。`web` 用户是我们单独添加的。
-The `default` user is chosen in cases when the username is not passed. The `default` user is also used for distributed query processing, if the configuration of the server or cluster doesn’t specify the `user` and `password` (see the section on the [Distributed](../operations/table_engines/distributed.md) engine).
+当未传入用户名时,会使用 `default` 用户。如果服务器或集群的配置没有为分布式查询处理指定 `user` 和 `password`,`default` 用户也会被用于分布式查询处理(参见 [Distributed](../engines/table_engines/special/distributed.md) 引擎一节)。
The user that is used for exchanging information between servers combined in a cluster must not have substantial restrictions or quotas – otherwise, distributed queries will fail.
-The password is specified in clear text (not recommended) or in SHA-256. The hash isn’t salted. In this regard, you should not consider these passwords as providing security against potential malicious attacks. Rather, they are necessary for protection from employees.
+密码以明文(不推荐)或SHA-256哈希的形式指定。哈希未加盐,因此不应认为这些密码能够抵御潜在的恶意攻击;它们更多是为了防止员工误操作。
-A list of networks is specified that access is allowed from. In this example, the list of networks for both users is loaded from a separate file (`/etc/metrika.xml`) containing the `networks` substitution. Here is a fragment of it:
+接下来指定允许访问的网络列表。在此示例中,两个用户的网络列表都从一个单独的文件(`/etc/metrika.xml`)加载,该文件包含 `networks` 替换。下面是它的一个片段:
``` xml
@@ -81,21 +82,21 @@ A list of networks is specified that access is allowed from. In this example, th
```
-You could define this list of networks directly in `users.xml`, or in a file in the `users.d` directory (for more information, see the section «[Configuration files](configuration_files.md#configuration_files)»).
+您可以直接在 `users.xml` 中定义此网络列表,也可以在 `users.d` 目录下的文件中定义(更多信息参见«[配置文件](configuration_files.md#configuration_files)»一节)。
-The config includes comments explaining how to open access from everywhere.
+该配置文件中包含注释,解释了如何开放来自任意地址的访问。
-For use in production, only specify `ip` elements (IP addresses and their masks), since using `host` and `hoost_regexp` might cause extra latency.
+在生产环境中使用时,请只指定 `ip` 元素(IP地址及其掩码),因为使用 `host` 和 `host_regexp` 可能导致额外的延迟。
-Next the user settings profile is specified (see the section «[Settings profiles](settings/settings_profiles.md)»). You can specify the default profile, `default'`. The profile can have any name. You can specify the same profile for different users. The most important thing you can write in the settings profile is `readonly=1`, which ensures read-only access.
-Then specify the quota to be used (see the section «[Quotas](quotas.md#quotas)»). You can specify the default quota: `default`. It is set in the config by default to only count resource usage, without restricting it. The quota can have any name. You can specify the same quota for different users – in this case, resource usage is calculated for each user individually.
+接下来指定用户的设置配置文件(参见«[设置配置文件](settings/settings_profiles.md)»一节)。您可以指定默认配置文件 `default`。配置文件可以使用任意名称,也可以为不同的用户指定同一个配置文件。设置配置文件中最重要的配置是 `readonly=1`,它保证只读访问。
+然后指定要使用的配额(参见«[配额](quotas.md#quotas)»一节)。您可以指定默认配额 `default`,它在默认配置中只统计资源使用情况而不加以限制。配额可以使用任意名称,也可以为不同的用户指定同一个配额;这种情况下,资源使用量会按每个用户单独计算。
-In the optional `` section, you can also specify a list of databases that the user can access. By default, all databases are available to the user. You can specify the `default` database. In this case, the user will receive access to the database by default.
+在可选的 `<allow_databases>` 部分中,您还可以指定用户可以访问的数据库列表。默认情况下,用户可以访问所有数据库。您可以指定 `default` 数据库,在这种情况下,用户默认获得对该数据库的访问权限。
-Access to the `system` database is always allowed (since this database is used for processing queries).
+对 `system` 数据库的访问始终是允许的(因为该数据库用于处理查询)。
-The user can get a list of all databases and tables in them by using `SHOW` queries or system tables, even if access to individual databases isn’t allowed.
+即使不允许访问某个数据库,用户仍然可以通过 `SHOW` 查询或系统表获取所有数据库和表的列表,如下例所示。
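+例如,下面是一个简单的示意(对 `system` 数据库的访问始终是允许的):
+
+``` sql
+-- 列出当前用户可见的所有数据库
+SHOW DATABASES;
+
+-- 列出 system 数据库中的表
+SHOW TABLES FROM system;
+```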
-Database access is not related to the [readonly](settings/permissions_for_queries.md#settings_readonly) setting. You can’t grant full access to one database and `readonly` access to another one.
+数据库访问权限与 [readonly](settings/permissions_for_queries.md#settings_readonly) 设置无关。您不能对一个数据库授予完全访问权限,而只对另一个数据库授予 `readonly` 访问权限。
-[Original article](https://clickhouse.tech/docs/en/operations/access_rights/)
+[原始文章](https://clickhouse.tech/docs/en/operations/access_rights/)
diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md
deleted file mode 120000
index 1003fb30e61..00000000000
--- a/docs/zh/operations/backup.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/backup.md
\ No newline at end of file
diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md
new file mode 100644
index 00000000000..256ddddd2c2
--- /dev/null
+++ b/docs/zh/operations/backup.md
@@ -0,0 +1,41 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 49
+toc_title: "\u6570\u636E\u5907\u4EFD"
+---
+
+# 数据备份 {#data-backup}
+
+虽然 [复制](../engines/table_engines/mergetree_family/replication.md) 可以防止硬件故障,但它并不能防止人为错误:误删数据、删错了表或删错了集群上的表,以及导致数据处理错误或数据损坏的软件缺陷。在许多情况下,这类错误会影响所有副本。ClickHouse内置了防止某些类型错误的保护措施,例如默认情况下 [您不能使用类似MergeTree的引擎删除包含超过50Gb数据的表](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330)。但是,这些保障措施并不能涵盖所有可能的情况,而且可以被规避。
+
+为了有效降低人为错误带来的风险,您应该 **提前** 仔细制定数据备份和还原策略。
+
+每家公司可用的资源和业务需求各不相同,因此不存在一个适合各种情况的ClickHouse备份和恢复通用方案。适用于GB级数据的方案,未必适用于几十PB的数据。下面将讨论几种各有优缺点的可行方法。最好同时使用几种方法而不是只用一种,以弥补各自的缺点。
+
+!!! note "注"
+    请记住,如果您备份了某些内容但从未尝试过还原,那么当您真正需要时,恢复可能无法正常工作(或者至少比业务能够容忍的时间更长)。因此,无论选择哪种备份方法,请确保还原过程自动化,并定期在备用ClickHouse集群上演练。
+
+## 将源数据复制到其他地方 {#duplicating-source-data-somewhere-else}
+
+通常,摄入到ClickHouse的数据会经过某种持久化队列,例如 [Apache Kafka](https://kafka.apache.org)。在这种情况下,可以配置一组额外的订阅者,在数据写入ClickHouse的同时读取相同的数据流,并将其存放到冷存储中。大多数公司已经有默认推荐的冷存储,可能是对象存储或分布式文件系统,如 [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)。
+
+## 文件系统快照 {#filesystem-snapshots}
+
+某些本地文件系统提供快照功能(例如 [ZFS](https://en.wikipedia.org/wiki/ZFS)),但它们可能不是承接实时查询的最佳选择。一种可行的方案是在这类文件系统上创建额外的副本,并将它们排除在用于 `SELECT` 查询的 [Distributed](../engines/table_engines/special/distributed.md) 表之外。这样,任何修改数据的查询都无法访问这些副本上的快照。此外,这些副本可以采用特殊的硬件配置,例如每台服务器挂载更多磁盘,以获得更好的性价比。
+
+## clickhouse-copier {#clickhouse-copier}
+
+[clickhouse-copier](utilities/clickhouse-copier.md) 是一个多功能工具,最初用于对PB级的表重新分片。由于它能够在ClickHouse表和集群之间可靠地复制数据,因此也可用于备份和还原。
+
+对于较小的数据量,向远程表执行简单的 `INSERT INTO ... SELECT ...` 也可以工作(见本节末尾的示意)。
+
+## 部件操作 {#manipulations-with-parts}
+
+ClickHouse允许使用 `ALTER TABLE ... FREEZE PARTITION ...` 查询创建表分区的本地副本。它通过在 `/var/lib/clickhouse/shadow/` 目录中创建硬链接来实现,因此通常不会为旧数据占用额外的磁盘空间。创建出的文件副本不再由ClickHouse服务器管理,您可以将它们留在原处:这样就得到了一个无需任何外部系统的简单备份,但它仍然容易受硬件问题影响。因此,最好将它们远程复制到其他位置,然后删除本地副本。分布式文件系统和对象存储仍然是不错的选择,容量足够大的普通文件服务器也可以(在这种情况下,可以通过网络文件系统或 [rsync](https://en.wikipedia.org/wiki/Rsync) 进行传输)。
+
+有关分区操作相关查询的详细信息,请参阅 [ALTER 文档](../sql_reference/statements/alter.md#alter_manipulations-with-partitions)。
+
+第三方工具 [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup) 可用于自动化此方法。
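+下面是一个简化的示意,综合上面两种做法;其中表名 `hits`、备份表 `hits_backup`、主机名 `prod-host` 以及按 `toYYYYMM(EventDate)` 分区都只是假设:
+
+``` sql
+-- 为 2020 年 3 月对应的分区创建本地快照(硬链接位于 /var/lib/clickhouse/shadow/)
+ALTER TABLE hits FREEZE PARTITION 202003;
+
+-- 对于较小的数据量:在备份服务器上,从生产服务器拉取数据
+-- (假设 hits_backup 已预先创建,且结构与 hits 相同)
+INSERT INTO hits_backup SELECT * FROM remote('prod-host:9000', 'default', 'hits');
+```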
+
+[原始文章](https://clickhouse.tech/docs/en/operations/backup/)
diff --git a/docs/zh/operations/configuration_files.md b/docs/zh/operations/configuration_files.md
index b0c3d22fdaf..6505cfb1fb9 100644
--- a/docs/zh/operations/configuration_files.md
+++ b/docs/zh/operations/configuration_files.md
@@ -1,24 +1,25 @@
-# Configuration Files {#configuration_files}
-The main server config file is `config.xml`. It resides in the `/etc/clickhouse-server/` directory.
+# 配置文件 {#configuration_files}
-Individual settings can be overridden in the `*.xml` and `*.conf` files in the `conf.d` and `config.d` directories next to the config file.
+主服务器配置文件是 `config.xml`,位于 `/etc/clickhouse-server/` 目录中。
-The `replace` or `remove` attributes can be specified for the elements of these config files.
+单个设置可以在配置文件旁边的 `conf.d` 和 `config.d` 目录中的 `*.xml` 和 `*.conf` 文件里被覆盖。
-If neither is specified, it combines the contents of elements recursively, replacing values of duplicate children.
+可以为这些配置文件中的元素指定 `replace` 或 `remove` 属性。
-If `replace` is specified, it replaces the entire element with the specified one.
+如果两者都未指定,则递归合并元素的内容,并替换重复子元素的值。
-If `remove` is specified, it deletes the element.
+如果指定了 `replace`,则用指定的元素替换整个元素。
-The config can also define «substitutions». If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include\_from](server_settings/settings.md#server_settings-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](#macros) server\_settings/settings.md)).
+如果指定了 `remove`,则删除该元素。
-Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
+配置还可以定义«替换»(substitutions)。如果某个元素具有 `incl` 属性,则会用文件中相应的替换作为其值。默认情况下,包含替换的文件路径为 `/etc/metrika.xml`,这可以通过服务器配置中的 [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from) 元素修改。替换值在该文件的 `/yandex/substitution_name` 元素中指定。如果 `incl` 中指定的替换不存在,会被记录到日志中。要防止ClickHouse记录缺失的替换,请指定 `optional="true"` 属性(例如 [宏](#macros) 的设置)。
-The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the ‘users\_config’ element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`.
+替换也可以从ZooKeeper获取。为此,请指定属性 `from_zk = "/path/to/node"`,元素值会被替换为ZooKeeper中 `/path/to/node` 节点的内容。您还可以将整个XML子树放在ZooKeeper节点上,它会被完整地插入到源元素中。
-In addition, `users_config` may have overrides in files from the `users_config.d` directory (for example, `users.d`) and substitutions. For example, you can have separate config file for each user like this:
+`config.xml` 文件可以指定一个单独的配置,用于存放用户设置、配置文件(profiles)和配额。该配置的相对路径在 ‘users\_config’ 元素中设置,默认为 `users.xml`。如果省略 `users_config`,则用户设置、配置文件和配额直接在 `config.xml` 中指定。
+
+此外,`users_config` 中的设置可以被 `users_config.d` 目录(例如 `users.d`)中的文件覆盖,也可以使用替换。例如,您可以像下面这样为每个用户提供单独的配置文件:
``` xml
$ cat /etc/clickhouse-server/users.d/alice.xml
@@ -36,8 +37,8 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
    ```
-For each config file, the server also generates `file-preprocessed.xml` files when starting. These files contain all the completed substitutions and overrides, and they are intended for informational use. If ZooKeeper substitutions were used in the config files but ZooKeeper is not available on the server start, the server loads the configuration from the preprocessed file.
+对于每个配置文件,服务器在启动时还会生成 `file-preprocessed.xml` 文件。这些文件包含所有已完成的替换和覆盖,仅供参考。如果配置文件中使用了ZooKeeper替换,但服务器启动时ZooKeeper不可用,服务器会从预处理文件中加载配置。
-The server tracks changes in config files, as well as files and ZooKeeper nodes that were used when performing substitutions and overrides, and reloads the settings for users and clusters on the fly. This means that you can modify the cluster, users, and their settings without restarting the server.
+服务器会跟踪配置文件的更改,以及执行替换和覆盖时用到的文件和ZooKeeper节点,并即时重新加载用户和集群的设置。这意味着您可以在不重启服务器的情况下修改集群、用户及其设置。
-[Original article](https://clickhouse.tech/docs/en/operations/configuration_files/)
+[原始文章](https://clickhouse.tech/docs/en/operations/configuration_files/)
diff --git a/docs/zh/operations/index.md b/docs/zh/operations/index.md
index 596ec065f40..4d31fce45af 100644
--- a/docs/zh/operations/index.md
+++ b/docs/zh/operations/index.md
@@ -1,3 +1,4 @@
-# Operations {#operations}
-[Original article](https://clickhouse.tech/docs/en/operations/)
+# 操作 {#operations}
+
+[原始文章](https://clickhouse.tech/docs/en/operations/)
diff --git a/docs/zh/operations/monitoring.md b/docs/zh/operations/monitoring.md
index 97cb8329b2b..6683903f531 100644
--- a/docs/zh/operations/monitoring.md
+++ b/docs/zh/operations/monitoring.md
@@ -1,3 +1,4 @@
+
# 监控 {#jian-kong}
可以监控到:
@@ -28,9 +29,9 @@ ClickHouse 收集的指标项:
- 服务用于计算的资源占用的各种指标。
- 关于查询处理的常见统计信息。
-可以在 [system.metrics](system_tables.md#system_tables-metrics) ,[system.events](system_tables.md#system_tables-events) 以及[system.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
+可以在 [system.metrics](system_tables.md#system_tables-metrics)、[system.events](system_tables.md#system_tables-events) 以及 [system.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项,例如:
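+下面是一个简单的示意查询(各系统表的具体字段以所用版本为准):
+
+``` sql
+-- 当前的瞬时指标
+SELECT metric, value FROM system.metrics LIMIT 10;
+
+-- 自服务器启动以来的累计事件计数
+SELECT event, value FROM system.events ORDER BY value DESC LIMIT 10;
+```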
-可以配置ClickHouse 往 [Graphite](https://github.com/graphite-project)导入指标。 参考 [Graphite section](server_settings/settings.md#server_settings-graphite) 配置文件。在配置指标导出之前,需要参考Graphite[官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建服务。
+可以配置ClickHouse向 [Graphite](https://github.com/graphite-project) 导出指标。参考配置文件中的 [Graphite 部分](server_configuration_parameters/settings.md#server_configuration_parameters-graphite)。在配置指标导出之前,需要参考Graphite [官方教程](https://graphite.readthedocs.io/en/latest/install.html) 搭建服务。
此外,您可以通过HTTP API监视服务器可用性。 将HTTP GET请求发送到 `/ping`。 如果服务器可用,它将以 `200 OK` 响应。
diff --git a/docs/zh/operations/optimizing_performance/index.md b/docs/zh/operations/optimizing_performance/index.md
new file mode 100644
index 00000000000..786a7200b28
--- /dev/null
+++ b/docs/zh/operations/optimizing_performance/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u4F18\u5316\u6027\u80FD"
+toc_priority: 52
+---
+
+
diff --git a/docs/zh/operations/optimizing_performance/sampling_query_profiler.md b/docs/zh/operations/optimizing_performance/sampling_query_profiler.md
new file mode 100644
index 00000000000..6f0eef0a1ed
--- /dev/null
+++ b/docs/zh/operations/optimizing_performance/sampling_query_profiler.md
@@ -0,0 +1,64 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 54
+toc_title: "\u67E5\u8BE2\u5206\u6790"
+---
+
+# 采样查询探查器 {#sampling-query-profiler}
+
+ClickHouse内置了用于分析查询执行的采样探查器。使用探查器,您可以找到查询执行期间使用最频繁的源代码例程。您既可以跟踪CPU时间,也可以跟踪包括空闲时间在内的挂钟时间。
+
+使用探查器的步骤:
+
+- 配置服务器配置中的 [trace\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) 部分。
+
+    该部分配置 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) 系统表,其中包含探查器运行的结果。它是默认配置的。请记住,此表中的数据仅对正在运行的服务器有效:服务器重启后,ClickHouse不会清理该表,而其中存储的虚拟内存地址都可能失效。
+
+- 配置 [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) 或 [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) 设置。这两个设置可以同时使用。
+
+    这些设置用于配置探查器的计时器。由于它们是会话设置,您可以为整个服务器、单个用户或用户配置文件、交互式会话以及每个单独的查询设置不同的采样频率。
+
+默认采样频率为每秒一个样本,CPU和实时计时器都启用。该频率足以收集有关ClickHouse集群的信息,同时不会影响ClickHouse服务器的性能。如果您需要分析单个查询,请尝试更高的采样频率。
+
+分析 `trace_log` 系统表:
+
+- 安装 `clickhouse-common-static-dbg` 包。参见 [从DEB软件包安装](../../getting_started/install.md#install-from-deb-packages)。
+
+- 通过 [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) 设置启用内省函数。
+
+    出于安全原因,内省函数默认被禁用。
+
+- 使用 `addressToLine`、`addressToSymbol` 和 `demangle` [内省函数](../../sql_reference/functions/introspection.md) 获取函数名称及其在ClickHouse代码中的位置。要获取某个查询的性能剖析,您需要对 `trace_log` 表中的数据进行聚合。您可以按单个函数或按整个堆栈跟踪聚合。
+
+如果需要可视化 `trace_log` 信息,可以尝试 [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) 和 [speedscope](https://github.com/laplab/clickhouse-speedscope)。
+
+## 示例 {#example}
+
+在这个例子中,我们:
+
+- 按查询标识符和当前日期过滤 `trace_log` 数据。
+
+- 按堆栈跟踪聚合。
+
+- 使用内省函数,得到一个包含以下内容的报告:
+
+    - 符号名称和相应的源代码函数。
+    - 这些函数的源代码位置。
+
+
+
+``` sql
+SELECT
+    count(),
+    arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n    ', addressToLine(x)), trace), '\n') AS sym
+FROM system.trace_log
+WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today())
+GROUP BY trace
+ORDER BY count() DESC
+LIMIT 10
+```
+
+``` text
+{% include "operations/performance/sampling_query_profiler_example_result.txt" %}
+```
diff --git a/docs/zh/operations/performance/sampling_query_profiler.md b/docs/zh/operations/performance/sampling_query_profiler.md
deleted file mode 120000
index c55c58684ba..00000000000
--- a/docs/zh/operations/performance/sampling_query_profiler.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/performance/sampling_query_profiler.md
\ No newline at end of file
diff --git a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt
deleted file mode 120000
index 58c5abe7122..00000000000
--- a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/performance/sampling_query_profiler_example_result.txt
\ No newline at end of file
diff --git a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt
new file mode 100644
index 00000000000..56c2fdf9c65
--- /dev/null
+++ b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt
@@ -0,0 +1,556 @@
+Row 1:
+──────
+count(): 6344
+sym: StackTrace::StackTrace(ucontext_t const&)
+  /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0]
+
/home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +read + +DB::ReadBufferFromFileDescriptor::nextImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 +DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 +DB::CompressedReadBufferFromFile::nextImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 +DB::CompressedReadBufferFromFile::seek(unsigned long, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:63 +DB::MergeTreeReaderStream::seekToMark(unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:200 +std::_Function_handler > const&), DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool)::{lambda(bool)#1}::operator()(bool) const::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:212 +DB::IDataType::deserializeBinaryBulkWithMultipleStreams(DB::IColumn&, unsigned long, DB::IDataType::DeserializeBinaryBulkSettings&, std::shared_ptr&) const + /usr/local/include/c++/9.1.0/bits/std_function.h:690 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 +DB::MergeTreeRangeReader::continueReadingChain(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:487 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), 
DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 2: +────── +count(): 3295 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +__pthread_cond_wait + +std::condition_variable::wait(std::unique_lock&) + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/src/c++11/../../../../../gcc-9.1.0/libstdc++-v3/src/c++11/condition_variable.cc:55 +Poco::Semaphore::wait() + /home/milovidov/ClickHouse/build_gcc9/../contrib/poco/Foundation/src/Semaphore.cpp:61 +DB::UnionBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Core/Block.h:90 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::LimitBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::AsynchronousBlockInputStream::calculate() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +std::_Function_handler::_M_invoke(std::_Any_data const&) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:551 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 +ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/ThreadPool.h:146 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 3: +────── +count(): 1978 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, 
DB::PODArray, 15ul, 16ul>&) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 4: +────── +count(): 1913 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) + 
/opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 5: +────── +count(): 1672 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) + 
/opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 6: +────── +count(): 1531 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +read + +DB::ReadBufferFromFileDescriptor::nextImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 +DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 +DB::CompressedReadBufferFromFile::nextImpl() + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 +void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:53 +DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 +DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 7: +────── +count(): 1034 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, 
unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 8: +────── +count(): 989 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +__lll_lock_wait + +pthread_mutex_lock + +DB::MergeTreeReaderStream::loadMarks() + 
/usr/local/include/c++/9.1.0/bits/std_mutex.h:103 +DB::MergeTreeReaderStream::MergeTreeReaderStream(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&, unsigned long, std::vector > const&, DB::MarkCache*, bool, DB::UncompressedCache*, unsigned long, unsigned long, unsigned long, DB::MergeTreeIndexGranularityInfo const*, std::function const&, int) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:107 +std::_Function_handler > const&), DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int)::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) + /usr/local/include/c++/9.1.0/bits/unique_ptr.h:147 +DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::MergeTreeReader::MergeTreeReader(std::__cxx11::basic_string, std::allocator > const&, std::shared_ptr const&, DB::NamesAndTypesList const&, DB::UncompressedCache*, DB::MarkCache*, bool, DB::MergeTreeData const&, std::vector > const&, unsigned long, unsigned long, std::map, std::allocator >, double, std::less, std::allocator > >, std::allocator, std::allocator > const, double> > > const&, std::function const&, int) + /usr/local/include/c++/9.1.0/bits/stl_list.h:303 +DB::MergeTreeThreadSelectBlockInputStream::getNewTask() + /usr/local/include/c++/9.1.0/bits/std_function.h:259 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:54 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 9: +─────── +count(): 779 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +void 
DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) + /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 +DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 +DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 10: +─────── +count(): 666 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) + 
/usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 +DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 +DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone diff --git a/docs/zh/operations/performance_test.md b/docs/zh/operations/performance_test.md deleted file mode 120000 index a74c126c63f..00000000000 --- a/docs/zh/operations/performance_test.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/performance_test.md \ No newline at end of file diff --git a/docs/zh/operations/performance_test.md b/docs/zh/operations/performance_test.md new file mode 100644 index 00000000000..f567a9528a0 --- /dev/null +++ b/docs/zh/operations/performance_test.md @@ -0,0 +1,82 @@ +--- +machine_translated: true +machine_translated_rev: 
b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 54
+toc_title: "\u6D4B\u8BD5\u786C\u4EF6"
+---
+
+# 如何使用ClickHouse测试您的硬件 {#how-to-test-your-hardware-with-clickhouse}
+
+使用此指南,您无需安装ClickHouse软件包,即可在任何服务器上运行基本的ClickHouse性能测试。
+
+1. 转到 “commits” 页面:https://github.com/ClickHouse/ClickHouse/commits/master
+
+2. 点击最近一次提交旁的绿色对勾或红色叉号,在检查列表中找到 “ClickHouse Build Check”,然后点击其旁边的 “Details” 链接。有些提交(例如仅修改文档的提交)没有这个链接,此时请选择最近一个带有该链接的提交。
+
+3. 复制amd64或aarch64对应的 “clickhouse” 二进制文件链接。
+
+4. ssh到服务器并用wget下载它:
+
+
+
+      # For amd64:
+      wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
+      # For aarch64:
+      wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
+      # Then do:
+      chmod a+x clickhouse
+
+5. 下载配置文件:
+
+
+
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
+      mkdir config.d
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
+
+6. 下载基准测试文件:
+
+
+
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
+      chmod a+x benchmark-new.sh
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
+
+7. 按照 [Yandex.Metrica 数据集](../getting_started/example_datasets/metrica.md) 的说明下载测试数据(“hits” 表包含1亿行):
+
+
+
+      wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
+      tar xvf hits_100m_obfuscated_v1.tar.xz -C .
+      mv hits_100m_obfuscated_v1/* .
+
+8. 运行服务器:
+
+
+
+      ./clickhouse server
+
+9. 检查数据:在另一个终端ssh到服务器
+
+
+
+      ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
+      100000000
+
+10. 编辑 benchmark-new.sh,将 “clickhouse-client” 改为 “./clickhouse client”,并添加 “--max\_memory\_usage 100000000000” 参数。
+
+
+
+      mcedit benchmark-new.sh
+
+11. 运行基准测试:
+
+
+
+      ./benchmark-new.sh hits_100m_obfuscated
+
+12. 将测试结果数值和硬件配置信息发送到 clickhouse-feedback@yandex-team.com
+
+所有结果都在这里公布:https://clickhouse.tech/benchmark\_hardware.html
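+作为补充,也可以手动执行基准集中的单条查询来粗略验证环境(示意;假设已按上文装入 `hits_100m_obfuscated` 表):
+
+``` sql
+-- queries.sql 中一条典型的过滤计数查询
+SELECT count() FROM hits_100m_obfuscated WHERE AdvEngineID != 0;
+```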
diff --git a/docs/zh/operations/quotas.md b/docs/zh/operations/quotas.md
index 06f25f57016..3838da00e56 100644
--- a/docs/zh/operations/quotas.md
+++ b/docs/zh/operations/quotas.md
@@ -1,16 +1,17 @@
-# Quotas {#quotas}
-Quotas allow you to limit resource usage over a period of time, or simply track the use of resources.
-Quotas are set up in the user config. This is usually ‘users.xml’.
+# 配额 {#quotas}
+
+配额允许您在一段时间内限制资源使用情况,或者只是跟踪资源的使用。
+配额在用户配置中设置,通常是 ‘users.xml’。
 
 The system also has a feature for limiting the complexity of a single query. See the section «Restrictions on query complexity»).
 
-In contrast to query complexity restrictions, quotas:
+与查询复杂性限制相比,配额:
 
-- Place restrictions on a set of queries that can be run over a period of time, instead of limiting a single query.
-- Account for resources spent on all remote servers for distributed query processing.
+- 对可以在一段时间内运行的一组查询设置限制,而不是限制单个查询。
+- 计入分布式查询处理时在所有远程服务器上消耗的资源。
 
-Let’s look at the section of the ‘users.xml’ file that defines quotas.
+让我们来看看 ‘users.xml’ 文件中定义配额的部分。
 
 ``` xml
@@ -32,8 +33,8 @@ Let’s look at the section of the ‘users.xml’ file that defines quotas.
 ```
 
-By default, the quota just tracks resource consumption for each hour, without limiting usage.
-The resource consumption calculated for each interval is output to the server log after each request.
+默认情况下,配额只跟踪每小时的资源消耗,而不限制使用情况。
+每次请求后,计算出的每个时间间隔的资源消耗将输出到服务器日志中。
 
 ``` xml
@@ -61,11 +62,11 @@ The resource consumption calculated for each interval is output to the server lo
 ```
 
-For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval doesn’t necessarily begin at midnight.
+对于 ‘statbox’ 配额,每小时和每 24 小时(86,400 秒)都设置了限制。 时间间隔从实现定义的固定时刻开始计数。 换句话说,24小时间隔不一定从午夜开始。
 
-When the interval ends, all collected values are cleared. For the next hour, the quota calculation starts over.
+间隔结束时,将清除所有收集的值。 在下一个小时内,配额计算将重新开始。
 
-Here are the amounts that can be restricted:
+以下是可以限制的各项指标:
 
 `queries` – The total number of requests.
 
@@ -77,7 +78,7 @@ Here are the amounts that can be restricted:
 
 `execution_time` – The total query execution time, in seconds (wall time).
 
-If the limit is exceeded for at least one time interval, an exception is thrown with a text about which restriction was exceeded, for which interval, and when the new interval begins (when queries can be sent again).
+如果在至少一个时间间隔内超出限制,则会引发异常,其中包含的文本说明了超出了哪个限制、对应哪个时间间隔,以及新时间间隔何时开始(届时可以再次发送查询)。
 
 Quotas can use the «quota key» feature in order to report on resources for multiple keys independently. Here is an example of this:
 
 ``` xml
@@ -96,10 +97,10 @@ Quotas can use the «quota key» feature in order to report on resources for mul
 ```
 
-The quota is assigned to users in the ‘users’ section of the config. See the section «Access rights».
+配额在配置的 ‘users’ 部分中分配给用户。 请参阅 «访问权限» 一节。
 
 For distributed query processing, the accumulated amounts are stored on the requestor server. So if the user goes to another server, the quota there will «start over».
 
-When the server is restarted, quotas are reset.
+服务器重新启动时,将重置配额。
 
-[Original article](https://clickhouse.tech/docs/en/operations/quotas/)
+[原始文章](https://clickhouse.tech/docs/en/operations/quotas/)
diff --git a/docs/zh/operations/requirements.md b/docs/zh/operations/requirements.md
deleted file mode 120000
index a71283af25c..00000000000
--- a/docs/zh/operations/requirements.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/requirements.md
\ No newline at end of file
diff --git a/docs/zh/operations/requirements.md b/docs/zh/operations/requirements.md
new file mode 100644
index 00000000000..d48de98f85c
--- /dev/null
+++ b/docs/zh/operations/requirements.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 44
+toc_title: "\u8981\u6C42"
+---
+
+# 要求 {#requirements}
+
+## CPU {#cpu}
+
+对于从预构建的deb包进行安装,请使用具有x86\_64架构并支持SSE4.2指令的CPU。 要使用不支持SSE4.2或具有AArch64或PowerPC64LE体系结构的处理器运行ClickHouse,您应该从源代码构建ClickHouse。
+
+ClickHouse实现并行数据处理,并会使用所有可用的硬件资源。 在选择处理器时,请考虑到:在内核数量多但时钟频率较低的配置下,ClickHouse的工作效率高于内核数量少但时钟频率较高的配置。 例如,16核2600MHz优于8核3600MHz。
+
+建议启用 **睿频加速(Turbo Boost)** 和 **超线程(hyper-threading)** 技术。 它们可以显著提高典型工作负载的性能。
+
+## RAM {#ram}
+
+我们建议使用至少4GB的RAM来执行非平凡的查询。 ClickHouse服务器可以在少得多的RAM上运行,但处理查询仍需要足够的内存。
+
+所需的RAM容量取决于:
+
+- 查询的复杂性。
+- 查询中处理的数据量。
+
+要计算所需的RAM容量,您应该估计 [GROUP BY](../sql_reference/statements/select.md#select-group-by-clause)、[DISTINCT](../sql_reference/statements/select.md#select-distinct)、[JOIN](../sql_reference/statements/select.md#select-join) 以及您使用的其他操作的临时数据大小。
+
+ClickHouse可以使用外部存储器来存储临时数据。 详情参阅 [在外部存储器中分组](../sql_reference/statements/select.md#select-group-by-in-external-memory)。
+
+## 交换文件 {#swap-file}
+
+在生产环境中请禁用交换文件(swap)。
+
+## 存储子系统 {#storage-subsystem}
+
+您需要有2GB的可用磁盘空间来安装ClickHouse。
+
+数据所需的存储量应单独计算。 评估应包括:
+
+- 估计的数据量。
+
+    您可以取一些数据样本并计算行的平均大小,然后将该值乘以计划存储的行数。
+
+- 数据压缩系数。
+
+    要估计数据压缩系数,请将数据样本加载到ClickHouse中,并将数据的实际大小与存储后的表大小进行比较。 例如,点击流数据的压缩比通常为6-10倍。
+
+要计算最终需要存储的数据量,请将压缩系数应用于估计的数据量。 如果计划将数据存储在多个副本中,则将估计的数据量乘以副本数。
+
+## 网络 {#network}
+
+如果可能,请使用10G或更高速率的网络。
+
+网络带宽对于处理具有大量中间数据的分布式查询至关重要。 此外,网络速度也会影响复制过程。
+
+## 软件 {#software}
+
+ClickHouse主要是为Linux系列操作系统开发的。 推荐的Linux发行版是Ubuntu。 系统中应安装 `tzdata` 软件包。
+
+ClickHouse也可以在其他操作系统家族中运行。 详情参阅文档的 [入门](../getting_started/index.md) 部分。
diff --git a/docs/zh/operations/server_configuration_parameters/index.md b/docs/zh/operations/server_configuration_parameters/index.md
new file mode 100644
index 00000000000..cf3f158b37c
--- /dev/null
+++ b/docs/zh/operations/server_configuration_parameters/index.md
@@ -0,0 +1,12 @@
+
+# 服务器配置参数 {#server-settings}
+
+本节包含无法在会话或查询级别更改的服务器设置的说明。
+
+这些设置存储在ClickHouse服务器的 `config.xml` 文件中。
+
+其他设置在 «[设置](../settings/index.md#settings)» 部分中描述。
+
+在研究这些设置之前,请先阅读 [配置文件](../configuration_files.md#configuration_files) 部分,并注意替换(`incl` 和 `optional` 属性)的使用。
+
+[原始文章](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/)
diff --git a/docs/zh/operations/server_configuration_parameters/settings.md b/docs/zh/operations/server_configuration_parameters/settings.md
new file mode 100644
index 00000000000..f10b6311b27
--- /dev/null
+++ b/docs/zh/operations/server_configuration_parameters/settings.md
@@ -0,0 +1,870 @@
+---
+toc_priority: 57
+toc_title: "\u670D\u52A1\u5668\u8BBE\u7F6E"
+---
+
+# 服务器配置 {#server-settings}
+
+## builtin\_dictionaries\_reload\_interval {#builtin-dictionaries-reload-interval}
+
+重新加载内置字典的间隔时间(以秒为单位)。
+
+ClickHouse每隔x秒重新加载一次内置字典。 这样就可以 “on the fly” 编辑字典,而无需重新启动服务器。
+
+默认值:3600.
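+
+作为补充,下面的示意展示了如何查看字典状态并手动触发重新加载(`SYSTEM RELOAD DICTIONARIES` 作用于外部字典,此处仅作对照演示,并假设服务器上已配置了字典):
+
+``` sql
+-- 查看当前已加载的字典
+SELECT name, type, source FROM system.dictionaries;
+
+-- 手动重新加载外部字典,而无需等待重新加载间隔
+SYSTEM RELOAD DICTIONARIES;
+```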
+ +**示例** + +``` xml +3600 +``` + +## 压缩 {#server-settings-compression} + +数据压缩配置 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-引擎表。 + +!!! warning "警告" + 如果您刚开始使用ClickHouse,请不要使用它。 + +配置模板: + +``` xml + + + ... + ... + ... + + ... + +``` + +`` 参数: + +- `min_part_size` – The minimum size of a data part. +- `min_part_size_ratio` – The ratio of the data part size to the table size. +- `method` – Compression method. Acceptable values: `lz4` 或 `zstd`. + +您可以配置多个 `` 部分。 + +满足条件时的操作: + +- 如果数据部分与条件集匹配,ClickHouse将使用指定的压缩方法。 +- 如果数据部分匹配多个条件集,ClickHouse将使用第一个匹配的条件集。 + +如果没有满足数据部分的条件,ClickHouse使用 `lz4` 压缩。 + +**示例** + +``` xml + + + 10000000000 + 0.01 + zstd + + +``` + +## default\_database {#default-database} + +默认数据库。 + +要获取数据库列表,请使用 [SHOW DATABASES](../../sql_reference/statements/show.md#show-databases) 查询。 + +**示例** + +``` xml +default +``` + +## default\_profile {#default-profile} + +默认配置文件。 + +配置文件位于`user_config`参数指定的文件中 . + +**示例** + +``` xml +default +``` + +## dictionaries\_config {#server_configuration_parameters-dictionaries_config} + +外部字典的配置文件的路径。 + +路径: + +- 指定相对于服务器配置文件的绝对路径或路径。 +- 路径可以包含通配符\*和?. + +另请参阅 “[外部字典](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)”. + +**示例** + +``` xml +*_dictionary.xml +``` + +## dictionaries\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load} + +延迟加载字典。 + +如果 `true`,然后在第一次使用时创建每个字典。 如果字典创建失败,则使用该字典的函数将引发异常。 + +如果 `false`,服务器启动时创建所有字典,如果出现错误,服务器将关闭。 + +默认值为 `true`. + +**示例** + +``` xml +true +``` + +## format\_schema\_path {#server_configuration_parameters-format_schema_path} + +包含输入数据方案的目录路径,例如输入数据的方案 [CapnProto](../../interfaces/formats.md#capnproto) 格式。 + +**示例** + +``` xml + + format_schemas/ +``` + +## 石墨 {#server_configuration_parameters-graphite} + +将数据发送到 [石墨](https://github.com/graphite-project). + +设置: + +- host – The Graphite server. +- port – The port on the Graphite server. +- interval – The interval for sending, in seconds. +- timeout – The timeout for sending data, in seconds. +- root\_path – Prefix for keys. +- metrics – Sending data from the [系统。指标](../../operations/system_tables.md#system_tables-metrics) 桌子 +- events – Sending deltas data accumulated for the time period from the [系统。活动](../../operations/system_tables.md#system_tables-events) 桌子 +- events\_cumulative – Sending cumulative data from the [系统。活动](../../operations/system_tables.md#system_tables-events) 桌子 +- asynchronous\_metrics – Sending data from the [系统。asynchronous\_metrics](../../operations/system_tables.md#system_tables-asynchronous_metrics) 桌子 + +您可以配置多个 `` 条款 例如,您可以使用它以不同的时间间隔发送不同的数据。 + +**示例** + +``` xml + + localhost + 42000 + 0.1 + 60 + one_min + true + true + false + true + +``` + +## graphite\_rollup {#server_configuration_parameters-graphite-rollup} + +石墨细化数据的设置。 + +有关详细信息,请参阅 [GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md). + +**示例** + +``` xml + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + +``` + +## http\_port/https\_port {#http-porthttps-port} + +通过HTTP连接到服务器的端口。 + +如果 `https_port` 被指定, [openSSL](#server_configuration_parameters-openssl) 必须配置。 + +如果 `http_port` 指定时,即使设置了OpenSSL配置,也会忽略该配置。 + +**示例** + +``` xml +0000 +``` + +## http\_server\_default\_response {#server_configuration_parameters-http_server_default_response} + +访问ClickHouse HTTP(s)服务器时默认显示的页面。 +默认值为 “Ok.” (最后有换行符) + +**示例** + +打开 `https://tabix.io/` 访问时 `http://localhost: http_port`. + +``` xml + +
    ]]> +
    +``` + +## 包括\_从 {#server_configuration_parameters-include_from} + +带替换的文件的路径。 + +有关详细信息,请参阅部分 “[配置文件](../configuration_files.md#configuration_files)”. + +**示例** + +``` xml +/etc/metrica.xml +``` + +## interserver\_http\_port {#interserver-http-port} + +用于在ClickHouse服务器之间交换数据的端口。 + +**示例** + +``` xml +9009 +``` + +## interserver\_http\_host {#interserver-http-host} + +其他服务器可用于访问此服务器的主机名。 + +如果省略,它以相同的方式作为定义 `hostname-f` 指挥部 + +用于脱离特定的网络接口。 + +**示例** + +``` xml +example.yandex.ru +``` + +## interserver\_http\_credentials {#server-settings-interserver-http-credentials} + +用户名和密码用于在以下期间进行身份验证 [复制](../../engines/table_engines/mergetree_family/replication.md) 与复制\*引擎。 这些凭据仅用于副本之间的通信,与ClickHouse客户端的凭据无关。 服务器正在检查这些凭据以连接副本,并在连接到其他副本时使用相同的凭据。 因此,这些凭据应该为集群中的所有副本设置相同。 +默认情况下,不使用身份验证。 + +本节包含以下参数: + +- `user` — username. +- `password` — password. + +**示例** + +``` xml + + admin + 222 + +``` + +## keep\_alive\_timeout {#keep-alive-timeout} + +ClickHouse在关闭连接之前等待传入请求的秒数。 默认为3秒。 + +**示例** + +``` xml +3 +``` + +## listen\_host {#server_configuration_parameters-listen_host} + +对请求可能来自的主机的限制。 如果您希望服务器回答所有这些问题,请指定 `::`. + +例: + +``` xml +::1 +127.0.0.1 +``` + +## 记录器 {#server_configuration_parameters-logger} + +日志记录设置。 + +键: + +- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. +- log – The log file. Contains all the entries according to `level`. +- errorlog – Error log file. +- size – Size of the file. Applies to `log`和`errorlog`. 一旦文件到达 `size`,ClickHouse存档并重命名它,并在其位置创建一个新的日志文件。 +- count – The number of archived log files that ClickHouse stores. + +**示例** + +``` xml + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + +``` + +还支持写入系统日志。 配置示例: + +``` xml + + 1 + +
    syslog.remote:10514
    + myhost.local + LOG_LOCAL6 + syslog +
    +
    +``` + +键: + +- use\_syslog — Required setting if you want to write to the syslog. +- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. +- hostname — Optional. The name of the host that logs are sent from. +- facility — [系统日志工具关键字](https://en.wikipedia.org/wiki/Syslog#Facility) 在大写字母与 “LOG\_” 前缀: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`,等等)。 + 默认值: `LOG_USER` 如果 `address` 被指定, `LOG_DAEMON otherwise.` +- format – Message format. Possible values: `bsd` 和 `syslog.` + +## 宏 {#macros} + +复制表的参数替换。 + +如果不使用复制的表,则可以省略。 + +有关详细信息,请参阅部分 “[创建复制的表](../../engines/table_engines/mergetree_family/replication.md)”. + +**示例** + +``` xml + +``` + +## mark\_cache\_size {#server-mark-cache-size} + +表引擎使用的标记缓存的近似大小(以字节为单位) [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家人 + +缓存为服务器共享,并根据需要分配内存。 缓存大小必须至少为5368709120。 + +**示例** + +``` xml +5368709120 +``` + +## max\_concurrent\_queries {#max-concurrent-queries} + +同时处理的请求的最大数量。 + +**示例** + +``` xml +100 +``` + +## max\_connections {#max-connections} + +入站连接的最大数量。 + +**示例** + +``` xml +4096 +``` + +## max\_open\_files {#max-open-files} + +打开文件的最大数量。 + +默认情况下: `maximum`. + +我们建议在Mac OS X中使用此选项,因为 `getrlimit()` 函数返回一个不正确的值。 + +**示例** + +``` xml +262144 +``` + +## max\_table\_size\_to\_drop {#max-table-size-to-drop} + +限制删除表。 + +如果一个大小 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 表超过 `max_table_size_to_drop` (以字节为单位),您无法使用删除查询将其删除。 + +如果仍然需要在不重新启动ClickHouse服务器的情况下删除表,请创建 `/flags/force_drop_table` 文件并运行DROP查询。 + +默认值:50GB。 + +值0表示您可以删除所有表而不受任何限制。 + +**示例** + +``` xml +0 +``` + +## merge\_tree {#server_configuration_parameters-merge_tree} + +微调中的表 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +有关详细信息,请参阅MergeTreeSettings。h头文件。 + +**示例** + +``` xml + + 5 + +``` + +## openSSL {#server_configuration_parameters-openssl} + +SSL客户端/服务器配置。 + +对SSL的支持由 `libpoco` 图书馆. 该接口在文件中描述 [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) + +服务器/客户端设置的密钥: + +- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. +- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` 包含证书。 +- caConfig – The path to the file or directory that contains trusted root certificates. +- verificationMode – The method for checking the node's certificates. Details are in the description of the [A.背景](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) 同学们 可能的值: `none`, `relaxed`, `strict`, `once`. +- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value. +- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| +- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. +- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. 可接受的值: `true`, `false`. +- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. 始终建议使用此参数,因为如果服务器缓存会话,以及客户端请求缓存,它有助于避免出现问题。 默认值: `${application.name}`. +- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 
0 – Unlimited sessions. +- sessionTimeout – Time for caching the session on the server. +- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. +- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. +- fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS. +- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. +- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- disableProtocols – Protocols that are not allowed to use. +- preferServerCiphers – Preferred server ciphers on the client. + +**设置示例:** + +``` xml + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + +``` + +## part\_log {#server_configuration_parameters-part-log} + +记录与之关联的事件 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). 例如,添加或合并数据。 您可以使用日志来模拟合并算法并比较它们的特征。 您可以可视化合并过程。 + +查询记录在 [系统。part\_log](../../operations/system_tables.md#system_tables-part-log) 表,而不是在一个单独的文件。 您可以在以下命令中配置此表的名称 `table` 参数(见下文)。 + +使用以下参数配置日志记录: + +- `database` – Name of the database. +- `table` – Name of the system table. +- `partition_by` – Sets a [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md). +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +**示例** + +``` xml + + system + part_log
    + toMonday(event_date) + 7500 +
    +``` + +## 路径 {#server_configuration_parameters-path} + +包含数据的目录的路径。 + +!!! note "注" + 尾部斜杠是强制性的。 + +**示例** + +``` xml +/var/lib/clickhouse/ +``` + +## query\_log {#server_configuration_parameters-query-log} + +用于记录接收到的查询的设置 [log\_queries=1](../settings/settings.md) 设置。 + +查询记录在 [系统。query\_log](../../operations/system_tables.md#system_tables-query_log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。 + +使用以下参数配置日志记录: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) 为了一张桌子 +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +如果该表不存在,ClickHouse将创建它。 如果在ClickHouse服务器更新时查询日志的结构发生了更改,则会重命名具有旧结构的表,并自动创建新表。 + +**示例** + +``` xml + + system + query_log
    + toMonday(event_date) + 7500 +
    +``` + +## query\_thread\_log {#server_configuration_parameters-query-thread-log} + +设置用于记录接收到的查询的线程 [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) 设置。 + +查询记录在 [系统。query\_thread\_log](../../operations/system_tables.md#system_tables-query-thread-log) 表,而不是在一个单独的文件。 您可以更改表的名称 `table` 参数(见下文)。 + +使用以下参数配置日志记录: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) 对于一个系统表。 +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +如果该表不存在,ClickHouse将创建它。 如果更新ClickHouse服务器时查询线程日志的结构发生了更改,则会重命名具有旧结构的表,并自动创建新表。 + +**示例** + +``` xml + + system + query_thread_log
    + toMonday(event_date) + 7500 +
    +``` + +## trace\_log {#server_configuration_parameters-trace_log} + +设置为 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) 系统表操作。 + +参数: + +- `database` — Database for storing a table. +- `table` — Table name. +- `partition_by` — [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) 对于一个系统表。 +- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. + +默认服务器配置文件 `config.xml` 包含以下设置部分: + +``` xml + + system + trace_log
    + toYYYYMM(event_date) + 7500 +
    +``` + +## query\_masking\_rules {#query-masking-rules} + +基于正则表达式的规则,在将查询以及所有日志消息存储在服务器日志中之前,这些规则将应用于查询以及所有日志消息, +`system.query_log`, `system.text_log`, `system.processes` 表,并在日志中发送给客户端。 这允许防止 +从SQL查询敏感数据泄漏(如姓名,电子邮件,个人 +标识符或信用卡号码)记录。 + +**示例** + +``` xml + + + hide SSN + (^|\D)\d{3}-\d{2}-\d{4}($|\D) + 000-00-0000 + + +``` + +配置字段: +- `name` -规则的名称(可选) +- `regexp` -RE2兼容正则表达式(强制性) +- `replace` -敏感数据的替换字符串(可选,默认情况下-六个星号) + +屏蔽规则应用于整个查询(以防止敏感数据从格式错误/不可解析的查询泄漏)。 + +`system.events` 表有计数器 `QueryMaskingRulesMatch` 其中具有匹配的查询屏蔽规则的总数。 + +对于分布式查询,每个服务器必须单独配置,否则,子查询传递给其他 +节点将被存储而不屏蔽。 + +## remote\_servers {#server-settings-remote-servers} + +所使用的集群的配置 [分布](../../engines/table_engines/special/distributed.md) 表引擎和由 `cluster` 表功能。 + +**示例** + +``` xml + +``` + +对于该值 `incl` 属性,请参阅部分 “[配置文件](../configuration_files.md#configuration_files)”. + +**另请参阅** + +- [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) + +## 时区 {#server_configuration_parameters-timezone} + +服务器的时区。 + +指定为UTC时区或地理位置(例如,非洲/阿比让)的IANA标识符。 + +当DateTime字段输出为文本格式(打印在屏幕上或文件中)时,以及从字符串获取DateTime时,时区对于字符串和DateTime格式之间的转换是必需的。 此外,如果在输入参数中没有收到时区,则时区用于处理时间和日期的函数。 + +**示例** + +``` xml +Europe/Moscow +``` + +## tcp\_port {#server_configuration_parameters-tcp_port} + +通过TCP协议与客户端通信的端口。 + +**示例** + +``` xml +9000 +``` + +## tcp\_port\_secure {#server_configuration_parameters-tcp_port-secure} + +TCP端口,用于与客户端进行安全通信。 使用它与 [OpenSSL](#server_configuration_parameters-openssl) 设置。 + +**可能的值** + +整数。 + +**默认值** + +``` xml +9440 +``` + +## mysql\_port {#server_configuration_parameters-mysql_port} + +通过MySQL协议与客户端通信的端口。 + +**可能的值** + +整数。 + +示例 + +``` xml +9004 +``` + +## tmp\_path {#server-settings-tmp_path} + +用于处理大型查询的临时数据的路径。 + +!!! note "注" + 尾部斜杠是强制性的。 + +**示例** + +``` xml +/var/lib/clickhouse/tmp/ +``` + +## tmp\_policy {#server-settings-tmp-policy} + +从政策 [`storage_configuration`](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) 存储临时文件。 +如果没有设置 [`tmp_path`](#server-settings-tmp_path) 被使用,否则被忽略。 + +!!! note "注" + - `move_factor` 被忽略 +- `keep_free_space_bytes` 被忽略 +- `max_data_part_size_bytes` 被忽略 +-您必须在该政策中只有一个卷 + +## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} + +表引擎使用的未压缩数据的缓存大小(以字节为单位) [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +服务器有一个共享缓存。 内存按需分配。 如果选项使用缓存 [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) 被启用。 + +在个别情况下,未压缩的缓存对于非常短的查询是有利的。 + +**示例** + +``` xml +8589934592 +``` + +## user\_files\_path {#server_configuration_parameters-user_files_path} + +包含用户文件的目录。 在表函数中使用 [文件()](../../sql_reference/table_functions/file.md). + +**示例** + +``` xml +/var/lib/clickhouse/user_files/ +``` + +## users\_config {#users-config} + +包含文件的路径: + +- 用户配置。 +- 访问权限。 +- 设置配置文件。 +- 配额设置。 + +**示例** + +``` xml +users.xml +``` + +## zookeeper {#server-settings_zookeeper} + +包含允许ClickHouse与 [zookpeer](http://zookeeper.apache.org/) 集群。 + +ClickHouse使用ZooKeeper存储复制表副本的元数据。 如果未使用复制的表,则可以省略此部分参数。 + +本节包含以下参数: + +- `node` — ZooKeeper endpoint. You can set multiple endpoints. + + 例如: + + + +``` xml + + example_host + 2181 + +``` + + The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster. + +- `session_timeout` — Maximum timeout for the client session in milliseconds. 
+- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) 隆隆隆隆路虏脢..陇.貌.垄拢卢虏禄.陇.貌路.隆拢脳枚脢虏.麓脢for脱 可选。 +- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. + +**配置示例** + +``` xml + + + example1 + 2181 + + + example2 + 2181 + + 30000 + 10000 + + /path/to/zookeeper/node + + user:password + +``` + +**另请参阅** + +- [复制](../../engines/table_engines/mergetree_family/replication.md) +- [动物园管理员程序员指南](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) + +## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} + +ZooKeeper中数据部分头的存储方法。 + +此设置仅适用于 `MergeTree` 家人 它可以指定: + +- 在全球范围内 [merge\_tree](#server_configuration_parameters-merge_tree) 一节 `config.xml` 文件 + + ClickHouse使用服务器上所有表的设置。 您可以随时更改设置。 当设置更改时,现有表会更改其行为。 + +- 对于每个表。 + + 创建表时,指定相应的 [发动机设置](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). 即使全局设置更改,具有此设置的现有表的行为也不会更改。 + +**可能的值** + +- 0 — Functionality is turned off. +- 1 — Functionality is turned on. + +如果 `use_minimalistic_part_header_in_zookeeper = 1`,然后 [复制](../../engines/table_engines/mergetree_family/replication.md) 表存储的数据部分的头紧凑使用一个单一的 `znode`. 如果表包含许多列,则此存储方法显着减少了Zookeeper中存储的数据量。 + +!!! attention "注意" + 申请后 `use_minimalistic_part_header_in_zookeeper = 1`,您不能将ClickHouse服务器降级到不支持此设置的版本。 在集群中的服务器上升级ClickHouse时要小心。 不要一次升级所有服务器。 在测试环境中或在集群的几台服务器上测试ClickHouse的新版本更安全。 + + Data part headers already stored with this setting can't be restored to their previous (non-compact) representation. + +**默认值:** 0. + +## disable\_internal\_dns\_cache {#server-settings-disable-internal-dns-cache} + +禁用内部DNS缓存。 推荐用于在系统中运行ClickHouse +随着频繁变化的基础设施,如Kubernetes。 + +**默认值:** 0. + +## dns\_cache\_update\_period {#server-settings-dns-cache-update-period} + +更新存储在ClickHouse内部DNS缓存中的IP地址的周期(以秒为单位)。 +更新是在一个单独的系统线程中异步执行的。 + +**默认值**: 15. + +[原始文章](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/) diff --git a/docs/zh/operations/server_settings/index.md b/docs/zh/operations/server_settings/index.md deleted file mode 100644 index 4a1276a2ce1..00000000000 --- a/docs/zh/operations/server_settings/index.md +++ /dev/null @@ -1,11 +0,0 @@ -# Server configuration parameters {#server-settings} - -This section contains descriptions of server settings that cannot be changed at the session or query level. - -These settings are stored in the `config.xml` file on the ClickHouse server. - -Other settings are described in the «[Settings](../settings/index.md#settings)» section. - -Before studying the settings, read the [Configuration files](../configuration_files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes). 
- -[Original article](https://clickhouse.tech/docs/en/operations/server_settings/) diff --git a/docs/zh/operations/server_settings/settings.md b/docs/zh/operations/server_settings/settings.md deleted file mode 120000 index 19cd2e82ce7..00000000000 --- a/docs/zh/operations/server_settings/settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/server_settings/settings.md \ No newline at end of file diff --git a/docs/zh/operations/settings/constraints_on_settings.md b/docs/zh/operations/settings/constraints_on_settings.md deleted file mode 120000 index 4dacf908662..00000000000 --- a/docs/zh/operations/settings/constraints_on_settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/constraints_on_settings.md \ No newline at end of file diff --git a/docs/zh/operations/settings/constraints_on_settings.md b/docs/zh/operations/settings/constraints_on_settings.md new file mode 100644 index 00000000000..c9e572dd907 --- /dev/null +++ b/docs/zh/operations/settings/constraints_on_settings.md @@ -0,0 +1,75 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 62 +toc_title: "\u5BF9\u8BBE\u7F6E\u7684\u9650\u5236" +--- + +# 对设置的限制 {#constraints-on-settings} + +在设置的约束可以在定义 `profiles` 一节 `user.xml` 配置文件,并禁止用户更改一些设置与 `SET` 查询。 +约束定义如下: + +``` xml + + + + + lower_boundary + + + upper_boundary + + + lower_boundary + upper_boundary + + + + + + + +``` + +如果用户试图违反约束,将引发异常,并且设置不会更改。 +支持三种类型的约束: `min`, `max`, `readonly`. 该 `min` 和 `max` 约束指定数值设置的上边界和下边界,并且可以组合使用。 该 `readonly` constraint指定用户根本无法更改相应的设置。 + +**示例:** 让 `users.xml` 包括行: + +``` xml + + + 10000000000 + 0 + ... + + + 5000000000 + 20000000000 + + + + + + + +``` + +以下查询都会引发异常: + +``` sql +SET max_memory_usage=20000000001; +SET max_memory_usage=4999999999; +SET force_index_by_date=1; +``` + +``` text +Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be greater than 20000000000. +Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be less than 5000000000. +Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed. +``` + +**注:** 该 `default` 配置文件具有特殊的处理:所有定义的约束 `default` 配置文件成为默认约束,因此它们限制所有用户,直到为这些用户显式复盖它们。 + +[原始文章](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/) diff --git a/docs/zh/operations/settings/index.md b/docs/zh/operations/settings/index.md index 6d3d96dfbf3..9c598d241d1 100644 --- a/docs/zh/operations/settings/index.md +++ b/docs/zh/operations/settings/index.md @@ -1,25 +1,28 @@ -# Settings {#settings} +--- +toc_folder_title: "\u8bbe\u7f6e" +--- +# 设置 {#settings} -There are multiple ways to make all the settings described below. -Settings are configured in layers, so each subsequent layer redefines the previous settings. +有多种方法可以进行以下所述的所有设置。 +设置是在图层中配置的,因此每个后续图层都会重新定义以前的设置。 -Ways to configure settings, in order of priority: +按优先级顺序配置设置的方法: -- Settings in the `users.xml` server configuration file. +- 在设置 `users.xml` 服务器配置文件。 Set in the element ``. -- Session settings. +- 会话设置。 Send ` SET setting=value` from the ClickHouse console client in interactive mode. - Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to specify the `session_id` HTTP parameter. + 同样,您可以在HTTP协议中使用ClickHouse会话。 要做到这一点,你需要指定 `session_id` HTTP参数。 -- Query settings. +- 查询设置。 - - When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`. 
- - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`). + - 在非交互模式下启动ClickHouse控制台客户端时,设置startup参数 `--setting=value`. + - 使用HTTP API时,请传递CGI参数 (`URL?setting_1=value&setting_2=value...`). -Settings that can only be made in the server config file are not covered in this section. +本节不介绍只能在服务器配置文件中进行的设置。 -[Original article](https://clickhouse.tech/docs/en/operations/settings/) +[原始文章](https://clickhouse.tech/docs/en/operations/settings/) diff --git a/docs/zh/operations/settings/permissions_for_queries.md b/docs/zh/operations/settings/permissions_for_queries.md deleted file mode 120000 index ce8473bf01c..00000000000 --- a/docs/zh/operations/settings/permissions_for_queries.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/permissions_for_queries.md \ No newline at end of file diff --git a/docs/zh/operations/settings/permissions_for_queries.md b/docs/zh/operations/settings/permissions_for_queries.md new file mode 100644 index 00000000000..a7d2e843b66 --- /dev/null +++ b/docs/zh/operations/settings/permissions_for_queries.md @@ -0,0 +1,61 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 58 +toc_title: "\u67E5\u8BE2\u6743\u9650" +--- + +# 查询权限 {#permissions_for_queries} + +ClickHouse中的查询可以分为几种类型: + +1. 读取数据查询: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`. +2. 写入数据查询: `INSERT`, `OPTIMIZE`. +3. 更改设置查询: `SET`, `USE`. +4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) 查询: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. +5. `KILL QUERY`. + +以下设置按查询类型规范用户权限: + +- [只读](#settings_readonly) — Restricts permissions for all types of queries except DDL queries. +- [allow\_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries. + +`KILL QUERY` 可以与任何设置进行。 + +## 只读 {#settings_readonly} + +限制读取数据、写入数据和更改设置查询的权限。 + +查看查询如何划分为多种类型 [以上](#permissions_for_queries). + +可能的值: + +- 0 — All queries are allowed. +- 1 — Only read data queries are allowed. +- 2 — Read data and change settings queries are allowed. + +设置后 `readonly = 1`,用户无法更改 `readonly` 和 `allow_ddl` 当前会话中的设置。 + +使用时 `GET` 方法中的 [HTTP接口](../../interfaces/http.md), `readonly = 1` 自动设置。 要修改数据,请使用 `POST` 方法。 + +设置 `readonly = 1` 禁止用户更改所有设置。 有一种方法可以禁止用户 +从只更改特定设置,有关详细信息,请参阅 [对设置的限制](constraints_on_settings.md). + +默认值:0 + +## allow\_ddl {#settings_allow_ddl} + +允许或拒绝 [DDL](https://en.wikipedia.org/wiki/Data_definition_language) 查询。 + +查看查询如何划分为多种类型 [以上](#permissions_for_queries). + +可能的值: + +- 0 — DDL queries are not allowed. +- 1 — DDL queries are allowed. + +你不能执行 `SET allow_ddl = 1` 如果 `allow_ddl = 0` 对于当前会话。 + +默认值:1 + +[原始文章](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/) diff --git a/docs/zh/operations/settings/query_complexity.md b/docs/zh/operations/settings/query_complexity.md index a06c65ec072..b17f5b7aa70 100644 --- a/docs/zh/operations/settings/query_complexity.md +++ b/docs/zh/operations/settings/query_complexity.md @@ -1,193 +1,194 @@ -# Restrictions on query complexity {#restrictions-on-query-complexity} -Restrictions on query complexity are part of the settings. -They are used in order to provide safer execution from the user interface. -Almost all the restrictions only apply to SELECTs.For distributed query processing, restrictions are applied on each server separately. 
+
+# 查询复杂性的限制 {#restrictions-on-query-complexity}
+
+对查询复杂性的限制是设置的一部分。
+它们用于为来自用户界面的执行提供更高的安全性。
+几乎所有的限制只适用于SELECT。对于分布式查询处理,每个服务器上分别应用限制。
 
 Restrictions on the «maximum amount of something» can take the value 0, which means «unrestricted».
 
-Most restrictions also have an ‘overflow\_mode’ setting, meaning what to do when the limit is exceeded.
-It can take one of two values: `throw` or `break`. Restrictions on aggregation (group\_by\_overflow\_mode) also have the value `any`.
+大多数限制还有一个 ‘overflow\_mode’ 设置,表示超过限制时该怎么做。
+它可以取以下两个值之一: `throw` 或 `break`. 对聚合的限制(group\_by\_overflow\_mode)还可以取值 `any`.
 
 `throw` – Throw an exception (default).
 
 `break` – Stop executing the query and return the partial result, as if the source data ran out.
 
-`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don’t add new keys to the set.
+`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don't add new keys to the set.
 
-## readonly {#query-complexity-readonly}
+## 只读 {#query-complexity-readonly}
 
-With a value of 0, you can execute any queries.
-With a value of 1, you can only execute read requests (such as SELECT and SHOW). Requests for writing and changing settings (INSERT, SET) are prohibited.
-With a value of 2, you can process read queries (SELECT, SHOW) and change settings (SET).
+值为0时,可以执行任何查询。
+值为1时,只能执行读取请求(如SELECT和SHOW)。 禁止写入和更改设置(INSERT、SET)的请求。
+值为2时,可以处理读取查询(SELECT、SHOW)和更改设置(SET)。
 
-After enabling readonly mode, you can’t disable it in the current session.
+启用只读模式后,您无法在当前会话中禁用它。
 
-When using the GET method in the HTTP interface, ‘readonly = 1’ is set automatically. In other words, for queries that modify data, you can only use the POST method. You can send the query itself either in the POST body, or in the URL parameter.
+在HTTP接口中使用GET方法时, ‘readonly = 1’ 会被自动设置。 换句话说,对于修改数据的查询,您只能使用POST方法。 您可以在POST正文或URL参数中发送查询本身。
 
 ## max\_memory\_usage {#settings_max_memory_usage}
 
-The maximum amount of RAM to use for running a query on a single server.
+用于在单个服务器上运行查询的最大RAM量。
 
-In the default configuration file, the maximum is 10 GB.
+在默认配置文件中,最大值为10GB。
 
-The setting doesn’t consider the volume of available memory or the total volume of memory on the machine.
-The restriction applies to a single query within a single server.
-You can use `SHOW PROCESSLIST` to see the current memory consumption for each query.
-In addition, the peak memory consumption is tracked for each query and written to the log.
+该设置不考虑计算机上的可用内存量或内存总量。
+该限制适用于单个服务器中的单个查询。
+您可以使用 `SHOW PROCESSLIST` 查看每个查询的当前内存消耗。
+此外,还会跟踪每个查询的内存消耗峰值并将其写入日志。
 
-Memory usage is not monitored for the states of certain aggregate functions.
+某些聚合函数的状态的内存使用情况不受监视。
 
-Memory usage is not fully tracked for states of the aggregate functions `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` from `String` and `Array` arguments.
+对于带 `String` 和 `Array` 参数的聚合函数 `min`, `max`, `any`, `anyLast`, `argMin`, `argMax`,其状态的内存使用情况未被完全跟踪。
 
-Memory consumption is also restricted by the parameters `max_memory_usage_for_user` and `max_memory_usage_for_all_queries`.
+内存消耗还受参数 `max_memory_usage_for_user` 和 `max_memory_usage_for_all_queries` 的限制。
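+
+一个简单的示意(数值与查询仅为演示假设):
+
+``` sql
+-- 将当前会话的单查询内存上限设为约 10 GB
+SET max_memory_usage = 10000000000;
+
+-- 需要过多内存的查询会抛出 Memory limit exceeded 异常
+SELECT count(DISTINCT number) FROM system.numbers;
+```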
 
 ## max\_memory\_usage\_for\_user {#max-memory-usage-for-user}
 
-The maximum amount of RAM to use for running a user’s queries on a single server.
+用于在单个服务器上运行某个用户的查询的最大RAM量。
 
-Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Interpreters/Settings.h#L244). By default, the amount is not restricted (`max_memory_usage_for_user = 0`).
+默认值定义在 [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L244)。 默认情况下,该数量不受限制 (`max_memory_usage_for_user = 0`).
 
-See also the description of [max\_memory\_usage](#settings_max_memory_usage).
+另请参阅 [max\_memory\_usage](#settings_max_memory_usage) 的说明。
 
 ## max\_memory\_usage\_for\_all\_queries {#max-memory-usage-for-all-queries}
 
-The maximum amount of RAM to use for running all queries on a single server.
+用于在单个服务器上运行所有查询的最大RAM量。
 
-Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Interpreters/Settings.h#L245). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`).
+默认值定义在 [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L245)。 默认情况下,该数量不受限制 (`max_memory_usage_for_all_queries = 0`).
 
-See also the description of [max\_memory\_usage](#settings_max_memory_usage).
+另请参阅 [max\_memory\_usage](#settings_max_memory_usage) 的说明。
 
 ## max\_rows\_to\_read {#max-rows-to-read}
 
-The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little.
-When running a query in multiple threads, the following restrictions apply to each thread separately.
+以下限制可以在每个块(而不是每行)上检查。 也就是说,限制可能会被略微突破。
+在多个线程中运行查询时,以下限制分别应用于每个线程。
 
-Maximum number of rows that can be read from a table when running a query.
+运行查询时可从表中读取的最大行数。
 
 ## max\_bytes\_to\_read {#max-bytes-to-read}
 
-Maximum number of bytes (uncompressed data) that can be read from a table when running a query.
+运行查询时可以从表中读取的最大字节数(未压缩数据)。
 
 ## read\_overflow\_mode {#read-overflow-mode}
 
-What to do when the volume of data read exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.
+读取的数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下为 throw。
 
 ## max\_rows\_to\_group\_by {#max-rows-to-group-by}
 
-Maximum number of unique keys received from aggregation. This setting lets you limit memory consumption when aggregating.
+聚合产生的唯一键的最大数量。 此设置允许您在聚合时限制内存消耗。
 
 ## group\_by\_overflow\_mode {#group-by-overflow-mode}
 
-What to do when the number of unique keys for aggregation exceeds the limit: ‘throw’, ‘break’, or ‘any’. By default, throw.
-Using the ‘any’ value lets you run an approximation of GROUP BY. The quality of this approximation depends on the statistical nature of the data.
+当聚合的唯一键数超过限制时该怎么办: ‘throw’, ‘break’,或 ‘any’. 默认情况下为 throw。
+使用 ‘any’ 值可以运行GROUP BY的近似计算。 这种近似的质量取决于数据的统计性质。
 
 ## max\_rows\_to\_sort {#max-rows-to-sort}
 
-Maximum number of rows before sorting. This allows you to limit memory consumption when sorting.
+排序前的最大行数。 这允许您在排序时限制内存消耗。
 
 ## max\_bytes\_to\_sort {#max-bytes-to-sort}
 
-Maximum number of bytes before sorting.
+排序前的最大字节数。
 
 ## sort\_overflow\_mode {#sort-overflow-mode}
 
-What to do if the number of rows received before sorting exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.
+如果排序前收到的行数超过其中一个限制,该怎么办: ‘throw’ 或 ‘break’. 默认情况下为 throw。
 
 ## max\_result\_rows {#max-result-rows}
 
-Limit on the number of rows in the result. Also checked for subqueries, and on remote servers when running parts of a distributed query.
+限制结果中的行数。 子查询也会被检查;在运行分布式查询的各个部分时,远程服务器上同样会进行检查。
 
 ## max\_result\_bytes {#max-result-bytes}
 
-Limit on the number of bytes in the result. The same as the previous setting.
+限制结果中的字节数。 与之前的设置类似。
 
 ## result\_overflow\_mode {#result-overflow-mode}
 
-What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.
-Using ‘break’ is similar to using LIMIT.
+如果结果的体积超过其中一个限制,该怎么办: ‘throw’ 或 ‘break’. 默认情况下为 throw。
+使用 ‘break’ 与使用 LIMIT 类似。
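+
+下面的示意展示了 ‘break’ 模式下的部分结果行为(数值仅为演示假设):
+
+``` sql
+SET max_result_rows = 100;
+SET result_overflow_mode = 'break';
+
+-- 结果达到限制后停止执行并返回部分结果,效果与 LIMIT 类似
+SELECT number FROM system.numbers;
+```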
 
 ## max\_execution\_time {#max-execution-time}
 
-Maximum query execution time in seconds.
-At this time, it is not checked for one of the sorting stages, or when merging and finalizing aggregate functions.
+最大查询执行时间(以秒为单位)。
+目前,它不会在某个排序阶段,或在合并和最终确定聚合函数时进行检查。
 
 ## timeout\_overflow\_mode {#timeout-overflow-mode}
 
-What to do if the query is run longer than ‘max\_execution\_time’: ‘throw’ or ‘break’. By default, throw.
+如果查询运行时间超过 ‘max\_execution\_time’,该怎么办: ‘throw’ 或 ‘break’. 默认情况下为 throw。
 
 ## min\_execution\_speed {#min-execution-speed}
 
-Minimal execution speed in rows per second. Checked on every data block when ‘timeout\_before\_checking\_execution\_speed’ expires. If the execution speed is lower, an exception is thrown.
+最小执行速度,以每秒行数为单位。 在 ‘timeout\_before\_checking\_execution\_speed’ 到期后,对每个数据块进行检查。 如果执行速度低于该值,则会引发异常。
 
 ## timeout\_before\_checking\_execution\_speed {#timeout-before-checking-execution-speed}
 
-Checks that execution speed is not too slow (no less than ‘min\_execution\_speed’), after the specified time in seconds has expired.
+在指定的秒数过后,检查执行速度是否过慢(不低于 ‘min\_execution\_speed’)。
 
 ## max\_columns\_to\_read {#max-columns-to-read}
 
-Maximum number of columns that can be read from a table in a single query. If a query requires reading a greater number of columns, it throws an exception.
+单个查询中可从表中读取的最大列数。 如果查询需要读取更多列,则会引发异常。
 
 ## max\_temporary\_columns {#max-temporary-columns}
 
-Maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns. If there are more temporary columns than this, it throws an exception.
+运行查询时必须同时保留在RAM中的最大临时列数,包括常量列。 如果临时列数超过该值,则会引发异常。
 
 ## max\_temporary\_non\_const\_columns {#max-temporary-non-const-columns}
 
-The same thing as ‘max\_temporary\_columns’, but without counting constant columns.
-Note that constant columns are formed fairly often when running a query, but they require approximately zero computing resources.
+与 ‘max\_temporary\_columns’ 相同,但不计算常量列。
+请注意,运行查询时经常会形成常量列,但它们几乎不需要计算资源。
 
 ## max\_subquery\_depth {#max-subquery-depth}
 
-Maximum nesting depth of subqueries. If subqueries are deeper, an exception is thrown. By default, 100.
+子查询的最大嵌套深度。 如果子查询更深,则会引发异常。 默认情况下,100。
 
 ## max\_pipeline\_depth {#max-pipeline-depth}
 
-Maximum pipeline depth. Corresponds to the number of transformations that each data block goes through during query processing. Counted within the limits of a single server. If the pipeline depth is greater, an exception is thrown. By default, 1000.
+最大管道深度。 对应于查询处理期间每个数据块所经历的转换数。 在单个服务器的范围内计算。 如果管道深度超过该值,则会引发异常。 默认情况下,1000。
 
 ## max\_ast\_depth {#max-ast-depth}
 
-Maximum nesting depth of a query syntactic tree. If exceeded, an exception is thrown.
-At this time, it isn’t checked during parsing, but only after parsing the query. That is, a syntactic tree that is too deep can be created during parsing, but the query will fail. By default, 1000.
+查询语法树的最大嵌套深度。 如果超出,将引发异常。
+目前,它不是在解析期间检查,而是在解析查询之后才检查。 也就是说,解析过程中可能会创建过深的语法树,但查询会失败。 默认情况下,1000。
 
 ## max\_ast\_elements {#max-ast-elements}
 
-Maximum number of elements in a query syntactic tree. If exceeded, an exception is thrown.
-In the same way as the previous setting, it is checked only after parsing the query. By default, 50,000.
+查询语法树中的最大元素数。 如果超出,将引发异常。
+与前面的设置相同,只有在解析查询后才会检查它。 默认情况下,50,000。
 
 ## max\_rows\_in\_set {#max-rows-in-set}
 
-Maximum number of rows for a data set in the IN clause created from a subquery.
+从子查询创建的IN子句中数据集的最大行数。 ## max\_bytes\_in\_set {#max-bytes-in-set} -Maximum number of bytes (uncompressed data) used by a set in the IN clause created from a subquery. +从子查询创建的IN子句中的集合使用的最大字节数(未压缩数据)。 ## set\_overflow\_mode {#set-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +当数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 ## max\_rows\_in\_distinct {#max-rows-in-distinct} -Maximum number of different rows when using DISTINCT. +使用DISTINCT时的最大不同行数。 ## max\_bytes\_in\_distinct {#max-bytes-in-distinct} -Maximum number of bytes used by a hash table when using DISTINCT. +使用DISTINCT时哈希表使用的最大字节数。 ## distinct\_overflow\_mode {#distinct-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +当数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 ## max\_rows\_to\_transfer {#max-rows-to-transfer} -Maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. +使用GLOBAL IN时,可以传递到远程服务器或保存在临时表中的最大行数。 ## max\_bytes\_to\_transfer {#max-bytes-to-transfer} -Maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. +使用GLOBAL IN时,可以传递到远程服务器或保存在临时表中的最大字节数(未压缩数据)。 ## transfer\_overflow\_mode {#transfer-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +当数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 -[Original article](https://clickhouse.tech/docs/en/operations/settings/query_complexity/) +[原始文章](https://clickhouse.tech/docs/en/operations/settings/query_complexity/) diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md deleted file mode 120000 index 0c8df3cfc90..00000000000 --- a/docs/zh/operations/settings/settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/settings.md \ No newline at end of file diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md new file mode 100644 index 00000000000..8e0a60d4f03 --- /dev/null +++ b/docs/zh/operations/settings/settings.md @@ -0,0 +1,1202 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 60 +toc_title: "\u8BBE\u7F6E" +--- + +# 设置 {#settings} + +## 分布\_产品\_模式 {#distributed-product-mode} + +改变的行为 [分布式子查询](../../sql_reference/statements/select.md). + +ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table. + +限制: + +- 仅适用于IN和JOIN子查询。 +- 仅当FROM部分使用包含多个分片的分布式表时。 +- 如果子查询涉及包含多个分片的分布式表。 +- 不用于表值 [远程](../../sql_reference/table_functions/remote.md) 功能。 + +可能的值: + +- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” 例外)。 +- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.` +- `global` — Replaces the `IN`/`JOIN` 查询与 `GLOBAL IN`/`GLOBAL JOIN.` +- `allow` — Allows the use of these types of subqueries. + +## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression} + +打开谓词下推 `SELECT` 查询。 + +谓词下推可以显着减少分布式查询的网络流量。 + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:1。 + +用途 + +请考虑以下查询: + +1. `SELECT count() FROM test_table WHERE date = '2018-10-10'` +2. 
`SELECT count() FROM (SELECT * FROM test_table) WHERE date = '2018-10-10'` + +如果 `enable_optimize_predicate_expression = 1`,则这些查询的执行时间相等,因为ClickHouse应用 `WHERE` 对子查询进行处理。 + +如果 `enable_optimize_predicate_expression = 0`,那么第二个查询的执行时间要长得多,因为 `WHERE` 子句适用于子查询完成后的所有数据。 + +## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} + +如果更新的数据不可用,则强制对过期副本进行查询。 看 [复制](../../engines/table_engines/mergetree_family/replication.md). + +ClickHouse从表的过时副本中选择最相关的副本。 + +执行时使用 `SELECT` 从指向复制表的分布式表。 + +默认情况下,1(已启用)。 + +## force\_index\_by\_date {#settings-force_index_by_date} + +如果索引不能按日期使用,则禁用查询执行。 + +适用于MergeTree系列中的表。 + +如果 `force_index_by_date=1`,ClickHouse检查查询是否具有可用于限制数据范围的date键条件。 如果没有合适的条件,则会引发异常。 但是,它不检查条件是否减少了要读取的数据量。 例如,条件 `Date != ' 2000-01-01 '` 即使它与表中的所有数据匹配(即运行查询需要完全扫描),也是可以接受的。 有关MergeTree表中数据范围的详细信息,请参阅 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +## force\_primary\_key {#force-primary-key} + +如果无法按主键编制索引,则禁用查询执行。 + +适用于MergeTree系列中的表。 + +如果 `force_primary_key=1`,ClickHouse检查查询是否具有可用于限制数据范围的主键条件。 如果没有合适的条件,则会引发异常。 但是,它不检查条件是否减少了要读取的数据量。 有关MergeTree表中数据范围的详细信息,请参阅 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +## format\_schema {#format-schema} + +当您使用需要架构定义的格式时,此参数非常有用,例如 [普罗托船长](https://capnproto.org/) 或 [Protobuf](https://developers.google.com/protocol-buffers/). 该值取决于格式。 + +## fsync\_metadata {#fsync-metadata} + +启用或禁用 [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) 写作时 `.sql` 文件 默认情况下启用。 + +如果服务器有数百万个不断创建和销毁的小表,那么禁用它是有意义的。 + +## enable\_http\_compression {#settings-enable_http_compression} + +在对HTTP请求的响应中启用或禁用数据压缩。 + +欲了解更多信息,请阅读 [HTTP接口描述](../../interfaces/http.md). + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:0。 + +## http\_zlib\_compression\_level {#settings-http_zlib_compression_level} + +在以下情况下,设置对HTTP请求的响应中的数据压缩级别 [enable\_http\_compression=1](#settings-enable_http_compression). + +可能的值:数字从1到9。 + +默认值:3。 + +## http\_native\_compression\_disable\_checksumming\_on\_decompress {#settings-http_native_compression_disable_checksumming_on_decompress} + +在从客户端解压缩HTTP POST数据时启用或禁用校验和验证。 仅用于ClickHouse原生压缩格式(不用于 `gzip` 或 `deflate`). + +欲了解更多信息,请阅读 [HTTP接口描述](../../interfaces/http.md). + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:0。 + +## send\_progress\_in\_http\_headers {#settings-send_progress_in_http_headers} + +启用或禁用 `X-ClickHouse-Progress` Http响应头 `clickhouse-server` 答复。 + +欲了解更多信息,请阅读 [HTTP接口描述](../../interfaces/http.md). + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:0。 + +## max\_http\_get\_redirects {#setting-max_http_get_redirects} + +限制HTTP GET重定向跳数的最大数量 [URL](../../engines/table_engines/special/url.md)-发动机表。 该设置适用于两种类型的表:由 [CREATE TABLE](../../sql_reference/statements/create.md#create-table-query) 查询和由 [url](../../sql_reference/table_functions/url.md) 表功能。 + +可能的值: + +- 跳数的任何正整数。 +- 0 — No hops allowed. + +默认值:0。 + +## input\_format\_allow\_errors\_num {#settings-input_format_allow_errors_num} + +设置从文本格式(CSV,TSV等)读取时可接受的错误的最大数量。). + +默认值为0。 + +总是与它配对 `input_format_allow_errors_ratio`. + +如果在读取行时发生错误,但错误计数器仍小于 `input_format_allow_errors_num`,ClickHouse忽略该行并移动到下一个。 + +如果两者 `input_format_allow_errors_num` 和 `input_format_allow_errors_ratio` 超出时,ClickHouse引发异常。 + +## input\_format\_allow\_errors\_ratio {#settings-input_format_allow_errors_ratio} + +设置从文本格式(CSV,TSV等)读取时允许的最大错误百分比。). +错误百分比设置为介于0和1之间的浮点数。 + +默认值为0。 + +总是与它配对 `input_format_allow_errors_num`. 
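+
+下面是将两者结合使用的一个简单示意(数值仅为演示假设):
+
+``` sql
+-- 最多容忍 10 行解析错误,且错误比例不超过 1%
+SET input_format_allow_errors_num = 10;
+SET input_format_allow_errors_ratio = 0.01;
+```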
+ +如果在读取行时发生错误,但错误计数器仍小于 `input_format_allow_errors_ratio`,ClickHouse忽略该行并移动到下一个。 + +如果两者 `input_format_allow_errors_num` 和 `input_format_allow_errors_ratio` 超出时,ClickHouse引发异常。 + +## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions} + +如果快速流解析器无法解析数据,则启用或禁用完整SQL解析器。 此设置仅用于 [值](../../interfaces/formats.md#data-format-values) 格式在数据插入。 有关语法分析的详细信息,请参阅 [语法](../../sql_reference/syntax.md) 科。 + +可能的值: + +- 0 — Disabled. + + 在这种情况下,您必须提供格式化的数据。 见 [格式](../../interfaces/formats.md) 科。 + +- 1 — Enabled. + + 在这种情况下,您可以使用SQL表达式作为值,但数据插入速度要慢得多。 如果仅插入格式化的数据,则ClickHouse的行为就好像设置值为0。 + +默认值:1。 + +使用示例 + +插入 [日期时间](../../sql_reference/data_types/datetime.md) 使用不同的设置键入值。 + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Exception on client: +Code: 27. DB::Exception: Cannot parse input: expected ) before: now()): (at row 1) +``` + +``` sql +SET input_format_values_interpret_expressions = 1; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Ok. +``` + +最后一个查询等效于以下内容: + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t SELECT now() +``` + +``` text +Ok. +``` + +## input\_format\_values\_deduce\_templates\_of\_expressions {#settings-input_format_values_deduce_templates_of_expressions} + +启用或禁用以下内容中的SQL表达式的模板扣除 [值](../../interfaces/formats.md#data-format-values) 格式。 它允许解析和解释表达式 `Values` 如果连续行中的表达式具有相同的结构,速度要快得多。 ClickHouse将尝试推导表达式的模板,使用此模板解析以下行,并在一批成功解析的行上评估表达式。 对于以下查询: + +``` sql +INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (upper('Values')), ... +``` + +- 如果 `input_format_values_interpret_expressions=1` 和 `format_values_deduce_templates_of_expressions=0` 表达式将单独解释每行(对于大量行来说,这非常慢) +- 如果 `input_format_values_interpret_expressions=0` 和 `format_values_deduce_templates_of_expressions=1` 第一行,第二行和第三行中的表达式将使用template进行分析 `lower(String)` 并一起解释,expression是第四行将与另一个模板进行解析 (`upper(String)`) +- 如果 `input_format_values_interpret_expressions=1` 和 `format_values_deduce_templates_of_expressions=1` -与前面的情况相同,但如果无法推断模板,也可以回退到单独解释表达式。 + +默认情况下启用。 + +## input\_format\_values\_accurate\_types\_of\_literals {#settings-input-format-values-accurate-types-of-literals} + +此设置仅在以下情况下使用 `input_format_values_deduce_templates_of_expressions = 1`. 它可能发生,某些列的表达式具有相同的结构,但包含不同类型的数字文字,例如 + +``` sql +(..., abs(0), ...), -- UInt64 literal +(..., abs(3.141592654), ...), -- Float64 literal +(..., abs(-1), ...), -- Int64 literal +``` + +启用此设置后,ClickHouse将检查文本的实际类型,并使用相应类型的表达式模板。 在某些情况下,可能会显着减慢表达式评估 `Values`. +When disabled, ClickHouse may use more general type for some literals (e.g. `Float64` 或 `Int64` 而不是 `UInt64` 为 `42`),但它可能会导致溢出和精度问题。 +默认情况下启用。 + +## input\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields} + +执行时 `INSERT` 查询时,将省略的输入列值替换为相应列的默认值。 此选项仅适用于 [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) 和 [TabSeparated](../../interfaces/formats.md#tabseparated) 格式。 + +!!! note "注" + 启用此选项后,扩展表元数据将从服务器发送到客户端。 它会消耗服务器上的额外计算资源,并可能降低性能。 + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. 
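+
+一个简单的示意(表名与数据仅为演示假设):
+
+``` sql
+CREATE TABLE demo_defaults (x UInt32, s String DEFAULT 'unknown') ENGINE = Memory;
+
+SET input_format_defaults_for_omitted_fields = 1;
+
+-- 输入中省略的字段 s 将被填充为默认值 'unknown'
+INSERT INTO demo_defaults FORMAT JSONEachRow {"x": 1}
+```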
+ +默认值:1。 + +## input\_format\_tsv\_empty\_as\_default {#settings-input-format-tsv-empty-as-default} + +启用后,将TSV中的空输入字段替换为默认值。 对于复杂的默认表达式 `input_format_defaults_for_omitted_fields` 必须启用了。 + +默认情况下禁用。 + +## input\_format\_null\_as\_default {#settings-input-format-null-as-default} + +如果输入数据包含 `NULL`,但相应列的数据类型不 `Nullable(T)` (对于文本输入格式)。 + +## input\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields} + +启用或禁用跳过额外数据的插入。 + +写入数据时,如果输入数据包含目标表中不存在的列,ClickHouse将引发异常。 如果启用了跳过,ClickHouse不会插入额外的数据,也不会引发异常。 + +支持的格式: + +- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) +- [CSVWithNames](../../interfaces/formats.md#csvwithnames) +- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) +- [TSKV](../../interfaces/formats.md#tskv) + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:0。 + +## input\_format\_import\_nested\_json {#settings-input_format_import_nested_json} + +启用或禁用具有嵌套对象的JSON数据的插入。 + +支持的格式: + +- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:0。 + +另请参阅: + +- [嵌套结构的使用](../../interfaces/formats.md#jsoneachrow-nested) 与 `JSONEachRow` 格式。 + +## input\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header} + +启用或禁用插入数据时检查列顺序。 + +为了提高插入性能,如果您确定输入数据的列顺序与目标表中的列顺序相同,建议禁用此检查。 + +支持的格式: + +- [CSVWithNames](../../interfaces/formats.md#csvwithnames) +- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:1。 + +## date\_time\_input\_format {#settings-date_time_input_format} + +允许选择日期和时间的文本表示的解析器。 + +该设置不适用于 [日期和时间功能](../../sql_reference/functions/date_time_functions.md). + +可能的值: + +- `'best_effort'` — Enables extended parsing. + + ClickHouse可以解析基本 `YYYY-MM-DD HH:MM:SS` 格式和所有 [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) 日期和时间格式。 例如, `'2018-06-08T01:02:03.000Z'`. + +- `'basic'` — Use basic parser. + + ClickHouse只能解析基本的 `YYYY-MM-DD HH:MM:SS` 格式。 例如, `'2019-08-20 10:18:56'`. + +默认值: `'basic'`. + +另请参阅: + +- [日期时间数据类型。](../../sql_reference/data_types/datetime.md) +- [用于处理日期和时间的函数。](../../sql_reference/functions/date_time_functions.md) + +## join\_default\_strictness {#settings-join_default_strictness} + +设置默认严格性 [加入子句](../../sql_reference/statements/select.md#select-join). + +可能的值: + +- `ALL` — If the right table has several matching rows, ClickHouse creates a [笛卡尔积](https://en.wikipedia.org/wiki/Cartesian_product) 从匹配的行。 这是正常的 `JOIN` 来自标准SQL的行为。 +- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` 和 `ALL` 都是一样的 +- `ASOF` — For joining sequences with an uncertain match. +- `Empty string` — If `ALL` 或 `ANY` 如果未在查询中指定,ClickHouse将引发异常。 + +默认值: `ALL`. + +## join\_any\_take\_last\_row {#settings-join_any_take_last_row} + +更改联接操作的行为 `ANY` 严格。 + +!!! warning "注意" + 此设置仅适用于 `JOIN` 操作与 [加入我们](../../engines/table_engines/special/join.md) 发动机表. + +可能的值: + +- 0 — If the right table has more than one matching row, only the first one found is joined. +- 1 — If the right table has more than one matching row, only the last one found is joined. 
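+
+下面的示意展示了该设置对 Join 引擎表的影响(表结构与数据仅为演示假设):
+
+``` sql
+SET join_any_take_last_row = 1;
+
+CREATE TABLE j (k UInt32, v String) ENGINE = Join(ANY, LEFT, k);
+INSERT INTO j VALUES (1, 'first'), (1, 'last');
+
+-- 设为 1 时,对重复键保留最后插入的行('last');设为 0 时保留最先找到的行('first')
+SELECT joinGet('j', 'v', toUInt32(1));
+```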
+ +默认值:0。 + +另请参阅: + +- [JOIN子句](../../sql_reference/statements/select.md#select-join) +- [联接表引擎](../../engines/table_engines/special/join.md) +- [join\_default\_strictness](#settings-join_default_strictness) + +## join\_use\_nulls {#join_use_nulls} + +设置类型 [JOIN](../../sql_reference/statements/select.md) 行为 合并表时,可能会出现空单元格。 ClickHouse根据此设置以不同的方式填充它们。 + +可能的值: + +- 0 — The empty cells are filled with the default value of the corresponding field type. +- 1 — `JOIN` 其行为方式与标准SQL中的行为方式相同。 相应字段的类型将转换为 [可为空](../../sql_reference/data_types/nullable.md#data_type-nullable),和空单元格填充 [NULL](../../sql_reference/syntax.md). + +默认值:0。 + +## max\_block\_size {#setting-max_block_size} + +在ClickHouse中,数据由块(列部分集)处理。 单个块的内部处理周期足够高效,但每个块都有明显的支出。 该 `max_block_size` 设置是建议从表中加载块的大小(行数)。 块大小不应该太小,以便每个块上的支出仍然明显,但不能太大,以便在第一个块处理完成后快速完成限制查询。 目标是避免在多个线程中提取大量列时占用太多内存,并且至少保留一些缓存局部性。 + +默认值:65,536。 + +块的大小 `max_block_size` 并不总是从表中加载。 如果显然需要检索的数据较少,则处理较小的块。 + +## preferred\_block\_size\_bytes {#preferred-block-size-bytes} + +用于相同的目的 `max_block_size`,但它通过使其适应块中的行数来设置推荐的块大小(以字节为单位)。 +但是,块大小不能超过 `max_block_size` 行。 +默认情况下:1,000,000。 它只有在从MergeTree引擎读取时才有效。 + +## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read} + +如果从a的文件中读取的行数 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 表超过 `merge_tree_min_rows_for_concurrent_read` 然后ClickHouse尝试在多个线程上从该文件执行并发读取。 + +可能的值: + +- 任何正整数。 + +默认值:163840. + +## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read} + +如果从一个文件中读取的字节数 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-发动机表超过 `merge_tree_min_bytes_for_concurrent_read`,然后ClickHouse尝试在多个线程中并发读取此文件。 + +可能的值: + +- 任何正整数。 + +默认值:251658240. + +## merge\_tree\_min\_rows\_for\_seek {#setting-merge-tree-min-rows-for-seek} + +如果要在一个文件中读取的两个数据块之间的距离小于 `merge_tree_min_rows_for_seek` 行,然后ClickHouse不查找文件,而是按顺序读取数据。 + +可能的值: + +- 任何正整数。 + +默认值:0。 + +## merge\_tree\_min\_bytes\_for\_seek {#setting-merge-tree-min-bytes-for-seek} + +如果要在一个文件中读取的两个数据块之间的距离小于 `merge_tree_min_bytes_for_seek` 字节数,然后ClickHouse依次读取包含两个块的文件范围,从而避免了额外的寻道。 + +可能的值: + +- 任何正整数。 + +默认值:0。 + +## merge\_tree\_coarse\_index\_granularity {#setting-merge-tree-coarse-index-granularity} + +搜索数据时,ClickHouse会检查索引文件中的数据标记。 如果ClickHouse发现所需的键在某个范围内,它将此范围划分为 `merge_tree_coarse_index_granularity` 子范围和递归地搜索所需的键。 + +可能的值: + +- 任何正偶数整数。 + +默认值:8。 + +## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache} + +如果克里克豪斯应该阅读更多 `merge_tree_max_rows_to_use_cache` 在一个查询中的行,它不使用未压缩块的缓存。 + +未压缩块的缓存存储为查询提取的数据。 ClickHouse使用此缓存来加快对重复的小查询的响应。 此设置可保护缓存免受读取大量数据的查询的破坏。 该 [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) 服务器设置定义未压缩块的高速缓存的大小。 + +可能的值: + +- 任何正整数。 + +Default value: 128 ✕ 8192. + +## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache} + +如果克里克豪斯应该阅读更多 `merge_tree_max_bytes_to_use_cache` 在一个查询中的字节,它不使用未压缩块的缓存。 + +未压缩块的缓存存储为查询提取的数据。 ClickHouse使用此缓存来加快对重复的小查询的响应。 此设置可保护缓存免受读取大量数据的查询的破坏。 该 [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) 服务器设置定义未压缩块的高速缓存的大小。 + +可能的值: + +- 任何正整数。 + +默认值:2013265920. + +## min\_bytes\_to\_use\_direct\_io {#settings-min-bytes-to-use-direct-io} + +使用直接I/O访问存储磁盘所需的最小数据量。 + +ClickHouse在从表中读取数据时使用此设置。 如果要读取的所有数据的总存储量超过 `min_bytes_to_use_direct_io` 字节,然后ClickHouse读取从存储磁盘的数据 `O_DIRECT` 选项。 + +可能的值: + +- 0 — Direct I/O is disabled. 
+- 整数。 + +默认值:0。 + +## log\_queries {#settings-log-queries} + +设置查询日志记录。 + +使用此设置发送到ClickHouse的查询将根据以下内容中的规则记录 [query\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 服务器配置参数。 + +示例: + +``` text +log_queries=1 +``` + +## log\_query\_threads {#settings-log-query-threads} + +设置查询线程日志记录。 + +ClickHouse使用此设置运行的查询线程将根据以下命令中的规则记录 [query\_thread\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 服务器配置参数。 + +示例: + +``` text +log_query_threads=1 +``` + +## max\_insert\_block\_size {#settings-max_insert_block_size} + +要插入到表中的块的大小。 +此设置仅适用于服务器形成块的情况。 +例如,对于通过HTTP接口进行的插入,服务器会分析数据格式并形成指定大小的块。 +但是当使用clickhouse-client时,客户端解析数据本身,并且 ‘max\_insert\_block\_size’ 服务器上的设置不会影响插入的块的大小。 +使用INSERT SELECT时,该设置也没有目的,因为数据是使用在SELECT之后形成的相同块插入的。 + +默认值:1,048,576。 + +默认值略高于 `max_block_size`. 这样做的原因是因为某些表引擎 (`*MergeTree`)在磁盘上为每个插入的块形成一个数据部分,这是一个相当大的实体。 同样, `*MergeTree` 表在插入过程中对数据进行排序,并且足够大的块大小允许在RAM中对更多数据进行排序。 + +## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries} + +禁用分布式查询的滞后副本。 看 [复制](../../engines/table_engines/mergetree_family/replication.md). + +以秒为单位设置时间。 如果副本滞后超过设定值,则不使用此副本。 + +默认值:300。 + +执行时使用 `SELECT` 从指向复制表的分布式表。 + +## max\_threads {#settings-max_threads} + +查询处理线程的最大数量,不包括用于从远程服务器检索数据的线程(请参阅 ‘max\_distributed\_connections’ 参数)。 + +此参数适用于并行执行查询处理管道的相同阶段的线程。 +例如,当从表中读取时,如果可以使用函数来评估表达式,请使用WHERE进行过滤,并且至少使用并行方式对GROUP BY进行预聚合 ‘max\_threads’ 线程数,然后 ‘max\_threads’ 被使用。 + +默认值:物理CPU内核数。 + +如果一次在服务器上运行的SELECT查询通常少于一个,请将此参数设置为略小于实际处理器内核数的值。 + +对于由于限制而快速完成的查询,可以设置较低的 ‘max\_threads’. 例如,如果必要数量的条目位于每个块中,并且max\_threads=8,则会检索8个块,尽管只读取一个块就足够了。 + +越小 `max_threads` 值,较少的内存被消耗。 + +## max\_insert\_threads {#settings-max-insert-threads} + +要执行的最大线程数 `INSERT SELECT` 查询。 + +可能的值: + +- 0 (or 1) — `INSERT SELECT` 没有并行执行。 +- 整数。 大于1。 + +默认值:0。 + +平行 `INSERT SELECT` 只有在 `SELECT` 部分并行执行,请参阅 [max\_threads](#settings-max_threads) 设置。 +更高的值将导致更高的内存使用率。 + +## max\_compress\_block\_size {#max-compress-block-size} + +在压缩写入表之前,未压缩数据块的最大大小。 默认情况下,1,048,576(1MiB)。 如果大小减小,则压缩率显着降低,压缩和解压缩速度由于高速缓存局部性而略微增加,并且内存消耗减少。 通常没有任何理由更改此设置。 + +不要将用于压缩的块(由字节组成的内存块)与用于查询处理的块(表中的一组行)混淆。 + +## min\_compress\_block\_size {#min-compress-block-size} + +为 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)"表。 为了减少处理查询时的延迟,在写入下一个标记时,如果块的大小至少为 ‘min\_compress\_block\_size’. 默认情况下,65,536。 + +块的实际大小,如果未压缩的数据小于 ‘max\_compress\_block\_size’,是不小于该值且不小于一个标记的数据量。 + +让我们来看看一个例子。 假设 ‘index\_granularity’ 在表创建期间设置为8192。 + +我们正在编写一个UInt32类型的列(每个值4个字节)。 当写入8192行时,总数将是32KB的数据。 由于min\_compress\_block\_size=65,536,将为每两个标记形成一个压缩块。 + +我们正在编写一个字符串类型的URL列(每个值的平均大小60字节)。 当写入8192行时,平均数据将略少于500KB。 由于这超过65,536,将为每个标记形成一个压缩块。 在这种情况下,当从单个标记范围内的磁盘读取数据时,额外的数据不会被解压缩。 + +通常没有任何理由更改此设置。 + +## max\_query\_size {#settings-max_query_size} + +查询的最大部分,可以被带到RAM用于使用SQL解析器进行解析。 +插入查询还包含由单独的流解析器(消耗O(1)RAM)处理的插入数据,这些数据不包含在此限制中。 + +默认值:256KiB。 + +## interactive\_delay {#interactive-delay} + +以微秒为单位的间隔,用于检查请求执行是否已被取消并发送进度。 + +默认值:100,000(检查取消并每秒发送十次进度)。 + +## connect\_timeout,receive\_timeout,send\_timeout {#connect-timeout-receive-timeout-send-timeout} + +用于与客户端通信的套接字上的超时以秒为单位。 + +默认值:10,300,300。 + +## cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close} + +Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. 
+ +默认值:0 + +## poll\_interval {#poll-interval} + +锁定在指定秒数的等待循环。 + +默认值:10。 + +## max\_distributed\_connections {#max-distributed-connections} + +与远程服务器同时连接的最大数量,用于分布式处理对单个分布式表的单个查询。 我们建议设置不小于群集中服务器数量的值。 + +默认值:1024。 + +以下参数仅在创建分布式表(以及启动服务器时)时使用,因此没有理由在运行时更改它们。 + +## distributed\_connections\_pool\_size {#distributed-connections-pool-size} + +与远程服务器同时连接的最大数量,用于分布式处理对单个分布式表的所有查询。 我们建议设置不小于群集中服务器数量的值。 + +默认值:1024。 + +## connect\_timeout\_with\_failover\_ms {#connect-timeout-with-failover-ms} + +以毫秒为单位连接到分布式表引擎的远程服务器的超时,如果 ‘shard’ 和 ‘replica’ 部分用于群集定义。 +如果不成功,将尝试多次连接到各种副本。 + +默认值:50。 + +## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries} + +分布式表引擎的每个副本的最大连接尝试次数。 + +默认值:3。 + +## 极端 {#extremes} + +是否计算极值(查询结果列中的最小值和最大值)。 接受0或1。 默认情况下,0(禁用)。 +有关详细信息,请参阅部分 “Extreme values”. + +## use\_uncompressed\_cache {#setting-use_uncompressed_cache} + +是否使用未压缩块的缓存。 接受0或1。 默认情况下,0(禁用)。 +使用未压缩缓存(仅适用于MergeTree系列中的表)可以在处理大量短查询时显着减少延迟并提高吞吐量。 为频繁发送短请求的用户启用此设置。 还要注意 [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted. + +对于至少读取大量数据(一百万行或更多行)的查询,将自动禁用未压缩缓存,以节省真正小型查询的空间。 这意味着你可以保持 ‘use\_uncompressed\_cache’ 设置始终设置为1。 + +## replace\_running\_query {#replace-running-query} + +当使用HTTP接口时, ‘query\_id’ 参数可以传递。 这是用作查询标识符的任何字符串。 +如果来自同一用户的查询具有相同的 ‘query\_id’ 已经存在在这个时候,行为取决于 ‘replace\_running\_query’ 参数。 + +`0` (default) – Throw an exception (don't allow the query to run if a query with the same ‘query\_id’ 已经运行)。 + +`1` – Cancel the old query and start running the new one. + +YandexMetrica使用此参数设置为1来实现分段条件的建议。 输入下一个字符后,如果旧的查询还没有完成,应该取消。 + +## stream\_flush\_interval\_ms {#stream-flush-interval-ms} + +适用于在超时的情况下或线程生成流式传输的表 [max\_insert\_block\_size](#settings-max_insert_block_size) 行。 + +默认值为7500。 + +值越小,数据被刷新到表中的频率就越高。 将该值设置得太低会导致性能较差。 + +## load\_balancing {#settings-load_balancing} + +指定用于分布式查询处理的副本选择算法。 + +ClickHouse支持以下选择副本的算法: + +- [随机](#load_balancing-random) (默认情况下) +- [最近的主机名](#load_balancing-nearest_hostname) +- [按顺序](#load_balancing-in_order) +- [第一次或随机](#load_balancing-first_or_random) + +### 随机(默认情况下) {#load_balancing-random} + +``` sql +load_balancing = random +``` + +对每个副本计算错误数。 查询发送到错误最少的副本,如果存在其中几个错误,则发送给其中任何一个。 +缺点:不考虑服务器邻近度;如果副本具有不同的数据,则也会获得不同的数据。 + +### 最近的主机名 {#load_balancing-nearest_hostname} + +``` sql +load_balancing = nearest_hostname +``` + +The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server's hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames). 
+ +例如,例如01-01-1和example01-01-2.yandex.ru 在一个位置是不同的,而example01-01-1和example01-02-2在两个地方不同。 +这种方法可能看起来很原始,但它不需要有关网络拓扑的外部数据,也不比较IP地址,这对于我们的IPv6地址来说会很复杂。 + +因此,如果存在等效副本,则首选按名称最接近的副本。 +我们还可以假设,当向同一台服务器发送查询时,在没有失败的情况下,分布式查询也将转到同一台服务器。 因此,即使在副本上放置了不同的数据,查询也会返回大多相同的结果。 + +### 按顺序 {#load_balancing-in_order} + +``` sql +load_balancing = in_order +``` + +具有相同错误数的副本的访问顺序与配置中指定的顺序相同。 +当您确切知道哪个副本是可取的时,此方法是适当的。 + +### 第一次或随机 {#load_balancing-first_or_random} + +``` sql +load_balancing = first_or_random +``` + +此算法选择集合中的第一个副本,如果第一个副本不可用,则选择随机副本。 它在跨复制拓扑设置中有效,但在其他配置中无用。 + +该 `first_or_random` 算法解决的问题 `in_order` 算法。 与 `in_order`,如果一个副本出现故障,下一个副本将获得双重负载,而其余副本将处理通常的流量。 使用时 `first_or_random` 算法中,负载均匀分布在仍然可用的副本之间。 + +## prefer\_localhost\_replica {#settings-prefer-localhost-replica} + +在处理分布式查询时,最好使用localhost副本启用/禁用该副本。 + +可能的值: + +- 1 — ClickHouse always sends a query to the localhost replica if it exists. +- 0 — ClickHouse uses the balancing strategy specified by the [load\_balancing](#settings-load_balancing) 设置。 + +默认值:1。 + +!!! warning "警告" + 如果使用此设置,请禁用此设置 [max\_parallel\_replicas](#settings-max_parallel_replicas). + +## totals\_mode {#totals-mode} + +如何计算总计时有存在,以及当max\_rows\_to\_group\_by和group\_by\_overflow\_mode= ‘any’ 都在场。 +请参阅部分 “WITH TOTALS modifier”. + +## totals\_auto\_threshold {#totals-auto-threshold} + +阈值 `totals_mode = 'auto'`. +请参阅部分 “WITH TOTALS modifier”. + +## max\_parallel\_replicas {#settings-max_parallel_replicas} + +执行查询时每个分片的最大副本数。 +为了保持一致性(以获取相同数据拆分的不同部分),此选项仅在设置了采样键时有效。 +副本滞后不受控制。 + +## 编译 {#compile} + +启用查询的编译。 默认情况下,0(禁用)。 + +编译仅用于查询处理管道的一部分:用于聚合的第一阶段(GROUP BY)。 +如果编译了管道的这一部分,则由于部署周期较短和内联聚合函数调用,查询可能运行得更快。 对于具有多个简单聚合函数的查询,可以看到最大的性能改进(在极少数情况下可快四倍)。 通常,性能增益是微不足道的。 在极少数情况下,它可能会减慢查询执行速度。 + +## min\_count\_to\_compile {#min-count-to-compile} + +在运行编译之前可能使用已编译代码块的次数。 默认情况下,3。 +For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values ​​starting with 1. Compilation normally takes about 5-10 seconds. +如果该值为1或更大,则编译在单独的线程中异步进行。 结果将在准备就绪后立即使用,包括当前正在运行的查询。 + +对于查询中使用的聚合函数的每个不同组合以及GROUP BY子句中的键类型,都需要编译代码。 +The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted. + +## output\_format\_json\_quote\_64bit\_integers {#session_settings-output_format_json_quote_64bit_integers} + +如果该值为true,则在使用JSON\*Int64和UInt64格式时,整数将显示在引号中(为了与大多数JavaScript实现兼容);否则,整数将不带引号输出。 + +## format\_csv\_delimiter {#settings-format_csv_delimiter} + +将字符解释为CSV数据中的分隔符。 默认情况下,分隔符为 `,`. + +## input\_format\_csv\_unquoted\_null\_literal\_as\_null {#settings-input_format_csv_unquoted_null_literal_as_null} + +对于CSV输入格式,启用或禁用未引用的解析 `NULL` 作为文字(同义词 `\N`). + +## output\_format\_csv\_crlf\_end\_of\_line {#settings-output-format-csv-crlf-end-of-line} + +在CSV中使用DOS/Windows样式的行分隔符(CRLF)而不是Unix样式(LF)。 + +## output\_format\_tsv\_crlf\_end\_of\_line {#settings-output-format-tsv-crlf-end-of-line} + +在TSV中使用DOC/Windows样式的行分隔符(CRLF)而不是Unix样式(LF)。 + +## insert\_quorum {#settings-insert_quorum} + +启用仲裁写入。 + +- 如果 `insert_quorum < 2`,仲裁写入被禁用。 +- 如果 `insert_quorum >= 2`,仲裁写入已启用。 + +默认值:0。 + +仲裁写入 + +`INSERT` 只有当ClickHouse设法正确地将数据写入成功 `insert_quorum` 在复制品的 `insert_quorum_timeout`. 
如果由于任何原因,成功写入的副本数量没有达到 `insert_quorum`,写入被认为失败,ClickHouse将从已经写入数据的所有副本中删除插入的块。 + +仲裁中的所有副本都是一致的,即它们包含来自所有以前的数据 `INSERT` 查询。 该 `INSERT` 序列线性化。 + +当读取从写入的数据 `insert_quorum`,您可以使用 [select\_sequential\_consistency](#settings-select_sequential_consistency) 选项。 + +ClickHouse生成异常 + +- 如果查询时可用副本的数量小于 `insert_quorum`. +- 在尝试写入数据时,以前的块尚未被插入 `insert_quorum` 的复制品。 如果用户尝试执行 `INSERT` 前一个与 `insert_quorum` 完成。 + +另请参阅: + +- [insert\_quorum\_timeout](#settings-insert_quorum_timeout) +- [select\_sequential\_consistency](#settings-select_sequential_consistency) + +## insert\_quorum\_timeout {#settings-insert_quorum-timeout} + +写入仲裁超时以秒为单位。 如果超时已经过去,并且还没有发生写入,ClickHouse将生成异常,客户端必须重复查询以将相同的块写入相同的副本或任何其他副本。 + +默认值:60秒。 + +另请参阅: + +- [insert\_quorum](#settings-insert_quorum) +- [select\_sequential\_consistency](#settings-select_sequential_consistency) + +## select\_sequential\_consistency {#settings-select_sequential_consistency} + +启用或禁用顺序一致性 `SELECT` 查询: + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:0。 + +用途 + +当启用顺序一致性时,ClickHouse允许客户端执行 `SELECT` 仅查询那些包含来自所有先前数据的副本 `INSERT` 查询执行 `insert_quorum`. 如果客户端引用了部分副本,ClickHouse将生成异常。 SELECT查询将不包括尚未写入副本仲裁的数据。 + +另请参阅: + +- [insert\_quorum](#settings-insert_quorum) +- [insert\_quorum\_timeout](#settings-insert_quorum_timeout) + +## insert\_deduplicate {#settings-insert-deduplicate} + +启用或禁用块重复数据删除 `INSERT` (对于复制的\*表)。 + +可能的值: + +- 0 — Disabled. +- 1 — Enabled. + +默认值:1。 + +默认情况下,块插入到复制的表 `INSERT` 重复数据删除语句(请参阅\[数据复制\](../engines/table\_engines/mergetree\_family/replication.md)。 + +## deduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views} + +启用或禁用从已复制\*表接收数据的实例化视图的重复数据删除检查。 + +可能的值: + + 0 — Disabled. + 1 — Enabled. + +默认值:0。 + +用途 + +默认情况下,重复数据删除不对实例化视图执行,而是在源表的上游执行。 +如果由于源表中的重复数据删除而跳过了插入的块,则不会插入附加的实例化视图。 这种行为的存在是为了允许将高度聚合的数据插入到实例化视图中,对于在实例化视图聚合之后插入的块相同,但是从源表中的不同插入派生的情况。 +与此同时,这种行为 “breaks” `INSERT` 幂等性 如果一个 `INSERT` 进入主表是成功的, `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper) a client will get an error and can retry the operation. However, the materialized view won't receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` 允许改变这种行为。 重试时,实例化视图将收到重复插入,并自行执行重复数据删除检查, +忽略源表的检查结果,并将插入由于第一次失败而丢失的行。 + +## max\_network\_bytes {#settings-max-network-bytes} + +限制在执行查询时通过网络接收或传输的数据量(以字节为单位)。 此设置适用于每个单独的查询。 + +可能的值: + +- 整数。 +- 0 — Data volume control is disabled. + +默认值:0。 + +## max\_network\_bandwidth {#settings-max-network-bandwidth} + +限制通过网络进行数据交换的速度,以每秒字节为单位。 此设置适用于每个查询。 + +可能的值: + +- 整数。 +- 0 — Bandwidth control is disabled. + +默认值:0。 + +## max\_network\_bandwidth\_for\_user {#settings-max-network-bandwidth-for-user} + +限制通过网络进行数据交换的速度,以每秒字节为单位。 此设置适用于单个用户执行的所有并发运行的查询。 + +可能的值: + +- 整数。 +- 0 — Control of the data speed is disabled. + +默认值:0。 + +## max\_network\_bandwidth\_for\_all\_users {#settings-max-network-bandwidth-for-all-users} + +限制通过网络交换数据的速度,以每秒字节为单位。 此设置适用于服务器上同时运行的所有查询。 + +可能的值: + +- 整数。 +- 0 — Control of the data speed is disabled. 
+ +默认值:0。 + +## count\_distinct\_implementation {#settings-count_distinct_implementation} + +指定其中的 `uniq*` 函数应用于执行 [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count) 建筑。 + +可能的值: + +- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq) +- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined) +- [uniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64) +- [uniqHLL12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12) +- [uniqExact](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact) + +默认值: `uniqExact`. + +## skip\_unavailable\_shards {#settings-skip_unavailable_shards} + +启用或禁用静默跳过不可用分片。 + +如果分片的所有副本都不可用,则视为不可用。 副本在以下情况下不可用: + +- ClickHouse出于任何原因无法连接到副本。 + + 连接到副本时,ClickHouse会执行多次尝试。 如果所有这些尝试都失败,则认为副本不可用。 + +- 副本无法通过DNS解析。 + + 如果无法通过DNS解析副本的主机名,则可能指示以下情况: + + - 副本的主机没有DNS记录。 它可以发生在具有动态DNS的系统中,例如, [Kubernetes](https://kubernetes.io),其中节点在停机期间可能无法解决问题,这不是错误。 + + - 配置错误。 ClickHouse配置文件包含错误的主机名。 + +可能的值: + +- 1 — skipping enabled. + + 如果分片不可用,ClickHouse将基于部分数据返回结果,并且不报告节点可用性问题。 + +- 0 — skipping disabled. + + 如果分片不可用,ClickHouse将引发异常。 + +默认值:0。 + +## optimize\_skip\_unused\_shards {#settings-optimize_skip_unused_shards} + +对于在PREWHERE/WHERE中具有分片键条件的SELECT查询,启用或禁用跳过未使用的分片(假定数据是通过分片键分发的,否则不执行任何操作)。 + +默认值:0 + +## force\_optimize\_skip\_unused\_shards {#settings-force_optimize_skip_unused_shards} + +在以下情况下启用或禁用查询执行 [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) 无法启用和跳过未使用的分片。 如果跳过是不可能的,并且设置为启用异常将被抛出。 + +可能的值: + +- 0-禁用(不抛出) +- 1-仅当表具有分片键时禁用查询执行 +- 2-无论为表定义了分片键,都禁用查询执行 + +默认值:0 + +## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested} + +重置 [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) 对于嵌套 `Distributed` 表 + +可能的值: + +- 1 — Enabled. +- 0 — Disabled. + +默认值:0。 + +## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop} + +启用或禁用抛出异常,如果 [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) 查询未执行合并。 + +默认情况下, `OPTIMIZE` 即使它没有做任何事情,也会成功返回。 此设置允许您区分这些情况并在异常消息中获取原因。 + +可能的值: + +- 1 — Throwing an exception is enabled. +- 0 — Throwing an exception is disabled. 
+ +默认值:0。 + +## distributed\_replica\_error\_half\_life {#settings-distributed_replica_error_half_life} + +- 类型:秒 +- 默认值:60秒 + +控制清零分布式表中的错误的速度。 如果某个副本在一段时间内不可用,累计出现5个错误,并且distributed\_replica\_error\_half\_life设置为1秒,则该副本在上一个错误发生3秒后视为正常。 + +另请参阅: + +- [表引擎分布式](../../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap) + +## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap} + +- 类型:无符号int +- 默认值:1000 + +每个副本的错误计数上限为此值,从而防止单个副本累积太多错误。 + +另请参阅: + +- [表引擎分布式](../../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life) + +## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms} + +对于基本间隔 [分布](../../engines/table_engines/special/distributed.md) 表引擎发送数据。 在发生错误时,实际间隔呈指数级增长。 + +可能的值: + +- 毫秒的正整数。 + +默认值:100毫秒。 + +## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms} + +的最大间隔 [分布](../../engines/table_engines/special/distributed.md) 表引擎发送数据。 限制在设置的区间的指数增长 [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) 设置。 + +可能的值: + +- 毫秒的正整数。 + +默认值:30000毫秒(30秒)。 + +## distributed\_directory\_monitor\_batch\_inserts {#distributed_directory_monitor_batch_inserts} + +启用/禁用批量发送插入的数据。 + +当批量发送被启用时, [分布](../../engines/table_engines/special/distributed.md) 表引擎尝试在一个操作中发送插入数据的多个文件,而不是单独发送它们。 批量发送通过更好地利用服务器和网络资源来提高集群性能。 + +可能的值: + +- 1 — Enabled. +- 0 — Disabled. + +默认值:0。 + +## os\_thread\_priority {#setting-os-thread-priority} + +设置优先级 ([不错](https://en.wikipedia.org/wiki/Nice_(Unix)))对于执行查询的线程。 当选择要在每个可用CPU内核上运行的下一个线程时,操作系统调度程序会考虑此优先级。 + +!!! warning "警告" + 要使用此设置,您需要设置 `CAP_SYS_NICE` 能力。 该 `clickhouse-server` 软件包在安装过程中设置它。 某些虚拟环境不允许您设置 `CAP_SYS_NICE` 能力。 在这种情况下, `clickhouse-server` 在开始时显示关于它的消息。 + +可能的值: + +- 您可以在范围内设置值 `[-20, 19]`. + +值越低意味着优先级越高。 低螺纹 `nice` 与具有高值的线程相比,优先级值的执行频率更高。 高值对于长时间运行的非交互式查询更为可取,因为这使得它们可以在到达时快速放弃资源,转而使用短交互式查询。 + +默认值:0。 + +## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns} + +设置周期的实时时钟定时器 [查询探查器](../../operations/optimizing_performance/sampling_query_profiler.md). 真正的时钟计时器计数挂钟时间。 + +可能的值: + +- 正整数,以纳秒为单位。 + + 推荐值: + + - 10000000 (100 times a second) nanoseconds and less for single queries. + - 1000000000 (once a second) for cluster-wide profiling. + +- 0用于关闭计时器。 + +类型: [UInt64](../../sql_reference/data_types/int_uint.md). + +默认值:1000000000纳秒(每秒一次)。 + +另请参阅: + +- 系统表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns} + +设置周期的CPU时钟定时器 [查询探查器](../../operations/optimizing_performance/sampling_query_profiler.md). 此计时器仅计算CPU时间。 + +可能的值: + +- 纳秒的正整数。 + + 推荐值: + + - 10000000 (100 times a second) nanoseconds and more for single queries. + - 1000000000 (once a second) for cluster-wide profiling. + +- 0用于关闭计时器。 + +类型: [UInt64](../../sql_reference/data_types/int_uint.md). + +默认值:1000000000纳秒。 + +另请参阅: + +- 系统表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## allow\_introspection\_functions {#settings-allow_introspection_functions} + +启用禁用 [反省函数](../../sql_reference/functions/introspection.md) 用于查询分析。 + +可能的值: + +- 1 — Introspection functions enabled. +- 0 — Introspection functions disabled. 
+ +默认值:0。 + +**另请参阅** + +- [采样查询探查器](../optimizing_performance/sampling_query_profiler.md) +- 系统表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## input\_format\_parallel\_parsing {#input-format-parallel-parsing} + +- 类型:布尔 +- 默认值:True + +启用数据格式的保序并行分析。 仅支持TSV,TKSV,CSV和JSONEachRow格式。 + +## min\_chunk\_bytes\_for\_parallel\_parsing {#min-chunk-bytes-for-parallel-parsing} + +- 类型:无符号int +- 默认值:1MiB + +以字节为单位的最小块大小,每个线程将并行解析。 + +## output\_format\_avro\_codec {#settings-output_format_avro_codec} + +设置用于输出Avro文件的压缩编解ec。 + +类型:字符串 + +可能的值: + +- `null` — No compression +- `deflate` — Compress with Deflate (zlib) +- `snappy` — Compress with [活泼的](https://google.github.io/snappy/) + +默认值: `snappy` (如果可用)或 `deflate`. + +## output\_format\_avro\_sync\_interval {#settings-output_format_avro_sync_interval} + +设置输出Avro文件的同步标记之间的最小数据大小(以字节为单位)。 + +类型:无符号int + +可能的值:32(32字节)-1073741824(1GiB) + +默认值:32768(32KiB) + +## format\_avro\_schema\_registry\_url {#settings-format_avro_schema_registry_url} + +设置要与之一起使用的汇合架构注册表URL [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) 格式 + +类型:网址 + +默认值:空 + +[原始文章](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/zh/operations/settings/settings_profiles.md b/docs/zh/operations/settings/settings_profiles.md deleted file mode 120000 index 35d9747ad56..00000000000 --- a/docs/zh/operations/settings/settings_profiles.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/settings_profiles.md \ No newline at end of file diff --git a/docs/zh/operations/settings/settings_profiles.md b/docs/zh/operations/settings/settings_profiles.md new file mode 100644 index 00000000000..21379a65ebc --- /dev/null +++ b/docs/zh/operations/settings/settings_profiles.md @@ -0,0 +1,71 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 61 +toc_title: "\u8BBE\u7F6E\u914D\u7F6E\u6587\u4EF6" +--- + +# 设置配置文件 {#settings-profiles} + +设置配置文件是以相同名称分组的设置的集合。 每个ClickHouse用户都有一个配置文件。 +要应用配置文件中的所有设置,请设置 `profile` 设置。 + +示例: + +安装 `web` 侧写 + +``` sql +SET profile = 'web' +``` + +设置配置文件在用户配置文件中声明。 这通常是 `users.xml`. + +示例: + +``` xml + + + + + + 8 + + + + + 1000000000 + 100000000000 + + 1000000 + any + + 1000000 + 1000000000 + + 100000 + 100000000 + break + + 600 + 1000000 + 15 + + 25 + 100 + 50 + + 2 + 25 + 50 + 100 + + 1 + + +``` + +该示例指定了两个配置文件: `default` 和 `web`. 
该 `default` 配置文件有一个特殊用途:它必须始终存在并在启动服务器时应用。 换句话说, `default` 配置文件包含默认设置。 该 `web` 配置文件是一个常规的配置文件,可以使用设置 `SET` 查询或在HTTP查询中使用URL参数。 + +设置配置文件可以彼此继承。 要使用继承,请指示一个或多个 `profile` 配置文件中列出的其他设置之前的设置。 如果在不同的配置文件中定义了一个设置,则使用最新定义。 + +[原始文章](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/) diff --git a/docs/zh/operations/settings/settings_users.md b/docs/zh/operations/settings/settings_users.md deleted file mode 120000 index 3a6a7cf6948..00000000000 --- a/docs/zh/operations/settings/settings_users.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/settings_users.md \ No newline at end of file diff --git a/docs/zh/operations/settings/settings_users.md b/docs/zh/operations/settings/settings_users.md new file mode 100644 index 00000000000..2dba689d08f --- /dev/null +++ b/docs/zh/operations/settings/settings_users.md @@ -0,0 +1,148 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 63 +toc_title: "\u7528\u6237\u8BBE\u7F6E" +--- + +# 用户设置 {#user-settings} + +该 `users` 一节 `user.xml` 配置文件包含用户设置。 + +的结构 `users` 科: + +``` xml + + + + + + + + + + + profile_name + + default + + + + + expression + + + + + + +``` + +### 用户名称/密码 {#user-namepassword} + +密码可以以明文或SHA256(十六进制格式)指定。 + +- 以明文形式分配密码 (**不推荐**),把它放在一个 `password` 元素。 + + 例如, `qwerty`. 密码可以留空。 + + + +- 要使用其SHA256散列分配密码,请将其放置在 `password_sha256_hex` 元素。 + + 例如, `65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5`. + + 如何从shell生成密码的示例: + + PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-' + + 结果的第一行是密码。 第二行是相应的SHA256哈希。 + + + +- 为了与MySQL客户端兼容,密码可以在双SHA1哈希中指定。 放进去 `password_double_sha1_hex` 元素。 + + 例如, `08b4a0f1de6ad37da17359e592c8d74788a83eb0`. + + 如何从shell生成密码的示例: + + PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-' + + 结果的第一行是密码。 第二行是相应的双SHA1哈希。 + +### 用户名称/网络 {#user-namenetworks} + +用户可以从中连接到ClickHouse服务器的网络列表。 + +列表中的每个元素都可以具有以下形式之一: + +- `` — IP address or network mask. + + 例: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`. + +- `` — Hostname. + + 示例: `example01.host.ru`. + + 要检查访问,将执行DNS查询,并将所有返回的IP地址与对等地址进行比较。 + +- `` — Regular expression for hostnames. + + 示例, `^example\d\d-\d\d-\d\.host\.ru$` + + 要检查访问,a [DNS PTR查询](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) 对对等体地址执行,然后应用指定的正则表达式。 然后,对PTR查询的结果执行另一个DNS查询,并将所有接收到的地址与对等地址进行比较。 我们强烈建议正则表达式以$结尾。 + +DNS请求的所有结果都将被缓存,直到服务器重新启动。 + +**例** + +要从任何网络打开用户的访问权限,请指定: + +``` xml +::/0 +``` + +!!! warning "警告" + 从任何网络开放访问是不安全的,除非你有一个防火墙正确配置或服务器没有直接连接到互联网。 + +若要仅从本地主机打开访问权限,请指定: + +``` xml +::1 +127.0.0.1 +``` + +### user\_name/profile {#user-nameprofile} + +您可以为用户分配设置配置文件。 设置配置文件在单独的部分配置 `users.xml` 文件 有关详细信息,请参阅 [设置配置文件](settings_profiles.md). + +### 用户名称/配额 {#user-namequota} + +配额允许您在一段时间内跟踪或限制资源使用情况。 配额在配置 `quotas` +一节 `users.xml` 配置文件。 + +您可以为用户分配配额。 有关配额配置的详细说明,请参阅 [配额](../quotas.md#quotas). 
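+
+Combining the elements above, a complete user entry could look like the following. This is an illustrative sketch, not part of the original documentation: the user name `web_user` is a placeholder, the SHA256 hash is the example value from the password section above, and the `web` profile and `default` quota are assumed to be defined elsewhere in `users.xml` (see [设置配置文件](settings_profiles.md)).
+
+``` xml
+<users>
+    <web_user>
+        <!-- SHA256 hash, generated as shown in the password section -->
+        <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+        <networks>
+            <!-- Allow connections from localhost only -->
+            <ip>::1</ip>
+            <ip>127.0.0.1</ip>
+        </networks>
+        <!-- Settings profile and quota are referenced by name -->
+        <profile>web</profile>
+        <quota>default</quota>
+    </web_user>
+</users>
+```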
+ +### 用户名/数据库 {#user-namedatabases} + +在本节中,您可以限制ClickHouse返回的行 `SELECT` 由当前用户进行的查询,从而实现基本的行级安全性。 + +**示例** + +以下配置强制该用户 `user1` 只能看到的行 `table1` 作为结果 `SELECT` 查询,其中的值 `id` 场是1000。 + +``` xml + + + + + id = 1000 + + + + +``` + +该 `filter` 可以是导致任何表达式 [UInt8](../../sql_reference/data_types/int_uint.md)-键入值。 它通常包含比较和逻辑运算符。 从行 `database_name.table1` 其中,不会为此用户返回为0的筛选结果。 过滤是不兼容的 `PREWHERE` 操作和禁用 `WHERE→PREWHERE` 优化。 + +[原始文章](https://clickhouse.tech/docs/en/operations/settings/settings_users/) diff --git a/docs/zh/operations/system_tables.md b/docs/zh/operations/system_tables.md deleted file mode 120000 index c5701190dca..00000000000 --- a/docs/zh/operations/system_tables.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/system_tables.md \ No newline at end of file diff --git a/docs/zh/operations/system_tables.md b/docs/zh/operations/system_tables.md new file mode 100644 index 00000000000..ba762ddb562 --- /dev/null +++ b/docs/zh/operations/system_tables.md @@ -0,0 +1,1126 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 52 +toc_title: "\u7CFB\u7EDF\u8868" +--- + +# 系统表 {#system-tables} + +系统表用于实现系统的部分功能,并提供对有关系统如何工作的信息的访问。 +您无法删除系统表(但可以执行分离)。 +系统表没有包含磁盘上数据的文件或包含元数据的文件。 服务器在启动时创建所有系统表。 +系统表是只读的。 +它们位于 ‘system’ 数据库。 + +## 系统。asynchronous\_metrics {#system_tables-asynchronous_metrics} + +包含在后台定期计算的指标。 例如,在使用的RAM量。 + +列: + +- `metric` ([字符串](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Float64](../sql_reference/data_types/float.md)) — Metric value. + +**示例** + +``` sql +SELECT * FROM system.asynchronous_metrics LIMIT 10 +``` + +``` text +┌─metric──────────────────────────────────┬──────value─┐ +│ jemalloc.background_thread.run_interval │ 0 │ +│ jemalloc.background_thread.num_runs │ 0 │ +│ jemalloc.background_thread.num_threads │ 0 │ +│ jemalloc.retained │ 422551552 │ +│ jemalloc.mapped │ 1682989056 │ +│ jemalloc.resident │ 1656446976 │ +│ jemalloc.metadata_thp │ 0 │ +│ jemalloc.metadata │ 10226856 │ +│ UncompressedCacheCells │ 0 │ +│ MarkCacheFiles │ 0 │ +└─────────────────────────────────────────┴────────────┘ +``` + +**另请参阅** + +- [监测](monitoring.md) — Base concepts of ClickHouse monitoring. +- [系统。指标](#system_tables-metrics) — Contains instantly calculated metrics. +- [系统。活动](#system_tables-events) — Contains a number of events that have occurred. +- [系统。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. + +## 系统。集群 {#system-clusters} + +包含有关配置文件中可用的集群及其中的服务器的信息。 + +列: + +- `cluster` (String) — The cluster name. +- `shard_num` (UInt32) — The shard number in the cluster, starting from 1. +- `shard_weight` (UInt32) — The relative weight of the shard when writing data. +- `replica_num` (UInt32) — The replica number in the shard, starting from 1. +- `host_name` (String) — The host name, as specified in the config. +- `host_address` (String) — The host IP address obtained from DNS. +- `port` (UInt16) — The port to use for connecting to the server. +- `user` (String) — The name of the user for connecting to the server. 
+- `errors_count` (UInt32)-此主机无法到达副本的次数。 +- `estimated_recovery_time` (UInt32)-剩下的秒数,直到副本错误计数归零,它被认为是恢复正常。 + +请注意 `errors_count` 每个查询集群更新一次,但 `estimated_recovery_time` 按需重新计算。 所以有可能是非零的情况 `errors_count` 和零 `estimated_recovery_time`,下一个查询将为零 `errors_count` 并尝试使用副本,就好像它没有错误。 + +**另请参阅** + +- [表引擎分布式](../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_cap设置](settings/settings.md#settings-distributed_replica_error_cap) +- [distributed\_replica\_error\_half\_life设置](settings/settings.md#settings-distributed_replica_error_half_life) + +## 系统。列 {#system-columns} + +包含有关所有表中列的信息。 + +您可以使用此表获取类似于以下内容的信息 [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table) 查询,但对于多个表一次。 + +该 `system.columns` 表包含以下列(列类型显示在括号中): + +- `database` (String) — Database name. +- `table` (String) — Table name. +- `name` (String) — Column name. +- `type` (String) — Column type. +- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`)为默认值,如果没有定义,则为空字符串。 +- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined. +- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes. +- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes. +- `marks_bytes` (UInt64) — The size of marks, in bytes. +- `comment` (String) — Comment on the column, or an empty string if it is not defined. +- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression. +- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression. +- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. +- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. + +## 系统。贡献者 {#system-contributors} + +包含有关贡献者的信息。 按随机顺序排列所有构造。 该顺序在查询执行时是随机的。 + +列: + +- `name` (String) — Contributor (author) name from git log. + +**示例** + +``` sql +SELECT * FROM system.contributors LIMIT 10 +``` + +``` text +┌─name─────────────┐ +│ Olga Khvostikova │ +│ Max Vetrov │ +│ LiuYangkuan │ +│ svladykin │ +│ zamulla │ +│ Šimon Podlipský │ +│ BayoNet │ +│ Ilya Khomutov │ +│ Amy Krishnevsky │ +│ Loud_Scream │ +└──────────────────┘ +``` + +要在表中找出自己,请使用查询: + +``` sql +SELECT * FROM system.contributors WHERE name='Olga Khvostikova' +``` + +``` text +┌─name─────────────┐ +│ Olga Khvostikova │ +└──────────────────┘ +``` + +## 系统。数据库 {#system-databases} + +此表包含一个名为"字符串"的列 ‘name’ – the name of a database. +服务器知道的每个数据库在表中都有相应的条目。 +该系统表用于实现 `SHOW DATABASES` 查询。 + +## 系统。detached\_parts {#system_tables-detached_parts} + +包含有关分离部分的信息 [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) 桌子 该 `reason` 列指定分离部件的原因。 对于用户分离的部件,原因是空的。 这些部件可以附加 [ALTER TABLE ATTACH PARTITION\|PART](../sql_reference/statements/alter.md#alter_attach-partition) 指挥部 有关其他列的说明,请参阅 [系统。零件](#system_tables-parts). 如果部件名称无效,某些列的值可能为 `NULL`. 这些部分可以删除 [ALTER TABLE DROP DETACHED PART](../sql_reference/statements/alter.md#alter_drop-detached). + +## 系统。字典 {#system-dictionaries} + +包含有关外部字典的信息。 + +列: + +- `name` (String) — Dictionary name. +- `type` (String) — Dictionary type: Flat, Hashed, Cache. +- `origin` (String) — Path to the configuration file that describes the dictionary. +- `attribute.names` (Array(String)) — Array of attribute names provided by the dictionary. +- `attribute.types` (Array(String)) — Corresponding array of attribute types that are provided by the dictionary. 
+- `has_hierarchy` (UInt8) — Whether the dictionary is hierarchical. +- `bytes_allocated` (UInt64) — The amount of RAM the dictionary uses. +- `hit_rate` (Float64) — For cache dictionaries, the percentage of uses for which the value was in the cache. +- `element_count` (UInt64) — The number of items stored in the dictionary. +- `load_factor` (Float64) — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). +- `creation_time` (DateTime) — The time when the dictionary was created or last successfully reloaded. +- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. +- `source` (String) — Text describing the data source for the dictionary. + +请注意,字典使用的内存量与其中存储的项目数量不成正比。 因此,对于平面和缓存字典,所有的内存单元都是预先分配的,而不管字典实际上有多满。 + +## 系统。活动 {#system_tables-events} + +包含有关系统中发生的事件数的信息。 例如,在表中,您可以找到多少 `SELECT` 自ClickHouse服务器启动以来已处理查询。 + +列: + +- `event` ([字符串](../sql_reference/data_types/string.md)) — Event name. +- `value` ([UInt64](../sql_reference/data_types/int_uint.md)) — Number of events occurred. +- `description` ([字符串](../sql_reference/data_types/string.md)) — Event description. + +**示例** + +``` sql +SELECT * FROM system.events LIMIT 5 +``` + +``` text +┌─event─────────────────────────────────┬─value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Query │ 12 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ +│ SelectQuery │ 8 │ Same as Query, but only for SELECT queries. │ +│ FileOpen │ 73 │ Number of files opened. │ +│ ReadBufferFromFileDescriptorRead │ 155 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. │ +│ ReadBufferFromFileDescriptorReadBytes │ 9931 │ Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. │ +└───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**另请参阅** + +- [系统。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [系统。指标](#system_tables-metrics) — Contains instantly calculated metrics. +- [系统。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [监测](monitoring.md) — Base concepts of ClickHouse monitoring. + +## 系统。功能 {#system-functions} + +包含有关正常函数和聚合函数的信息。 + +列: + +- `name`(`String`) – The name of the function. +- `is_aggregate`(`UInt8`) — Whether the function is aggregate. 
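+
+For example, to check whether a given name refers to an aggregate function, the table can be queried directly (an illustrative query, not from the original documentation):
+
+``` sql
+SELECT name, is_aggregate
+FROM system.functions
+WHERE name IN ('sum', 'plus')
+```
+
+Here `sum` is reported with `is_aggregate = 1`, while the ordinary function `plus` returns `0`.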
+ +## 系统。graphite\_retentions {#system-graphite-retentions} + +包含有关参数的信息 [graphite\_rollup](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) 这是在表中使用 [\*GraphiteMergeTree](../engines/table_engines/mergetree_family/graphitemergetree.md) 引擎 + +列: + +- `config_name` (字符串) - `graphite_rollup` 参数名称。 +- `regexp` (String)-指标名称的模式。 +- `function` (String)-聚合函数的名称。 +- `age` (UInt64)-以秒为单位的数据的最小期限。 +- `precision` (UInt64)-如何精确地定义以秒为单位的数据的年龄。 +- `priority` (UInt16)-模式优先级。 +- `is_default` (UInt8)-模式是否为默认值。 +- `Tables.database` (Array(String))-使用数据库表名称的数组 `config_name` 参数。 +- `Tables.table` (Array(String))-使用表名称的数组 `config_name` 参数。 + +## 系统。合并 {#system-merges} + +包含有关MergeTree系列中表当前正在进行的合并和部件突变的信息。 + +列: + +- `database` (String) — The name of the database the table is in. +- `table` (String) — Table name. +- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started. +- `progress` (Float64) — The percentage of completed work from 0 to 1. +- `num_parts` (UInt64) — The number of pieces to be merged. +- `result_part_name` (String) — The name of the part that will be formed as the result of merging. +- `is_mutation` (UInt8)-1如果这个过程是一个部分突变. +- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks. +- `total_size_marks` (UInt64) — The total number of marks in the merged parts. +- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed. +- `rows_read` (UInt64) — Number of rows read. +- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed. +- `rows_written` (UInt64) — Number of rows written. + +## 系统。指标 {#system_tables-metrics} + +包含可以立即计算或具有当前值的指标。 例如,同时处理的查询的数量或当前副本的延迟。 此表始终是最新的。 + +列: + +- `metric` ([字符串](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — Metric value. +- `description` ([字符串](../sql_reference/data_types/string.md)) — Metric description. + +支持的指标列表,您可以在 [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) ClickHouse的源文件。 + +**示例** + +``` sql +SELECT * FROM system.metrics LIMIT 10 +``` + +``` text +┌─metric─────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Query │ 1 │ Number of executing queries │ +│ Merge │ 0 │ Number of executing background merges │ +│ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ +│ ReplicatedFetch │ 0 │ Number of data parts being fetched from replicas │ +│ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ +│ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ +│ BackgroundPoolTask │ 0 │ Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping) │ +│ BackgroundSchedulePoolTask │ 0 │ Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc. │ +│ DiskSpaceReservedForMerge │ 0 │ Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts. │ +│ DistributedSend │ 0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode. 
│ +└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**另请参阅** + +- [系统。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [系统。活动](#system_tables-events) — Contains a number of events that occurred. +- [系统。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [监测](monitoring.md) — Base concepts of ClickHouse monitoring. + +## 系统。metric\_log {#system_tables-metric_log} + +包含表中度量值的历史记录 `system.metrics` 和 `system.events`,定期刷新到磁盘。 +打开指标历史记录收集 `system.metric_log`,创建 `/etc/clickhouse-server/config.d/metric_log.xml` 具有以下内容: + +``` xml + + + system + metric_log
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+    </metric_log>
+</yandex>
    +``` + +**示例** + +``` sql +SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical; +``` + +``` text +Row 1: +────── +event_date: 2020-02-18 +event_time: 2020-02-18 07:15:33 +milliseconds: 554 +ProfileEvent_Query: 0 +ProfileEvent_SelectQuery: 0 +ProfileEvent_InsertQuery: 0 +ProfileEvent_FileOpen: 0 +ProfileEvent_Seek: 0 +ProfileEvent_ReadBufferFromFileDescriptorRead: 1 +ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0 +ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0 +ProfileEvent_WriteBufferFromFileDescriptorWrite: 1 +ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0 +ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56 +... +CurrentMetric_Query: 0 +CurrentMetric_Merge: 0 +CurrentMetric_PartMutation: 0 +CurrentMetric_ReplicatedFetch: 0 +CurrentMetric_ReplicatedSend: 0 +CurrentMetric_ReplicatedChecks: 0 +... +``` + +**另请参阅** + +- [系统。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [系统。活动](#system_tables-events) — Contains a number of events that occurred. +- [系统。指标](#system_tables-metrics) — Contains instantly calculated metrics. +- [监测](monitoring.md) — Base concepts of ClickHouse monitoring. + +## 系统。数字 {#system-numbers} + +此表包含一个名为UInt64的列 ‘number’ 它包含几乎所有从零开始的自然数。 +您可以使用此表进行测试,或者如果您需要进行暴力搜索。 +从此表中读取的内容不是并行的。 + +## 系统。numbers\_mt {#system-numbers-mt} + +一样的 ‘system.numbers’ 但读取是并行的。 这些数字可以以任何顺序返回。 +用于测试。 + +## 系统。一 {#system-one} + +此表包含一行,其中包含一行 ‘dummy’ UInt8列包含值0。 +如果SELECT查询未指定FROM子句,则使用此表。 +这与其他Dbms中的双表类似。 + +## 系统。零件 {#system_tables-parts} + +包含有关的部分信息 [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) 桌子 + +每行描述一个数据部分。 + +列: + +- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql_reference/statements/alter.md#query_language_queries_alter) 查询。 + + 格式: + + - `YYYYMM` 用于按月自动分区。 + - `any_string` 手动分区时。 + +- `name` (`String`) – Name of the data part. + +- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging. + +- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` 通过索引粒度(通常为8192)(此提示不适用于自适应粒度)。 + +- `rows` (`UInt64`) – The number of rows. + +- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes. + +- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `marks_bytes` (`UInt64`) – The size of the file with marks. + +- `modification_time` (`DateTime`) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.\| + +- `remove_time` (`DateTime`) – The time when the data part became inactive. + +- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges. + +- `min_date` (`Date`) – The minimum value of the date key in the data part. + +- `max_date` (`Date`) – The maximum value of the date key in the data part. + +- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part. 
+ +- `max_time`(`DateTime`) – The maximum value of the date and time key in the data part. + +- `partition_id` (`String`) – ID of the partition. + +- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging. + +- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging. + +- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts. + +- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`). + +- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values. + +- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values. + +- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition) + +- `database` (`String`) – Name of the database. + +- `table` (`String`) – Name of the table. + +- `engine` (`String`) – Name of the table engine without parameters. + +- `path` (`String`) – Absolute path to the folder with data part files. + +- `disk` (`String`) – Name of a disk that stores the data part. + +- `hash_of_all_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 的压缩文件。 + +- `hash_of_uncompressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 未压缩的文件(带标记的文件,索引文件等。). + +- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 压缩文件中的数据,就好像它们是未压缩的。 + +- `bytes` (`UInt64`) – Alias for `bytes_on_disk`. + +- `marks_size` (`UInt64`) – Alias for `marks_bytes`. + +## 系统。part\_log {#system_tables-part-log} + +该 `system.part_log` 表只有当创建 [part\_log](server_configuration_parameters/settings.md#server_configuration_parameters-part-log) 指定了服务器设置。 + +此表包含与以下情况发生的事件有关的信息 [数据部分](../engines/table_engines/mergetree_family/custom_partitioning_key.md) 在 [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) 家庭表,例如添加或合并数据。 + +该 `system.part_log` 表包含以下列: + +- `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values: + - `NEW_PART` — Inserting of a new data part. + - `MERGE_PARTS` — Merging of data parts. + - `DOWNLOAD_PART` — Downloading a data part. + - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition). + - `MUTATE_PART` — Mutating of a data part. + - `MOVE_PART` — Moving the data part from the one disk to another one. +- `event_date` (Date) — Event date. +- `event_time` (DateTime) — Event time. +- `duration_ms` (UInt64) — Duration. +- `database` (String) — Name of the database the data part is in. +- `table` (String) — Name of the table the data part is in. +- `part_name` (String) — Name of the data part. +- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ 值,如果分区是由 `tuple()`. +- `rows` (UInt64) — The number of rows in the data part. +- `size_in_bytes` (UInt64) — Size of the data part in bytes. 
+- `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge). +- `bytes_uncompressed` (UInt64) — Size of uncompressed bytes. +- `read_rows` (UInt64) — The number of rows was read during the merge. +- `read_bytes` (UInt64) — The number of bytes was read during the merge. +- `error` (UInt16) — The code number of the occurred error. +- `exception` (String) — Text message of the occurred error. + +该 `system.part_log` 表的第一个插入数据到后创建 `MergeTree` 桌子 + +## 系统。流程 {#system_tables-processes} + +该系统表用于实现 `SHOW PROCESSLIST` 查询。 + +列: + +- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` 用户。 该字段包含特定查询的用户名,而不是此查询启动的查询的用户名。 +- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` 查询请求者服务器上。 +- `elapsed` (Float64) – The time in seconds since request execution started. +- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known. +- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) 设置。 +- `query` (String) – The query text. For `INSERT`,它不包括要插入的数据。 +- `query_id` (String) – Query ID, if defined. + +## 系统。text\_log {#system-tables-text-log} + +包含日志记录条目。 进入该表的日志记录级别可以通过以下方式进行限制 `text_log.level` 服务器设置。 + +列: + +- `event_date` (`Date`)-条目的日期。 +- `event_time` (`DateTime`)-条目的时间。 +- `microseconds` (`UInt32`)-条目的微秒。 +- `thread_name` (String) — Name of the thread from which the logging was done. +- `thread_id` (UInt64) — OS thread ID. +- `level` (`Enum8`)-入门级。 + - `'Fatal' = 1` + - `'Critical' = 2` + - `'Error' = 3` + - `'Warning' = 4` + - `'Notice' = 5` + - `'Information' = 6` + - `'Debug' = 7` + - `'Trace' = 8` +- `query_id` (`String`)-查询的ID。 +- `logger_name` (`LowCardinality(String)`) - Name of the logger (i.e. `DDLWorker`) +- `message` (`String`)-消息本身。 +- `revision` (`UInt32`)-ClickHouse修订。 +- `source_file` (`LowCardinality(String)`)-从中完成日志记录的源文件。 +- `source_line` (`UInt64`)-从中完成日志记录的源代码行。 + +## 系统。query\_log {#system_tables-query_log} + +包含有关查询执行的信息。 对于每个查询,您可以看到处理开始时间,处理持续时间,错误消息和其他信息。 + +!!! note "注" + 该表不包含以下内容的输入数据 `INSERT` 查询。 + +ClickHouse仅在以下情况下创建此表 [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 指定服务器参数。 此参数设置日志记录规则,例如日志记录间隔或将记录查询的表的名称。 + +要启用查询日志记录,请设置 [log\_queries](settings/settings.md#settings-log-queries) 参数为1。 有关详细信息,请参阅 [设置](settings/settings.md) 科。 + +该 `system.query_log` 表注册两种查询: + +1. 客户端直接运行的初始查询。 +2. 由其他查询启动的子查询(用于分布式查询执行)。 对于这些类型的查询,有关父查询的信息显示在 `initial_*` 列。 + +列: + +- `type` (`Enum8`) — Type of event that occurred when executing the query. Values: + - `'QueryStart' = 1` — Successful start of query execution. 
+ - `'QueryFinish' = 2` — Successful end of query execution. + - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution. + - `'ExceptionWhileProcessing' = 4` — Exception during the query execution. +- `event_date` (Date) — Query starting date. +- `event_time` (DateTime) — Query starting time. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` 查询,写入的行数。 对于其他查询,列值为0。 +- `written_bytes` (UInt64) — For `INSERT` 查询时,写入的字节数。 对于其他查询,列值为0。 +- `result_rows` (UInt64) — Number of rows in the result. +- `result_bytes` (UInt64) — Number of bytes in the result. +- `memory_usage` (UInt64) — Memory consumption by the query. +- `query` (String) — Query string. +- `exception` (String) — Exception message. +- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: + - 1 — TCP. + - 2 — HTTP. +- `os_user` (String) — OS's username who runs [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或者运行另一个TCP客户端。 +- `client_name` (String) — The [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端名称。 +- `client_revision` (UInt32) — Revision of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端。 +- `client_version_major` (UInt32) — Major version of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端。 +- `client_version_minor` (UInt32) — Minor version of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端。 +- `client_version_patch` (UInt32) — Patch component of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端版本。 +- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: + - 0 — The query was launched from the TCP interface. + - 1 — `GET` 方法被使用。 + - 2 — `POST` 方法被使用。 +- `http_user_agent` (String) — The `UserAgent` http请求中传递的标头。 +- `quota_key` (String) — The “quota key” 在指定 [配额](quotas.md) 设置(见 `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. 
The description of them could be found in the table [系统。活动](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` 列。 +- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` 参数为1。 +- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` 列。 + +每个查询创建一个或两个行中 `query_log` 表,具体取决于查询的状态: + +1. 如果查询执行成功,将创建两个类型为1和2的事件(请参阅 `type` 列)。 +2. 如果在查询处理过程中发生错误,将创建两个类型为1和4的事件。 +3. 如果在启动查询之前发生错误,将创建类型为3的单个事件。 + +默认情况下,日志以7.5秒的间隔添加到表中。 您可以在设置此时间间隔 [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 服务器设置(请参阅 `flush_interval_milliseconds` 参数)。 要强制将日志从内存缓冲区刷新到表中,请使用 `SYSTEM FLUSH LOGS` 查询。 + +当手动删除表时,它将自动动态创建。 请注意,所有以前的日志将被删除。 + +!!! note "注" + 日志的存储周期是无限的。 日志不会自动从表中删除。 您需要自己组织删除过时的日志。 + +您可以指定一个任意的分区键 `system.query_log` 表中的 [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 服务器设置(请参阅 `partition_by` 参数)。 + +## 系统。query\_thread\_log {#system_tables-query-thread-log} + +该表包含有关每个查询执行线程的信息。 + +ClickHouse仅在以下情况下创建此表 [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 指定服务器参数。 此参数设置日志记录规则,例如日志记录间隔或将记录查询的表的名称。 + +要启用查询日志记录,请设置 [log\_query\_threads](settings/settings.md#settings-log-query-threads) 参数为1。 有关详细信息,请参阅 [设置](settings/settings.md) 科。 + +列: + +- `event_date` (Date) — the date when the thread has finished execution of the query. +- `event_time` (DateTime) — the date and time when the thread has finished execution of the query. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` 查询,写入的行数。 对于其他查询,列值为0。 +- `written_bytes` (UInt64) — For `INSERT` 查询时,写入的字节数。 对于其他查询,列值为0。 +- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in context of this thread. +- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in context of this thread. +- `thread_name` (String) — Name of the thread. +- `thread_number` (UInt32) — Internal thread ID. +- `os_thread_id` (Int32) — OS thread ID. +- `master_thread_id` (UInt64) — OS initial ID of initial thread. +- `query` (String) — Query string. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: + - 1 — TCP. + - 2 — HTTP. 
+- `os_user` (String) — OS's username who runs [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或者运行另一个TCP客户端。 +- `client_name` (String) — The [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端名称。 +- `client_revision` (UInt32) — Revision of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端。 +- `client_version_major` (UInt32) — Major version of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端。 +- `client_version_minor` (UInt32) — Minor version of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端。 +- `client_version_patch` (UInt32) — Patch component of the [ツ环板clientョツ嘉ッツ偲](../interfaces/cli.md) 或另一个TCP客户端版本。 +- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: + - 0 — The query was launched from the TCP interface. + - 1 — `GET` 方法被使用。 + - 2 — `POST` 方法被使用。 +- `http_user_agent` (String) — The `UserAgent` http请求中传递的标头。 +- `quota_key` (String) — The “quota key” 在指定 [配额](quotas.md) 设置(见 `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [系统。活动](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` 列。 + +默认情况下,日志以7.5秒的间隔添加到表中。 您可以在设置此时间间隔 [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 服务器设置(请参阅 `flush_interval_milliseconds` 参数)。 要强制将日志从内存缓冲区刷新到表中,请使用 `SYSTEM FLUSH LOGS` 查询。 + +当手动删除表时,它将自动动态创建。 请注意,所有以前的日志将被删除。 + +!!! note "注" + 日志的存储周期是无限的。 日志不会自动从表中删除。 您需要自己组织删除过时的日志。 + +您可以指定一个任意的分区键 `system.query_thread_log` 表中的 [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 服务器设置(请参阅 `partition_by` 参数)。 + +## 系统。trace\_log {#system_tables-trace_log} + +包含采样查询探查器收集的堆栈跟踪。 + +ClickHouse创建此表时 [trace\_log](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) 服务器配置部分被设置。 也是 [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) 和 [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) 应设置设置。 + +要分析日志,请使用 `addressToLine`, `addressToSymbol` 和 `demangle` 内省功能。 + +列: + +- `event_date`([日期](../sql_reference/data_types/date.md)) — Date of sampling moment. + +- `event_time`([日期时间](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment. + +- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision. + + 通过以下方式连接到服务器 `clickhouse-client`,你看到的字符串类似于 `Connected to ClickHouse server version 19.18.1 revision 54429.`. 该字段包含 `revision`,但不是 `version` 的服务器。 + +- `timer_type`([枚举8](../sql_reference/data_types/enum.md)) — Timer type: + + - `Real` 表示挂钟时间。 + - `CPU` 表示CPU时间。 + +- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier. + +- `query_id`([字符串](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) 系统表. + +- `trace`([数组(UInt64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. 
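+
+The addresses in `trace` can be rendered human-readable with the introspection functions mentioned above. The following is a sketch, assuming [allow\_introspection\_functions](settings/settings.md#settings-allow_introspection_functions) is enabled and debug symbols are available on the server; the `query_id` literal is a placeholder for an identifier taken from this table:
+
+``` sql
+SET allow_introspection_functions = 1;
+
+SELECT
+    count() AS samples,
+    arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS symbolized
+FROM system.trace_log
+WHERE query_id = 'acc4d61f-5bd1-4a3e-bc91-2180be37c915'
+GROUP BY trace
+ORDER BY samples DESC
+LIMIT 5
+```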
+
+**示例**
+
+``` sql
+SELECT * FROM system.trace_log LIMIT 1 \G
+```
+
+``` text
+Row 1:
+──────
+event_date: 2019-11-15
+event_time: 2019-11-15 15:09:38
+revision: 54428
+timer_type: Real
+thread_number: 48
+query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915
+trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
+```
+
+## 系统。副本 {#system_tables-replicas}
+
+包含本地服务器上复制表的信息和状态,可用于监控。 每个 Replicated\* 表在该表中各占一行。
+
+示例:
+
+``` sql
+SELECT *
+FROM system.replicas
+WHERE table = 'visits'
+FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+database: merge
+table: visits
+engine: ReplicatedCollapsingMergeTree
+is_leader: 1
+can_become_leader: 1
+is_readonly: 0
+is_session_expired: 0
+future_parts: 1
+parts_to_check: 0
+zookeeper_path: /clickhouse/tables/01-06/visits
+replica_name: example01-06-1.yandex.ru
+replica_path: /clickhouse/tables/01-06/visits/replicas/example01-06-1.yandex.ru
+columns_version: 9
+queue_size: 1
+inserts_in_queue: 0
+merges_in_queue: 1
+part_mutations_in_queue: 0
+queue_oldest_time: 2020-02-20 08:34:30
+inserts_oldest_time: 0000-00-00 00:00:00
+merges_oldest_time: 2020-02-20 08:34:30
+part_mutations_oldest_time: 0000-00-00 00:00:00
+oldest_part_to_get:
+oldest_part_to_merge_to: 20200220_20284_20840_7
+oldest_part_to_mutate_to:
+log_max_index: 596273
+log_pointer: 596274
+last_queue_update: 2020-02-20 08:34:32
+absolute_delay: 0
+total_replicas: 2
+active_replicas: 2
+```
+
+列:
+
+- `database` (`String`)-数据库名称
+- `table` (`String`)-表名
+- `engine` (`String`)-表引擎名称
+- `is_leader` (`UInt8`)-副本是否是领导者。
+    一次只有一个副本可以成为领导者。 领导者负责选择要执行的后台合并。
+    请注意,可以对任何可用且在 ZK 中具有会话的副本执行写操作,而不管该副本是否为领导者。
+- `can_become_leader` (`UInt8`)-副本是否可以当选为领导者。
+- `is_readonly` (`UInt8`)-副本是否处于只读模式。
+    如果配置中没有 ZooKeeper 的部分,或者在 ZooKeeper 中重新初始化会话时发生未知错误,此模式将被打开。
+- `is_session_expired` (`UInt8`)-与 ZooKeeper 的会话是否已过期。 含义与 `is_readonly` 基本相同。
+- `future_parts` (`UInt32`)-因尚未完成的插入或合并而将要出现的数据片段数。
+- `parts_to_check` (`UInt32`)-队列中等待验证的数据片段数。 如果怀疑某个数据片段已损坏,会将其放入验证队列。
+- `zookeeper_path` (`String`)-表数据在 ZooKeeper 中的路径。
+- `replica_name` (`String`)-副本在 ZooKeeper 中的名称。 同一表的不同副本具有不同的名称。
+- `replica_path` (`String`)-副本数据在 ZooKeeper 中的路径。 与 ‘zookeeper\_path/replicas/replica\_path’ 的拼接结果相同。
+- `columns_version` (`Int32`)-表结构的版本号。 指示执行 ALTER 的次数。 如果副本间版本不同,说明某些副本尚未应用全部的 ALTER 操作。
+- `queue_size` (`UInt32`)-等待执行的操作的队列大小。 操作包括插入数据块、合并和某些其他操作。 它通常与 `future_parts` 一致。
+- `inserts_in_queue` (`UInt32`)-等待插入的数据块的数量。 插入通常复制得相当快。 如果这个数字很大,说明出现了问题。
+- `merges_in_queue` (`UInt32`)-等待执行的合并的数量。 有时合并耗时很长,因此此值可能长时间大于零。
+- `part_mutations_in_queue` (`UInt32`)-等待执行的突变的数量。
+- `queue_oldest_time` (`DateTime`)-如果 `queue_size` 大于 0,显示最旧的操作是何时加入队列的。
+- `inserts_oldest_time` (`DateTime`)-参见 `queue_oldest_time`
+- `merges_oldest_time` (`DateTime`)-参见 `queue_oldest_time`
+- `part_mutations_oldest_time` (`DateTime`)-参见 `queue_oldest_time`
+
+接下来的 4 列只有在有 ZK 活动会话的情况下才具有非零值。
+
+- `log_max_index` (`UInt64`)-常规活动日志中的最大条目编号。
+- `log_pointer` (`UInt64`)-副本已复制到其执行队列的常规活动日志中的最大条目编号加一。 如果 `log_pointer` 远小于 `log_max_index`,说明出现了问题。
+- `last_queue_update` (`DateTime`)-队列最近一次更新的时间。
+- `absolute_delay` (`UInt64`)-当前副本落后的秒数。
+- `total_replicas` (`UInt8`)-此表的已知副本总数。
+- `active_replicas` (`UInt8`)-在 ZooKeeper 中具有会话的此表副本的数量(即正常运行的副本的数量)。
+
+如果请求所有列,该表的查询可能会有点慢,因为每一行都要从 ZooKeeper 进行几次读取。
+如果不请求最后 4 列(log\_max\_index,log\_pointer,total\_replicas,active\_replicas),该表的查询会很快。
+
+例如,您可以像下面这样检查一切是否正常工作:
+
+``` sql
+SELECT
+    database,
+    table,
+    is_leader,
+    is_readonly,
+    is_session_expired,
+    future_parts,
+    parts_to_check,
+    columns_version,
+    queue_size,
+    inserts_in_queue,
+    merges_in_queue,
+    log_max_index,
+    log_pointer,
+    total_replicas,
+    active_replicas
+FROM system.replicas
+WHERE
+       is_readonly
+    OR is_session_expired
+    OR future_parts > 20
+    OR parts_to_check > 10
+    OR queue_size > 20
+    OR inserts_in_queue > 10
+    OR log_max_index - log_pointer > 10
+    OR total_replicas < 2
+    OR active_replicas < total_replicas
+```
+
+如果这个查询没有返回任何结果,则表示一切正常。
+
+## 系统。设置 {#system-tables-system-settings}
+
+包含当前用户会话设置的信息。
+
+列:
+
+- `name` ([字符串](../sql_reference/data_types/string.md)) — Setting name.
+- `value` ([字符串](../sql_reference/data_types/string.md)) — Setting value.
+- `changed` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value.
+- `description` ([字符串](../sql_reference/data_types/string.md)) — Short setting description.
+- `min` ([可为空](../sql_reference/data_types/nullable.md)([字符串](../sql_reference/data_types/string.md))) — Minimum value of the setting, if any is set via [制约因素](settings/constraints_on_settings.md#constraints-on-settings). 如果设置没有最小值,则包含 [NULL](../sql_reference/syntax.md#null-literal).
+- `max` ([可为空](../sql_reference/data_types/nullable.md)([字符串](../sql_reference/data_types/string.md))) — Maximum value of the setting, if any is set via [制约因素](settings/constraints_on_settings.md#constraints-on-settings). 如果设置没有最大值,则包含 [NULL](../sql_reference/syntax.md#null-literal).
+- `readonly` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting:
+    - `0` — Current user can change the setting.
+    - `1` — Current user can't change the setting.
+
+**示例**
+
+下面的示例演示如何获取名称中包含 `min_i` 的设置的信息。
+
+``` sql
+SELECT *
+FROM system.settings
+WHERE name LIKE '%min_i%'
+```
+
+``` text
+┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
+│ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
+│ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
+│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
+└─────────────────────────────────────────────┴───────────┴─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
+```
+
+使用 `WHERE changed` 会很有用,例如,当你想检查:
+
+- 配置文件中的设置是否正确加载并正在使用。
+- 在当前会话中更改过的设置。
+
+
+
+``` sql
+SELECT * FROM system.settings WHERE changed AND name='load_balancing'
+```
+
+**另请参阅**
+
+- [设置](settings/index.md#settings)
+- [查询权限](settings/permissions_for_queries.md#settings_readonly)
+- [对设置的限制](settings/constraints_on_settings.md)
+
+## 系统。merge\_tree\_settings {#system-merge_tree_settings}
+
+包含 `MergeTree` 表相关设置的信息。
+
+列:
+
+- `name` (String) — Setting name.
+- `value` (String) — Setting value.
+- `description` (String) — Setting description.
+- `type` (String) — Setting type (implementation specific string value).
+- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
+
+## 系统。表\_engines {#system-table-engines}
+
+包含服务器支持的表引擎的描述及其功能支持信息。
+
+此表包含以下列(列类型显示在括号中):
+
+- `name` (String) — The name of table engine.
+- `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` 子句。
+- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [跳过索引](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
+- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl).
+- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` 和 `SAMPLE_BY`.
+- `supports_replication` (UInt8) — Flag that indicates if table engine supports [数据复制](../engines/table_engines/mergetree_family/replication.md).
+- `supports_deduplication` (UInt8) — Flag that indicates if table engine supports data deduplication.
+
+示例:
+
+``` sql
+SELECT *
+FROM system.table_engines
+WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
+```
+
+``` text
+┌─name──────────────────────────┬─supports_settings─┬─supports_skipping_indices─┬─supports_sort_order─┬─supports_ttl─┬─supports_replication─┬─supports_deduplication─┐
+│ Kafka │ 1 │ 0 │ 0 │ 0 │ 0 │ 0 │
+│ MergeTree │ 1 │ 1 │ 1 │ 1 │ 0 │ 0 │
+│ ReplicatedCollapsingMergeTree │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │
+└───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘
+```
+
+**另请参阅**
+
+- MergeTree 家族的 [查询子句](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses)
+- Kafka 的 [设置](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
+- Join 的 [设置](../engines/table_engines/special/join.md#join-limitations-and-settings)
+
+## 系统。表 {#system-tables}
+
+包含服务器已知的每个表的元数据。 分离(detached)的表不会显示在 `system.tables` 中。
+
+此表包含以下列(列类型显示在括号中):
+
+- `database` (String) — The name of the database the table is in.
+
+- `name` (String) — Table name.
+
+- `engine` (String) — Table engine name (without parameters).
+
+- `is_temporary` (UInt8)-指示表是否是临时表的标志。
+
+- `data_path` (String)-表数据在文件系统中的路径。
+
+- `metadata_path` (String)-表元数据在文件系统中的路径。
+
+- `metadata_modification_time` (DateTime)-表元数据的最近修改时间。
+
+- `dependencies_database` (数组(字符串))-数据库依赖关系。
+
+- `dependencies_table` (数组(字符串))-表依赖关系(基于当前表的 [MaterializedView](../engines/table_engines/special/materializedview.md) 表)。
+
+- `create_table_query` (String)-用于创建表的查询。
+
+- `engine_full` (String)-表引擎的参数。
+
+- `partition_key` (String)-表中指定的分区键表达式。
+
+- `sorting_key` (String)-表中指定的排序键表达式。
+
+- `primary_key` (String)-表中指定的主键表达式。
+
+- `sampling_key` (String)-表中指定的采样键表达式。
+
+- `storage_policy` (字符串)-存储策略:
+
+    - [MergeTree](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes)
+    - [Distributed](../engines/table_engines/special/distributed.md#distributed)
+
+- `total_rows` (Nullable(UInt64))-总行数。如果可以快速确定表中的确切行数,则为该值,否则为 `Null`(包括底层的 `Buffer` 表)。
+
+- `total_bytes` (Nullable(UInt64))-总字节数。如果可以快速确定表在存储上的确切字节数,则为该值,否则为 `Null`(**不**包括任何底层存储)。
+
+    - If the table stores data on disk, returns used space on disk (i.e. compressed).
+    - 如果表在内存中存储数据,返回在内存中使用的近似字节数。
+
+`SHOW TABLES` 查询即基于 `system.tables` 表实现。
+
+## 系统。zookeeper {#system-zookeeper}
+
+如果未配置 ZooKeeper,则该表不存在。 它允许从配置中定义的 ZooKeeper 集群读取数据。
+查询必须在 WHERE 子句中包含对 ‘path’ 的等值条件,该路径是 ZooKeeper 中你想要获取其子节点数据的节点路径。
+
+查询 `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` 会输出 `/clickhouse` 节点所有子节点的数据。
+要输出根节点下所有节点的数据,请写 path = ‘/’。
+如果 ‘path’ 中指定的路径不存在,将引发异常。
+
+列:
+
+- `name` (String) — The name of the node.
+- `path` (String) — The path to the node.
+- `value` (String) — Node value.
+- `dataLength` (Int32) — Size of the value.
+- `numChildren` (Int32) — Number of descendants.
+- `czxid` (Int64) — ID of the transaction that created the node.
+- `mzxid` (Int64) — ID of the transaction that last changed the node.
+- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants.
+- `ctime` (DateTime) — Time of node creation.
+- `mtime` (DateTime) — Time of the last modification of the node.
+- `version` (Int32) — Node version: the number of times the node was changed.
+- `cversion` (Int32) — Number of added or removed descendants.
+- `aversion` (Int32) — Number of changes to the ACL.
+- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.
+
+示例:
+
+``` sql
+SELECT *
+FROM system.zookeeper
+WHERE path = '/clickhouse/tables/01-08/visits/replicas'
+FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+name: example01-08-1.yandex.ru
+value:
+czxid: 932998691229
+mzxid: 932998691229
+ctime: 2015-03-27 16:49:51
+mtime: 2015-03-27 16:49:51
+version: 0
+cversion: 47
+aversion: 0
+ephemeralOwner: 0
+dataLength: 0
+numChildren: 7
+pzxid: 987021031383
+path: /clickhouse/tables/01-08/visits/replicas
+
+Row 2:
+──────
+name: example01-08-2.yandex.ru
+value:
+czxid: 933002738135
+mzxid: 933002738135
+ctime: 2015-03-27 16:57:01
+mtime: 2015-03-27 16:57:01
+version: 0
+cversion: 37
+aversion: 0
+ephemeralOwner: 0
+dataLength: 0
+numChildren: 7
+pzxid: 987021252247
+path: /clickhouse/tables/01-08/visits/replicas
+```
+
+## 系统。突变 {#system_tables-mutations}
+
+该表包含 MergeTree 表的 [突变](../sql_reference/statements/alter.md#alter-mutations) 及其进展的信息。 每个突变命令由一行表示。 该表具有以下列:
+
+**数据库**, **表** -应用突变的数据库和表的名称。
+
+**mutation\_id** -突变的 ID。 对于复制表,这些 ID 对应 ZooKeeper 中 `/mutations/` 目录下的 znode 名称。 对于非复制表,ID 对应表数据目录中的文件名。
+
+**命令** -突变命令字符串(查询中 `ALTER TABLE [db.]table` 之后的部分)。
+
+**create\_time** -该突变命令被提交执行的时间。
+
+**block\_numbers.partition\_id**, **block\_numbers.number** -嵌套列。 对于复制表的突变,它包含每个分区的一条记录:分区 ID 和该突变获取的块编号(在每个分区中,只有编号小于该突变所获取块编号的块才会被突变)。 在非复制表中,所有分区的块编号构成一个序列。 这意味着对于非复制表的突变,该列只包含一条记录,其中是该突变获取的单个块编号。
+
+**parts\_to\_do** -完成该突变还需要处理的数据片段数。
+
+**is\_done** -该突变是否已完成。 请注意,即使 `parts_to_do = 0`,复制表的突变也可能尚未完成,因为长时间运行的 INSERT 会创建需要突变的新数据片段。
+
+如果在突变某些数据片段时出现问题,以下列会包含附加信息:
+
+**latest\_failed\_part** -最近一个无法完成突变的数据片段的名称。
+
+**latest\_fail\_time** -最近一次片段突变失败的时间。
+
+**latest\_fail\_reason** -导致最近一次片段突变失败的异常消息。
+
+## 系统。磁盘 {#system_tables-disks}
+
+包含在 [服务器配置](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure) 中定义的磁盘的信息。
+
+列:
+
+- `name` ([字符串](../sql_reference/data_types/string.md)) — Name of a disk in the server configuration.
+- `path` ([字符串](../sql_reference/data_types/string.md)) — Path to the mount point in the file system.
+- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Free space on disk in bytes.
+- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Disk volume in bytes.
+- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` 磁盘配置参数。
+
+## 系统。storage\_policies {#system_tables-storage_policies}
+
+包含在 [服务器配置](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure) 中定义的存储策略和卷的信息。
+
+列:
+
+- `policy_name` ([字符串](../sql_reference/data_types/string.md)) — Name of the storage policy.
+- `volume_name` ([字符串](../sql_reference/data_types/string.md)) — Volume name defined in the storage policy.
+- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — Volume order number in the configuration.
+- `disks` ([数组(字符串)](../sql_reference/data_types/array.md)) — Disk names, defined in the storage policy.
+- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
+- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order.
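+
+例如,下面这个查询(仅作示意,字段名均来自上面的列表)可以查看每个存储策略下卷和磁盘的对应关系:
+
+``` sql
+SELECT policy_name, volume_name, volume_priority, disks
+FROM system.storage_policies
+ORDER BY policy_name, volume_priority
+```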
+ +如果存储策略包含多个卷,则每个卷的信息将存储在表的单独行中。 + +[原始文章](https://clickhouse.tech/docs/en/operations/system_tables/) diff --git a/docs/zh/operations/table_engines/aggregatingmergetree.md b/docs/zh/operations/table_engines/aggregatingmergetree.md deleted file mode 100644 index 2b18b2fbe48..00000000000 --- a/docs/zh/operations/table_engines/aggregatingmergetree.md +++ /dev/null @@ -1,94 +0,0 @@ -# AggregatingMergeTree {#aggregatingmergetree} - -该引擎继承自 [MergeTree](mergetree.md),并改变了数据片段的合并逻辑。 ClickHouse 会将相同主键的所有行(在一个数据片段内)替换为单个存储一系列聚合函数状态的行。 - -可以使用 `AggregatingMergeTree` 表来做增量数据统计聚合,包括物化视图的数据聚合。 - -引擎需使用 [AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) 类型来处理所有列。 - -如果要按一组规则来合并减少行数,则使用 `AggregatingMergeTree` 是合适的。 - -## 建表 {#jian-biao} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = AggregatingMergeTree() -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -语句参数的说明,请参阅 [语句描述](../../query_language/create.md)。 - -**子句** - -创建 `AggregatingMergeTree` 表时,需用跟创建 `MergeTree` 表一样的[子句](mergetree.md)。 - -
-已弃用的建表方法
-
-!!! attention "注意"
-    不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
-```
-
-上面的所有参数跟 `MergeTree` 中的一样。
-
    - -## SELECT 和 INSERT {#select-he-insert} - -插入数据,需使用带有聚合 -State- 函数的 [INSERT SELECT](../../query_language/insert_into.md) 语句。 -从 `AggregatingMergeTree` 表中查询数据时,需使用 `GROUP BY` 子句并且要使用与插入时相同的聚合函数,但后缀要改为 `-Merge` 。 - -在 `SELECT` 查询的结果中,对于 ClickHouse 的所有输出格式 `AggregateFunction` 类型的值都实现了特定的二进制表示法。如果直接用 `SELECT` 导出这些数据,例如如用 `TabSeparated` 格式,那么这些导出数据也能直接用 `INSERT` 语句加载导入。 - -## 聚合物化视图的示例 {#ju-he-wu-hua-shi-tu-de-shi-li} - -创建一个跟踪 `test.visits` 表的 `AggregatingMergeTree` 物化视图: - -``` sql -CREATE MATERIALIZED VIEW test.basic -ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate) -AS SELECT - CounterID, - StartDate, - sumState(Sign) AS Visits, - uniqState(UserID) AS Users -FROM test.visits -GROUP BY CounterID, StartDate; -``` - -向 `test.visits` 表中插入数据。 - -``` sql -INSERT INTO test.visits ... -``` - -数据会同时插入到表和视图中,并且视图 `test.basic` 会将里面的数据聚合。 - -要获取聚合数据,我们需要在 `test.basic` 视图上执行类似 `SELECT ... GROUP BY ...` 这样的查询 : - -``` sql -SELECT - StartDate, - sumMerge(Visits) AS Visits, - uniqMerge(Users) AS Users -FROM test.basic -GROUP BY StartDate -ORDER BY StartDate; -``` - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/) diff --git a/docs/zh/operations/table_engines/buffer.md b/docs/zh/operations/table_engines/buffer.md deleted file mode 100644 index 70fc8c6cbb2..00000000000 --- a/docs/zh/operations/table_engines/buffer.md +++ /dev/null @@ -1,53 +0,0 @@ -# Buffer {#buffer} - -缓冲数据写入 RAM 中,周期性地将数据刷新到另一个表。在读取操作时,同时从缓冲区和另一个表读取数据。 - - Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes) - -引擎的参数:database,table - 要刷新数据的表。可以使用返回字符串的常量表达式而不是数据库名称。 num\_layers - 并行层数。在物理上,该表将表示为 num\_layers 个独立缓冲区。建议值为16。min\_time,max\_time,min\_rows,max\_rows,min\_bytes,max\_bytes - 从缓冲区刷新数据的条件。 - -如果满足所有 «min» 条件或至少一个 «max» 条件,则从缓冲区刷新数据并将其写入目标表。min\_time,max\_time — 从第一次写入缓冲区时起以秒为单位的时间条件。min\_rows,max\_rows - 缓冲区中行数的条件。min\_bytes,max\_bytes - 缓冲区中字节数的条件。 - -写入时,数据从 num\_layers 个缓冲区中随机插入。或者,如果插入数据的大小足够大(大于 max\_rows 或 max\_bytes ),则会绕过缓冲区将其写入目标表。 - -每个 «num\_layers» 缓冲区刷新数据的条件是分别计算。例如,如果 num\_layers = 16 且 max\_bytes = 100000000,则最大RAM消耗将为1.6 GB。 - -示例: - -``` sql -CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000) -``` - -创建一个 «merge.hits\_buffer» 表,其结构与 «merge.hits» 相同,并使用 Buffer 引擎。写入此表时,数据缓冲在 RAM 中,然后写入 «merge.hits» 表。创建了16个缓冲区。如果已经过了100秒,或者已写入100万行,或者已写入100 MB数据,则刷新每个缓冲区的数据;或者如果同时已经过了10秒并且已经写入了10,000行和10 MB的数据。例如,如果只写了一行,那么在100秒之后,都会被刷新。但是如果写了很多行,数据将会更快地刷新。 - -当服务器停止时,使用 DROP TABLE 或 DETACH TABLE,缓冲区数据也会刷新到目标表。 - -可以为数据库和表名在单个引号中设置空字符串。这表示没有目的地表。在这种情况下,当达到数据刷新条件时,缓冲器被简单地清除。这可能对于保持数据窗口在内存中是有用的。 - -从 Buffer 表读取时,将从缓冲区和目标表(如果有)处理数据。 -请注意,Buffer 表不支持索引。换句话说,缓冲区中的数据被完全扫描,对于大缓冲区来说可能很慢。(对于目标表中的数据,将使用它支持的索引。) - -如果 Buffer 表中的列集与目标表中的列集不匹配,则会插入两个表中存在的列的子集。 - -如果类型与 Buffer 表和目标表中的某列不匹配,则会在服务器日志中输入错误消息并清除缓冲区。 -如果在刷新缓冲区时目标表不存在,则会发生同样的情况。 - -如果需要为目标表和 Buffer 表运行 ALTER,我们建议先删除 Buffer 表,为目标表运行 ALTER,然后再次创建 Buffer 表。 - -如果服务器异常重启,缓冲区中的数据将丢失。 - -PREWHERE,FINAL 和 SAMPLE 对缓冲表不起作用。这些条件将传递到目标表,但不用于处理缓冲区中的数据。因此,我们建议只使用Buffer表进行写入,同时从目标表进行读取。 - -将数据添加到缓冲区时,其中一个缓冲区被锁定。如果同时从表执行读操作,则会导致延迟。 - -插入到 Buffer 表中的数据可能以不同的顺序和不同的块写入目标表中。因此,Buffer 表很难用于正确写入 CollapsingMergeTree。为避免出现问题,您可以将 «num\_layers» 设置为1。 - -如果目标表是复制表,则在写入 Buffer 表时会丢失复制表的某些预期特征。数据部分的行次序和大小的随机变化导致数据不能去重,这意味着无法对复制表进行可靠的 «exactly once» 写入。 - -由于这些缺点,我们只建议在极少数情况下使用 Buffer 表。 - -当在单位时间内从大量服务器接收到太多 INSERTs 并且在插入之前无法缓冲数据时使用 Buffer 表,这意味着这些 INSERTs 不能足够快地执行。 - -请注意,一次插入一行数据是没有意义的,即使对于 Buffer 
表也是如此。这将只产生每秒几千行的速度,而插入更大的数据块每秒可以产生超过一百万行(参见 «性能» 部分)。 - -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/buffer/) diff --git a/docs/zh/operations/table_engines/collapsingmergetree.md b/docs/zh/operations/table_engines/collapsingmergetree.md deleted file mode 100644 index dd48fdd58ab..00000000000 --- a/docs/zh/operations/table_engines/collapsingmergetree.md +++ /dev/null @@ -1,206 +0,0 @@ -# CollapsingMergeTree {#table_engine-collapsingmergetree} - -该引擎继承于 [MergeTree](mergetree.md),并在数据块合并算法中添加了折叠行的逻辑。 - -`CollapsingMergeTree` 会异步的删除(折叠)这些除了特定列 `Sign` 有 `1` 和 `-1` 的值以外,其余所有字段的值都相等的成对的行。没有成对的行会被保留。更多的细节请看本文的[折叠](#table_engine-collapsingmergetree-collapsing)部分。 - -因此,该引擎可以显著的降低存储量并提高 `SELECT` 查询效率。 - -## 建表 {#jian-biao} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = CollapsingMergeTree(sign) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -请求参数的描述,参考[请求参数](../../query_language/create.md)。 - -**CollapsingMergeTree 参数** - -- `sign` — 类型列的名称: `1` 是«状态»行,`-1` 是«取消»行。 - - 列数据类型 — `Int8`。 - -**子句** - -创建 `CollapsingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md#table_engine-mergetree-creating-a-table)。 - -
    - -已弃用的建表方法 - -!!! attention "注意" - 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign) -``` - -除了 `sign` 的所有参数都与 `MergeTree` 中的含义相同。 - -- `sign` — 类型列的名称: `1` 是«状态»行,`-1` 是«取消»行。 - - 列数据类型 — `Int8`。 - -
    - -## 折叠 {#table_engine-collapsingmergetree-collapsing} - -### 数据 {#shu-ju} - -考虑你需要为某个对象保存不断变化的数据的情景。似乎为一个对象保存一行记录并在其发生任何变化时更新记录是合乎逻辑的,但是更新操作对 DBMS 来说是昂贵且缓慢的,因为它需要重写存储中的数据。如果你需要快速的写入数据,则更新操作是不可接受的,但是你可以按下面的描述顺序地更新一个对象的变化。 - -在写入行的时候使用特定的列 `Sign`。如果 `Sign = 1` 则表示这一行是对象的状态,我们称之为«状态»行。如果 `Sign = -1` 则表示是对具有相同属性的状态行的取消,我们称之为«取消»行。 - -例如,我们想要计算用户在某个站点访问的页面页面数以及他们在那里停留的时间。在某个时候,我们将用户的活动状态写入下面这样的行。 - - ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ - │ 4324182021466249494 │ 5 │ 146 │ 1 │ - └─────────────────────┴───────────┴──────────┴──────┘ - -一段时间后,我们写入下面的两行来记录用户活动的变化。 - - ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ - │ 4324182021466249494 │ 5 │ 146 │ -1 │ - │ 4324182021466249494 │ 6 │ 185 │ 1 │ - └─────────────────────┴───────────┴──────────┴──────┘ - -第一行取消了这个对象(用户)的状态。它需要复制被取消的状态行的所有除了 `Sign` 的属性。 - -第二行包含了当前的状态。 - -因为我们只需要用户活动的最后状态,这些行 - - ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ - │ 4324182021466249494 │ 5 │ 146 │ 1 │ - │ 4324182021466249494 │ 5 │ 146 │ -1 │ - └─────────────────────┴───────────┴──────────┴──────┘ - -可以在折叠对象的失效(老的)状态的时候被删除。`CollapsingMergeTree` 会在合并数据片段的时候做这件事。 - -为什么我们每次改变需要 2 行可以阅读[算法](#table_engine-collapsingmergetree-collapsing-algorithm)段。 - -**这种方法的特殊属性** - -1. 写入的程序应该记住对象的状态从而可以取消它。«取消»字符串应该是«状态»字符串的复制,除了相反的 `Sign`。它增加了存储的初始数据的大小,但使得写入数据更快速。 -2. 由于写入的负载,列中长的增长阵列会降低引擎的效率。数据越简单,效率越高。 -3. `SELECT` 的结果很大程度取决于对象变更历史的一致性。在准备插入数据时要准确。在不一致的数据中会得到不可预料的结果,例如,像会话深度这种非负指标的负值。 - -### 算法 {#table_engine-collapsingmergetree-collapsing-algorithm} - -当 ClickHouse 合并数据片段时,每组具有相同主键的连续行被减少到不超过两行,一行 `Sign = 1`(«状态»行),另一行 `Sign = -1` («取消»行),换句话说,数据项被折叠了。 - -对每个结果的数据部分 ClickHouse 保存: - - 1. 第一个«取消»和最后一个«状态»行,如果«状态»和«取消»行的数量匹配和最后一个行是«状态»行 - 2. 最后一个«状态»行,如果«状态»行比«取消»行多一个或一个以上。 - 3. 第一个«取消»行,如果«取消»行比«状态»行多一个或一个以上。 - 4. 
没有行,在其他所有情况下。 - - 合并会继续,但是 ClickHouse 会把此情况视为逻辑错误并将其记录在服务日志中。这个错误会在相同的数据被插入超过一次时出现。 - -因此,折叠不应该改变统计数据的结果。 -变化逐渐地被折叠,因此最终几乎每个对象都只剩下了最后的状态。 - -`Sign` 是必须的因为合并算法不保证所有有相同主键的行都会在同一个结果数据片段中,甚至是在同一台物理服务器上。ClickHouse 用多线程来处理 `SELECT` 请求,所以它不能预测结果中行的顺序。如果要从 `CollapsingMergeTree` 表中获取完全«折叠»后的数据,则需要聚合。 - -要完成折叠,请使用 `GROUP BY` 子句和用于处理符号的聚合函数编写请求。例如,要计算数量,使用 `sum(Sign)` 而不是 `count()`。要计算某物的总和,使用 `sum(Sign * x)` 而不是 `sum(x)`,并添加 `HAVING sum(Sign) > 0` 子句。 - -聚合体 `count`,`sum` 和 `avg` 可以用这种方式计算。如果一个对象至少有一个未被折叠的状态,则可以计算 `uniq` 聚合。`min` 和 `max` 聚合无法计算,因为 `CollaspingMergeTree` 不会保存折叠状态的值的历史记录。 - -如果你需要在不进行聚合的情况下获取数据(例如,要检查是否存在最新值与特定条件匹配的行),你可以在 `FROM` 从句中使用 `FINAL` 修饰符。这种方法显然是更低效的。 - -## 示例 {#shi-li} - -示例数据: - - ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ - │ 4324182021466249494 │ 5 │ 146 │ 1 │ - │ 4324182021466249494 │ 5 │ 146 │ -1 │ - │ 4324182021466249494 │ 6 │ 185 │ 1 │ - └─────────────────────┴───────────┴──────────┴──────┘ - -建表: - -``` sql -CREATE TABLE UAct -( - UserID UInt64, - PageViews UInt8, - Duration UInt8, - Sign Int8 -) -ENGINE = CollapsingMergeTree(Sign) -ORDER BY UserID -``` - -插入数据: - -``` sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1) -``` - -``` sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1) -``` - -我们使用两次 `INSERT` 请求来创建两个不同的数据片段。如果我们使用一个请求插入数据,ClickHouse 只会创建一个数据片段且不会执行任何合并操作。 - -获取数据: - - SELECT * FROM UAct - - ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ - │ 4324182021466249494 │ 5 │ 146 │ -1 │ - │ 4324182021466249494 │ 6 │ 185 │ 1 │ - └─────────────────────┴───────────┴──────────┴──────┘ - ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ - │ 4324182021466249494 │ 5 │ 146 │ 1 │ - └─────────────────────┴───────────┴──────────┴──────┘ - -我们看到了什么,哪里有折叠? - -通过两个 `INSERT` 请求,我们创建了两个数据片段。`SELECT` 请求在两个线程中被执行,我们得到了随机顺序的行。没有发生折叠是因为还没有合并数据片段。ClickHouse 在一个我们无法预料的未知时刻合并数据片段。 - -因此我们需要聚合: - -``` sql -SELECT - UserID, - sum(PageViews * Sign) AS PageViews, - sum(Duration * Sign) AS Duration -FROM UAct -GROUP BY UserID -HAVING sum(Sign) > 0 -``` - - ┌──────────────UserID─┬─PageViews─┬─Duration─┐ - │ 4324182021466249494 │ 6 │ 185 │ - └─────────────────────┴───────────┴──────────┘ - -如果我们不需要聚合并想要强制进行折叠,我们可以在 `FROM` 从句中使用 `FINAL` 修饰语。 - -``` sql -SELECT * FROM UAct FINAL -``` - - ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ - │ 4324182021466249494 │ 6 │ 185 │ 1 │ - └─────────────────────┴───────────┴──────────┴──────┘ - -这种查询数据的方法是非常低效的。不要在大表中使用它。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) diff --git a/docs/zh/operations/table_engines/custom_partitioning_key.md b/docs/zh/operations/table_engines/custom_partitioning_key.md deleted file mode 100644 index 1a8cea2d0e2..00000000000 --- a/docs/zh/operations/table_engines/custom_partitioning_key.md +++ /dev/null @@ -1,116 +0,0 @@ -# 自定义分区键 {#zi-ding-yi-fen-qu-jian} - -[MergeTree](mergetree.md) 系列的表(包括 [可复制表](replication.md) )可以使用分区。基于 MergeTree 表的 [物化视图](materializedview.md) 也支持分区。 - -一个分区是指按指定规则逻辑组合一起的表的记录集。可以按任意标准进行分区,如按月,按日或按事件类型。为了减少需要操作的数据,每个分区都是分开存储的。访问数据时,ClickHouse 尽量使用这些分区的最小子集。 - -分区是在 [建表](mergetree.md#table_engine-mergetree-creating-a-table) 的 `PARTITION BY expr` 子句中指定。分区键可以是关于列的任何表达式。例如,指定按月分区,表达式为 `toYYYYMM(date_column)`: - -``` sql -CREATE TABLE visits -( - VisitDate Date, - Hour UInt8, - ClientID UUID -) -ENGINE = MergeTree() -PARTITION BY toYYYYMM(VisitDate) -ORDER BY Hour; -``` - -分区键也可以是表达式元组(类似 [主键](mergetree.md#primary-keys-and-indexes-in-queries) )。例如: - -``` sql -ENGINE = 
ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign) -PARTITION BY (toMonday(StartDate), EventType) -ORDER BY (CounterID, StartDate, intHash32(UserID)); -``` - -上例中,我们设置按一周内的事件类型分区。 - -新数据插入到表中时,这些数据会存储为按主键排序的新片段(块)。插入后 10-15 分钟,同一分区的各个片段会合并为一整个片段。 - -!!! attention "注意" - 那些有相同分区表达式值的数据片段才会合并。这意味着 **你不应该用太精细的分区方案**(超过一千个分区)。否则,会因为文件系统中的文件数量和需要找开的文件描述符过多,导致 `SELECT` 查询效率不佳。 - -可以通过 [system.parts](../system_tables.md#system_tables-parts) 表查看表片段和分区信息。例如,假设我们有一个 `visits` 表,按月分区。对 `system.parts` 表执行 `SELECT`: - -``` sql -SELECT - partition, - name, - active -FROM system.parts -WHERE table = 'visits' -``` - - ┌─partition─┬─name───────────┬─active─┐ - │ 201901 │ 201901_1_3_1 │ 0 │ - │ 201901 │ 201901_1_9_2 │ 1 │ - │ 201901 │ 201901_8_8_0 │ 0 │ - │ 201901 │ 201901_9_9_0 │ 0 │ - │ 201902 │ 201902_4_6_1 │ 1 │ - │ 201902 │ 201902_10_10_0 │ 1 │ - │ 201902 │ 201902_11_11_0 │ 1 │ - └───────────┴────────────────┴────────┘ - -`partition` 列存储分区的名称。此示例中有两个分区:`201901` 和 `201902`。在 [ALTER … PARTITION](#alter_manipulations-with-partitions) 语句中你可以使用该列值来指定分区名称。 - -`name` 列为分区中数据片段的名称。在 [ALTER ATTACH PART](#alter_attach-partition) 语句中你可以使用此列值中来指定片段名称。 - -这里我们拆解下第一部分的名称:`201901_1_3_1`: - -- `201901` 是分区名称。 -- `1` 是数据块的最小编号。 -- `3` 是数据块的最大编号。 -- `1` 是块级别(即在由块组成的合并树中,该块在树中的深度)。 - -!!! attention "注意" - 旧类型表的片段名称为:`20190117_20190123_2_2_0`(最小日期 - 最大日期 - 最小块编号 - 最大块编号 - 块级别)。 - -`active` 列为片段状态。`1` 激活状态;`0` 非激活状态。非激活片段是那些在合并到较大片段之后剩余的源数据片段。损坏的数据片段也表示为非活动状态。 - -正如在示例中所看到的,同一分区中有几个独立的片段(例如,`201901_1_3_1`和`201901_1_9_2`)。这意味着这些片段尚未合并。ClickHouse 大约在插入后15分钟定期报告合并操作,合并插入的数据片段。此外,你也可以使用 [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) 语句直接执行合并。例: - -``` sql -OPTIMIZE TABLE visits PARTITION 201902; -``` - - ┌─partition─┬─name───────────┬─active─┐ - │ 201901 │ 201901_1_3_1 │ 0 │ - │ 201901 │ 201901_1_9_2 │ 1 │ - │ 201901 │ 201901_8_8_0 │ 0 │ - │ 201901 │ 201901_9_9_0 │ 0 │ - │ 201902 │ 201902_4_6_1 │ 0 │ - │ 201902 │ 201902_4_11_2 │ 1 │ - │ 201902 │ 201902_10_10_0 │ 0 │ - │ 201902 │ 201902_11_11_0 │ 0 │ - └───────────┴────────────────┴────────┘ - -非激活片段会在合并后的10分钟左右删除。 - -查看片段和分区信息的另一种方法是进入表的目录:`/var/lib/clickhouse/data///`。例如: - -``` bash -dev:/var/lib/clickhouse/data/default/visits$ ls -l -total 40 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 201901_1_3_1 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201901_1_9_2 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_8_8_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_9_9_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_10_10_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_11_11_0 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:19 201902_4_11_2 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 12:09 201902_4_6_1 -drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached -``` - -文件夹 ‘201901\_1\_1\_0’,‘201901\_1\_7\_1’ 等是片段的目录。每个片段都与一个对应的分区相关,并且只包含这个月的数据(本例中的表按月分区)。 - -`detached` 目录存放着使用 [DETACH](#alter_detach-partition) 语句从表中分离的片段。损坏的片段也会移到该目录,而不是删除。服务器不使用`detached`目录中的片段。可以随时添加,删除或修改此目录中的数据 – 在运行 [ATTACH](../../query_language/alter.md#alter_attach-partition) 语句前,服务器不会感知到。 - -注意,在操作服务器时,你不能手动更改文件系统上的片段集或其数据,因为服务器不会感知到这些修改。对于非复制表,可以在服务器停止时执行这些操作,但不建议这样做。对于复制表,在任何情况下都不要更改片段文件。 - -ClickHouse 支持对分区执行这些操作:删除分区,从一个表复制到另一个表,或创建备份。了解分区的所有操作,请参阅 [分区和片段的操作](../../query_language/alter.md#alter_manipulations-with-partitions) 一节。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/) diff --git a/docs/zh/operations/table_engines/dictionary.md 
b/docs/zh/operations/table_engines/dictionary.md deleted file mode 100644 index 3bd6b9d78b6..00000000000 --- a/docs/zh/operations/table_engines/dictionary.md +++ /dev/null @@ -1,101 +0,0 @@ -# Dictionary {#dictionary} - -`Dictionary` 引擎将字典数据展示为一个ClickHouse的表。 - -例如,考虑使用一个具有以下配置的 `products` 字典: - -``` xml - - - products - - -
    products
    - DSN=some-db-server - - - - 300 - 360 - - - - - - - product_id - - - title - String - - - - - -``` - -查询字典中的数据: - -``` sql -select name, type, key, attribute.names, attribute.types, bytes_allocated, element_count,source from system.dictionaries where name = 'products'; - -SELECT - name, - type, - key, - attribute.names, - attribute.types, - bytes_allocated, - element_count, - source -FROM system.dictionaries -WHERE name = 'products' -``` - - ┌─name─────┬─type─┬─key────┬─attribute.names─┬─attribute.types─┬─bytes_allocated─┬─element_count─┬─source──────────┐ - │ products │ Flat │ UInt64 │ ['title'] │ ['String'] │ 23065376 │ 175032 │ ODBC: .products │ - └──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ - -你可以使用 [dictGet\*](../../query_language/functions/ext_dict_functions.md) 函数来获取这种格式的字典数据。 - -当你需要获取原始数据,或者是想要使用 `JOIN` 操作的时候,这种视图并没有什么帮助。对于这些情况,你可以使用 `Dictionary` 引擎,它可以将字典数据展示在表中。 - -语法: - - CREATE TABLE %table_name% (%fields%) engine = Dictionary(%dictionary_name%)` - -示例: - -``` sql -create table products (product_id UInt64, title String) Engine = Dictionary(products); - -CREATE TABLE products -( - product_id UInt64, - title String, -) -ENGINE = Dictionary(products) -``` - - Ok. - - 0 rows in set. Elapsed: 0.004 sec. - -看一看表中的内容。 - -``` sql -select * from products limit 1; - -SELECT * -FROM products -LIMIT 1 -``` - - ┌────product_id─┬─title───────────┐ - │ 152689 │ Some item │ - └───────────────┴─────────────────┘ - - 1 rows in set. Elapsed: 0.006 sec. - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/) diff --git a/docs/zh/operations/table_engines/distributed.md b/docs/zh/operations/table_engines/distributed.md deleted file mode 100644 index b81e52348e6..00000000000 --- a/docs/zh/operations/table_engines/distributed.md +++ /dev/null @@ -1,120 +0,0 @@ -# Distributed {#distributed} - -**分布式引擎本身不存储数据**, 但可以在多个服务器上进行分布式查询。 -读是自动并行的。读取时,远程服务器表的索引(如果有的话)会被使用。 -分布式引擎参数:服务器配置文件中的集群名,远程数据库名,远程表名,数据分片键(可选)。 -示例: - - Distributed(logs, default, hits[, sharding_key]) - -将会从位于«logs»集群中 default.hits 表所有服务器上读取数据。 -远程服务器不仅用于读取数据,还会对尽可能数据做部分处理。 -例如,对于使用 GROUP BY 的查询,数据首先在远程服务器聚合,之后返回聚合函数的中间状态给查询请求的服务器。再在请求的服务器上进一步汇总数据。 - -数据库名参数除了用数据库名之外,也可用返回字符串的常量表达式。例如:currentDatabase()。 - -logs – 服务器配置文件中的集群名称。 - -集群示例配置如下: - -``` xml - - - - - 1 - - false - - example01-01-1 - 9000 - - - example01-01-2 - 9000 - - - - 2 - false - - example01-02-1 - 9000 - - - example01-02-2 - 1 - 9440 - - - - -``` - -这里定义了一个名为‘logs’的集群,它由两个分片组成,每个分片包含两个副本。 -分片是指包含数据不同部分的服务器(要读取所有数据,必须访问所有分片)。 -副本是存储复制数据的服务器(要读取所有数据,访问任一副本上的数据即可)。 - -集群名称不能包含点号。 - -每个服务器需要指定 `host`,`port`,和可选的 `user`,`password`,`secure`,`compression` 的参数: -- `host` – 远程服务器地址。可以域名、IPv4或IPv6。如果指定域名,则服务在启动时发起一个 DNS 请求,并且请求结果会在服务器运行期间一直被记录。如果 DNS 请求失败,则服务不会启动。如果你修改了 DNS 记录,则需要重启服务。 -- `port` – 消息传递的 TCP 端口(「tcp\_port」配置通常设为 9000)。不要跟 http\_port 混淆。 -- `user` – 用于连接远程服务器的用户名。默认值:default。该用户必须有权限访问该远程服务器。访问权限配置在 users.xml 文件中。更多信息,请查看«访问权限»部分。 -- `password` – 用于连接远程服务器的密码。默认值:空字符串。 -- `secure` – 是否使用ssl进行连接,设为true时,通常也应该设置 `port` = 9440。服务器也要监听 9440 并有正确的证书。 -- `compression` - 是否使用数据压缩。默认值:true。 - -配置了副本,读取操作会从每个分片里选择一个可用的副本。可配置负载平衡算法(挑选副本的方式) - 请参阅«load\_balancing»设置。 -如果跟服务器的连接不可用,则在尝试短超时的重连。如果重连失败,则选择下一个副本,依此类推。如果跟所有副本的连接尝试都失败,则尝试用相同的方式再重复几次。 -该机制有利于系统可用性,但不保证完全容错:如有远程服务器能够接受连接,但无法正常工作或状况不佳。 - -你可以配置一个(这种情况下,查询操作更应该称为远程查询,而不是分布式查询)或任意多个分片。在每个分片中,可以配置一个或任意多个副本。不同分片可配置不同数量的副本。 - -可以在配置中配置任意数量的集群。 - -要查看集群,可使用«system.clusters»表。 - 
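-
-例如,下面这个查询(仅作示意)可以列出配置中定义的所有集群及其分片和副本:
-
-``` sql
-SELECT cluster, shard_num, replica_num, host_name, port
-FROM system.clusters
-```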
-通过分布式引擎可以像使用本地服务器一样使用集群。但是,集群不是自动扩展的:你必须编写集群配置到服务器配置文件中(最好,给所有集群的服务器写上完整配置)。 - -不支持用分布式表查询别的分布式表(除非该表只有一个分片)。或者说,要用分布表查查询«最终»的数据表。 - -分布式引擎需要将集群信息写入配置文件。配置文件中的集群信息会即时更新,无需重启服务器。如果你每次是要向不确定的一组分片和副本发送查询,则不适合创建分布式表 - 而应该使用«远程»表函数。 请参阅«表函数»部分。 - -向集群写数据的方法有两种: - -一,自已指定要将哪些数据写入哪些服务器,并直接在每个分片上执行写入。换句话说,在分布式表上«查询»,在数据表上 INSERT。 -这是最灵活的解决方案 – 你可以使用任何分片方案,对于复杂业务特性的需求,这可能是非常重要的。 -这也是最佳解决方案,因为数据可以完全独立地写入不同的分片。 - -二,在分布式表上执行 INSERT。在这种情况下,分布式表会跨服务器分发插入数据。 -为了写入分布式表,必须要配置分片键(最后一个参数)。当然,如果只有一个分片,则写操作在没有分片键的情况下也能工作,因为这种情况下分片键没有意义。 - -每个分片都可以在配置文件中定义权重。默认情况下,权重等于1。数据依据分片权重按比例分发到分片上。例如,如果有两个分片,第一个分片的权重是9,而第二个分片的权重是10,则发送 9 / 19 的行到第一个分片, 10 / 19 的行到第二个分片。 - -分片可在配置文件中定义 ‘internal\_replication’ 参数。 - -此参数设置为«true»时,写操作只选一个正常的副本写入数据。如果分布式表的子表是复制表(\*ReplicaMergeTree),请使用此方案。换句话说,这其实是把数据的复制工作交给实际需要写入数据的表本身而不是分布式表。 - -若此参数设置为«false»(默认值),写操作会将数据写入所有副本。实质上,这意味着要分布式表本身来复制数据。这种方式不如使用复制表的好,因为不会检查副本的一致性,并且随着时间的推移,副本数据可能会有些不一样。 - -选择将一行数据发送到哪个分片的方法是,首先计算分片表达式,然后将这个计算结果除以所有分片的权重总和得到余数。该行会发送到那个包含该余数的从’prev\_weight’到’prev\_weights + weight’的半闭半开区间对应的分片上,其中 ‘prev\_weights’ 是该分片前面的所有分片的权重和,‘weight’ 是该分片的权重。例如,如果有两个分片,第一个分片权重为9,而第二个分片权重为10,则余数在 \[0,9) 中的行发给第一个分片,余数在 \[9,19) 中的行发给第二个分片。 - -分片表达式可以是由常量和表列组成的任何返回整数表达式。例如,您可以使用表达式 ‘rand()’ 来随机分配数据,或者使用 ‘UserID’ 来按用户 ID 的余数分布(相同用户的数据将分配到单个分片上,这可降低带有用户信息的 IN 和 JOIN 的语句运行的复杂度)。如果该列数据分布不够均匀,可以将其包装在散列函数中:intHash64(UserID)。 - -这种简单的用余数来选择分片的方案是有局限的,并不总适用。它适用于中型和大型数据(数十台服务器)的场景,但不适用于巨量数据(数百台或更多服务器)的场景。后一种情况下,应根据业务特性需求考虑的分片方案,而不是直接用分布式表的多分片。 - -SELECT 查询会被发送到所有分片,并且无论数据在分片中如何分布(即使数据完全随机分布)都可正常工作。添加新分片时,不必将旧数据传输到该分片。你可以给新分片分配大权重然后写新数据 - 数据可能会稍分布不均,但查询会正确高效地运行。 - -下面的情况,你需要关注分片方案: - -- 使用需要特定键连接数据( IN 或 JOIN )的查询。如果数据是用该键进行分片,则应使用本地 IN 或 JOIN 而不是 GLOBAL IN 或 GLOBAL JOIN,这样效率更高。 -- 使用大量服务器(上百或更多),但有大量小查询(个别客户的查询 - 网站,广告商或合作伙伴)。为了使小查询不影响整个集群,让单个客户的数据处于单个分片上是有意义的。或者,正如我们在 Yandex.Metrica 中所做的那样,你可以配置两级分片:将整个集群划分为«层»,一个层可以包含多个分片。单个客户的数据位于单个层上,根据需要将分片添加到层中,层中的数据随机分布。然后给每层创建分布式表,再创建一个全局的分布式表用于全局的查询。 - -数据是异步写入的。对于分布式表的 INSERT,数据块只写本地文件系统。之后会尽快地在后台发送到远程服务器。你可以通过查看表目录中的文件列表(等待发送的数据)来检查数据是否成功发送:/var/lib/clickhouse/data/database/table/ 。 - -如果在 INSERT 到分布式表时服务器节点丢失或重启(如,设备故障),则插入的数据可能会丢失。如果在表目录中检测到损坏的数据分片,则会将其转移到«broken»子目录,并不再使用。 - -启用 max\_parallel\_replicas 选项后,会在分表的所有副本上并行查询处理。更多信息,请参阅«设置,max\_parallel\_replicas»部分。 - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/zh/operations/table_engines/external_data.md b/docs/zh/operations/table_engines/external_data.md deleted file mode 100644 index d993a796e52..00000000000 --- a/docs/zh/operations/table_engines/external_data.md +++ /dev/null @@ -1,61 +0,0 @@ -# External Data for Query Processing {#external-data-for-query-processing} - -ClickHouse 允许向服务器发送处理查询所需的数据以及 SELECT 查询。这些数据放在一个临时表中(请参阅 «临时表» 一节),可以在查询中使用(例如,在 IN 操作符中)。 - -例如,如果您有一个包含重要用户标识符的文本文件,则可以将其与使用此列表过滤的查询一起上传到服务器。 - -如果需要使用大量外部数据运行多个查询,请不要使用该特性。最好提前把数据上传到数据库。 - -可以使用命令行客户端(在非交互模式下)或使用 HTTP 接口上传外部数据。 - -在命令行客户端中,您可以指定格式的参数部分 - -``` bash ---external --file=... [--name=...] [--format=...] [--types=...|--structure=...] 
-``` - -对于传输的表的数量,可能有多个这样的部分。 - -**–external** – 标记子句的开始。 -**–file** – 带有表存储的文件的路径,或者,它指的是STDIN。 -只能从 stdin 中检索单个表。 - -以下的参数是可选的:**–name** – 表的名称,如果省略,则采用 \_data。 -**–format** – 文件中的数据格式。 如果省略,则使用 TabSeparated。 - -以下的参数必选一个:**–types** – 逗号分隔列类型的列表。例如:`UInt64,String`。列将被命名为 \_1,\_2,… -**–structure**– 表结构的格式 `UserID UInt64`,`URL String`。定义列的名字以及类型。 - -在 «file» 中指定的文件将由 «format» 中指定的格式解析,使用在 «types» 或 «structure» 中指定的数据类型。该表将被上传到服务器,并在作为名称为 «name»临时表。 - -示例: - -``` bash -echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8 -849897 -cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String' -/bin/sh 20 -/bin/false 5 -/bin/bash 4 -/usr/sbin/nologin 1 -/bin/sync 1 -``` - -当使用HTTP接口时,外部数据以 multipart/form-data 格式传递。每个表作为一个单独的文件传输。表名取自文件名。«query\_string» 传递参数 «name\_format»、«name\_types»和«name\_structure»,其中 «name» 是这些参数对应的表的名称。参数的含义与使用命令行客户端时的含义相同。 - -示例: - -``` bash -cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv - -curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String' -/bin/sh 20 -/bin/false 5 -/bin/bash 4 -/usr/sbin/nologin 1 -/bin/sync 1 -``` - -对于分布式查询,将临时表发送到所有远程服务器。 - -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/external_data/) diff --git a/docs/zh/operations/table_engines/file.md b/docs/zh/operations/table_engines/file.md deleted file mode 100644 index 10293130088..00000000000 --- a/docs/zh/operations/table_engines/file.md +++ /dev/null @@ -1,73 +0,0 @@ -# File(InputFormat) {#table_engines-file} - -数据源是以 Clickhouse 支持的一种输入格式(TabSeparated,Native等)存储数据的文件。 - -用法示例: - -- 从 ClickHouse 导出数据到文件。 -- 将数据从一种格式转换为另一种格式。 -- 通过编辑磁盘上的文件来更新 ClickHouse 中的数据。 - -## 在 ClickHouse 服务器中的使用 {#zai-clickhouse-fu-wu-qi-zhong-de-shi-yong} - - File(Format) - -选用的 `Format` 需要支持 `INSERT` 或 `SELECT` 。有关支持格式的完整列表,请参阅 [格式](../../interfaces/formats.md#formats)。 - -ClickHouse 不支持给 `File` 指定文件系统路径。它使用服务器配置中 [path](../server_settings/settings.md) 设定的文件夹。 - -使用 `File(Format)` 创建表时,它会在该文件夹中创建空的子目录。当数据写入该表时,它会写到该子目录中的 `data.Format` 文件中。 - -你也可以在服务器文件系统中手动创建这些子文件夹和文件,然后通过 [ATTACH](../../query_language/misc.md) 将其创建为具有对应名称的表,这样你就可以从该文件中查询数据了。 - -!!! 
注意 "注意" - 注意这个功能,因为 ClickHouse 不会跟踪这些文件在外部的更改。在 ClickHouse 中和 ClickHouse 外部同时写入会造成结果是不确定的。 - -**示例:** - -**1.** 创建 `file_engine_table` 表: - -``` sql -CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated) -``` - -默认情况下,Clickhouse 会创建目录 `/var/lib/clickhouse/data/default/file_engine_table` 。 - -**2.** 手动创建 `/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated` 文件,并且包含内容: - -``` bash -$ cat data.TabSeparated -one 1 -two 2 -``` - -**3.** 查询这些数据: - -``` sql -SELECT * FROM file_engine_table -``` - - ┌─name─┬─value─┐ - │ one │ 1 │ - │ two │ 2 │ - └──────┴───────┘ - -## 在 Clickhouse-local 中的使用 {#zai-clickhouse-local-zhong-de-shi-yong} - -使用 [clickhouse-local](../utils/clickhouse-local.md) 时,File 引擎除了 `Format` 之外,还可以接受文件路径参数。可以使用数字或人类可读的名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。 -**例如:** - -``` bash -$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" -``` - -## 功能实现 {#gong-neng-shi-xian} - -- 读操作可支持并发,但写操作不支持 -- 不支持: - - `ALTER` - - `SELECT ... SAMPLE` - - 索引 - - 副本 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/file/) diff --git a/docs/zh/operations/table_engines/generate.md b/docs/zh/operations/table_engines/generate.md deleted file mode 120000 index 28cd09533e5..00000000000 --- a/docs/zh/operations/table_engines/generate.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/generate.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/graphitemergetree.md b/docs/zh/operations/table_engines/graphitemergetree.md deleted file mode 120000 index 654425d050a..00000000000 --- a/docs/zh/operations/table_engines/graphitemergetree.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/graphitemergetree.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/hdfs.md b/docs/zh/operations/table_engines/hdfs.md deleted file mode 120000 index d4dbfa46e68..00000000000 --- a/docs/zh/operations/table_engines/hdfs.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/hdfs.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/index.md b/docs/zh/operations/table_engines/index.md deleted file mode 100644 index 6a3a752561c..00000000000 --- a/docs/zh/operations/table_engines/index.md +++ /dev/null @@ -1,73 +0,0 @@ -# 表引擎 {#biao-yin-qing} - -表引擎(即表的类型)决定了: - -- 数据的存储方式和位置,写到哪里以及从哪里读取数据 -- 支持哪些查询以及如何支持。 -- 并发数据访问。 -- 索引的使用(如果存在)。 -- 是否可以执行多线程请求。 -- 数据复制参数。 - -# 引擎类型 {#yin-qing-lei-xing} - -## MergeTree {#mergetree} - -适用于高负载任务的最通用和功能最强大的表引擎。这些引擎的共同特点是可以快速插入数据并进行后续的后台数据处理。 MergeTree系列引擎支持数据复制(使用[Replicated\*](replication.md) 的引擎版本),分区和一些其他引擎不支持的其他功能。 - -该类型的引擎: -\* [MergeTree](mergetree.md) -\* [ReplacingMergeTree](replacingmergetree.md) -\* [SummingMergeTree](summingmergetree.md) -\* [AggregatingMergeTree](aggregatingmergetree.md) -\* [CollapsingMergeTree](collapsingmergetree.md) -\* [VersionedCollapsingMergeTree](versionedcollapsingmergetree.md) -\* [GraphiteMergeTree](graphitemergetree.md) - -## Log {#log} - -具有最小功能的[轻量级引擎](log_family.md)。当您需要快速写入许多小表(最多约100万行)并在以后整体读取它们时,该类型的引擎是最有效的。 - -该类型的引擎: - -- \[TinyLog\](tinylog/) -- \[StripeLog\](stripelog/) -- [Log](#log)(log/) - -## Integration engines {#integration-engines} - -用于与其他的数据存储与处理系统集成的引擎。 -该类型的引擎: - -- [Kafka](kafka.md) -- [MySQL](mysql.md) -- [ODBC](odbc.md) -- [JDBC](jdbc.md) -- [HDFS](hdfs.md) - -## 用于其他特定功能的引擎 {#yong-yu-qi-ta-te-ding-gong-neng-de-yin-qing} - -该类型的引擎: - -- 
[Distributed](distributed.md) -- [MaterializedView](materializedview.md) -- [Dictionary](dictionary.md) -- [Merge](merge.md) -- [File](file.md) -- [Null](null.md) -- [Set](set.md) -- [Join](join.md) -- [URL](url.md) -- [View](view.md) -- [Memory](memory.md) -- [Buffer](buffer.md) - -# 虚拟列 {#xu-ni-lie} - -虚拟列是表引擎组成的一部分,它在对应的表引擎的源代码中定义。 - -您不能在 `CREATE TABLE` 中指定虚拟列,并且虚拟列不会包含在 `SHOW CREATE TABLE` 和 `DESCRIBE TABLE` 的查询结果中。虚拟列是只读的,所以您不能向虚拟列中写入数据。 - -如果想要查询虚拟列中的数据,您必须在SELECT查询中包含虚拟列的名字。SELECT \* 不会返回虚拟列的内容。 - -若您创建的表中有一列与虚拟列的名字相同,那么虚拟列将不能再被访问。我们不建议您这样做。为了避免这种列名的冲突,虚拟列的名字一般都以下划线开头。 diff --git a/docs/zh/operations/table_engines/jdbc.md b/docs/zh/operations/table_engines/jdbc.md deleted file mode 120000 index 5165d704b9a..00000000000 --- a/docs/zh/operations/table_engines/jdbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/jdbc.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/join.md b/docs/zh/operations/table_engines/join.md deleted file mode 100644 index 024530cc0b7..00000000000 --- a/docs/zh/operations/table_engines/join.md +++ /dev/null @@ -1,28 +0,0 @@ -# Join {#join} - -加载好的 JOIN 表数据会常驻内存中。 - - Join(ANY|ALL, LEFT|INNER, k1[, k2, ...]) - -引擎参数:`ANY|ALL` – 连接修饰;`LEFT|INNER` – 连接类型。更多信息可参考 [JOIN子句](../../query_language/select.md#select-join)。 -这些参数设置不用带引号,但必须与要 JOIN 表匹配。 k1,k2,……是 USING 子句中要用于连接的关键列。 - -此引擎表不能用于 GLOBAL JOIN 。 - -类似于 Set 引擎,可以使用 INSERT 向表中添加数据。设置为 ANY 时,重复键的数据会被忽略(仅一条用于连接)。设置为 ALL 时,重复键的数据都会用于连接。不能直接对 JOIN 表进行 SELECT。检索其数据的唯一方法是将其作为 JOIN 语句右边的表。 - -跟 Set 引擎类似,Join 引擎把数据存储在磁盘中。 - -### Limitations and Settings {#join-limitations-and-settings} - -When creating a table, the following settings are applied: - -- join\_use\_nulls -- max\_rows\_in\_join -- max\_bytes\_in\_join -- join\_overflow\_mode -- join\_any\_take\_last\_row - -The `Join`-engine tables can’t be used in `GLOBAL JOIN` operations. 
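-
-下面是一个最小用法示例(表名和数据均为假设),演示建表、写入以及作为 JOIN 右表使用:
-
-``` sql
-CREATE TABLE id_val_join (id UInt32, val UInt8) ENGINE = Join(ANY, LEFT, id);
-
-INSERT INTO id_val_join VALUES (1, 11), (1, 12), (2, 13);
-
--- ANY 模式下,重复键 id = 1 只保留一条记录用于连接
-SELECT * FROM (SELECT toUInt32(number) AS id FROM numbers(3)) AS t
-ANY LEFT JOIN id_val_join USING (id);
-```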
- -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/join/) diff --git a/docs/zh/operations/table_engines/kafka.md b/docs/zh/operations/table_engines/kafka.md deleted file mode 100644 index e992a76519e..00000000000 --- a/docs/zh/operations/table_engines/kafka.md +++ /dev/null @@ -1,135 +0,0 @@ -# Kafka {#kafka} - -此引擎与 [Apache Kafka](http://kafka.apache.org/) 结合使用。 - -Kafka 特性: - -- 发布或者订阅数据流。 -- 容错存储机制。 -- 处理流数据。 - - - -老版格式: - - Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format - [, kafka_row_delimiter, kafka_schema, kafka_num_consumers]) - -新版格式: - - Kafka SETTINGS - kafka_broker_list = 'localhost:9092', - kafka_topic_list = 'topic1,topic2', - kafka_group_name = 'group1', - kafka_format = 'JSONEachRow', - kafka_row_delimiter = '\n', - kafka_schema = '', - kafka_num_consumers = 2 - -必要参数: - -- `kafka_broker_list` – 以逗号分隔的 brokers 列表 (`localhost:9092`)。 -- `kafka_topic_list` – topic 列表 (`my_topic`)。 -- `kafka_group_name` – Kafka 消费组名称 (`group1`)。如果不希望消息在集群中重复,请在每个分片中使用相同的组名。 -- `kafka_format` – 消息体格式。使用与 SQL 部分的 `FORMAT` 函数相同表示方法,例如 `JSONEachRow`。了解详细信息,请参考 `Formats` 部分。 - -可选参数: - -- `kafka_row_delimiter` - 每个消息体(记录)之间的分隔符。 -- `kafka_schema` – 如果解析格式需要一个 schema 时,此参数必填。例如,[Cap’n Proto](https://capnproto.org/) 需要 schema 文件路径以及根对象 `schema.capnp:Message` 的名字。 -- `kafka_num_consumers` – 单个表的消费者数量。默认值是:`1`,如果一个消费者的吞吐量不足,则指定更多的消费者。消费者的总数不应该超过 topic 中分区的数量,因为每个分区只能分配一个消费者。 - -示例: - -``` sql - CREATE TABLE queue ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); - - SELECT * FROM queue LIMIT 5; - - CREATE TABLE queue2 ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092', - kafka_topic_list = 'topic', - kafka_group_name = 'group1', - kafka_format = 'JSONEachRow', - kafka_num_consumers = 4; - - CREATE TABLE queue2 ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1') - SETTINGS kafka_format = 'JSONEachRow', - kafka_num_consumers = 4; -``` - -消费的消息会被自动追踪,因此每个消息在不同的消费组里只会记录一次。如果希望获得两次数据,则使用另一个组名创建副本。 - -消费组可以灵活配置并且在集群之间同步。例如,如果群集中有10个主题和5个表副本,则每个副本将获得2个主题。 如果副本数量发生变化,主题将自动在副本中重新分配。了解更多信息请访问 http://kafka.apache.org/intro。 - -`SELECT` 查询对于读取消息并不是很有用(调试除外),因为每条消息只能被读取一次。使用物化视图创建实时线程更实用。您可以这样做: - -1. 使用引擎创建一个 Kafka 消费者并作为一条数据流。 -2. 创建一个结构表。 -3. 
创建物化视图,改视图会在后台转换引擎中的数据并将其放入之前创建的表中。 - -当 `MATERIALIZED VIEW` 添加至引擎,它将会在后台收集数据。可以持续不断地从 Kafka 收集数据并通过 `SELECT` 将数据转换为所需要的格式。 - -示例: - -``` sql - CREATE TABLE queue ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); - - CREATE TABLE daily ( - day Date, - level String, - total UInt64 - ) ENGINE = SummingMergeTree(day, (day, level), 8192); - - CREATE MATERIALIZED VIEW consumer TO daily - AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total - FROM queue GROUP BY day, level; - - SELECT level, sum(total) FROM daily GROUP BY level; -``` - -为了提高性能,接受的消息被分组为 [max\_insert\_block\_size](../settings/settings.md#settings-max_insert_block_size) 大小的块。如果未在 [stream\_flush\_interval\_ms](../settings/settings.md) 毫秒内形成块,则不关心块的完整性,都会将数据刷新到表中。 - -停止接收主题数据或更改转换逻辑,请 detach 物化视图: - - DETACH TABLE consumer; - ATTACH MATERIALIZED VIEW consumer; - -如果使用 `ALTER` 更改目标表,为了避免目标表与视图中的数据之间存在差异,推荐停止物化视图。 - -## 配置 {#pei-zhi} - -与 `GraphiteMergeTree` 类似,Kafka 引擎支持使用ClickHouse配置文件进行扩展配置。可以使用两个配置键:全局 (`kafka`) 和 主题级别 (`kafka_*`)。首先应用全局配置,然后应用主题级配置(如果存在)。 - -``` xml - - - cgrp - smallest - - - - - 250 - 100000 - -``` - -有关详细配置选项列表,请参阅 [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)。在 ClickHouse 配置中使用下划线 (`_`) ,并不是使用点 (`.`)。例如,`check.crcs=true` 将是 `true`。 - -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/kafka/) diff --git a/docs/zh/operations/table_engines/log.md b/docs/zh/operations/table_engines/log.md deleted file mode 100644 index 852575181cd..00000000000 --- a/docs/zh/operations/table_engines/log.md +++ /dev/null @@ -1,5 +0,0 @@ -# Log {#log} - -日志与 TinyLog 的不同之处在于,«标记» 的小文件与列文件存在一起。这些标记写在每个数据块上,并且包含偏移量,这些偏移量指示从哪里开始读取文件以便跳过指定的行数。这使得可以在多个线程中读取表数据。对于并发数据访问,可以同时执行读取操作,而写入操作则阻塞读取和其它写入。Log 引擎不支持索引。同样,如果写入表失败,则该表将被破坏,并且从该表读取将返回错误。Log 引擎适用于临时数据,write-once 表以及测试或演示目的。 - -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/log/) diff --git a/docs/zh/operations/table_engines/log_family.md b/docs/zh/operations/table_engines/log_family.md deleted file mode 100644 index 9ce3ab95b55..00000000000 --- a/docs/zh/operations/table_engines/log_family.md +++ /dev/null @@ -1,45 +0,0 @@ -# 日志引擎系列 {#table_engines-log-engine-family} - -这些引擎是为了需要写入许多小数据量(少于一百万行)的表的场景而开发的。 - -这系列的引擎有: - -- [StripeLog](stripelog.md) -- [Log](log.md) -- [TinyLog](tinylog.md) - -## 共同属性 {#table_engines-log-engine-family-common-properties} - -引擎: - -- 数据存储在磁盘上。 - -- 写入时将数据追加在文件末尾。 - -- 不支持[突变](../../query_language/alter.md#alter-mutations)操作。 - -- 不支持索引。 - - 这意味着 `SELECT` 在范围查询时效率不高。 - -- 非原子地写入数据。 - - 如果某些事情破坏了写操作,例如服务器的异常关闭,你将会得到一张包含了损坏数据的表。 - -## 差异 {#table_engines-log-engine-family-differences} - -`Log` 和 `StripeLog` 引擎支持: - -- 并发访问数据的锁。 - - `INSERT` 请求执行过程中表会被锁定,并且其他的读写数据的请求都会等待直到锁定被解除。如果没有写数据的请求,任意数量的读请求都可以并发执行。 - -- 并行读取数据。 - - 在读取数据时,ClickHouse 使用多线程。 每个线程处理不同的数据块。 - -`Log` 引擎为表中的每一列使用不同的文件。`StripeLog` 将所有的数据存储在一个文件中。因此 `StripeLog` 引擎在操作系统中使用更少的描述符,但是 `Log` 引擎提供更高的读性能。 - -`TingLog` 引擎是该系列中最简单的引擎并且提供了最少的功能和最低的性能。`TingLog` 引擎不支持并行读取和并发数据访问,并将每一列存储在不同的文件中。它比其余两种支持并行读取的引擎的读取速度更慢,并且使用了和 `Log` 引擎同样多的描述符。你可以在简单的低负载的情景下使用它。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) diff --git a/docs/zh/operations/table_engines/materializedview.md b/docs/zh/operations/table_engines/materializedview.md deleted file mode 100644 index b22700fe3c6..00000000000 --- a/docs/zh/operations/table_engines/materializedview.md +++ /dev/null @@ -1,5 +0,0 @@ -# 
物化视图 {#wu-hua-shi-tu} - -物化视图的使用(更多信息请参阅 [CREATE TABLE](../../query_language/create.md) )。它需要使用一个不同的引擎来存储数据,这个引擎要在创建物化视图时指定。当从表中读取时,它就会使用该引擎。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/zh/operations/table_engines/memory.md b/docs/zh/operations/table_engines/memory.md deleted file mode 100644 index a48308f7b17..00000000000 --- a/docs/zh/operations/table_engines/memory.md +++ /dev/null @@ -1,7 +0,0 @@ -# Memory {#memory} - -Memory 引擎以未压缩的形式将数据存储在 RAM 中。数据完全以读取时获得的形式存储。换句话说,从这张表中读取是很轻松的。并发数据访问是同步的。锁范围小:读写操作不会相互阻塞。不支持索引。阅读是并行化的。在简单查询上达到最大生产率(超过10 GB /秒),因为没有磁盘读取,不需要解压缩或反序列化数据。(值得注意的是,在许多情况下,与 MergeTree 引擎的性能几乎一样高)。重新启动服务器时,表中的数据消失,表将变为空。通常,使用此表引擎是不合理的。但是,它可用于测试,以及在相对较少的行(最多约100,000,000)上需要最高性能的查询。 - -Memory 引擎是由系统用于临时表进行外部数据的查询(请参阅 «外部数据用于请求处理» 部分),以及用于实现 `GLOBAL IN`(请参见 «IN 运算符» 部分)。 - -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/memory/) diff --git a/docs/zh/operations/table_engines/merge.md b/docs/zh/operations/table_engines/merge.md deleted file mode 100644 index bbcbf8772b4..00000000000 --- a/docs/zh/operations/table_engines/merge.md +++ /dev/null @@ -1,63 +0,0 @@ -# Merge {#merge} - -`Merge` 引擎 (不要跟 `MergeTree` 引擎混淆) 本身不存储数据,但可用于同时从任意多个其他的表中读取数据。 -读是自动并行的,不支持写入。读取时,那些被真正读取到数据的表的索引(如果有的话)会被使用。 -`Merge` 引擎的参数:一个数据库名和一个用于匹配表名的正则表达式。 - -示例: - - Merge(hits, '^WatchLog') - -数据会从 `hits` 数据库中表名匹配正则 ‘`^WatchLog`’ 的表中读取。 - -除了数据库名,你也可以用一个返回字符串的常量表达式。例如, `currentDatabase()` 。 - -正则表达式 — [re2](https://github.com/google/re2) (支持 PCRE 一个子集的功能),大小写敏感。 -了解关于正则表达式中转义字符的说明可参看 «match» 一节。 - -当选择需要读的表时,`Merge` 表本身会被排除,即使它匹配上了该正则。这样设计为了避免循环。 -当然,是能够创建两个相互无限递归读取对方数据的 `Merge` 表的,但这并没有什么意义。 - -`Merge` 引擎的一个典型应用是可以像使用一张表一样使用大量的 `TinyLog` 表。 - -示例 2 : - -我们假定你有一个旧表(WatchLog\_old),你想改变数据分区了,但又不想把旧数据转移到新表(WatchLog\_new)里,并且你需要同时能看到这两个表的数据。 - - CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) - ENGINE=MergeTree(date, (UserId, EventType), 8192); - INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3); - - CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) - ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; - INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3); - - CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog'); - - SELECT * - FROM WatchLog - - ┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ - │ 2018-01-01 │ 1 │ hit │ 3 │ - └────────────┴────────┴───────────┴─────┘ - ┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ - │ 2018-01-02 │ 2 │ hit │ 3 │ - └────────────┴────────┴───────────┴─────┘ - -## 虚拟列 {#xu-ni-lie} - -虚拟列是一种由表引擎提供而不是在表定义中的列。换种说法就是,这些列并没有在 `CREATE TABLE` 中指定,但可以在 `SELECT` 中使用。 - -下面列出虚拟列跟普通列的不同点: - -- 虚拟列不在表结构定义里指定。 -- 不能用 `INSERT` 向虚拟列写数据。 -- 使用不指定列名的 `INSERT` 语句时,虚拟列要会被忽略掉。 -- 使用星号通配符( `SELECT *` )时虚拟列不会包含在里面。 -- 虚拟列不会出现在 `SHOW CREATE TABLE` 和 `DESC TABLE` 的查询结果里。 - -`Merge` 类型的表包括一个 `String` 类型的 `_table` 虚拟列。(如果该表本来已有了一个 `_table` 的列,那这个虚拟列会命名为 `_table1` ;如果 `_table1` 也本就存在了,那这个虚拟列会被命名为 `_table2` ,依此类推)该列包含被读数据的表名。 - -如果 `WHERE/PREWHERE` 子句包含了带 `_table` 的条件,并且没有依赖其他的列(如作为表达式谓词链接的一个子项或作为整个的表达式),这些条件的作用会像索引一样。这些条件会在那些可能被读数据的表的表名上执行,并且读操作只会在那些满足了该条件的表上去执行。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/merge/) diff --git a/docs/zh/operations/table_engines/mergetree.md b/docs/zh/operations/table_engines/mergetree.md deleted file mode 100644 index 3ae2e9bc615..00000000000 --- a/docs/zh/operations/table_engines/mergetree.md +++ /dev/null @@ -1,394 +0,0 @@ 
-# MergeTree {#table_engines-mergetree} - -Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该系列(`*MergeTree`)中的其他引擎。 - -`MergeTree` 引擎系列的基本理念如下。当你有巨量数据要插入到表中,你要高效地一批批写入数据片段,并希望这些数据片段在后台按照一定规则合并。相比在插入时不断修改(重写)数据进存储,这种策略会高效很多。 - -主要特点: - -- 存储的数据按主键排序。 - - 这让你可以创建一个用于快速检索数据的小稀疏索引。 - -- 允许使用分区,如果指定了 [分区键](custom_partitioning_key.md) 的话。 - - 在相同数据集和相同结果集的情况下 ClickHouse 中某些带分区的操作会比普通操作更快。查询中指定了分区键时 ClickHouse 会自动截取分区数据。这也有效增加了查询性能。 - -- 支持数据副本。 - - `ReplicatedMergeTree` 系列的表便是用于此。更多信息,请参阅 [数据副本](replication.md) 一节。 - -- 支持数据采样。 - - 需要的话,你可以给表设置一个采样方法。 - -!!! 注意 "注意" - [Merge](merge.md) 引擎并不属于 `*MergeTree` 系列。 - -## 建表 {#table_engine-mergetree-creating-a-table} - - CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - ( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... - INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1, - INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2 - ) ENGINE = MergeTree() - [PARTITION BY expr] - [ORDER BY expr] - [PRIMARY KEY expr] - [SAMPLE BY expr] - [SETTINGS name=value, ...] - -请求参数的描述,参考 [请求描述](../../query_language/create.md) 。 - - - -**子句** - -- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`. `MergeTree` 引擎没有参数。 - -- `PARTITION BY` — [分区键](custom_partitioning_key.md) 。 - - 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../data_types/date.md) 类型的列。这里该分区名格式会是 `"YYYYMM"` 这样。 - -- `ORDER BY` — 表的排序键。 - - 可以是一组列的元组或任意的表达式。 例如: `ORDER BY (CounterID, EventDate)` 。 - -- `PRIMARY KEY` - 主键,如果要设成 [跟排序键不相同](mergetree.md)。 - - 默认情况下主键跟排序键(由 `ORDER BY` 子句指定)相同。 - 因此,大部分情况下不需要再专门指定一个 `PRIMARY KEY` 子句。 - -- `SAMPLE BY` — 用于抽样的表达式。 - - 如果要用抽样表达式,主键中必须包含这个表达式。例如: - `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))` 。 - -- `SETTINGS` — 影响 `MergeTree` 性能的额外参数: - - - `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。默认值,8192 。该列表中所有可用的参数可以从这里查看 [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Storages/MergeTree/MergeTreeSettings.h) 。 - - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果仅按数据行数限制索引粒度, 请设置为0(不建议)。 - - `enable_mixed_granularity_parts` — 启用或禁用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从大表(数十或数百兆)中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果你的表内数据量很大,可以开启这项配置用以提升`SELECT` 查询的性能。 - - `use_minimalistic_part_header_in_zookeeper` — 数据片段头在 ZooKeeper 中的存储方式。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 - - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。 - - - `merge_with_ttl_timeout` — TTL合并频率的最小间隔时间。默认值: 86400 (1 天)。 - - `write_final_mark` — 启用或禁用在数据片段尾部写入最终索引标记。默认值: 1(不建议更改)。 - - `storage_policy` — 存储策略。 参见 [使用多个区块装置进行数据存储](#table_engine-mergetree-multiple-volumes). 
- -**示例配置** - - ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192 - -示例中,我们设为按月分区。 - -同时我们设置了一个按用户ID哈希的抽样表达式。这让你可以有该表中每个 `CounterID` 和 `EventDate` 下面的数据的伪随机分布。如果你在查询时指定了 [SAMPLE](../../query_language/select.md#select-sample-clause) 子句。 ClickHouse会返回对于用户子集的一个均匀的伪随机数据采样。 - -`index_granularity` 可省略,默认值为 8192 。 - -
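下面补充一个简短的查询示意(表名 `hits` 与列均为假设,并非上文示例的一部分):在按上述方式用 `intHash32(UserID)` 配置抽样表达式后,就可以在查询中使用 `SAMPLE` 子句对大表做近似统计:

``` sql
-- 只读取大约 10% 的数据,得到各 CounterID 的近似计数
SELECT CounterID, count() AS c
FROM hits
SAMPLE 1 / 10
GROUP BY CounterID
ORDER BY c DESC
LIMIT 10
```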
    - -已弃用的建表方法 - -!!! attention "注意" - 不要在新版项目中使用该方法,可能的话,请将旧项目切换到上述方法。 - - CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - ( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... - ) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity) - -**MergeTree() 参数** - -- `date-column` — 类型为 [Date](../../data_types/date.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。 -- `sampling_expression` — 采样表达式。 -- `(primary, key)` — 主键。类型 — [Tuple()](../../data_types/tuple.md) -- `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。设为 8192 可以适用大部分场景。 - -**示例** - - MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192) - -对于主要的配置方法,这里 `MergeTree` 引擎跟前面的例子一样,可以以同样的方式配置。 -
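作为对照,下面给出一个与上面已弃用写法大致等价的完整建表语句示意(表名为假设),使用的是本文推荐的新语法:

``` sql
CREATE TABLE hits_new
(
    EventDate Date,
    CounterID UInt32,
    UserID UInt32
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS index_granularity = 8192
```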
    - -## 数据存储 {#mergetree-data-storage} - -表由按主键排序的数据 *片段* 组成。 - -当数据被插入到表中时,会分成数据片段并按主键的字典序排序。例如,主键是 `(CounterID, Date)` 时,片段中数据按 `CounterID` 排序,具有相同 `CounterID` 的部分按 `Date` 排序。 - -不同分区的数据会被分成不同的片段,ClickHouse 在后台合并数据片段以便更高效存储。不会合并来自不同分区的数据片段。这个合并机制并不保证相同主键的所有行都会合并到同一个数据片段中。 - -ClickHouse 会为每个数据片段创建一个索引文件,索引文件包含每个索引行(『标记』)的主键值。索引行号定义为 `n * index_granularity` 。最大的 `n` 等于总行数除以 `index_granularity` 的值的整数部分。对于每列,跟主键相同的索引行处也会写入『标记』。这些『标记』让你可以直接找到数据所在的列。 - -你可以只用一单一大表并不断地一块块往里面加入数据 – `MergeTree` 引擎的就是为了这样的场景。 - -## 主键和索引在查询中的表现 {#primary-keys-and-indexes-in-queries} - -我们以 `(CounterID, Date)` 以主键。排序好的索引的图示会是下面这样: - - 全部数据 : [-------------------------------------------------------------------------] - CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] - Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] - 标记: | | | | | | | | | | | - a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 - 标记号: 0 1 2 3 4 5 6 7 8 9 10 - -如果指定查询如下: - -- `CounterID in ('a', 'h')`,服务器会读取标记号在 `[0, 3)` 和 `[6, 8)` 区间中的数据。 -- `CounterID IN ('a', 'h') AND Date = 3`,服务器会读取标记号在 `[1, 3)` 和 `[7, 8)` 区间中的数据。 -- `Date = 3`,服务器会读取标记号在 `[1, 10]` 区间中的数据。 - -上面例子可以看出使用索引通常会比全表描述要高效。 - -稀疏索引会引起额外的数据读取。当读取主键单个区间范围的数据时,每个数据块中最多会多读 `index_granularity * 2` 行额外的数据。大部分情况下,当 `index_granularity = 8192` 时,ClickHouse的性能并不会降级。 - -稀疏索引让你能操作有巨量行的表。因为这些索引是常驻内存(RAM)的。 - -ClickHouse 不要求主键惟一。所以,你可以插入多条具有相同主键的行。 - -### 主键的选择 {#zhu-jian-de-xuan-ze} - -主键中列的数量并没有明确的限制。依据数据结构,你应该让主键包含多些或少些列。这样可以: - -- 改善索引的性能。 - - 如果当前主键是 `(a, b)` ,然后加入另一个 `c` 列,满足下面条件时,则可以改善性能: - - 有带有 `c` 列条件的查询。 - - 很长的数据范围( `index_granularity` 的数倍)里 `(a, b)` 都是相同的值,并且这种的情况很普遍。换言之,就是加入另一列后,可以让你的查询略过很长的数据范围。 - -- 改善数据压缩。 - - ClickHouse 以主键排序片段数据,所以,数据的一致性越高,压缩越好。 - -- [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里,数据合并时,会有额外的处理逻辑。 - - 在这种情况下,指定一个跟主键不同的 *排序键* 也是有意义的。 - -长的主键会对插入性能和内存消耗有负面影响,但主键中额外的列并不影响 `SELECT` 查询的性能。 - -### 选择跟排序键不一样主键 {#xuan-ze-gen-pai-xu-jian-bu-yi-yang-zhu-jian} - -指定一个跟排序键(用于排序数据片段中行的表达式) -不一样的主键(用于计算写到索引文件的每个标记值的表达式)是可以的。 -这种情况下,主键表达式元组必须是排序键表达式元组的一个前缀。 - -当使用 [SummingMergeTree](summingmergetree.md) 和 -[AggregatingMergeTree](aggregatingmergetree.md) 引擎时,这个特性非常有用。 -通常,使用这类引擎时,表里列分两种:*维度* 和 *度量* 。 -典型的查询是在 `GROUP BY` 并过虑维度的情况下统计度量列的值。 -像 SummingMergeTree 和 AggregatingMergeTree ,用相同的排序键值统计行时, -通常会加上所有的维度。结果就是,这键的表达式会是一长串的列组成, -并且这组列还会因为新加维度必须频繁更新。 - -这种情况下,主键中仅预留少量列保证高效范围扫描, -剩下的维度列放到排序键元组里。这样是合理的。 - -[排序键的修改](../../query_language/alter.md) 是轻量级的操作,因为一个新列同时被加入到表里和排序键后时,已存在的数据片段并不需要修改。由于旧的排序键是新排序键的前缀,并且刚刚添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。 - -### 索引和分区在查询中的应用 {#suo-yin-he-fen-qu-zai-cha-xun-zhong-de-ying-yong} - -对于 `SELECT` 查询,ClickHouse 分析是否可以使用索引。如果 `WHERE/PREWHERE` 子句具有下面这些表达式(作为谓词链接一子项或整个)则可以使用索引:基于主键或分区键的列或表达式的部分的等式或比较运算表达式;基于主键或分区键的列或表达式的固定前缀的 `IN` 或 `LIKE` 表达式;基于主键或分区键的列的某些函数;基于主键或分区键的表达式的逻辑表达式。 - -因此,在索引键的一个或多个区间上快速地跑查询都是可能的。下面例子中,指定标签;指定标签和日期范围;指定标签和日期;指定多个标签和日期范围等运行查询,都会非常快。 - -当引擎配置如下时: - - ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 - -这种情况下,这些查询: - -``` sql -SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34 -SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42) -SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate 
!= toDate('2014-05-01')) -``` - -ClickHouse 会依据主键索引剪掉不符合的数据,依据按月分区的分区键剪掉那些不包含符合数据的分区。 - -上文的查询显示,即使索引用于复杂表达式。因为读表操作是组织好的,所以,使用索引不会比完整扫描慢。 - -下面这个例子中,不会使用索引。 - -``` sql -SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' -``` - -要检查 ClickHouse 执行一个查询时能否使用索引,可设置 [force\_index\_by\_date](../settings/settings.md#settings-force_index_by_date) 和 [force\_primary\_key](../settings/settings.md) 。 - -按月分区的分区键是只能读取包含适当范围日期的数据块。这种情况下,数据块会包含很多天(最多整月)的数据。在块中,数据按主键排序,主键第一列可能不包含日期。因此,仅使用日期而没有带主键前缀条件的查询将会导致读取超过这个日期范围。 - -### 跳数索引(分段汇总索引,实验性的) {#tiao-shu-suo-yin-fen-duan-hui-zong-suo-yin-shi-yan-xing-de} - -需要设置 `allow_experimental_data_skipping_indices` 为 1 才能使用此索引。(执行 `SET allow_experimental_data_skipping_indices = 1`)。 - -此索引在 `CREATE` 语句的列部分里定义。 - -``` sql -INDEX index_name expr TYPE type(...) GRANULARITY granularity_value -``` - -`*MergeTree` 系列的表都能指定跳数索引。 - -这些索引是由数据块按粒度分割后的每部分在指定表达式上汇总信息 `granularity_value` 组成(粒度大小用表引擎里 `index_granularity` 的指定)。 -这些汇总信息有助于用 `where` 语句跳过大片不满足的数据,从而减少 `SELECT` 查询从磁盘读取的数据量, - -示例 - -``` sql -CREATE TABLE table_name -( - u64 UInt64, - i32 Int32, - s String, - ... - INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3, - INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4 -) ENGINE = MergeTree() -... -``` - -上例中的索引能让 ClickHouse 执行下面这些查询时减少读取数据量。 - -``` sql -SELECT count() FROM table WHERE s < 'z' -SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 -``` - -#### 索引的可用类型 {#table_engine-mergetree-data_skipping-indexes} - -- `minmax` - 存储指定表达式的极值(如果表达式是 `tuple` ,则存储 `tuple` 中每个元素的极值),这些信息用于跳过数据块,类似主键。 - -- `set(max_rows)` - 存储指定表达式的惟一值(不超过 `max_rows` 个,`max_rows=0` 则表示『无限制』)。这些信息可用于检查 `WHERE` 表达式是否满足某个数据块。 - -- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - 存储包含数据块中所有 n 元短语的 [布隆过滤器](https://en.wikipedia.org/wiki/Bloom_filter) 。只可用在字符串上。 - 可用于优化 `equals` , `like` 和 `in` 表达式的性能。 - `n` – 短语长度。 - `size_of_bloom_filter_in_bytes` – 布隆过滤器大小,单位字节。(因为压缩得好,可以指定比较大的值,如256或512)。 - `number_of_hash_functions` – 布隆过滤器中使用的 hash 函数的个数。 - `random_seed` – hash 函数的随机种子。 - -- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - 跟 `ngrambf_v1` 类似,不同于 ngrams 存储字符串指定长度的所有片段。它只存储被非字母数据字符分割的片段。 - - - -``` sql -INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 -INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4 -INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 -``` - -## 并发数据访问 {#bing-fa-shu-ju-fang-wen} - -应对表的并发访问,我们使用多版本机制。换言之,当同时读和更新表时,数据从当前查询到的一组片段中读取。没有冗长的的锁。插入不会阻碍读取。 - -对表的读操作是自动并行的。 - -## 列和表的TTL {#table_engine-mergetree-ttl} - -TTL可以设置值的生命周期,它既可以为整张表设置,也可以为每个列字段单独设置。如果`TTL`同时作用于表和字段,ClickHouse会使用先到期的那个。 - -被设置TTL的表,必须拥有[Date](../../data_types/date.md) 或 [DateTime](../../data_types/datetime.md) 类型的字段。要定义数据的生命周期,需要在这个日期字段上使用操作符,例如: - -``` sql -TTL time_column -TTL time_column + interval -``` - -要定义`interval`, 需要使用 [time interval](../../query_language/operators.md#operators-datetime) 操作符。 - -``` sql -TTL date_time + INTERVAL 1 MONTH -TTL date_time + INTERVAL 15 HOUR -``` - -### 列字段 TTL {#mergetree-column-ttl} - -当列字段中的值过期时, ClickHouse会将它们替换成数据类型的默认值。如果分区内,某一列的所有值均已过期,则ClickHouse会从文件系统中删除这个分区目录下的列文件。 - -`TTL`子句不能被用于主键字段。 - -示例说明: - -创建一张包含 `TTL` 的表 - -``` sql -CREATE TABLE example_table -( - d DateTime, - a Int TTL d + INTERVAL 1 MONTH, - b Int TTL d + INTERVAL 1 MONTH, - c String -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d; -``` - -为表中已存在的列字段添加 `TTL` - -``` sql -ALTER TABLE 
example_table - MODIFY COLUMN - c String TTL d + INTERVAL 1 DAY; -``` - -修改列字段的 `TTL` - -``` sql -ALTER TABLE example_table - MODIFY COLUMN - c String TTL d + INTERVAL 1 MONTH; -``` - -### 表 TTL {#mergetree-table-ttl} - -当表内的数据过期时, ClickHouse会删除所有对应的行。 - -举例说明: - -创建一张包含 `TTL` 的表 - -``` sql -CREATE TABLE example_table -( - d DateTime, - a Int -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d -TTL d + INTERVAL 1 MONTH; -``` - -修改表的 `TTL` - -``` sql -ALTER TABLE example_table - MODIFY TTL d + INTERVAL 1 DAY; -``` - -**删除数据** - -当ClickHouse合并数据分区时, 会删除TTL过期的数据。 - -当ClickHouse发现数据过期时, 它将会执行一个计划外的合并。要控制这类合并的频率, 你可以设置 [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout)。如果该值被设置的太低, 它将导致执行许多的计划外合并,这可能会消耗大量资源。 - -如果在合并的时候执行`SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在`SELECT`之前使用 [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) 查询。 - -## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes} - -### Configuration {#table_engine-mergetree-multiple-volumes-configure} - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/mergetree/) diff --git a/docs/zh/operations/table_engines/mysql.md b/docs/zh/operations/table_engines/mysql.md deleted file mode 100644 index e97f5f12106..00000000000 --- a/docs/zh/operations/table_engines/mysql.md +++ /dev/null @@ -1,25 +0,0 @@ -# MySQL {#mysql} - -MySQL 引擎可以对存储在远程 MySQL 服务器上的数据执行 `SELECT` 查询。 - -调用格式: - - MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); - -**调用参数** - -- `host:port` — MySQL 服务器地址。 -- `database` — 数据库的名称。 -- `table` — 表名称。 -- `user` — 数据库用户。 -- `password` — 用户密码。 -- `replace_query` — 将 `INSERT INTO` 查询是否替换为 `REPLACE INTO` 的标志。如果 `replace_query=1`,则替换查询 -- `'on_duplicate_clause'` — 将 `ON DUPLICATE KEY UPDATE 'on_duplicate_clause'` 表达式添加到 `INSERT` 查询语句中。例如:`impression = VALUES(impression) + impression`。如果需要指定 `'on_duplicate_clause'`,则需要设置 `replace_query=0`。如果同时设置 `replace_query = 1` 和 `'on_duplicate_clause'`,则会抛出异常。 - -此时,简单的 `WHERE` 子句(例如 `=, !=, >, >=, <, <=`)是在 MySQL 服务器上执行。 - -其余条件以及 `LIMIT` 采样约束语句仅在对MySQL的查询完成后才在ClickHouse中执行。 - -`MySQL` 引擎不支持 [Nullable](../../data_types/nullable.md) 数据类型,因此,当从MySQL表中读取数据时,`NULL` 将转换为指定列类型的默认值(通常为0或空字符串)。 - -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/mysql/) diff --git a/docs/zh/operations/table_engines/null.md b/docs/zh/operations/table_engines/null.md deleted file mode 100644 index 94f731f756d..00000000000 --- a/docs/zh/operations/table_engines/null.md +++ /dev/null @@ -1,7 +0,0 @@ -# Null {#null} - -当写入 Null 类型的表时,将忽略数据。从 Null 类型的表中读取时,返回空。 - -但是,可以在 Null 类型的表上创建物化视图。写入表的数据将转发到视图中。 - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/null/) diff --git a/docs/zh/operations/table_engines/odbc.md b/docs/zh/operations/table_engines/odbc.md deleted file mode 120000 index 06091fd5377..00000000000 --- a/docs/zh/operations/table_engines/odbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/odbc.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/replacingmergetree.md b/docs/zh/operations/table_engines/replacingmergetree.md deleted file mode 100644 index 66c3246f272..00000000000 --- a/docs/zh/operations/table_engines/replacingmergetree.md +++ /dev/null @@ -1,60 +0,0 @@ -# ReplacingMergeTree {#replacingmergetree} - -该引擎和[MergeTree](mergetree.md)的不同之处在于它会删除具有相同主键的重复项。 - -数据的去重只会在合并的过程中出现。合并会在未知的时间在后台进行,因此你无法预先作出计划。有一些数据可能仍未被处理。尽管你可以调用 `OPTIMIZE` 语句发起计划外的合并,但请不要指望使用它,因为 `OPTIMIZE` 
语句会引发对大量数据的读和写。 - -因此,`ReplacingMergeTree` 适用于在后台清除重复的数据以节省空间,但是它不保证没有重复的数据出现。 - -## 建表 {#jian-biao} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = ReplacingMergeTree([ver]) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -请求参数的描述,参考[请求参数](../../query_language/create.md)。 - -**ReplacingMergeTree Parameters** - -- `ver` — 版本列。类型为 `UInt*`, `Date` 或 `DateTime`。可选参数。 - - 合并的时候,`ReplacingMergeTree` 从所有具有相同主键的行中选择一行留下: - - 如果 `ver` 列未指定,选择最后一条。 - - 如果 `ver` 列已指定,选择 `ver` 值最大的版本。 - -**子句** - -创建 `ReplacingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md)。 - -
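下面补充一个最小示意(表名与数据均为假设):用上文的 `ver` 列在合并时保留同一排序键下版本最大的行。如前所述,去重只在合并时发生,这里用 `OPTIMIZE ... FINAL` 强制合并以便观察效果(注意该语句开销较大,不应依赖它):

``` sql
CREATE TABLE replacing_demo
(
    key UInt32,
    value String,
    ver UInt32
) ENGINE = ReplacingMergeTree(ver)
ORDER BY key;

INSERT INTO replacing_demo VALUES (1, 'first', 1);
INSERT INTO replacing_demo VALUES (1, 'second', 2);

OPTIMIZE TABLE replacing_demo FINAL;

SELECT * FROM replacing_demo;  -- 合并后只保留 (1, 'second', 2)
```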
    - -已弃用的建表方法 - -!!! attention "注意" - 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver]) -``` - -除了 `ver` 的所有参数都与 `MergeTree` 中的含义相同。 - -- `ver` - 版本列。可选参数,有关说明,请参阅上文。 - -
    - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/) diff --git a/docs/zh/operations/table_engines/replication.md b/docs/zh/operations/table_engines/replication.md deleted file mode 100644 index c2f4d3eb849..00000000000 --- a/docs/zh/operations/table_engines/replication.md +++ /dev/null @@ -1,202 +0,0 @@ -# 数据副本 {#table_engines-replication} - -只有 MergeTree 系列里的表可支持副本: - -- ReplicatedMergeTree -- ReplicatedSummingMergeTree -- ReplicatedReplacingMergeTree -- ReplicatedAggregatingMergeTree -- ReplicatedCollapsingMergeTree -- ReplicatedVersionedCollapsingMergeTree -- ReplicatedGraphiteMergeTree - -副本是表级别的,不是整个服务器级的。所以,服务器里可以同时有复制表和非复制表。 - -副本不依赖分片。每个分片有它自己的独立副本。 - -对于 `INSERT` 和 `ALTER` 语句操作数据的会在压缩的情况下被复制(更多信息,看 [ALTER](../../query_language/alter.md#query_language_queries_alter) )。 - -而 `CREATE`,`DROP`,`ATTACH`,`DETACH` 和 `RENAME` 语句只会在单个服务器上执行,不会被复制。 - -- `The CREATE TABLE` 在运行此语句的服务器上创建一个新的可复制表。如果此表已存在其他服务器上,则给该表添加新副本。 -- `The DROP TABLE` 删除运行此查询的服务器上的副本。 -- `The RENAME` 重命名一个副本。换句话说,可复制表不同的副本可以有不同的名称。 - -要使用副本,需在配置文件中设置 ZooKeeper 集群的地址。例如: - -``` xml - - - example1 - 2181 - - - example2 - 2181 - - - example3 - 2181 - - -``` - -需要 ZooKeeper 3.4.5 或更高版本。 - -你可以配置任何现有的 ZooKeeper 集群,系统会使用里面的目录来存取元数据(该目录在创建可复制表时指定)。 - -如果配置文件中没有设置 ZooKeeper ,则无法创建复制表,并且任何现有的复制表都将变为只读。 - -`SELECT` 查询并不需要借助 ZooKeeper ,复本并不影响 `SELECT` 的性能,查询复制表与非复制表速度是一样的。查询分布式表时,ClickHouse的处理方式可通过设置 [max\_replica\_delay\_for\_distributed\_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) 和 [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../settings/settings.md) 修改。 - -对于每个 `INSERT` 语句,会通过几个事务将十来个记录添加到 ZooKeeper。(确切地说,这是针对每个插入的数据块; 每个 INSERT 语句的每 `max_insert_block_size = 1048576` 行和最后剩余的都各算作一个块。)相比非复制表,写 zk 会导致 `INSERT` 的延迟略长一些。但只要你按照建议每秒不超过一个 `INSERT` 地批量插入数据,不会有任何问题。一个 ZooKeeper 集群能给整个 ClickHouse 集群支撑协调每秒几百个 `INSERT`。数据插入的吞吐量(每秒的行数)可以跟不用复制的数据一样高。 - -对于非常大的集群,你可以把不同的 ZooKeeper 集群用于不同的分片。然而,即使 Yandex.Metrica 集群(大约300台服务器)也证明还不需要这么做。 - -复制是多主异步。 `INSERT` 语句(以及 `ALTER` )可以发给任意可用的服务器。数据会先插入到执行该语句的服务器上,然后被复制到其他服务器。由于它是异步的,在其他副本上最近插入的数据会有一些延迟。如果部分副本不可用,则数据在其可用时再写入。副本可用的情况下,则延迟时长是通过网络传输压缩数据块所需的时间。 - -默认情况下,INSERT 语句仅等待一个副本写入成功后返回。如果数据只成功写入一个副本后该副本所在的服务器不再存在,则存储的数据会丢失。要启用数据写入多个副本才确认返回,使用 `insert_quorum` 选项。 - -单个数据块写入是原子的。 INSERT 的数据按每块最多 `max_insert_block_size = 1048576` 行进行分块,换句话说,如果 `INSERT` 插入的行少于 1048576,则该 INSERT 是原子的。 - -数据块会去重。对于被多次写的相同数据块(大小相同且具有相同顺序的相同行的数据块),该块仅会写入一次。这样设计的原因是万一在网络故障时客户端应用程序不知道数据是否成功写入DB,此时可以简单地重复 `INSERT` 。把相同的数据发送给多个副本 INSERT 并不会有问题。因为这些 `INSERT` 是完全相同的(会被去重)。去重参数参看服务器设置 [merge\_tree](../server_settings/settings.md) 。(注意:Replicated\*MergeTree 才会去重,不需要 zookeeper 的不带 MergeTree 不会去重) - -在复制期间,只有要插入的源数据通过网络传输。进一步的数据转换(合并)会在所有副本上以相同的方式进行处理执行。这样可以最大限度地减少网络使用,这意味着即使副本在不同的数据中心,数据同步也能工作良好。(能在不同数据中心中的同步数据是副本机制的主要目标。) - -你可以给数据做任意多的副本。Yandex.Metrica 在生产中使用双副本。某一些情况下,给每台服务器都使用 RAID-5 或 RAID-6 和 RAID-10。是一种相对可靠和方便的解决方案。 - -系统会监视副本数据同步情况,并能在发生故障后恢复。故障转移是自动的(对于小的数据差异)或半自动的(当数据差异很大时,这可能意味是有配置错误)。 - -## 创建复制表 {#creating-replicated-tables} - -在表引擎名称上加上 `Replicated` 前缀。例如:`ReplicatedMergeTree`。 - -**Replicated\*MergeTree 参数** - -- `zoo_path` — ZooKeeper 中该表的路径。 -- `replica_name` — ZooKeeper 中的该表的副本名称。 - -示例: - -``` sql -CREATE TABLE table_name -( - EventDate DateTime, - CounterID UInt32, - UserID UInt32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}') -PARTITION BY toYYYYMM(EventDate) -ORDER BY (CounterID, EventDate, intHash32(UserID)) -SAMPLE BY intHash32(UserID) -``` - -已弃用的建表语法示例: - -``` sql -CREATE 
TABLE table_name -( - EventDate DateTime, - CounterID UInt32, - UserID UInt32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192) -``` - -如上例所示,这些参数可以包含宏替换的占位符,即大括号的部分。它们会被替换为配置文件里 ‘macros’ 那部分配置的值。示例: - -``` xml - - 05 - 02 - example05-02-1.yandex.ru - -``` - -«ZooKeeper 中该表的路径»对每个可复制表都要是唯一的。不同分片上的表要有不同的路径。 -这种情况下,路径包含下面这些部分: - -`/clickhouse/tables/` 是公共前缀,我们推荐使用这个。 - -`{layer}-{shard}` 是分片标识部分。在此示例中,由于 Yandex.Metrica 集群使用了两级分片,所以它是由两部分组成的。但对于大多数情况来说,你只需保留 {shard} 占位符即可,它会替换展开为分片标识。 - -`table_name` 是该表在 ZooKeeper 中的名称。使其与 ClickHouse 中的表名相同比较好。 这里它被明确定义,跟 ClickHouse 表名不一样,它并不会被 RENAME 语句修改。 -*HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name` - -副本名称用于标识同一个表分片的不同副本。你可以使用服务器名称,如上例所示。同个分片中不同副本的副本名称要唯一。 - -你也可以显式指定这些参数,而不是使用宏替换。对于测试和配置小型集群这可能会很方便。但是,这种情况下,则不能使用分布式 DDL 语句(`ON CLUSTER`)。 - -使用大型集群时,我们建议使用宏替换,因为它可以降低出错的可能性。 - -在每个副本服务器上运行 `CREATE TABLE` 查询。将创建新的复制表,或给现有表添加新副本。 - -如果其他副本上已包含了某些数据,在表上添加新副本,则在运行语句后,数据会从其他副本复制到新副本。换句话说,新副本会与其他副本同步。 - -要删除副本,使用 `DROP TABLE`。但它只删除那个 – 位于运行该语句的服务器上的副本。 - -## 故障恢复 {#gu-zhang-hui-fu} - -如果服务器启动时 ZooKeeper 不可用,则复制表会切换为只读模式。系统会定期尝试去连接 ZooKeeper。 - -如果在 `INSERT` 期间 ZooKeeper 不可用,或者在与 ZooKeeper 交互时发生错误,则抛出异常。 - -连接到 ZooKeeper 后,系统会检查本地文件系统中的数据集是否与预期的数据集( ZooKeeper 存储此信息)一致。如果存在轻微的不一致,系统会通过与副本同步数据来解决。 - -如果系统检测到损坏的数据片段(文件大小错误)或无法识别的片段(写入文件系统但未记录在 ZooKeeper 中的部分),则会把它们移动到 ‘detached’ 子目录(不会删除)。而副本中其他任何缺少的但正常数据片段都会被复制同步。 - -注意,ClickHouse 不会执行任何破坏性操作,例如自动删除大量数据。 - -当服务器启动(或与 ZooKeeper 建立新会话)时,它只检查所有文件的数量和大小。 如果文件大小一致但中间某处已有字节被修改过,不会立即被检测到,只有在尝试读取 `SELECT` 查询的数据时才会检测到。该查询会引发校验和不匹配或压缩块大小不一致的异常。这种情况下,数据片段会添加到验证队列中,并在必要时从其他副本中复制。 - -如果本地数据集与预期数据的差异太大,则会触发安全机制。服务器在日志中记录此内容并拒绝启动。这种情况很可能是配置错误,例如,一个分片上的副本意外配置为别的分片上的副本。然而,此机制的阈值设置得相当低,在正常故障恢复期间可能会出现这种情况。在这种情况下,数据恢复则是半自动模式,通过用户主动操作触发。 - -要触发启动恢复,可在 ZooKeeper 中创建节点 `/path_to_table/replica_name/flags/force_restore_data`,节点值可以是任何内容,或运行命令来恢复所有的可复制表: - -``` bash -sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data -``` - -然后重启服务器。启动时,服务器会删除这些标志并开始恢复。 - -## 在数据完全丢失后的恢复 {#zai-shu-ju-wan-quan-diu-shi-hou-de-hui-fu} - -如果其中一个服务器的所有数据和元数据都消失了,请按照以下步骤进行恢复: - -1. 在服务器上安装 ClickHouse。在包含分片标识符和副本的配置文件中正确定义宏配置,如果有用到的话, -2. 如果服务器上有非复制表则必须手动复制,可以从副本服务器上(在 `/var/lib/clickhouse/data/db_name/table_name/` 目录中)复制它们的数据。 -3. 
从副本服务器上中复制位于 `/var/lib/clickhouse/metadata/` 中的表定义信息。如果在表定义信息中显式指定了分片或副本标识符,请更正它以使其对应于该副本。(另外,启动服务器,然后会在 `/var/lib/clickhouse/metadata/` 中的.sql文件中生成所有的 `ATTACH TABLE` 语句。) - 4.要开始恢复,ZooKeeper 中创建节点 `/path_to_table/replica_name/flags/force_restore_data`,节点内容不限,或运行命令来恢复所有复制的表:`sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data` - -然后启动服务器(如果它已运行则重启)。数据会从副本中下载。 - -另一种恢复方式是从 ZooKeeper(`/path_to_table/replica_name`)中删除有数据丢的副本的所有元信息,然后再按照«[创建可复制表](#creating-replicated-tables)»中的描述重新创建副本。 - -恢复期间的网络带宽没有限制。特别注意这一点,尤其是要一次恢复很多副本。 - -## MergeTree 转换为 ReplicatedMergeTree {#mergetree-zhuan-huan-wei-replicatedmergetree} - -我们使用 `MergeTree` 来表示 `MergeTree系列` 中的所有表引擎,`ReplicatedMergeTree` 同理。 - -如果你有一个手动同步的 `MergeTree` 表,您可以将其转换为可复制表。如果你已经在 `MergeTree` 表中收集了大量数据,并且现在要启用复制,则可以执行这些操作。 - -如果各个副本上的数据不一致,则首先对其进行同步,或者除保留的一个副本外,删除其他所有副本上的数据。 - -重命名现有的 MergeTree 表,然后使用旧名称创建 `ReplicatedMergeTree` 表。 -将数据从旧表移动到新表(`/var/lib/clickhouse/data/db_name/table_name/`)目录内的 ‘detached’ 目录中。 -然后在其中一个副本上运行`ALTER TABLE ATTACH PARTITION`,将这些数据片段添加到工作集中。 - -## ReplicatedMergeTree 转换为 MergeTree {#replicatedmergetree-zhuan-huan-wei-mergetree} - -使用其他名称创建 MergeTree 表。将具有`ReplicatedMergeTree`表数据的目录中的所有数据移动到新表的数据目录中。然后删除`ReplicatedMergeTree`表并重新启动服务器。 - -如果你想在不启动服务器的情况下清除 `ReplicatedMergeTree` 表: - -- 删除元数据目录中的相应 `.sql` 文件(`/var/lib/clickhouse/metadata/`)。 -- 删除 ZooKeeper 中的相应路径(`/path_to_table/replica_name`)。 - -之后,你可以启动服务器,创建一个 `MergeTree` 表,将数据移动到其目录,然后重新启动服务器。 - -## 当 ZooKeeper 集群中的元数据丢失或损坏时恢复方法 {#dang-zookeeper-ji-qun-zhong-de-yuan-shu-ju-diu-shi-huo-sun-pi-shi-hui-fu-fang-fa} - -如果 ZooKeeper 中的数据丢失或损坏,如上所述,你可以通过将数据转移到非复制表来保存数据。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/zh/operations/table_engines/set.md b/docs/zh/operations/table_engines/set.md deleted file mode 100644 index e9be9ab7e56..00000000000 --- a/docs/zh/operations/table_engines/set.md +++ /dev/null @@ -1,11 +0,0 @@ -# Set {#set} - -始终存在于 RAM 中的数据集。它适用于IN运算符的右侧(请参见 «IN运算符» 部分)。 - -可以使用 INSERT 向表中插入数据。新元素将添加到数据集中,而重复项将被忽略。但是不能对此类型表执行 SELECT 语句。检索数据的唯一方法是在 IN 运算符的右半部分使用它。 - -数据始终存在于 RAM 中。对于 INSERT,插入数据块也会写入磁盘上的表目录。启动服务器时,此数据将加载到 RAM。也就是说,重新启动后,数据仍然存在。 - -对于强制服务器重启,磁盘上的数据块可能会丢失或损坏。在数据块损坏的情况下,可能需要手动删除包含损坏数据的文件。 - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/set/) diff --git a/docs/zh/operations/table_engines/stripelog.md b/docs/zh/operations/table_engines/stripelog.md deleted file mode 100644 index 1a5edfd23bf..00000000000 --- a/docs/zh/operations/table_engines/stripelog.md +++ /dev/null @@ -1,82 +0,0 @@ -# StripeLog {#table_engines-stripelog} - -该引擎属于日志引擎系列。请在[日志引擎系列](log_family.md)文章中查看引擎的共同属性和差异。 - -在你需要写入许多小数据量(小于一百万行)的表的场景下使用这个引擎。 - -## 建表 {#table_engines-stripelog-creating-a-table} - - CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - ( - column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... 
- ) ENGINE = StripeLog - -查看[建表](../../query_language/create.md#create-table-query)请求的详细说明。 - -## 写数据 {#table_engines-stripelog-writing-the-data} - -`StripeLog` 引擎将所有列存储在一个文件中。对每一次 `Insert` 请求,ClickHouse 将数据块追加在表文件的末尾,逐列写入。 - -ClickHouse 为每张表写入以下文件: - -- `data.bin` — 数据文件。 -- `index.mrk` — 带标记的文件。标记包含了已插入的每个数据块中每列的偏移量。 - -`StripeLog` 引擎不支持 `ALTER UPDATE` 和 `ALTER DELETE` 操作。 - -## 读数据 {#table_engines-stripelog-reading-the-data} - -带标记的文件使得 ClickHouse 可以并行的读取数据。这意味着 `SELECT` 请求返回行的顺序是不可预测的。使用 `ORDER BY` 子句对行进行排序。 - -## 使用示例 {#table_engines-stripelog-example-of-use} - -建表: - -``` sql -CREATE TABLE stripe_log_table -( - timestamp DateTime, - message_type String, - message String -) -ENGINE = StripeLog -``` - -插入数据: - -``` sql -INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message') -INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message') -``` - -我们使用两次 `INSERT` 请求从而在 `data.bin` 文件中创建两个数据块。 - -ClickHouse 在查询数据时使用多线程。每个线程读取单独的数据块并在完成后独立的返回结果行。这样的结果是,大多数情况下,输出中块的顺序和输入时相应块的顺序是不同的。例如: - -``` sql -SELECT * FROM stripe_log_table -``` - - ┌───────────timestamp─┬─message_type─┬─message────────────────────┐ - │ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ - │ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ - └─────────────────────┴──────────────┴────────────────────────────┘ - ┌───────────timestamp─┬─message_type─┬─message───────────────────┐ - │ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ - └─────────────────────┴──────────────┴───────────────────────────┘ - -对结果排序(默认增序): - -``` sql -SELECT * FROM stripe_log_table ORDER BY timestamp -``` - - ┌───────────timestamp─┬─message_type─┬─message────────────────────┐ - │ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ - │ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ - │ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ - └─────────────────────┴──────────────┴────────────────────────────┘ - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/) diff --git a/docs/zh/operations/table_engines/summingmergetree.md b/docs/zh/operations/table_engines/summingmergetree.md deleted file mode 100644 index 326ccb7118e..00000000000 --- a/docs/zh/operations/table_engines/summingmergetree.md +++ /dev/null @@ -1,124 +0,0 @@ -# SummingMergeTree {#summingmergetree} - -该引擎继承自 [MergeTree](mergetree.md)。区别在于,当合并 `SummingMergeTree` 表的数据片段时,ClickHouse 会把所有具有相同主键的行合并为一行,该行包含了被合并的行中具有数值数据类型的列的汇总值。如果主键的组合方式使得单个键值对应于大量的行,则可以显著的减少存储空间并加快数据查询的速度。 - -我们推荐将该引擎和 `MergeTree` 一起使用。例如,在准备做报告的时候,将完整的数据存储在 `MergeTree` 表中,并且使用 `SummingMergeTree` 来存储聚合数据。这种方法可以使你避免因为使用不正确的主键组合方式而丢失有价值的数据。 - -## 建表 {#jian-biao} - - CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - ( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... - ) ENGINE = SummingMergeTree([columns]) - [PARTITION BY expr] - [ORDER BY expr] - [SAMPLE BY expr] - [SETTINGS name=value, ...] - -请求参数的描述,参考 [请求描述](../../query_language/create.md)。 - -**SummingMergeTree 的参数** - -- `columns` - 包含了将要被汇总的列的列名的元组。可选参数。 - 所选的列必须是数值类型,并且不可位于主键中。 - - 如果没有指定 `columns`,ClickHouse 会把所有不在主键中的数值类型的列都进行汇总。 - -**子句** - -创建 `SummingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md)。 - -
    - -已弃用的建表方法 - -!!! attention "注意" - 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 - - CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - ( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... - ) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns]) - -除 `columns` 外的所有参数都与 `MergeTree` 中的含义相同。 - -- `columns` — 包含将要被汇总的列的列名的元组。可选参数。有关说明,请参阅上文。 - -
    - -## 用法示例 {#yong-fa-shi-li} - -考虑如下的表: - -``` sql -CREATE TABLE summtt -( - key UInt32, - value UInt32 -) -ENGINE = SummingMergeTree() -ORDER BY key -``` - -向其中插入数据: - - :) INSERT INTO summtt Values(1,1),(1,2),(2,1) - -ClickHouse可能不会完整的汇总所有行([见下文](#data-processing)),因此我们在查询中使用了聚合函数 `sum` 和 `GROUP BY` 子句。 - -``` sql -SELECT key, sum(value) FROM summtt GROUP BY key -``` - - ┌─key─┬─sum(value)─┐ - │ 2 │ 1 │ - │ 1 │ 3 │ - └─────┴────────────┘ - -## 数据处理 {#data-processing} - -当数据被插入到表中时,他们将被原样保存。ClickHouse 定期合并插入的数据片段,并在这个时候对所有具有相同主键的行中的列进行汇总,将这些行替换为包含汇总数据的一行记录。 - -ClickHouse 会按片段合并数据,以至于不同的数据片段中会包含具有相同主键的行,即单个汇总片段将会是不完整的。因此,聚合函数 [sum()](../../query_language/agg_functions/reference.md#agg_function-sum) 和 `GROUP BY` 子句应该在(`SELECT`)查询语句中被使用,如上文中的例子所述。 - -### 汇总的通用规则 {#hui-zong-de-tong-yong-gui-ze} - -列中数值类型的值会被汇总。这些列的集合在参数 `columns` 中被定义。 - -如果用于汇总的所有列中的值均为0,则该行会被删除。 - -如果列不在主键中且无法被汇总,则会在现有的值中任选一个。 - -主键所在的列中的值不会被汇总。 - -### AggregateFunction 列中的汇总 {#aggregatefunction-lie-zhong-de-hui-zong} - -对于 [AggregateFunction 类型](../../data_types/nested_data_structures/aggregatefunction.md)的列,ClickHouse 根据对应函数表现为 [AggregatingMergeTree](aggregatingmergetree.md) 引擎的聚合。 - -### 嵌套结构 {#qian-tao-jie-gou} - -表中可以具有以特殊方式处理的嵌套数据结构。 - -如果嵌套表的名称以 `Map` 结尾,并且包含至少两个符合以下条件的列: - -- 第一列是数值类型 `(*Int*, Date, DateTime)`,我们称之为 `key`, -- 其他的列是可计算的 `(*Int*, Float32/64)`,我们称之为 `(values...)`, - -然后这个嵌套表会被解释为一个 `key => (values...)` 的映射,当合并它们的行时,两个数据集中的元素会被根据 `key` 合并为相应的 `(values...)` 的汇总值。 - -示例: - - [(1, 100)] + [(2, 150)] -> [(1, 100), (2, 150)] - [(1, 100)] + [(1, 150)] -> [(1, 250)] - [(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)] - [(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)] - -请求数据时,使用 [sumMap(key, value)](../../query_language/agg_functions/reference.md) 函数来对 `Map` 进行聚合。 - -对于嵌套数据结构,你无需在列的元组中指定列以进行汇总。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/) diff --git a/docs/zh/operations/table_engines/tinylog.md b/docs/zh/operations/table_engines/tinylog.md deleted file mode 100644 index 7c9d524d5e6..00000000000 --- a/docs/zh/operations/table_engines/tinylog.md +++ /dev/null @@ -1,13 +0,0 @@ -# TinyLog {#tinylog} - -最简单的表引擎,用于将数据存储在磁盘上。每列都存储在单独的压缩文件中。写入时,数据将附加到文件末尾。 - -并发数据访问不受任何限制: -- 如果同时从表中读取并在不同的查询中写入,则读取操作将抛出异常 -- 如果同时写入多个查询中的表,则数据将被破坏。 - -这种表引擎的典型用法是 write-once:首先只写入一次数据,然后根据需要多次读取。查询在单个流中执行。换句话说,此引擎适用于相对较小的表(建议最多1,000,000行)。如果您有许多小表,则使用此表引擎是适合的,因为它比Log引擎更简单(需要打开的文件更少)。当您拥有大量小表时,可能会导致性能低下,但在可能已经在其它 DBMS 时使用过,则您可能会发现切换使用 TinyLog 类型的表更容易。**不支持索引**。 - -在 Yandex.Metrica 中,TinyLog 表用于小批量处理的中间数据。 - -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/tinylog/) diff --git a/docs/zh/operations/table_engines/url.md b/docs/zh/operations/table_engines/url.md deleted file mode 100644 index c2ce37adf21..00000000000 --- a/docs/zh/operations/table_engines/url.md +++ /dev/null @@ -1,71 +0,0 @@ -# URL(URL, Format) {#table_engines-url} - -用于管理远程 HTTP/HTTPS 服务器上的数据。该引擎类似 -[File](file.md) 引擎。 - -## 在 ClickHouse 服务器中使用引擎 {#zai-clickhouse-fu-wu-qi-zhong-shi-yong-yin-qing} - -`Format` 必须是 ClickHouse 可以用于 -`SELECT` 查询的一种格式,若有必要,还要可用于 `INSERT` 。有关支持格式的完整列表,请查看 -[Formats](../../interfaces/formats.md#formats)。 - -`URL` 必须符合统一资源定位符的结构。指定的URL必须指向一个 -HTTP 或 HTTPS 服务器。对于服务端响应, -不需要任何额外的 HTTP 头标记。 - -`INSERT` 和 `SELECT` 查询会分别转换为 `POST` 和 `GET` 请求。 -对于 `POST` 请求的处理,远程服务器必须支持 -[分块传输编码](https://en.wikipedia.org/wiki/Chunked_transfer_encoding)。 - -**示例:** - -**1.** 在 Clickhouse 服务上创建一个 `url_engine_table` 表: - -``` sql -CREATE TABLE url_engine_table (word String, value UInt64) 
-ENGINE=URL('http://127.0.0.1:12345/', CSV)
-```
-
-**2.** 用标准的 Python 3 工具库创建一个基本的 HTTP 服务并启动它:
-
-``` python3
-from http.server import BaseHTTPRequestHandler, HTTPServer
-
-class CSVHTTPServer(BaseHTTPRequestHandler):
-    def do_GET(self):
-        self.send_response(200)
-        self.send_header('Content-type', 'text/csv')
-        self.end_headers()
-
-        self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8"))
-
-if __name__ == "__main__":
-    server_address = ('127.0.0.1', 12345)
-    HTTPServer(server_address, CSVHTTPServer).serve_forever()
-```
-
-``` bash
-python3 server.py
-```
-
-**3.** 查询请求:
-
-``` sql
-SELECT * FROM url_engine_table
-```
-
-    ┌─word──┬─value─┐
-    │ Hello │     1 │
-    │ World │     2 │
-    └───────┴───────┘
-
-## 功能实现 {#gong-neng-shi-xian}
-
-- 读写操作都支持并发
-- 不支持:
-    - `ALTER` 和 `SELECT...SAMPLE` 操作。
-    - 索引。
-    - 副本。
-
-[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/url/)
diff --git a/docs/zh/operations/table_engines/versionedcollapsingmergetree.md b/docs/zh/operations/table_engines/versionedcollapsingmergetree.md
deleted file mode 120000
index 5843fba70b8..00000000000
--- a/docs/zh/operations/table_engines/versionedcollapsingmergetree.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/table_engines/versionedcollapsingmergetree.md
\ No newline at end of file
diff --git a/docs/zh/operations/table_engines/view.md b/docs/zh/operations/table_engines/view.md
deleted file mode 100644
index 5d15fc74218..00000000000
--- a/docs/zh/operations/table_engines/view.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# View {#view}
-
-用于构建视图(有关更多信息,请参阅 `CREATE VIEW` 查询)。它不存储数据,仅存储指定的 `SELECT` 查询。从表中读取时,它会运行此查询(并从查询中删除所有不必要的列)。
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/view/)
diff --git a/docs/zh/operations/tips.md b/docs/zh/operations/tips.md
index 8980d74e8b2..05509399d85 100644
--- a/docs/zh/operations/tips.md
+++ b/docs/zh/operations/tips.md
@@ -1,126 +1,127 @@
-# Usage Recommendations {#usage-recommendations}
+
+# 使用建议 {#usage-recommendations}

## CPU {#cpu}

-The SSE 4.2 instruction set must be supported. Modern processors (since 2008) support it.
+必须支持 SSE 4.2 指令集。2008 年以后的现代处理器都支持它。

-When choosing a processor, prefer a large number of cores and slightly slower clock rate over fewer cores and a higher clock rate.
-For example, 16 cores with 2600 MHz is better than 8 cores with 3600 MHz.
+选择处理器时,相比核心较少而主频较高的配置,应优先选择核心数量多、主频稍低的配置。
+例如,16 核 2600 MHz 要好于 8 核 3600 MHz。

-## Hyper-threading {#hyper-threading}
+## 超线程 {#hyper-threading}

-Don’t disable hyper-threading. It helps for some queries, but not for others.
+不要禁用超线程。它对某些查询有帮助,对另一些则没有。

-## Turbo Boost {#turbo-boost}
+## 睿频(Turbo Boost) {#turbo-boost}

-Turbo Boost is highly recommended. It significantly improves performance with a typical load.
-You can use `turbostat` to view the CPU’s actual clock rate under a load.
+强烈建议启用睿频。它能显著提升典型负载下的性能。
+可以使用 `turbostat` 查看负载下 CPU 的实际时钟频率。

-## CPU Scaling Governor {#cpu-scaling-governor}
+## CPU 频率调节器 {#cpu-scaling-governor}

-Always use the `performance` scaling governor. The `on-demand` scaling governor works much worse with constantly high demand.
+始终使用 `performance` 调节器。在需求持续较高的场景下,`on-demand` 调节器的表现要差得多。

``` bash
echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
```

-## CPU Limitations {#cpu-limitations}
+## CPU 限制 {#cpu-limitations}

-Processors can overheat. Use `dmesg` to see if the CPU’s clock rate was limited due to overheating.
-The restriction can also be set externally at the datacenter level. You can use `turbostat` to monitor it under a load.
+处理器可能会过热。使用 `dmesg` 查看 CPU 的时钟频率是否因过热而被限制。
+该限制也可能在数据中心层面由外部设定。可以使用 `turbostat` 在负载下进行监控。

## RAM {#ram}

-For small amounts of data (up to ~200 GB compressed), it is best to use as much memory as the volume of data.
-For large amounts of data and when processing interactive (online) queries, you should use a reasonable amount of RAM (128 GB or more) so the hot data subset will fit in the cache of pages.
-Even for data volumes of ~50 TB per server, using 128 GB of RAM significantly improves query performance compared to 64 GB.
+对于少量数据(压缩后约 200GB 以内),内存最好与数据量一样大。
+对于大量数据以及处理交互式(在线)查询的场景,应使用足够大的内存(128GB 或更多),让热数据子集放得进页面缓存。
+即使每台服务器的数据量约为 50TB,使用 128GB 内存也比 64GB 能显著提升查询性能。

-## Swap File {#swap-file}
+## 交换文件 {#swap-file}

-Always disable the swap file. The only reason for not doing this is if you are using ClickHouse on your personal laptop.
+始终禁用交换文件。唯一的例外是在个人笔记本电脑上使用 ClickHouse 的情况。

-## Huge Pages {#huge-pages}
+## 大页(Huge Pages) {#huge-pages}

-Always disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation.
+始终禁用透明大页。它会干扰内存分配器,导致显著的性能下降。

``` bash
echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```

-Use `perf top` to watch the time spent in the kernel for memory management.
-Permanent huge pages also do not need to be allocated.
+使用 `perf top` 观察内核在内存管理上花费的时间。
+也不需要分配永久大页。

-## Storage Subsystem {#storage-subsystem}
+## 存储子系统 {#storage-subsystem}

-If your budget allows you to use SSD, use SSD.
-If not, use HDD. SATA HDDs 7200 RPM will do.
+如果预算允许,请使用 SSD。
+否则请使用 HDD。7200 转的 SATA 硬盘即可。

-Give preference to a lot of servers with local hard drives over a smaller number of servers with attached disk shelves.
-But for storing archives with rare queries, shelves will work.
+优先选择大量配备本地硬盘的服务器,而不是少量挂接外置磁盘架的服务器。
+但对于存放很少被查询的归档数据,磁盘架也可以胜任。

## RAID {#raid}

-When using HDD, you can combine their RAID-10, RAID-5, RAID-6 or RAID-50.
-For Linux, software RAID is better (with `mdadm`). We don’t recommend using LVM.
-When creating RAID-10, select the `far` layout.
-If your budget allows, choose RAID-10.
+使用 HDD 时,可以将它们组成 RAID-10、RAID-5、RAID-6 或 RAID-50。
+在 Linux 上,软件 RAID(使用 `mdadm`)更好。不建议使用 LVM。
+创建 RAID-10 时,选择 `far` 布局。
+如果预算允许,请选择 RAID-10。

-If you have more than 4 disks, use RAID-6 (preferred) or RAID-50, instead of RAID-5.
-When using RAID-5, RAID-6 or RAID-50, always increase stripe\_cache\_size, since the default value is usually not the best choice.
+如果磁盘超过 4 块,请使用 RAID-6(首选)或 RAID-50,而不是 RAID-5。
+使用 RAID-5、RAID-6 或 RAID-50 时,务必调大 stripe\_cache\_size,默认值通常不是最佳选择。

``` bash
echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size
```

-Calculate the exact number from the number of devices and the block size, using the formula: `2 * num_devices * chunk_size_in_bytes / 4096`.
+请根据设备数量和块大小,用以下公式计算出准确值:`2 * num_devices * chunk_size_in_bytes / 4096`。

-A block size of 1025 KB is sufficient for all RAID configurations.
-Never set the block size too small or too large.
+1025KB 的块大小足以满足所有 RAID 配置。
+切勿把块大小设置得过小或过大。

-You can use RAID-0 on SSD.
-Regardless of RAID use, always use replication for data security.
+SSD 上可以使用 RAID-0。
+无论使用哪种 RAID,都要通过数据副本来保证数据安全。

-Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting.
-For HDD, enable the write cache.
+启用长队列的 NCQ。对于 HDD 选择 CFQ 调度器,对于 SSD 选择 noop。不要调低 ‘readahead’ 设置。
+对于 HDD,请启用写缓存。

-## File System {#file-system}
+## 文件系统 {#file-system}

-Ext4 is the most reliable option. Set the mount options `noatime, nobarrier`.
-XFS is also suitable, but it hasn’t been as thoroughly tested with ClickHouse.
-Most other file systems should also work fine. File systems with delayed allocation work better.
+Ext4 是最可靠的选择。设置挂载选项 `noatime, nobarrier`。
+XFS 也可以,但尚未与 ClickHouse 进行过充分测试。
+大多数其他文件系统也应该能正常工作。带延迟分配的文件系统表现更好。

-## Linux Kernel {#linux-kernel}
+## Linux 内核 {#linux-kernel}

-Don’t use an outdated Linux kernel.
+不要使用过时的 Linux 内核。

-## Network {#network}
+## 网络 {#network}

-If you are using IPv6, increase the size of the route cache.
-The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementation.
+如果使用 IPv6,请增大路由缓存的大小。
+3.2 之前的 Linux 内核在 IPv6 实现上存在大量问题。

-Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.
+如果可能,请至少使用 10GB 网络。1Gb 网络也能工作,但在为副本补齐数十 TB 数据、或处理带有大量中间数据的分布式查询时会差很多。

-## ZooKeeper {#zookeeper}
+## ZooKeeper {#zookeeper}

-You are probably already using ZooKeeper for other purposes. You can use the same installation of ZooKeeper, if it isn’t already overloaded.
+您可能已经出于其他目的在使用 ZooKeeper。如果现有的 ZooKeeper 安装尚未过载,可以复用它。

-It’s best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated.
+最好使用较新版本的 ZooKeeper(3.4.9 或更高)。稳定版 Linux 发行版中自带的版本可能已经过时。

You should never use manually written scripts to transfer data between different ZooKeeper clusters, because the result will be incorrect for sequential nodes. Never use the «zkcopy» utility for the same reason: https://github.com/ksprojects/zkcopy/issues/15

-If you want to divide an existing ZooKeeper cluster into two, the correct way is to increase the number of its replicas and then reconfigure it as two independent clusters.
+如果要把现有的 ZooKeeper 集群一分为二,正确的做法是先增加其副本数量,然后将其重新配置为两个独立的集群。

-Do not run ZooKeeper on the same servers as ClickHouse. Because ZooKeeper is very sensitive for latency and ClickHouse may utilize all available system resources.
+不要把 ZooKeeper 和 ClickHouse 运行在相同的服务器上。ZooKeeper 对延迟非常敏感,而 ClickHouse 可能占用所有可用的系统资源。

-With the default settings, ZooKeeper is a time bomb:
+在默认设置下,ZooKeeper 就是一颗定时炸弹:

-> The ZooKeeper server won’t delete files from old snapshots and logs when using the default configuration (see autopurge), and this is the responsibility of the operator.
+> 使用默认配置时,ZooKeeper 服务器不会删除旧快照和日志文件(参见 autopurge),清理它们是运维人员的责任。

-This bomb must be defused.
+必须拆除这颗炸弹。

-The ZooKeeper (3.5.1) configuration below is used in the Yandex.Metrica production environment as of May 20, 2017:
+下面的 ZooKeeper(3.5.1)配置截至 2017 年 5 月 20 日用于 Yandex.Metrica 生产环境:

-zoo.cfg:
+zoo.cfg:

``` bash
# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
@@ -176,12 +177,12 @@ standaloneEnabled=false
dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo.cfg.dynamic
```

-Java version:
+Java 版本:

    Java(TM) SE Runtime Environment (build 1.8.0_25-b17)
    Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode)

-JVM parameters:
+JVM 参数:

``` bash
NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}
@@ -222,7 +223,7 @@ JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \
    -XX:+CMSParallelRemarkEnabled"
```

-Salt init:
+Salt init:

    description "zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} centralized coordination service"
@@ -251,4 +252,4 @@ Salt init:
    -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
    end script

-[Original article](https://clickhouse.tech/docs/en/operations/tips/)
+[原始文章](https://clickhouse.tech/docs/en/operations/tips/)
diff --git a/docs/zh/operations/troubleshooting.md b/docs/zh/operations/troubleshooting.md
deleted file mode 120000
index 84f0ff34f41..00000000000
--- a/docs/zh/operations/troubleshooting.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/troubleshooting.md
\ No newline at end of file
diff --git a/docs/zh/operations/troubleshooting.md b/docs/zh/operations/troubleshooting.md
new file mode 100644
index 00000000000..db7bf6c6bb9
--- /dev/null
+++ b/docs/zh/operations/troubleshooting.md
@@ -0,0 +1,146 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 46
+toc_title: "\u7591\u96BE\u89E3\u7B54"
+---
+
+# 疑难解答 {#troubleshooting}
+
+- [安装](#troubleshooting-installation-errors)
+- [连接到服务器](#troubleshooting-accepts-no-connections)
+- [查询处理](#troubleshooting-does-not-process-queries)
+- [查询处理效率](#troubleshooting-too-slow)
+
+## 安装 {#troubleshooting-installation-errors}
+
+### 无法通过 apt-get 从 ClickHouse 存储库获取 deb 软件包 {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get}
+
+- 检查防火墙设置。
+- 如果出于某种原因无法访问存储库,请按照[入门](../getting_started/index.md)一文中的描述下载软件包,并使用 `sudo dpkg -i <packages>` 命令手动安装。此外,您还需要 `tzdata` 包。
+
+## 连接到服务器 {#troubleshooting-accepts-no-connections}
+
+可能出现的问题:
+
+- 服务器未运行。
+- 意外或错误的配置参数。
+
+### 服务器未运行 {#server-is-not-running}
+
+**检查服务器是否正在运行**
+
+命令:
+
+``` bash
+$ sudo service clickhouse-server status
+```
+
+如果服务器没有运行,请使用以下命令启动它:
+
+``` bash
+$ sudo service clickhouse-server start
+```
+
+**检查日志**
+
+`clickhouse-server` 的主日志默认位于 `/var/log/clickhouse-server/clickhouse-server.log`。
+
+如果服务器成功启动,您应该看到这些字符串:
+
+- `<Information> Application: starting up.` — 服务器已启动。
+- `<Information> Application: Ready for connections.` — 服务器正在运行,可以接受连接。
+
+如果 `clickhouse-server` 因配置错误而启动失败,您应该能看到带有错误描述的 `<Error>` 字符串。例如:
+
+``` text
+2019.01.11 15:23:25.549505 [ 45 ] {} <Error> ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused
+```
+
+如果没有在文件末尾看到错误,请从下面这行字符串开始检查整个文件:
+
+``` text
+<Information> Application: starting up.
+```
+
+如果您尝试在同一台服务器上启动第二个 `clickhouse-server` 实例,将看到以下日志:
+
+``` text
+2019.01.11 15:25:11.151730 [ 1 ] {} <Information> : Starting ClickHouse 19.1.0 with revision 54413
+2019.01.11 15:25:11.154578 [ 1 ] {} <Information> Application: starting up
+2019.01.11 15:25:11.156361 [ 1 ] {} <Information> StatusFile: Status file ./status already exists - unclean restart. Contents:
+PID: 8510
+Started at: 2019-01-11 15:24:23
+Revision: 54413
+
+2019.01.11 15:25:11.156673 [ 1 ] {} <Error> Application: DB::Exception: Cannot lock file ./status. Another server instance in same directory is already running.
+2019.01.11 15:25:11.156682 [ 1 ] {} <Information> Application: shutting down
+2019.01.11 15:25:11.156686 [ 1 ] {} <Information> Application: Uninitializing subsystem: Logging Subsystem
+2019.01.11 15:25:11.156716 [ 2 ] {} <Information> BaseDaemon: Stop SignalListener thread
+```
+
+**查看 system.d 日志**
+
+如果在 `clickhouse-server` 日志中没有找到任何有用信息,或者根本没有日志,可以使用以下命令查看 `system.d` 日志:
+
+``` bash
+$ sudo journalctl -u clickhouse-server
+```
+
+**以交互模式启动 clickhouse-server**
+
+``` bash
+$ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml
+```
+
+此命令使用自动启动脚本的标准参数,将服务器作为交互式应用启动。在这种模式下,`clickhouse-server` 会把所有事件消息打印到控制台。
+
+### 配置参数 {#configuration-parameters}
+
+检查:
+
+- Docker 设置。
+
+    如果在 IPv6 网络的 Docker 中运行 ClickHouse,请确保设置了 `network=host`。
+
+- 端点设置。
+
+    检查 [listen\_host](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) 和 [tcp\_port](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port) 设置。
+
+    ClickHouse 服务器默认只接受来自 localhost 的连接。
+
+- HTTP 协议设置。
+
+    检查 HTTP API 的协议设置。
+
+- 安全连接设置。
+
+    检查:
+
+    - [tcp\_port\_secure](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) 设置。
+    - [SSL 证书](server_configuration_parameters/settings.md#server_configuration_parameters-openssl)设置。
+
+    连接时使用正确的参数。例如,使用 `clickhouse_client` 时指定 `port_secure` 参数。
+
+- 用户设置。
+
+    您可能使用了错误的用户名或密码。
+
+## 查询处理 {#troubleshooting-does-not-process-queries}
+
+如果 ClickHouse 无法处理查询,它会向客户端发送错误描述。在 `clickhouse-client` 中,您会在控制台中看到错误描述。如果使用 HTTP 接口,ClickHouse 会在响应正文中返回错误描述。例如:
+
+``` bash
+$ curl 'http://localhost:8123/' --data-binary "SELECT a"
+Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception
+```
+
+如果以 `stack-trace` 参数启动 `clickhouse-client`,ClickHouse 会返回包含错误描述的服务器端堆栈跟踪。
+
+您可能会看到关于连接中断的消息。这种情况下,可以重试查询。如果每次执行查询时连接都中断,请检查服务器日志中是否存在错误。
+
+## 查询处理效率 {#troubleshooting-too-slow}
+
+如果发现 ClickHouse 运行太慢,需要对查询在服务器资源和网络上产生的负载进行剖析。
+
+您可以使用 clickhouse-benchmark 实用程序来剖析查询。它会显示每秒处理的查询数、每秒处理的行数以及查询处理时间的百分位数。
diff --git a/docs/zh/operations/update.md b/docs/zh/operations/update.md
deleted file mode 120000
index 88a092c0dff..00000000000
--- a/docs/zh/operations/update.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/update.md
\ No newline at end of file
diff --git a/docs/zh/operations/update.md b/docs/zh/operations/update.md
new file mode 100644
index 00000000000..a465a8110eb
--- /dev/null
+++ b/docs/zh/operations/update.md
@@ -0,0 +1,20 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 47
+toc_title: "ClickHouse \u66F4\u65B0"
+---
+
+# ClickHouse 更新 {#clickhouse-update}
+
+如果 ClickHouse 是从 deb 包安装的,请在服务器上执行以下命令:
+
+``` bash
+$ sudo apt-get update
+$ sudo apt-get install clickhouse-client clickhouse-server
+$ sudo service clickhouse-server restart
+```
+
+如果您不是通过推荐的 deb 包安装的 ClickHouse,请使用相应的更新方法。
+
+ClickHouse 不支持分布式更新。应在每台服务器上依次执行该操作。不要同时更新集群中的所有服务器,否则集群会在一段时间内不可用。
diff --git a/docs/zh/operations/utilities/clickhouse-benchmark.md b/docs/zh/operations/utilities/clickhouse-benchmark.md
new file mode 100644
index 00000000000..809f4ebe2a1
--- /dev/null
+++ b/docs/zh/operations/utilities/clickhouse-benchmark.md
@@ -0,0 +1,156 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 61
+toc_title: "clickhouse-benchmark"
+---
+
+# clickhouse-benchmark {#clickhouse-benchmark}
+
+连接到 ClickHouse 服务器并重复发送指定的查询。
+
+语法:
+
+``` bash
+$ echo "single query" | clickhouse-benchmark [keys]
+```
+
+或
+
+``` bash
+$ clickhouse-benchmark [keys] <<< "single query"
+```
+
+如果要发送一组查询,请创建一个文本文件,将每个查询单独放在文件中的一行。例如:
+
+``` sql
+SELECT * FROM system.numbers LIMIT 10000000
+SELECT 1
+```
+
+然后将此文件通过标准输入传给 `clickhouse-benchmark`:
+
+``` bash
+clickhouse-benchmark [keys] < queries_file
+```
+
+## 参数 {#clickhouse-benchmark-keys}
+
+- `-c N`, `--concurrency=N` — `clickhouse-benchmark` 同时发送的查询数量。默认值:1。
+- `-d N`, `--delay=N` — 两次中间报告之间的间隔秒数(设为 0 可禁用报告)。默认值:1。
+- `-h WORD`, `--host=WORD` — 服务器主机名。默认值:`localhost`。在[比较模式](#clickhouse-benchmark-comparison-mode)下可以使用多个 `-h` 参数。
+- `-p N`, `--port=N` — 服务器端口。默认值:9000。在[比较模式](#clickhouse-benchmark-comparison-mode)下可以使用多个 `-p` 参数。
+- `-i N`, `--iterations=N` — 查询总数。默认值:0。
+- `-r`, `--randomize` — 有多个输入查询时,以随机顺序执行。
+- `-s`, `--secure` — 使用 TLS 连接。
+- `-t N`, `--timelimit=N` — 时间限制(秒)。达到指定的时间限制时,`clickhouse-benchmark` 停止发送查询。默认值:0(禁用时间限制)。
+- `--confidence=N` — T 检验的置信水平。可能的值:0(80%)、1(90%)、2(95%)、3(98%)、4(99%)、5(99.5%)。默认值:5。在[比较模式](#clickhouse-benchmark-comparison-mode)下,`clickhouse-benchmark` 会执行[独立双样本 Student t 检验](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test),以判断在所选置信水平下两个分布是否没有差异。
+- `--cumulative` — 打印累计数据,而不是每个时间间隔的数据。
+- `--database=DATABASE_NAME` — ClickHouse 数据库名。默认值:`default`。
+- `--json=FILEPATH` — JSON 输出。设置该参数后,`clickhouse-benchmark` 会把报告输出到指定的 JSON 文件。
+- `--user=USERNAME` — ClickHouse 用户名。默认值:`default`。
+- `--password=PSWD` — ClickHouse 用户密码。默认值:空字符串。
+- `--stacktrace` — 输出堆栈跟踪。设置该参数后,`clickhouse-benchmark` 会输出异常的堆栈跟踪。
+- `--stage=WORD` — 服务器端的查询处理阶段。ClickHouse 在指定阶段停止查询处理,并把结果返回给 `clickhouse-benchmark`。可能的值:`complete`、`fetch_columns`、`with_mergeable_state`。默认值:`complete`。
+- `--help` — 显示帮助信息。
+
+如果想对查询应用某些[设置](../../operations/settings/index.md),请以 `--<session setting name>=SETTING_VALUE` 的形式作为参数传入。例如:`--max_memory_usage=1048576`。
+
+## 输出 {#clickhouse-benchmark-output}
+
+默认情况下,`clickhouse-benchmark` 按 `--delay` 指定的间隔输出一次报告。
+
+报告示例:
+
+``` text
+Queries executed: 10.
+
+localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675.
+
+0.000%      0.145 sec.
+10.000%     0.146 sec.
+20.000%     0.146 sec.
+30.000%     0.146 sec.
+40.000%     0.147 sec.
+50.000%     0.148 sec.
+60.000%     0.148 sec.
+70.000%     0.148 sec.
+80.000%     0.149 sec.
+90.000%     0.150 sec.
+95.000%     0.150 sec.
+99.000%     0.150 sec.
+99.900%     0.150 sec.
+99.990%     0.150 sec.
+```
+
+在报告中,您可以找到:
+
+- `Queries executed:` 字段中的查询数量。
+
+- 状态行,按顺序包含:
+
+    - ClickHouse 服务器的端点。
+    - 已处理的查询数。
+    - QPS:在 `--delay` 参数指定的时间段内,服务器每秒执行的查询数。
+    - RPS:在 `--delay` 参数指定的时间段内,服务器每秒读取的行数。
+    - MiB/s:在 `--delay` 参数指定的时间段内,服务器每秒读取的数据量(MiB)。
+    - 结果 RPS:在 `--delay` 参数指定的时间段内,服务器每秒写入查询结果的行数。
+    - 结果 MiB/s:在 `--delay` 参数指定的时间段内,服务器每秒写入查询结果的数据量(MiB)。
+
+- 查询执行时间的百分位数。
+
+## 比较模式 {#clickhouse-benchmark-comparison-mode}
+
+`clickhouse-benchmark` 可以比较两个正在运行的 ClickHouse 服务器的性能。
+
+要使用比较模式,请通过两对 `--host`、`--port` 参数指定两个服务器的端点。参数按其在命令行中的位置配对:第一个 `--host` 对应第一个 `--port`,依此类推。`clickhouse-benchmark` 与两个服务器分别建立连接,然后发送查询,每个查询会被发送到随机选中的一个服务器。结果按服务器分别显示。
+
+## 示例 {#clickhouse-benchmark-example}
+
+``` bash
+$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
+```
+
+``` text
+Loaded 1 queries.
+
+Queries executed: 6.
+
+localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
+
+0.000%      0.159 sec.
+10.000%     0.159 sec.
+20.000%     0.159 sec.
+30.000%     0.160 sec.
+40.000%     0.160 sec.
+50.000%     0.162 sec.
+60.000%     0.164 sec.
+70.000%     0.165 sec.
+80.000%     0.166 sec.
+90.000%     0.166 sec.
+95.000%     0.167 sec.
+99.000%     0.167 sec.
+99.900%     0.167 sec.
+99.990%     0.167 sec.
+
+
+
+Queries executed: 10.
+
+localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
+
+0.000%      0.159 sec.
+10.000%     0.159 sec.
+20.000%     0.160 sec.
+30.000%     0.163 sec.
+40.000%     0.164 sec.
+50.000%     0.165 sec.
+60.000%     0.166 sec.
+70.000%     0.166 sec.
+80.000%     0.167 sec.
+90.000%     0.167 sec.
+95.000%     0.170 sec.
+99.000%     0.172 sec.
+99.900%     0.172 sec.
+99.990%     0.172 sec.
+```
diff --git a/docs/zh/operations/utilities/clickhouse-copier.md b/docs/zh/operations/utilities/clickhouse-copier.md
new file mode 100644
index 00000000000..9e982188499
--- /dev/null
+++ b/docs/zh/operations/utilities/clickhouse-copier.md
@@ -0,0 +1,168 @@
+
+# clickhouse-copier {#clickhouse-copier}
+
+将数据从一个集群中的表复制到另一个(或同一个)集群中的表。
+
+您可以在不同的服务器上运行多个 `clickhouse-copier` 实例来执行同一作业,ZooKeeper 用于进程间同步。
+
+启动后,`clickhouse-copier`:
+
+- 连接到 ZooKeeper 并接收:
+
+    - 复制作业。
+    - 复制作业的状态。
+
+- 执行这些作业。
+
+    每个运行中的进程都会选择源集群中“最近”的分片,把数据复制到目标集群,并在必要时对数据重新分片。
+
+`clickhouse-copier` 跟踪 ZooKeeper 中的更改,并实时应用它们。
+
+为了减少网络流量,建议在源数据所在的服务器上运行 `clickhouse-copier`。
+
+## 运行 clickhouse-copier {#running-clickhouse-copier}
+
+该实用程序应手动运行:
+
+``` bash
+clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir
+```
+
+参数:
+
+- `daemon` — 以守护进程模式启动 `clickhouse-copier`。
+- `config` — `zookeeper.xml` 文件的路径,其中包含连接 ZooKeeper 所需的参数。
+- `task-path` — ZooKeeper 节点路径。该节点用于同步 `clickhouse-copier` 进程并存储任务。任务存储在 `$task-path/description` 中。
+- `base-dir` — 日志和辅助文件的路径。启动时,`clickhouse-copier` 会在 `$base-dir` 下创建 `clickhouse-copier_YYYYMMHHSS_<PID>` 子目录。如果省略该参数,这些目录将创建在启动 `clickhouse-copier` 时所在的目录中。
+
+## zookeeper.xml 的格式 {#format-of-zookeeper-xml}
+
+``` xml
+<yandex>
+    <logger>
+        <level>trace</level>
+        <size>100M</size>
+        <count>3</count>
+    </logger>
+
+    <zookeeper>
+        <node index="1">
+            <host>127.0.0.1</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+</yandex>
+```
+
+## 复制任务的配置 {#configuration-of-copying-tasks}
+
+``` xml
+<yandex>
+    <!-- 集群配置,写法与普通服务器配置中的集群配置相同 -->
+    <remote_servers>
+        <source_cluster>
+            <shard>
+                <internal_replication>false</internal_replication>
+                <replica>
+                    <host>127.0.0.1</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            ...
+        </source_cluster>
+
+        <destination_cluster>
+        ...
+        </destination_cluster>
+    </remote_servers>
+
+    <!-- 可同时运行的 worker 数量上限 -->
+    <max_workers>2</max_workers>
+
+    <!-- 从源集群表拉取(pull)数据时使用的设置 -->
+    <settings_pull>
+        <readonly>1</readonly>
+    </settings_pull>
+
+    <!-- 向目标集群表写入(push)数据时使用的设置 -->
+    <settings_push>
+        <readonly>0</readonly>
+    </settings_push>
+
+    <!-- pull 和 push 操作的公共设置,会分别被 settings_pull 和 settings_push 覆盖 -->
+    <settings>
+        <connect_timeout>3</connect_timeout>
+        <insert_distributed_sync>1</insert_distributed_sync>
+    </settings>
+
+    <!-- 复制任务的描述。可以在同一任务描述中指定多个表任务,它们会依次执行 -->
+    <tables>
+        <!-- 一个表任务,复制一张表 -->
+        <table_hits>
+            <!-- 源集群名(来自 remote_servers 一节)及其中要复制的表 -->
+            <cluster_pull>source_cluster</cluster_pull>
+            <database_pull>test</database_pull>
+            <table_pull>hits</table_pull>
+
+            <!-- 目标集群名以及数据要写入的表 -->
+            <cluster_push>destination_cluster</cluster_push>
+            <database_push>test</database_push>
+            <table_push>hits2</table_push>
+
+            <!-- 目标表的引擎定义 -->
+            <engine>
+            ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}')
+            PARTITION BY toMonday(date)
+            ORDER BY (CounterID, EventDate)
+            </engine>
+
+            <!-- 向目标集群写入数据时使用的分片键 -->
+            <sharding_key>jumpConsistentHash(intHash64(UserID), 2)</sharding_key>
+
+            <!-- 从源服务器拉取数据时的过滤表达式(可选) -->
+            <where_condition>CounterID != 0</where_condition>
+
+            <!-- 要复制的分区,未列出的分区会被忽略 -->
+            <enabled_partitions>
+                <partition>'2018-02-26'</partition>
+                <partition>'2018-03-05'</partition>
+                ...
+            </enabled_partitions>
+        </table_hits>
+
+        <!-- 下一个要复制的表任务 -->
+        <table_visits>
+        ...
+        </table_visits>
+    </tables>
+</yandex>
+```
+
+`clickhouse-copier` 跟踪 `/task/path/description` 中的更改,并动态应用它们。例如,如果修改了 `max_workers` 的值,运行任务的进程数也会随之变化。
+
+[原始文章](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/)
diff --git a/docs/zh/operations/utilities/clickhouse-local.md b/docs/zh/operations/utilities/clickhouse-local.md
new file mode 100644
index 00000000000..e29d8f6c4ac
--- /dev/null
+++ b/docs/zh/operations/utilities/clickhouse-local.md
@@ -0,0 +1,71 @@
+# clickhouse-local {#clickhouse-local}
+
+`clickhouse-local` 程序使您无需部署和配置 ClickHouse 服务器,即可对本地文件进行快速处理。
+
+它接受代表表的数据,并使用 [ClickHouse SQL 方言](../../query_language/index.md)查询这些数据。
+
+`clickhouse-local` 使用与 ClickHouse 服务器相同的核心,因此支持大多数功能以及相同的格式和表引擎。
+
+默认情况下,`clickhouse-local` 不能访问同一主机上的数据,但支持通过 `--config-file` 参数加载服务器配置。
+
+!!! warning "警告"
+    不建议把生产服务器的配置加载到 `clickhouse-local`,因为人为失误可能导致数据损坏。
+
+## 用法 {#usage}
+
+基本用法:
+
+``` bash
+clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
+```
+
+参数:
+
+- `-S`, `--structure` — 输入数据的表结构。
+- `-if`, `--input-format` — 输入格式,默认为 `TSV`。
+- `-f`, `--file` — 数据路径,默认为 `stdin`。
+- `-q` `--query` — 要执行的查询,以 `;` 分隔。
+- `-N`, `--table` — 存放输出数据的表名,默认为 `table`。
+- `-of`, `--format`, `--output-format` — 输出格式,默认为 `TSV`。
+- `--stacktrace` — 发生异常时是否输出调试信息。
+- `--verbose` — 查询执行过程的更多细节。
+- `-s` — 禁用 `stderr` 日志。
+- `--config-file` — 配置文件的路径,格式与 ClickHouse 服务器的配置相同,默认为空配置。
+- `--help` — `clickhouse-local` 的参数说明。
+
+此外,每个 ClickHouse 配置变量都有对应的命令行参数,相比 `--config-file`,它们更为常用。
+
+## 示例 {#examples}
+
+``` bash
+echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
+Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
+1 2
+3 4
+```
+
+上面的例子等价于:
+
+``` bash
+$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
+Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
+1 2
+3 4
+```
+
+现在让我们输出每个 Unix 用户的内存用量:
+
+``` bash
+$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
+Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec.
+┏━━━━━━━━━━┳━━━━━━━━━━┓
+┃ user     ┃ memTotal ┃
+┡━━━━━━━━━━╇━━━━━━━━━━┩
+│ bayonet  │    113.5 │
+├──────────┼──────────┤
+│ root     │      8.8 │
+├──────────┼──────────┤
+...
+```
+
+[原始文章](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/)
diff --git a/docs/zh/operations/utilities/index.md b/docs/zh/operations/utilities/index.md
new file mode 100644
index 00000000000..8d70ef4a6bb
--- /dev/null
+++ b/docs/zh/operations/utilities/index.md
@@ -0,0 +1,8 @@
+
+# ClickHouse 实用工具 {#clickhouse-utility}
+
+- [clickhouse-local](clickhouse-local.md) — 无需启动 ClickHouse 服务器即可对数据执行 SQL 查询,类似于 `awk` 的用法。
+- [clickhouse-copier](clickhouse-copier.md) — 在集群之间复制(并重新分片)数据。
+- [clickhouse-benchmark](clickhouse-benchmark.md) — 使用自定义的查询和设置对服务器加压。
+
+[原始文章](https://clickhouse.tech/docs/en/operations/utils/)
diff --git a/docs/zh/operations/utils/clickhouse-benchmark.md b/docs/zh/operations/utils/clickhouse-benchmark.md
deleted file mode 120000
index 133b4d2e511..00000000000
--- a/docs/zh/operations/utils/clickhouse-benchmark.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/utils/clickhouse-benchmark.md
\ No newline at end of file
diff --git a/docs/zh/operations/utils/clickhouse-copier.md b/docs/zh/operations/utils/clickhouse-copier.md
deleted file mode 100644
index 1a1b8599dba..00000000000
--- a/docs/zh/operations/utils/clickhouse-copier.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# clickhouse-copier {#clickhouse-copier}
-
-Copies data from the tables in one cluster to tables in another (or the same) cluster.
-
-You can run multiple `clickhouse-copier` instances on different servers to perform the same job. ZooKeeper is used for syncing the processes.
-
-After starting, `clickhouse-copier`:
-
-- Connects to ZooKeeper and receives:
-
-    - Copying jobs.
-    - The state of the copying jobs.
-
-- It performs the jobs.
-
-    Each running process chooses the "closest" shard of the source cluster and copies the data into the destination cluster, resharding the data if necessary.
-
-`clickhouse-copier` tracks the changes in ZooKeeper and applies them on the fly.
-
-To reduce network traffic, we recommend running `clickhouse-copier` on the same server where the source data is located.
- -## Running clickhouse-copier {#running-clickhouse-copier} - -The utility should be run manually: - -``` bash -clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir -``` - -Parameters: - -- `daemon` — Starts `clickhouse-copier` in daemon mode. -- `config` — The path to the `zookeeper.xml` file with the parameters for the connection to ZooKeeper. -- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` processes and storing tasks. Tasks are stored in `$task-path/description`. -- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` creates `clickhouse-copier_YYYYMMHHSS_` subdirectories in `$base-dir`. If this parameter is omitted, the directories are created in the directory where `clickhouse-copier` was launched. - -## Format of zookeeper.xml {#format-of-zookeeper-xml} - -``` xml - - - trace - 100M - 3 - - - - - 127.0.0.1 - 2181 - - - -``` - -## Configuration of copying tasks {#configuration-of-copying-tasks} - -``` xml - - - - - - false - - 127.0.0.1 - 9000 - - - ... - - - - ... - - - - - 2 - - - - 1 - - - - - 0 - - - - - 3 - - 1 - - - - - - - - source_cluster - test - hits - - - destination_cluster - test - hits2 - - - - ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}') - PARTITION BY toMonday(date) - ORDER BY (CounterID, EventDate) - - - - jumpConsistentHash(intHash64(UserID), 2) - - - CounterID != 0 - - - - '2018-02-26' - '2018-03-05' - ... - - - - - - ... - - ... - - -``` - -`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For instance, if you change the value of `max_workers`, the number of processes running tasks will also change. - -[Original article](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/) diff --git a/docs/zh/operations/utils/clickhouse-local.md b/docs/zh/operations/utils/clickhouse-local.md deleted file mode 100644 index 159e914f446..00000000000 --- a/docs/zh/operations/utils/clickhouse-local.md +++ /dev/null @@ -1,71 +0,0 @@ -# clickhouse-local {#clickhouse-local} - -The `clickhouse-local` program enables you to perform fast processing on local files, without having to deploy and configure the ClickHouse server. - -Accepts data that represent tables and queries them using [ClickHouse SQL dialect](../../query_language/index.md). - -`clickhouse-local` uses the same core as ClickHouse server, so it supports most of the features and the same set of formats and table engines. - -By default `clickhouse-local` does not have access to data on the same host, but it supports loading server configuration using `--config-file` argument. - -!!! warning "Warning" - It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error. - -## Usage {#usage} - -Basic usage: - -``` bash -clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query" -``` - -Arguments: - -- `-S`, `--structure` — table structure for input data. -- `-if`, `--input-format` — input format, `TSV` by default. -- `-f`, `--file` — path to data, `stdin` by default. -- `-q` `--query` — queries to execute with `;` as delimeter. -- `-N`, `--table` — table name where to put output data, `table` by default. -- `-of`, `--format`, `--output-format` — output format, `TSV` by default. -- `--stacktrace` — whether to dump debug output in case of exception. 
-- `--verbose` — more details on query execution. -- `-s` — disables `stderr` logging. -- `--config-file` — path to configuration file in same format as for ClickHouse server, by default the configuration empty. -- `--help` — arguments references for `clickhouse-local`. - -Also there are arguments for each ClickHouse configuration variable which are more commonly used instead of `--config-file`. - -## Examples {#examples} - -``` bash -echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table" -Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec. -1 2 -3 4 -``` - -Previous example is the same as: - -``` bash -$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" -Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec. -1 2 -3 4 -``` - -Now let’s output memory user for each Unix user: - -``` bash -$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty" -Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec. -┏━━━━━━━━━━┳━━━━━━━━━━┓ -┃ user ┃ memTotal ┃ -┡━━━━━━━━━━╇━━━━━━━━━━┩ -│ bayonet │ 113.5 │ -├──────────┼──────────┤ -│ root │ 8.8 │ -├──────────┼──────────┤ -... -``` - -[Original article](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/) diff --git a/docs/zh/operations/utils/index.md b/docs/zh/operations/utils/index.md deleted file mode 100644 index ebc1396d031..00000000000 --- a/docs/zh/operations/utils/index.md +++ /dev/null @@ -1,7 +0,0 @@ -# ClickHouse Utility {#clickhouse-utility} - -- [clickhouse-local](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this. -- [clickhouse-copier](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster. -- [clickhouse-benchmark](clickhouse-benchmark.md) — Loads server with the custom queries and settings. 
- -[Original article](https://clickhouse.tech/docs/en/operations/utils/) diff --git a/docs/zh/query_language/agg_functions/combinators.md b/docs/zh/query_language/agg_functions/combinators.md deleted file mode 120000 index 2b914cebd15..00000000000 --- a/docs/zh/query_language/agg_functions/combinators.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/agg_functions/combinators.md \ No newline at end of file diff --git a/docs/zh/query_language/agg_functions/index.md b/docs/zh/query_language/agg_functions/index.md deleted file mode 120000 index 2fcf67abdeb..00000000000 --- a/docs/zh/query_language/agg_functions/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/agg_functions/index.md \ No newline at end of file diff --git a/docs/zh/query_language/agg_functions/parametric_functions.md b/docs/zh/query_language/agg_functions/parametric_functions.md deleted file mode 120000 index fd3ffafcc5b..00000000000 --- a/docs/zh/query_language/agg_functions/parametric_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/agg_functions/parametric_functions.md \ No newline at end of file diff --git a/docs/zh/query_language/agg_functions/reference.md b/docs/zh/query_language/agg_functions/reference.md deleted file mode 120000 index c5651cb0793..00000000000 --- a/docs/zh/query_language/agg_functions/reference.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/agg_functions/reference.md \ No newline at end of file diff --git a/docs/zh/query_language/alter.md b/docs/zh/query_language/alter.md deleted file mode 120000 index 44f4ecf9737..00000000000 --- a/docs/zh/query_language/alter.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/alter.md \ No newline at end of file diff --git a/docs/zh/query_language/create.md b/docs/zh/query_language/create.md deleted file mode 100644 index 24cacdd9477..00000000000 --- a/docs/zh/query_language/create.md +++ /dev/null @@ -1,263 +0,0 @@ -## CREATE DATABASE {#create-database} - -该查询用于根据指定名称创建数据库。 - -``` sql -CREATE DATABASE [IF NOT EXISTS] db_name -``` - -数据库其实只是用于存放表的一个目录。 -如果查询中存在`IF NOT EXISTS`,则当数据库已经存在时,该查询不会返回任何错误。 - -## CREATE TABLE {#create-table-query} - -对于`CREATE TABLE`,存在以下几种方式。 - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = engine -``` - -在指定的‘db’数据库中创建一个名为‘name’的表,如果查询中没有包含‘db’,则默认使用当前选择的数据库作为‘db’。后面的是包含在括号中的表结构以及表引擎的声明。 -其中表结构声明是一个包含一组列描述声明的组合。如果表引擎是支持索引的,那么可以在表引擎的参数中对其进行说明。 - -在最简单的情况下,列描述是指`名称 类型`这样的子句。例如: `RegionID UInt32`。 -但是也可以为列另外定义默认值表达式(见后文)。 - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine] -``` - -创建一个与`db2.name2`具有相同结构的表,同时你可以对其指定不同的表引擎声明。如果没有表引擎声明,则创建的表将与`db2.name2`使用相同的表引擎。 - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ... 
-``` - -使用指定的引擎创建一个与`SELECT`子句的结果具有相同结构的表,并使用`SELECT`子句的结果填充它。 - -以上所有情况,如果指定了`IF NOT EXISTS`,那么在该表已经存在的情况下,查询不会返回任何错误。在这种情况下,查询几乎不会做任何事情。 - -在`ENGINE`子句后还可能存在一些其他的子句,更详细的信息可以参考 [表引擎](../operations/table_engines/index.md) 中关于建表的描述。 - -### 默认值 {#create-default-values} - -在列描述中你可以通过以下方式之一为列指定默认表达式:`DEFAULT expr`,`MATERIALIZED expr`,`ALIAS expr`。 -示例:`URLDomain String DEFAULT domain(URL)`。 - -如果在列描述中未定义任何默认表达式,那么系统将会根据类型设置对应的默认值,如:数值类型为零、字符串类型为空字符串、数组类型为空数组、日期类型为‘0000-00-00’以及时间类型为‘0000-00-00 00:00:00’。不支持使用NULL作为普通类型的默认值。 - -如果定义了默认表达式,则可以不定义列的类型。如果没有明确的定义类的类型,则使用默认表达式的类型。例如:`EventDate DEFAULT toDate(EventTime)` - 最终‘EventDate’将使用‘Date’作为类型。 - -如果同时指定了默认表达式与列的类型,则将使用类型转换函数将默认表达式转换为指定的类型。例如:`Hits UInt32 DEFAULT 0`与`Hits UInt32 DEFAULT toUInt32(0)`意思相同。 - -默认表达式可以包含常量或表的任意其他列。当创建或更改表结构时,系统将会运行检查,确保不会包含循环依赖。对于INSERT, 它仅检查表达式是否是可以解析的 - 它们可以从中计算出所有需要的列的默认值。 - -`DEFAULT expr` - -普通的默认值,如果INSERT中不包含指定的列,那么将通过表达式计算它的默认值并填充它。 - -`MATERIALIZED expr` - -物化表达式,被该表达式指定的列不能包含在INSERT的列表中,因为它总是被计算出来的。 -对于INSERT而言,不需要考虑这些列。 -另外,在SELECT查询中如果包含星号,此列不会被用来替换星号,这是因为考虑到数据转储,在使用`SELECT *`查询出的结果总能够被’INSERT’回表。 - -`ALIAS expr` - -别名。这样的列不会存储在表中。 -它的值不能够通过INSERT写入,同时使用SELECT查询星号时,这些列也不会被用来替换星号。 -但是它们可以显示的用于SELECT中,在这种情况下,在查询分析中别名将被替换。 - -当使用ALTER查询对添加新的列时,不同于为所有旧数据添加这个列,对于需要在旧数据中查询新列,只会在查询时动态计算这个新列的值。但是如果新列的默认表示中依赖其他列的值进行计算,那么同样会加载这些依赖的列的数据。 - -如果你向表中添加一个新列,并在之后的一段时间后修改它的默认表达式,则旧数据中的值将会被改变。请注意,在运行后台合并时,缺少的列的值将被计算后写入到合并后的数据部分中。 - -不能够为nested类型的列设置默认值。 - -### Constraints {#constraints} - -Along with columns descriptions constraints could be defined: - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1], - ... - CONSTRAINT constraint_name_1 CHECK boolean_expr_1, - ... -) ENGINE = engine -``` - -`boolean_expr_1` could by any boolean expression. If constraints are defined for the table, each of them will be checked for every row in `INSERT` query. If any constraint is not satisfied — server will raise an exception with constraint name and checking expression. - -Adding large amount of constraints can negatively affect performance of big `INSERT` queries. - -### TTL Expression {#ttl-expression} - -Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl). - -### Column Compression Codecs {#codecs} - -By default, ClickHouse applies the compression method, defined in [server settings](../operations/server_settings/settings.md#server-settings-compression), to columns. You can also define the compression method for each individual column in the `CREATE TABLE` query. - -``` sql -CREATE TABLE codec_example -( - dt Date CODEC(ZSTD), - ts DateTime CODEC(LZ4HC), - float_value Float32 CODEC(NONE), - double_value Float64 CODEC(LZ4HC(9)) - value Float32 CODEC(Delta, ZSTD) -) -ENGINE = -... -``` - -If a codec is specified, the default codec doesn’t apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for you project, pass benchmarks similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. - -!!! warning "Warning" - You can’t decompress ClickHouse database files with external utilities like `lz4`. 
Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor) utility. - -Compression is supported for the following table engines: - -- [MergeTree](../operations/table_engines/mergetree.md) family -- [Log](../operations/table_engines/log_family.md) family -- [Set](../operations/table_engines/set.md) -- [Join](../operations/table_engines/join.md) - -ClickHouse supports common purpose codecs and specialized codecs. - -#### Specialized Codecs {#create-query-specialized-codecs} - -These codecs are designed to make compression more effective by using specific features of data. Some of these codecs don’t compress data themself. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation. - -Specialized codecs: - -- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` are used for storing delta values, so `delta_bytes` is the maximum size of raw values. Possible `delta_bytes` values: 1, 2, 4, 8. The default value for `delta_bytes` is `sizeof(type)` if equal to 1, 2, 4, or 8. In all other cases, it’s 1. -- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). -- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). -- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` and `DateTime`). At each step of its algorithm, codec takes a block of 64 values, puts them into 64x64 bit matrix, transposes it, crops the unused bits of values and returns the rest as a sequence. Unused bits are the bits, that don’t differ between maximum and minimum values in the whole data part for which the compression is used. - -`DoubleDelta` and `Gorilla` codecs are used in Gorilla TSDB as the components of its compressing algorithm. Gorilla approach is effective in scenarios when there is a sequence of slowly changing values with their timestamps. Timestamps are effectively compressed by the `DoubleDelta` codec, and values are effectively compressed by the `Gorilla` codec. For example, to get an effectively stored table, you can create it in the following configuration: - -``` sql -CREATE TABLE codec_example -( - timestamp DateTime CODEC(DoubleDelta), - slow_values Float32 CODEC(Gorilla) -) -ENGINE = MergeTree() -``` - -#### Common purpose codecs {#create-query-common-purpose-codecs} - -Codecs: - -- `NONE` — No compression. 
-- `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression. -- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: \[1, 12\]. Recommended level range: \[4, 9\]. -- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default value: 1. - -High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage. - -## 临时表 {#lin-shi-biao} - -ClickHouse支持临时表,其具有以下特征: - -- 当回话结束时,临时表将随会话一起消失,这包含链接中断。 -- 临时表仅能够使用Memory表引擎。 -- 无法为临时表指定数据库。它是在数据库之外创建的。 -- 如果临时表与另一个表名称相同,那么当在查询时没有显示的指定db的情况下,将优先使用临时表。 -- 对于分布式处理,查询中使用的临时表将被传递到远程服务器。 - -可以使用下面的语法创建一个临时表: - -``` sql -CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) -``` - -大多数情况下,临时表不是手动创建的,只有在分布式查询处理中使用`(GLOBAL) IN`时为外部数据创建。更多信息,可以参考相关章节。 - -## 分布式DDL查询 (ON CLUSTER 子句) {#fen-bu-shi-ddlcha-xun-on-cluster-zi-ju} - -对于 `CREATE`, `DROP`, `ALTER`,以及`RENAME`查询,系统支持其运行在整个集群上。 -例如,以下查询将在`cluster`集群的所有节点上创建名为`all_hits`的`Distributed`表: - -``` sql -CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits) -``` - -为了能够正确的运行这种查询,每台主机必须具有相同的cluster声明(为了简化配置的同步,你可以使用zookeeper的方式进行配置)。同时这些主机还必须链接到zookeeper服务器。 -这个查询将最终在集群的每台主机上运行,即使一些主机当前处于不可用状态。同时它还保证了所有的查询在单台主机中的执行顺序。 - -## CREATE VIEW {#create-view} - -``` sql -CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... -``` - -创建一个视图。它存在两种可选择的类型:普通视图与物化视图。 - -普通视图不存储任何数据,只是执行从另一个表中的读取。换句话说,普通视图只是保存了视图的查询,当从视图中查询时,此查询被作为子查询用于替换FROM子句。 - -举个例子,假设你已经创建了一个视图: - -``` sql -CREATE VIEW view AS SELECT ... -``` - -还有一个查询: - -``` sql -SELECT a, b, c FROM view -``` - -这个查询完全等价于: - -``` sql -SELECT a, b, c FROM (SELECT ...) -``` - -物化视图存储的数据是由相应的SELECT查询转换得来的。 - -在创建物化视图时,你还必须指定表的引擎 - 将会使用这个表引擎存储数据。 - -目前物化视图的工作原理:当将数据写入到物化视图中SELECT子句所指定的表时,插入的数据会通过SELECT子句查询进行转换并将最终结果插入到视图中。 - -如果创建物化视图时指定了POPULATE子句,则在创建时将该表的数据插入到物化视图中。就像使用`CREATE TABLE ... AS SELECT ...`一样。否则,物化视图只会包含在物化视图创建后的新写入的数据。我们不推荐使用POPULATE,因为在视图创建期间写入的数据将不会写入其中。 - -当一个`SELECT`子句包含`DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`时,请注意,这些仅会在插入数据时在每个单独的数据块上执行。例如,如果你在其中包含了`GROUP BY`,则只会在查询期间进行聚合,但聚合范围仅限于单个批的写入数据。数据不会进一步被聚合。但是当你使用一些其他数据聚合引擎时这是例外的,如:`SummingMergeTree`。 - -目前对物化视图执行`ALTER`是不支持的,因此这可能是不方便的。如果物化视图是使用的`TO [db.]name`的方式进行构建的,你可以使用`DETACH`语句现将视图剥离,然后使用`ALTER`运行在目标表上,然后使用`ATTACH`将之前剥离的表重新加载进来。 - -视图看起来和普通的表相同。例如,你可以通过`SHOW TABLES`查看到它们。 - -没有单独的删除视图的语法。如果要删除视图,请使用`DROP TABLE`。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/create/) - -## CREATE DICTIONARY {#create-dictionary-query} - -``` sql -CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] -( - key1 type1 [DEFAULT|EXPRESSION expr1] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID], - key2 type2 [DEFAULT|EXPRESSION expr2] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID], - attr1 type2 [DEFAULT|EXPRESSION expr3], - attr2 type2 [DEFAULT|EXPRESSION expr4] -) -PRIMARY KEY key1, key2 -SOURCE(SOURCE_NAME([param1 value1 ... 
paramN valueN])) -LAYOUT(LAYOUT_NAME([param_name param_value])) -LIFETIME([MIN val1] MAX val2) -``` diff --git a/docs/zh/query_language/dicts/external_dicts.md b/docs/zh/query_language/dicts/external_dicts.md deleted file mode 120000 index 491b94bffe6..00000000000 --- a/docs/zh/query_language/dicts/external_dicts.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/external_dicts_dict.md b/docs/zh/query_language/dicts/external_dicts_dict.md deleted file mode 120000 index e27820fee60..00000000000 --- a/docs/zh/query_language/dicts/external_dicts_dict.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts_dict.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md deleted file mode 120000 index 3f244dc84de..00000000000 --- a/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts_dict_hierarchical.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/external_dicts_dict_layout.md b/docs/zh/query_language/dicts/external_dicts_dict_layout.md deleted file mode 120000 index e391c5be723..00000000000 --- a/docs/zh/query_language/dicts/external_dicts_dict_layout.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts_dict_layout.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md b/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md deleted file mode 120000 index 03b53c09077..00000000000 --- a/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts_dict_lifetime.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/external_dicts_dict_sources.md b/docs/zh/query_language/dicts/external_dicts_dict_sources.md deleted file mode 120000 index d4f4bf8ef3e..00000000000 --- a/docs/zh/query_language/dicts/external_dicts_dict_sources.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts_dict_sources.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/external_dicts_dict_structure.md b/docs/zh/query_language/dicts/external_dicts_dict_structure.md deleted file mode 120000 index 69ff759caea..00000000000 --- a/docs/zh/query_language/dicts/external_dicts_dict_structure.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts_dict_structure.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/index.md b/docs/zh/query_language/dicts/index.md deleted file mode 120000 index fdc188ca2a2..00000000000 --- a/docs/zh/query_language/dicts/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/index.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/internal_dicts.md b/docs/zh/query_language/dicts/internal_dicts.md deleted file mode 120000 index 3f9408dcd45..00000000000 --- a/docs/zh/query_language/dicts/internal_dicts.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/internal_dicts.md \ No newline at end of file diff --git a/docs/zh/query_language/functions/arithmetic_functions.md b/docs/zh/query_language/functions/arithmetic_functions.md deleted file mode 100644 index 08d13b15af1..00000000000 --- a/docs/zh/query_language/functions/arithmetic_functions.md 
+++ /dev/null @@ -1,76 +0,0 @@ -# 算术函数 {#suan-zhu-han-shu} - -对于所有算术函数,结果类型为结果适合的最小数字类型(如果存在这样的类型)。最小数字类型是根据数字的位数,是否有符号以及是否是浮点类型而同时进行的。如果没有足够的位,则采用最高位类型。 - -例如: - -``` sql -SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 + 0 + 0) -``` - - ┌─toTypeName(0)─┬─toTypeName(plus(0, 0))─┬─toTypeName(plus(plus(0, 0), 0))─┬─toTypeName(plus(plus(plus(0, 0), 0), 0))─┐ - │ UInt8 │ UInt16 │ UInt32 │ UInt64 │ - └───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘ - -算术函数适用于UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64,Float32或Float64中的任何类型。 - -溢出的产生方式与C++相同。 - -## plus(a, b), a + b {#plusa-b-a-b} - -计算数字的总和。 -您还可以将Date或DateTime与整数进行相加。在Date的情况下,添加的整数意味着添加相应的天数。对于DateTime,这意味这添加相应的描述。 - -## minus(a, b), a - b {#minusa-b-a-b} - -计算数字之间的差,结果总是有符号的。 - -您还可以将Date或DateTime与整数进行相减。见上面的’plus’。 - -## multiply(a, b), a \* b {#multiplya-b-a-b} - -计算数字的乘积。 - -## divide(a, b), a / b {#dividea-b-a-b} - -计算数字的商。结果类型始终是浮点类型。 -它不是整数除法。对于整数除法,请使用’intDiv’函数。 -当除以零时,你得到’inf’,‘- inf’或’nan’。 - -## intDiv(a, b) {#intdiva-b} - -计算整数数字的商,向下舍入(按绝对值)。 -除以零或将最小负数除以-1时抛出异常。 - -## intDivOrZero(a, b) {#intdivorzeroa-b} - -与’intDiv’的不同之处在于它在除以零或将最小负数除以-1时返回零。 - -## modulo(a, b), a % b {#moduloa-b-a-b} - -计算除法后的余数。 -如果参数是浮点数,则通过删除小数部分将它们预转换为整数。 -其余部分与C++中的含义相同。截断除法用于负数。 -除以零或将最小负数除以-1时抛出异常。 - -## negate(a), -a {#negatea-a} - -计算一个数字的 -用反转符号计算一个数字。结果始终是签名的。 -Calculates a number with the reverse sign. The result is always signed. - -## abs(a) {#arithm_func-abs} - -计算数字(a)的绝对值。也就是说,如果a &lt; 0,它返回-a。对于无符号类型,它不执行任何操作。对于有符号整数类型,它返回无符号数。 - -## gcd(a, b) {#gcda-b} - -返回数字的最大公约数。 -除以零或将最小负数除以-1时抛出异常。 - -## lcm(a, b) {#lcma-b} - -返回数字的最小公倍数。 -除以零或将最小负数除以-1时抛出异常。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/) diff --git a/docs/zh/query_language/functions/array_functions.md b/docs/zh/query_language/functions/array_functions.md deleted file mode 100644 index 7f0d734a7c9..00000000000 --- a/docs/zh/query_language/functions/array_functions.md +++ /dev/null @@ -1,665 +0,0 @@ -# 数组函数 {#shu-zu-han-shu} - -## empty {#empty} - -对于空数组返回1,对于非空数组返回0。 -结果类型是UInt8。 -该函数也适用于字符串。 - -## notEmpty {#notempty} - -对于空数组返回0,对于非空数组返回1。 -结果类型是UInt8。 -该函数也适用于字符串。 - -## length {#array_functions-length} - -返回数组中的元素个数。 -结果类型是UInt64。 -该函数也适用于字符串。 - -## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} - -## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} - -## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} - -## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime} - -## emptyArrayString {#emptyarraystring} - -不接受任何参数并返回适当类型的空数组。 - -## emptyArrayToSingle {#emptyarraytosingle} - -接受一个空数组并返回一个仅包含一个默认值元素的数组。 - -## range(N) {#rangen} - -返回从0到N-1的数字数组。 -以防万一,如果在数据块中创建总长度超过100,000,000个元素的数组,则抛出异常。 - -## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} - -使用函数的参数作为数组元素创建一个数组。 -参数必须是常量,并且具有最小公共类型的类型。必须至少传递一个参数,否则将不清楚要创建哪种类型的数组。也就是说,你不能使用这个函数来创建一个空数组(为此,使用上面描述的’emptyArray  \*’函数)。 -返回’Array(T)’类型的结果,其中’T’是传递的参数中最小的公共类型。 - -## arrayConcat {#arrayconcat} - -合并参数中传递的所有数组。 - - arrayConcat(arrays) - -**参数** - -- `arrays` – 任意数量的[Array](../../data_types/array.md)类型的参数. 
- **示例** - - - -``` sql -SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res -``` - - ┌─res───────────┐ - │ [1,2,3,4,5,6] │ - └───────────────┘ - -## arrayElement(arr, n), operator arr\[n\] {#arrayelementarr-n-operator-arrn} - -从数组`arr`中获取索引为«n»的元素。 `n`必须是任何整数类型。 -数组中的索引从一开始。 -支持负索引。在这种情况下,它选择从末尾开始编号的相应元素。例如,`arr [-1]`是数组中的最后一项。 - -如果索引超出数组的边界,则返回默认值(数字为0,字符串为空字符串等)。 - -## has(arr, elem) {#hasarr-elem} - -检查’arr’数组是否具有’elem’元素。 -如果元素不在数组中,则返回0;如果在,则返回1。 - -`NULL` 值的处理。 - - SELECT has([1, 2, NULL], NULL) - - ┌─has([1, 2, NULL], NULL)─┐ - │ 1 │ - └─────────────────────────┘ - -## hasAll {#hasall} - -检查一个数组是否是另一个数组的子集。 - - hasAll(set, subset) - -**参数** - -- `set` – 具有一组元素的任何类型的数组。 -- `subset` – 任何类型的数组,其元素应该被测试为`set`的子集。 - -**返回值** - -- `1`, 如果`set`包含`subset`中的所有元素。 -- `0`, 否则。 - -**特殊的定义** - -- 空数组是任何数组的子集。 -- «Null»作为数组中的元素值进行处理。 -- 忽略两个数组中的元素值的顺序。 - -**示例** - -`SELECT hasAll([], [])` returns 1. - -`SELECT hasAll([1, Null], [Null])` returns 1. - -`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` returns 1. - -`SELECT hasAll(['a', 'b'], ['a'])` returns 1. - -`SELECT hasAll([1], ['a'])` returns 0. - -`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` returns 0. - -## hasAny {#hasany} - -检查两个数组是否存在交集。 - - hasAny(array1, array2) - -**参数** - -- `array1` – 具有一组元素的任何类型的数组。 -- `array2` – 具有一组元素的任何类型的数组。 - -**返回值** - -- `1`, 如果`array1`和`array2`存在交集。 -- `0`, 否则。 - -**特殊的定义** - -- «Null»作为数组中的元素值进行处理。 -- 忽略两个数组中的元素值的顺序。 - -**示例** - -`SELECT hasAny([1], [])` returns `0`. - -`SELECT hasAny([Null], [Null, 1])` returns `1`. - -`SELECT hasAny([-128, 1., 512], [1])` returns `1`. - -`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` returns `0`. - -`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` returns `1`. - -## indexOf(arr, x) {#indexofarr-x} - -返回数组中第一个‘x’元素的索引(从1开始),如果‘x’元素不存在在数组中,则返回0。 - -示例: - - :) SELECT indexOf([1,3,NULL,NULL],NULL) - - SELECT indexOf([1, 3, NULL, NULL], NULL) - - ┌─indexOf([1, 3, NULL, NULL], NULL)─┐ - │ 3 │ - └───────────────────────────────────┘ - -设置为«NULL»的元素将作为普通的元素值处理。 - -## countEqual(arr, x) {#countequalarr-x} - -返回数组中等于x的元素的个数。相当于arrayCount(elem - \> elem = x,arr)。 - -`NULL`值将作为单独的元素值处理。 - -示例: - - SELECT countEqual([1, 2, NULL, NULL], NULL) - - ┌─countEqual([1, 2, NULL, NULL], NULL)─┐ - │ 2 │ - └──────────────────────────────────────┘ - -## arrayEnumerate(arr) {#array_functions-arrayenumerate} - -返回 Array \[1, 2, 3, …, length (arr) \] - -此功能通常与ARRAY JOIN一起使用。它允许在应用ARRAY JOIN后为每个数组计算一次。例如: - -``` sql -SELECT - count() AS Reaches, - countIf(num = 1) AS Hits -FROM test.hits -ARRAY JOIN - GoalsReached, - arrayEnumerate(GoalsReached) AS num -WHERE CounterID = 160656 -LIMIT 10 -``` - - ┌─Reaches─┬──Hits─┐ - │ 95606 │ 31406 │ - └─────────┴───────┘ - -在此示例中,Reaches是转换次数(应用ARRAY JOIN后接收的字符串),Hits是浏览量(ARRAY JOIN之前的字符串)。在这种特殊情况下,您可以更轻松地获得相同的结果: - -``` sql -SELECT - sum(length(GoalsReached)) AS Reaches, - count() AS Hits -FROM test.hits -WHERE (CounterID = 160656) AND notEmpty(GoalsReached) -``` - - ┌─Reaches─┬──Hits─┐ - │ 95606 │ 31406 │ - └─────────┴───────┘ - -此功能也可用于高阶函数。例如,您可以使用它来获取与条件匹配的元素的数组索引。 - -## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr} - -返回与源数组大小相同的数组,其中每个元素表示与其下标对应的源数组元素在源数组中出现的次数。 -例如:arrayEnumerateUniq( \[10,20,10,30 \])=  \[1,1,2,1 \]。 - -使用ARRAY JOIN和数组元素的聚合时,此函数很有用。 - -示例: - -``` sql -SELECT - Goals.ID AS GoalID, - sum(Sign) AS Reaches, - sumIf(Sign, num = 1) AS Visits -FROM test.visits -ARRAY JOIN - Goals, - arrayEnumerateUniq(Goals.ID) AS num -WHERE CounterID = 160656 -GROUP BY GoalID -ORDER BY Reaches DESC -LIMIT 10 -``` - - ┌──GoalID─┬─Reaches─┬─Visits─┐ - │ 
53225 │ 3214 │ 1097 │ - │ 2825062 │ 3188 │ 1097 │ - │ 56600 │ 2803 │ 488 │ - │ 1989037 │ 2401 │ 365 │ - │ 2830064 │ 2396 │ 910 │ - │ 1113562 │ 2372 │ 373 │ - │ 3270895 │ 2262 │ 812 │ - │ 1084657 │ 2262 │ 345 │ - │ 56599 │ 2260 │ 799 │ - │ 3271094 │ 2256 │ 812 │ - └─────────┴─────────┴────────┘ - -在此示例中,每个GoalID都计算转换次数(目标嵌套数据结构中的每个元素都是达到的目标,我们称之为转换)和会话数。如果没有ARRAY JOIN,我们会将会话数计为总和(Sign)。但在这种特殊情况下,行乘以嵌套的Goals结构,因此为了在此之后计算每个会话一次,我们将一个条件应用于arrayEnumerateUniq(Goals.ID)函数的值。 - -arrayEnumerateUniq函数可以使用与参数大小相同的多个数组。在这种情况下,对于所有阵列中相同位置的元素元组,考虑唯一性。 - -``` sql -SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res -``` - - ┌─res───────────┐ - │ [1,2,1,1,2,1] │ - └───────────────┘ - -当使用带有嵌套数据结构的ARRAY JOIN并在此结构中跨多个元素进一步聚合时,这是必需的。 - -## arrayPopBack {#arraypopback} - -从数组中删除最后一项。 - - arrayPopBack(array) - -**参数** - -- `array` – 数组。 - -**示例** - -``` sql -SELECT arrayPopBack([1, 2, 3]) AS res -``` - - ┌─res───┐ - │ [1,2] │ - └───────┘ - -## arrayPopFront {#arraypopfront} - -从数组中删除第一项。 - - arrayPopFront(array) - -**参数** - -- `array` – 数组。 - -**示例** - -``` sql -SELECT arrayPopFront([1, 2, 3]) AS res -``` - - ┌─res───┐ - │ [2,3] │ - └───────┘ - -## arrayPushBack {#arraypushback} - -添加一个元素到数组的末尾。 - - arrayPushBack(array, single_value) - -**参数** - -- `array` – 数组。 -- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../data_types/index.md#data_types)»。可以是’NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 - -**示例** - -``` sql -SELECT arrayPushBack(['a'], 'b') AS res -``` - - ┌─res───────┐ - │ ['a','b'] │ - └───────────┘ - -## arrayPushFront {#arraypushfront} - -将一个元素添加到数组的开头。 - - arrayPushFront(array, single_value) - -**参数** - -- `array` – 数组。 -- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../data_types/index.md#data_types)»。可以是’NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 - -**示例** - -``` sql -SELECT arrayPushFront(['b'], 'a') AS res -``` - - ┌─res───────┐ - │ ['a','b'] │ - └───────────┘ - -## arrayResize {#arrayresize} - -更改数组的长度。 - - arrayResize(array, size[, extender]) - -**参数:** - -- `array` — 数组. 
-- `size` — 数组所需的长度。 - - 如果`size`小于数组的原始大小,则数组将从右侧截断。 -- 如果`size`大于数组的初始大小,则使用`extender`值或数组项的数据类型的默认值将数组扩展到右侧。 -- `extender` — 扩展数组的值。可以是’NULL\`。 - -**返回值:** - -一个`size`长度的数组。 - -**调用示例** - - SELECT arrayResize([1], 3) - - ┌─arrayResize([1], 3)─┐ - │ [1,0,0] │ - └─────────────────────┘ - - SELECT arrayResize([1], 3, NULL) - - ┌─arrayResize([1], 3, NULL)─┐ - │ [1,NULL,NULL] │ - └───────────────────────────┘ - -## arraySlice {#arrayslice} - -返回一个子数组,包含从指定位置的指定长度的元素。 - - arraySlice(array, offset[, length]) - -**参数** - -- `array` – 数组。 -- `offset` – 数组的偏移。正值表示左侧的偏移量,负值表示右侧的缩进值。数组下标从1开始。 -- `length` - 子数组的长度。如果指定负值,则该函数返回`[offset,array_length - length`。如果省略该值,则该函数返回`[offset,the_end_of_array]`。 - -**示例** - -``` sql -SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res -``` - - ┌─res────────┐ - │ [2,NULL,4] │ - └────────────┘ - -设置为«NULL»的数组元素作为普通的数组元素值处理。 - -## arraySort(\[func,\] arr, …) {#array_functions-reverse-sort} - -以升序对`arr`数组的元素进行排序。如果指定了`func`函数,则排序顺序由`func`函数的调用结果决定。如果`func`接受多个参数,那么`arraySort`函数也将解析与`func`函数参数相同数量的数组参数。更详细的示例在`arraySort`的末尾。 - -整数排序示例: - -``` sql -SELECT arraySort([1, 3, 3, 0]); -``` - - ┌─arraySort([1, 3, 3, 0])─┐ - │ [0,1,3,3] │ - └─────────────────────────┘ - -字符串排序示例: - -``` sql -SELECT arraySort(['hello', 'world', '!']); -``` - - ┌─arraySort(['hello', 'world', '!'])─┐ - │ ['!','hello','world'] │ - └────────────────────────────────────┘ - -`NULL`,`NaN`和`Inf`的排序顺序: - -``` sql -SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); -``` - - ┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ - │ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ - └───────────────────────────────────────────────────────────┘ - -- `-Inf` 是数组中的第一个。 -- `NULL` 是数组中的最后一个。 -- `NaN` 在`NULL`的前面。 -- `Inf` 在`NaN`的前面。 - -注意:`arraySort`是[高阶函数](higher_order_functions.md)。您可以将lambda函数作为第一个参数传递给它。在这种情况下,排序顺序由lambda函数的调用结果决定。 - -让我们来看一下如下示例: - -``` sql -SELECT arraySort((x) -> -x, [1, 2, 3]) as res; -``` - - ┌─res─────┐ - │ [3,2,1] │ - └─────────┘ - -对于源数组的每个元素,lambda函数返回排序键,即\[1 -\> -1, 2 -\> -2, 3 -\> -3\]。由于`arraySort`函数按升序对键进行排序,因此结果为\[3,2,1\]。因此,`(x) -> -x` lambda函数将排序设置为[降序](#array_functions-reverse-sort)。 - -lambda函数可以接受多个参数。在这种情况下,您需要为`arraySort`传递与lambda参数个数相同的数组。函数使用第一个输入的数组中的元素组成返回结果;使用接下来传入的数组作为排序键。例如: - -``` sql -SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; -``` - - ┌─res────────────────┐ - │ ['world', 'hello'] │ - └────────────────────┘ - -这里,在第二个数组(\[2, 1\])中定义了第一个数组(\[‘hello’,‘world’\])的相应元素的排序键,即\[‘hello’ -\> 2,‘world’ -\> 1\]。 由于lambda函数中没有使用`x`,因此源数组中的实际值不会影响结果的顺序。所以,‘world’将是结果中的第一个元素,‘hello’将是结果中的第二个元素。 - -其他示例如下所示。 - -``` sql -SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; -``` - -``` sql -┌─res─────┐ -│ [2,1,0] │ -└─────────┘ -``` - -``` sql -SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; -``` - -``` sql -┌─res─────┐ -│ [2,1,0] │ -└─────────┘ -``` - -!!! 
注意 "注意" - 为了提高排序效率, 使用了[Schwartzian transform](https://en.wikipedia.org/wiki/Schwartzian_transform)。 - -## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} - -以降序对`arr`数组的元素进行排序。如果指定了`func`函数,则排序顺序由`func`函数的调用结果决定。如果`func`接受多个参数,那么`arrayReverseSort`函数也将解析与`func`函数参数相同数量的数组作为参数。更详细的示例在`arrayReverseSort`的末尾。 - -整数排序示例: - -``` sql -SELECT arrayReverseSort([1, 3, 3, 0]); -``` - - ┌─arrayReverseSort([1, 3, 3, 0])─┐ - │ [3,3,1,0] │ - └────────────────────────────────┘ - -字符串排序示例: - -``` sql -SELECT arrayReverseSort(['hello', 'world', '!']); -``` - - ┌─arrayReverseSort(['hello', 'world', '!'])─┐ - │ ['world','hello','!'] │ - └───────────────────────────────────────────┘ - -`NULL`,`NaN`和`Inf`的排序顺序: - -``` sql -SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; -``` - -``` sql -┌─res───────────────────────────────────┐ -│ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ -└───────────────────────────────────────┘ -``` - -- `Inf` 是数组中的第一个。 -- `NULL` 是数组中的最后一个。 -- `NaN` 在`NULL`的前面。 -- `-Inf` 在`NaN`的前面。 - -注意:`arraySort`是[高阶函数](higher_order_functions.md)。您可以将lambda函数作为第一个参数传递给它。如下示例所示。 - -``` sql -SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; -``` - - ┌─res─────┐ - │ [1,2,3] │ - └─────────┘ - -数组按以下方式排序: -The array is sorted in the following way: - -1. 首先,根据lambda函数的调用结果对源数组(\[1, 2, 3\])进行排序。 结果是\[3, 2, 1\]。 -2. 反转上一步获得的数组。 所以,最终的结果是\[1, 2, 3\]。 - -lambda函数可以接受多个参数。在这种情况下,您需要为`arrayReverseSort`传递与lambda参数个数相同的数组。函数使用第一个输入的数组中的元素组成返回结果;使用接下来传入的数组作为排序键。例如: - -``` sql -SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; -``` - -``` sql -┌─res───────────────┐ -│ ['hello','world'] │ -└───────────────────┘ -``` - -在这个例子中,数组按以下方式排序: - -1. 首先,根据lambda函数的调用结果对源数组(\[‘hello’,‘world’\])进行排序。 其中,在第二个数组(\[2,1\])中定义了源数组中相应元素的排序键。 所以,排序结果\[‘world’,‘hello’\]。 -2. 
反转上一步骤中获得的排序数组。 所以,最终的结果是\[‘hello’,‘world’\]。 - -其他示例如下所示。 - -``` sql -SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; -``` - -``` sql -┌─res─────┐ -│ [5,3,4] │ -└─────────┘ -``` - -``` sql -SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; -``` - -``` sql -┌─res─────┐ -│ [4,3,5] │ -└─────────┘ -``` - -## arrayUniq(arr, …) {#arrayuniqarr} - -如果传递一个参数,则计算数组中不同元素的数量。 -如果传递了多个参数,则它计算多个数组中相应位置的不同元素元组的数量。 - -如果要获取数组中唯一项的列表,可以使用arrayReduce(‘groupUniqArray’,arr)。 - -## arrayJoin(arr) {#array-functions-join} - -一个特殊的功能。请参见[«ArrayJoin函数»](array_join.md#functions_arrayjoin)部分。 - -## arrayDifference(arr) {#arraydifferencearr} - -返回一个数组,其中包含所有相邻元素对之间的差值。例如: - -``` sql -SELECT arrayDifference([1, 2, 3, 4]) -``` - - ┌─arrayDifference([1, 2, 3, 4])─┐ - │ [0,1,1,1] │ - └───────────────────────────────┘ - -## arrayDistinct(arr) {#arraydistinctarr} - -返回一个包含所有数组中不同元素的数组。例如: - -``` sql -SELECT arrayDistinct([1, 2, 2, 3, 1]) -``` - - ┌─arrayDistinct([1, 2, 2, 3, 1])─┐ - │ [1,2,3] │ - └────────────────────────────────┘ - -## arrayEnumerateDense(arr) {#arrayenumeratedensearr} - -返回与源数组大小相同的数组,指示每个元素首次出现在源数组中的位置。例如:arrayEnumerateDense(\[10,20,10,30\])= \[1,2,1,3\]。 - -## arrayIntersect(arr) {#arrayintersectarr} - -返回所有数组元素的交集。例如: - -``` sql -SELECT - arrayIntersect([1, 2], [1, 3], [2, 3]) AS no_intersect, - arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect -``` - - ┌─no_intersect─┬─intersect─┐ - │ [] │ [1] │ - └──────────────┴───────────┘ - -## arrayReduce(agg\_func, arr1, …) {#arrayreduceagg-func-arr1} - -将聚合函数应用于数组并返回其结果。如果聚合函数具有多个参数,则此函数可应用于相同大小的多个数组。 - -arrayReduce(‘agg\_func’,arr1,…) - 将聚合函数`agg_func`应用于数组`arr1 ...`。如果传递了多个数组,则相应位置上的元素将作为多个参数传递给聚合函数。例如:SELECT arrayReduce(‘max’,\[1,2,3\])= 3 - -## arrayReverse(arr) {#arrayreversearr} - -返回与源数组大小相同的数组,包含反转源数组的所有元素的结果。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/zh/query_language/functions/array_join.md b/docs/zh/query_language/functions/array_join.md deleted file mode 100644 index b7a4855efa5..00000000000 --- a/docs/zh/query_language/functions/array_join.md +++ /dev/null @@ -1,28 +0,0 @@ -# arrayJoin函数 {#functions_arrayjoin} - -这是一个非常有用的函数。 - -普通函数不会更改结果集的行数,而只是计算每行中的值(map)。 -聚合函数将多行压缩到一行中(fold或reduce)。 -’arrayJoin’函数获取每一行并将他们展开到多行(unfold)。 - -此函数将数组作为参数,并将该行在结果集中复制数组元素个数。 -除了应用此函数的列中的值之外,简单地复制列中的所有值;它被替换为相应的数组值。 - -查询可以使用多个`arrayJoin`函数。在这种情况下,转换被执行多次。 - -请注意SELECT查询中的ARRAY JOIN语法,它提供了更广泛的可能性。 - -示例: - -``` sql -SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src -``` - - ┌─dst─┬─\'Hello\'─┬─src─────┐ - │ 1 │ Hello │ [1,2,3] │ - │ 2 │ Hello │ [1,2,3] │ - │ 3 │ Hello │ [1,2,3] │ - └─────┴───────────┴─────────┘ - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/zh/query_language/functions/bit_functions.md b/docs/zh/query_language/functions/bit_functions.md deleted file mode 100644 index a9ded6b0930..00000000000 --- a/docs/zh/query_language/functions/bit_functions.md +++ /dev/null @@ -1,29 +0,0 @@ -# 位操作函数 {#wei-cao-zuo-han-shu} - -位操作函数适用于UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64,Float32或Float64中的任何类型。 - -结果类型是一个整数,其位数等于其参数的最大位。如果至少有一个参数为有符数字,则结果为有符数字。如果参数是浮点数,则将其强制转换为Int64。 - -## bitAnd(a, b) {#bitanda-b} - -## bitOr(a, b) {#bitora-b} - -## bitXor(a, b) {#bitxora-b} - -## bitNot(a) {#bitnota} - -## bitShiftLeft(a, b) {#bitshiftlefta-b} - -## bitShiftRight(a, b) {#bitshiftrighta-b} - -## bitRotateLeft(a, b) {#bitrotatelefta-b} - -## bitRotateRight(a, b) {#bitrotaterighta-b} - -## bitTest(a, b) 
{#bittesta-b} - -## bitTestAll(a, b) {#bittestalla-b} - -## bitTestAny(a, b) {#bittestanya-b} - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/zh/query_language/functions/bitmap_functions.md b/docs/zh/query_language/functions/bitmap_functions.md deleted file mode 100644 index 498212bc1fe..00000000000 --- a/docs/zh/query_language/functions/bitmap_functions.md +++ /dev/null @@ -1,384 +0,0 @@ -# 位图函数 {#wei-tu-han-shu} - -位图函数用于对两个位图对象进行计算,对于任何一个位图函数,它都将返回一个位图对象,例如and,or,xor,not等等。 - -位图对象有两种构造方法。一个是由聚合函数groupBitmapState构造的,另一个是由Array Object构造的。同时还可以将位图对象转化为数组对象。 - -我们使用RoaringBitmap实际存储位图对象,当基数小于或等于32时,它使用Set保存。当基数大于32时,它使用RoaringBitmap保存。这也是为什么低基数集的存储更快的原因。 - -有关RoaringBitmap的更多信息,请参阅:[CRoaring](https://github.com/RoaringBitmap/CRoaring)。 - -## bitmapBuild {#bitmapbuild} - -从无符号整数数组构建位图对象。 - - bitmapBuild(array) - -**参数** - -- `array` – 无符号整数数组. - -**示例** - -``` sql -SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res -``` - -## bitmapToArray {#bitmaptoarray} - -将位图转换为整数数组。 - - bitmapToArray(bitmap) - -**参数** - -- `bitmap` – 位图对象. - -**示例** - -``` sql -SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - - ┌─res─────────┐ - │ [1,2,3,4,5] │ - └─────────────┘ - -## bitmapSubsetInRange {#bitmapsubsetinrange} - -将位图指定范围(不包含range\_end)转换为另一个位图。 - - bitmapSubsetInRange(bitmap, range_start, range_end) - -**参数** - -- `bitmap` – 位图对象. -- `range_start` – 范围起始点(含). -- `range_end` – 范围结束点(不含). - -**示例** - -``` sql -SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res -``` - - ┌─res───────────────┐ - │ [30,31,32,33,100] │ - └───────────────────┘ - -## bitmapSubsetLimit {#bitmapsubsetlimit} - -将位图指定范围(起始点和数目上限)转换为另一个位图。 - - bitmapSubsetLimit(bitmap, range_start, limit) - -**参数** - -- `bitmap` – 位图对象. -- `range_start` – 范围起始点(含). -- `limit` – 子位图基数上限. - -**示例** - -``` sql -SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res -``` - - ┌─res───────────────────────┐ - │ [30,31,32,33,100,200,500] │ - └───────────────────────────┘ - -## bitmapContains {#bitmapcontains} - -检查位图是否包含指定元素。 - - bitmapContains(haystack, needle) - -**参数** - -- `haystack` – 位图对象. -- `needle` – 元素,类型UInt32. 
- -**示例** - -``` sql -SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## bitmapHasAny {#bitmaphasany} - -与`hasAny(array,array)`类似,如果位图有任何公共元素则返回1,否则返回0。 -对于空位图,返回0。 - - bitmapHasAny(bitmap,bitmap) - -**参数** - -- `bitmap` – bitmap对象。 - -**示例** - -``` sql -SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res -``` - - ┌─res─┐ - │ 1 │ - └─────┘ - -## bitmapHasAll {#bitmaphasall} - -与`hasAll(array,array)`类似,如果第一个位图包含第二个位图的所有元素,则返回1,否则返回0。 -如果第二个参数是空位图,则返回1。 - - bitmapHasAll(bitmap,bitmap) - -**参数** - -- `bitmap` – bitmap 对象。 - -**示例** - -``` sql -SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res -``` - - ┌─res─┐ - │ 0 │ - └─────┘ - -## bitmapAnd {#bitmapand} - -为两个位图对象进行与操作,返回一个新的位图对象。 - - bitmapAnd(bitmap1,bitmap2) - -**参数** - -- `bitmap1` – 位图对象。 -- `bitmap2` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - - ┌─res─┐ - │ [3] │ - └─────┘ - -## bitmapOr {#bitmapor} - -为两个位图对象进行或操作,返回一个新的位图对象。 - - bitmapOr(bitmap1,bitmap2) - -**Parameters** - -- `bitmap1` – 位图对象。 -- `bitmap2` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - - ┌─res─────────┐ - │ [1,2,3,4,5] │ - └─────────────┘ - -## bitmapXor {#bitmapxor} - -为两个位图对象进行异或操作,返回一个新的位图对象。 - - bitmapXor(bitmap1,bitmap2) - -**参数** - -- `bitmap1` – 位图对象。 -- `bitmap2` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - - ┌─res───────┐ - │ [1,2,4,5] │ - └───────────┘ - -## bitmapAndnot {#bitmapandnot} - -计算两个位图的差异,返回一个新的位图对象。 - - bitmapAndnot(bitmap1,bitmap2) - -**参数** - -- `bitmap1` – 位图对象。 -- `bitmap2` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res -``` - - ┌─res───┐ - │ [1,2] │ - └───────┘ - -## bitmapCardinality {#bitmapcardinality} - -返回一个UInt64类型的数值,表示位图对象的基数。 - - bitmapCardinality(bitmap) - -**Parameters** - -- `bitmap` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - - ┌─res─┐ - │ 5 │ - └─────┘ - -## bitmapMin {#bitmapmin} - -返回一个UInt64类型的数值,表示位图中的最小值。如果位图为空则返回UINT32\_MAX。 - - bitmapMin(bitmap) - -**Parameters** - -- `bitmap` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - - ┌─res─┐ - │ 1 │ - └─────┘ - -## bitmapMax {#bitmapmax} - -返回一个UInt64类型的数值,表示位图中的最大值。如果位图为空则返回0。 - - bitmapMax(bitmap) - -**Parameters** - -- `bitmap` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - - ┌─res─┐ - │ 5 │ - └─────┘ - -## bitmapAndCardinality {#bitmapandcardinality} - -为两个位图对象进行与操作,返回结果位图的基数。 - - bitmapAndCardinality(bitmap1,bitmap2) - -**参数** - -- `bitmap1` – 位图对象。 -- `bitmap2` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - - ┌─res─┐ - │ 1 │ - └─────┘ - -## bitmapOrCardinality {#bitmaporcardinality} - -为两个位图进行或运算,返回结果位图的基数。 - - bitmapOrCardinality(bitmap1,bitmap2) - -**参数** - -- `bitmap1` – 位图对象。 -- `bitmap2` – 位图对象。 - -**示例** - -``` sql -SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - - ┌─res─┐ - │ 5 │ - └─────┘ - -## bitmapXorCardinality {#bitmapxorcardinality} - -为两个位图进行异或运算,返回结果位图的基数。 - - bitmapXorCardinality(bitmap1,bitmap2) - -**参数** - -- `bitmap1` – 位图对象。 -- `bitmap2` – 位图对象。 - -**示例** - -``` sql -SELECT 
bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - - ┌─res─┐ - │ 4 │ - └─────┘ - -## bitmapAndnotCardinality {#bitmapandnotcardinality} - -计算两个位图的差异,返回结果位图的基数。 - - bitmapAndnotCardinality(bitmap1,bitmap2) - -**参数** - -- `bitmap1` – 位图对象。 -- `bitmap2` - 位图对象。 - -**示例** - -``` sql -SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; -``` - - ┌─res─┐ - │ 2 │ - └─────┘ - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) diff --git a/docs/zh/query_language/functions/comparison_functions.md b/docs/zh/query_language/functions/comparison_functions.md deleted file mode 100644 index ce8c3728b5b..00000000000 --- a/docs/zh/query_language/functions/comparison_functions.md +++ /dev/null @@ -1,32 +0,0 @@ -# 比较函数 {#bi-jiao-han-shu} - -比较函数始终返回0或1(UInt8)。 - -可以比较以下类型: - -- Numbers -- String 和 FixedString -- Date -- DateTime - -以上每个组内的类型均可互相比较,但是对于不同组的类型间不能够进行比较。 - -例如,您无法将日期与字符串进行比较。您必须使用函数将字符串转换为日期,反之亦然。 - -字符串按字节进行比较。较短的字符串小于以其开头并且至少包含一个字符的所有字符串。 - -注意。直到1.1.54134版本,有符号和无符号数字的比较方式与C++相同。换句话说,在SELECT 9223372036854775807 &gt; -1 等情况下,您可能会得到错误的结果。 此行为在版本1.1.54134中已更改,现在在数学上是正确的。 - -## equals, a = b and a == b operator {#equals-a-b-and-a-b-operator} - -## notEquals, a ! operator= b and a `<>` b {#notequals-a-operator-b-and-a-b} - -## less, `< operator` {#less-operator} - -## greater, `> operator` {#greater-operator} - -## lessOrEquals, `<= operator` {#lessorequals-operator} - -## greaterOrEquals, `>= operator` {#greaterorequals-operator} - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/zh/query_language/functions/conditional_functions.md b/docs/zh/query_language/functions/conditional_functions.md deleted file mode 100644 index 175656b8374..00000000000 --- a/docs/zh/query_language/functions/conditional_functions.md +++ /dev/null @@ -1,44 +0,0 @@ -# 条件函数 {#tiao-jian-han-shu} - -## if(cond, then, else), cond ? 
operator then : else {#ifcond-then-else-cond-operator-then-else} - -如果`cond != 0`则返回`then`,如果`cond = 0`则返回`else`。 -`cond`必须是`UInt8`类型,`then`和`else`必须存在最低的共同类型。 - -`then`和`else`可以是`NULL` - -## multiIf {#multiif} - -允许您在查询中更紧凑地编写[CASE](../operators.md#operator_case)运算符。 - - multiIf(cond_1, then_1, cond_2, then_2...else) - -**参数:** - -- `cond_N` — 函数返回`then_N`的条件。 -- `then_N` — 执行时函数的结果。 -- `else` — 如果没有满足任何条件,则为函数的结果。 - -该函数接受`2N + 1`参数。 - -**返回值** - -该函数返回值«then\_N»或«else»之一,具体取决于条件`cond_N`。 - -**示例** - -存在如下一张表 - - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - │ 2 │ 3 │ - └───┴──────┘ - -执行查询 `SELECT multiIf(isNull(y) x, y < 3, y, NULL) FROM t_null`。结果: - - ┌─multiIf(isNull(y), x, less(y, 3), y, NULL)─┐ - │ 1 │ - │ ᴺᵁᴸᴸ │ - └────────────────────────────────────────────┘ - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/zh/query_language/functions/date_time_functions.md b/docs/zh/query_language/functions/date_time_functions.md deleted file mode 100644 index fe9961d7658..00000000000 --- a/docs/zh/query_language/functions/date_time_functions.md +++ /dev/null @@ -1,292 +0,0 @@ -# 时间日期函数 {#shi-jian-ri-qi-han-shu} - -支持时区。 - -所有的时间日期函数都可以在第二个可选参数中接受时区参数。示例:Asia / Yekaterinburg。在这种情况下,它们使用指定的时区而不是本地(默认)时区。 - -``` sql -SELECT - toDateTime('2016-06-15 23:00:00') AS time, - toDate(time) AS date_local, - toDate(time, 'Asia/Yekaterinburg') AS date_yekat, - toString(time, 'US/Samoa') AS time_samoa -``` - - ┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐ - │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │ - └─────────────────────┴────────────┴────────────┴─────────────────────┘ - -仅支持与UTC相差一整小时的时区。 - -## toTimeZone {#totimezone} - -将Date或DateTime转换为指定的时区。 - -## toYear {#toyear} - -将Date或DateTime转换为包含年份编号(AD)的UInt16类型的数字。 - -## toQuarter {#toquarter} - -将Date或DateTime转换为包含季度编号的UInt8类型的数字。 - -## toMonth {#tomonth} - -将Date或DateTime转换为包含月份编号(1-12)的UInt8类型的数字。 - -## toDayOfYear {#todayofyear} - -将Date或DateTime转换为包含一年中的某一天的编号的UInt16(1-366)类型的数字。 - -## toDayOfMonth {#todayofmonth} - -将Date或DateTime转换为包含一月中的某一天的编号的UInt8(1-31)类型的数字。 - -## toDayOfWeek {#todayofweek} - -将Date或DateTime转换为包含一周中的某一天的编号的UInt8(周一是1, 周日是7)类型的数字。 - -## toHour {#tohour} - -将DateTime转换为包含24小时制(0-23)小时数的UInt8数字。 -这个函数假设如果时钟向前移动,它是一个小时,发生在凌晨2点,如果时钟被移回,它是一个小时,发生在凌晨3点(这并非总是如此 - 即使在莫斯科时钟在不同的时间两次改变)。 - -## toMinute {#tominute} - -将DateTime转换为包含一小时中分钟数(0-59)的UInt8数字。 - -## toSecond {#tosecond} - -将DateTime转换为包含一分钟中秒数(0-59)的UInt8数字。 -闰秒不计算在内。 - -## toUnixTimestamp {#tounixtimestamp} - -将DateTime转换为unix时间戳。 - -## toStartOfYear {#tostartofyear} - -将Date或DateTime向前取整到本年的第一天。 -返回Date类型。 - -## toStartOfISOYear {#tostartofisoyear} - -将Date或DateTime向前取整到ISO本年的第一天。 -返回Date类型。 - -## toStartOfQuarter {#tostartofquarter} - -将Date或DateTime向前取整到本季度的第一天。 -返回Date类型。 - -## toStartOfMonth {#tostartofmonth} - -将Date或DateTime向前取整到本月的第一天。 -返回Date类型。 - -!!! 
注意 "注意" -     解析不正确日期的行为是特定于实现的。 ClickHouse可能会返回零日期,抛出异常或执行«natural»溢出。 - -## toMonday {#tomonday} - -将Date或DateTime向前取整到本周的星期一。 -返回Date类型。 - -## toStartOfDay {#tostartofday} - -将DateTime向前取整到当日的开始。 - -## toStartOfHour {#tostartofhour} - -将DateTime向前取整到当前小时的开始。 - -## toStartOfMinute {#tostartofminute} - -将DateTime向前取整到当前分钟的开始。 - -## toStartOfFiveMinute {#tostartoffiveminute} - -将DateTime以五分钟为单位向前取整到最接近的时间点。 - -## toStartOfTenMinutes {#tostartoftenminutes} - -将DateTime以十分钟为单位向前取整到最接近的时间点。 - -## toStartOfFifteenMinutes {#tostartoffifteenminutes} - -将DateTime以十五分钟为单位向前取整到最接近的时间点。 - -## toStartOfInterval(time\_or\_data, INTERVAL x unit \[, time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} - -这是名为`toStartOf*`的所有函数的通用函数。例如, -`toStartOfInterval(t,INTERVAL 1 year)`返回与`toStartOfYear(t)`相同的结果, -`toStartOfInterval(t,INTERVAL 1 month)`返回与`toStartOfMonth(t)`相同的结果, -`toStartOfInterval(t,INTERVAL 1 day)`返回与`toStartOfDay(t)`相同的结果, -`toStartOfInterval(t,INTERVAL 15 minute)`返回与`toStartOfFifteenMinutes(t)`相同的结果。 - -## toTime {#totime} - -将DateTime中的日期转换为一个固定的日期,同时保留时间部分。 - -## toRelativeYearNum {#torelativeyearnum} - -将Date或DateTime转换为年份的编号,从过去的某个固定时间点开始。 - -## toRelativeQuarterNum {#torelativequarternum} - -将Date或DateTime转换为季度的数字,从过去的某个固定时间点开始。 - -## toRelativeMonthNum {#torelativemonthnum} - -将Date或DateTime转换为月份的编号,从过去的某个固定时间点开始。 - -## toRelativeWeekNum {#torelativeweeknum} - -将Date或DateTime转换为星期数,从过去的某个固定时间点开始。 - -## toRelativeDayNum {#torelativedaynum} - -将Date或DateTime转换为当天的编号,从过去的某个固定时间点开始。 - -## toRelativeHourNum {#torelativehournum} - -将DateTime转换为小时数,从过去的某个固定时间点开始。 - -## toRelativeMinuteNum {#torelativeminutenum} - -将DateTime转换为分钟数,从过去的某个固定时间点开始。 - -## toRelativeSecondNum {#torelativesecondnum} - -将DateTime转换为秒数,从过去的某个固定时间点开始。 - -## toISOYear {#toisoyear} - -将Date或DateTime转换为包含ISO年份的UInt16类型的编号。 - -## toISOWeek {#toisoweek} - -将Date或DateTime转换为包含ISO周数的UInt8类型的编号。 - -## now {#now} - -不接受任何参数并在请求执行时的某一刻返回当前时间(DateTime)。 -此函数返回一个常量,即时请求需要很长时间能够完成。 - -## today {#today} - -不接受任何参数并在请求执行时的某一刻返回当前日期(Date)。 -其功能与’toDate(now())’相同。 - -## yesterday {#yesterday} - -不接受任何参数并在请求执行时的某一刻返回昨天的日期(Date)。 -其功能与’today() - 1’相同。 - -## timeSlot {#timeslot} - -将时间向前取整半小时。 -此功能用于Yandex.Metrica,因为如果跟踪标记显示单个用户的连续综合浏览量在时间上严格超过此数量,则半小时是将会话分成两个会话的最短时间。这意味着(tag id,user id,time slot)可用于搜索相应会话中包含的综合浏览量。 - -## toYYYYMM {#toyyyymm} - -将Date或DateTime转换为包含年份和月份编号的UInt32类型的数字(YYYY \* 100 + MM)。 - -## toYYYYMMDD {#toyyyymmdd} - -将Date或DateTime转换为包含年份和月份编号的UInt32类型的数字(YYYY \* 10000 + MM \* 100 + DD)。 - -## toYYYYMMDDhhmmss {#toyyyymmddhhmmss} - -将Date或DateTime转换为包含年份和月份编号的UInt64类型的数字(YYYY \* 10000000000 + MM \* 100000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss)。 - -## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} - -函数将一段时间间隔添加到Date/DateTime,然后返回Date/DateTime。例如: - -``` sql -WITH - toDate('2018-01-01') AS date, - toDateTime('2018-01-01 00:00:00') AS date_time -SELECT - addYears(date, 1) AS add_years_with_date, - addYears(date_time, 1) AS add_years_with_date_time -``` - - ┌─add_years_with_date─┬─add_years_with_date_time─┐ - │ 2019-01-01 │ 2019-01-01 00:00:00 │ - └─────────────────────┴──────────────────────────┘ - -## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} - 
-函数将Date/DateTime减去一段时间间隔,然后返回Date/DateTime。例如: - -``` sql -WITH - toDate('2019-01-01') AS date, - toDateTime('2019-01-01 00:00:00') AS date_time -SELECT - subtractYears(date, 1) AS subtract_years_with_date, - subtractYears(date_time, 1) AS subtract_years_with_date_time -``` - - ┌─subtract_years_with_date─┬─subtract_years_with_date_time─┐ - │ 2018-01-01 │ 2018-01-01 00:00:00 │ - └──────────────────────────┴───────────────────────────────┘ - -## dateDiff(‘unit’, t1, t2, \[timezone\]) {#datediffunit-t1-t2-timezone} - -返回以’unit’为单位表示的两个时间之间的差异,例如`'hours'`。 ‘t1’和’t2’可以是Date或DateTime,如果指定’timezone’,它将应用于两个参数。如果不是,则使用来自数据类型’t1’和’t2’的时区。如果时区不相同,则结果将是未定义的。 - -Supported unit values: - -| unit | -|---------| -| second | -| minute | -| hour | -| day | -| week | -| month | -| quarter | -| year | - -## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size} - -它返回一个时间数组,其中包括从从«StartTime»开始到«StartTime + Duration 秒»内的所有符合«size»(以秒为单位)步长的时间点。其中«size»是一个可选参数,默认为1800。 -例如,`timeSlots(toDateTime('2012-01-01 12:20:00'),600) = [toDateTime('2012-01-01 12:00:00'),toDateTime('2012-01-01 12:30:00' )]`。 -这对于搜索在相应会话中综合浏览量是非常有用的。 - -## formatDateTime(Time, Format\[, Timezone\]) {#formatdatetimetime-format-timezone} - -函数根据给定的格式字符串来格式化时间。请注意:格式字符串必须是常量表达式,例如:单个结果列不能有多种格式字符串。 - -支持的格式修饰符: -(«Example» 列是对`2018-01-02 22:33:44`的格式化结果) - -| Modifier | Description | Example | -|----------|---------------------------------------------------------|------------| -| %C | year divided by 100 and truncated to integer (00-99) | 20 | -| %d | day of the month, zero-padded (01-31) | 02 | -| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 01/02/2018 | -| %e | day of the month, space-padded ( 1-31) | 2 | -| %F | short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2018-01-02 | -| %H | hour in 24h format (00-23) | 22 | -| %I | hour in 12h format (01-12) | 10 | -| %j | day of the year (001-366) | 002 | -| %m | month as a decimal number (01-12) | 01 | -| %M | minute (00-59) | 33 | -| %n | new-line character (‘’) | | -| %p | AM or PM designation | PM | -| %R | 24-hour HH:MM time, equivalent to %H:%M | 22:33 | -| %S | second (00-59) | 44 | -| %t | horizontal-tab character (’) | | -| %T | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 22:33:44 | -| %u | ISO 8601 weekday as number with Monday as 1 (1-7) | 2 | -| %V | ISO 8601 week number (01-53) | 01 | -| %w | weekday as a decimal number with Sunday as 0 (0-6) | 2 | -| %y | Year, last two digits (00-99) | 18 | -| %Y | Year | 2018 | -| %% | a % sign | % | - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) diff --git a/docs/zh/query_language/functions/encoding_functions.md b/docs/zh/query_language/functions/encoding_functions.md deleted file mode 100644 index 589edd75450..00000000000 --- a/docs/zh/query_language/functions/encoding_functions.md +++ /dev/null @@ -1,28 +0,0 @@ -# 编码函数 {#bian-ma-han-shu} - -## hex {#hex} - -接受`String`,`unsigned integer`,`Date`或`DateTime`类型的参数。返回包含参数的十六进制表示的字符串。使用大写字母`A-F`。不使用`0x`前缀或`h`后缀。对于字符串,所有字节都简单地编码为两个十六进制数字。数字转换为大端(«易阅读»)格式。对于数字,去除其中较旧的零,但仅限整个字节。例如,`hex(1)='01'`。 `Date`被编码为自Unix时间开始以来的天数。 `DateTime`编码为自Unix时间开始以来的秒数。 - -## unhex(str) {#unhexstr} - -接受包含任意数量的十六进制数字的字符串,并返回包含相应字节的字符串。支持大写和小写字母A-F。十六进制数字的数量不必是偶数。如果是奇数,则最后一位数被解释为00-0F字节的低位。如果参数字符串包含除十六进制数字以外的任何内容,则返回一些实现定义的结果(不抛出异常)。 -如果要将结果转换为数字,可以使用«reverse»和«reinterpretAsType»函数。 - -## UUIDStringToNum(str) {#uuidstringtonumstr} - -接受包含36个字符的字符串,格式为«123e4567-e89b-12d3-a456-426655440000»,并将其转化为FixedString(16)返回。 - -## UUIDNumToString(str) 
{#uuidnumtostringstr} - -接受FixedString(16)值。返回包含36个字符的文本格式的字符串。 - -## bitmaskToList(num) {#bitmasktolistnum} - -接受一个整数。返回一个字符串,其中包含一组2的幂列表,其列表中的所有值相加等于这个整数。列表使用逗号分割,按升序排列。 - -## bitmaskToArray(num) {#bitmasktoarraynum} - -接受一个整数。返回一个UInt64类型数组,其中包含一组2的幂列表,其列表中的所有值相加等于这个整数。数组中的数字按升序排列。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) diff --git a/docs/zh/query_language/functions/ext_dict_functions.md b/docs/zh/query_language/functions/ext_dict_functions.md deleted file mode 100644 index c1d5d9b60ba..00000000000 --- a/docs/zh/query_language/functions/ext_dict_functions.md +++ /dev/null @@ -1,46 +0,0 @@ -# 字典函数 {#zi-dian-han-shu} - -有关连接和配置外部词典的信息,请参阅[外部词典](../dicts/external_dicts.md)。 - -## dictGetUInt8, dictGetUInt16, dictGetUInt32, dictGetUInt64 {#dictgetuint8-dictgetuint16-dictgetuint32-dictgetuint64} - -## dictGetInt8, dictGetInt16, dictGetInt32, dictGetInt64 {#dictgetint8-dictgetint16-dictgetint32-dictgetint64} - -## dictGetFloat32, dictGetFloat64 {#dictgetfloat32-dictgetfloat64} - -## dictGetDate, dictGetDateTime {#dictgetdate-dictgetdatetime} - -## dictGetUUID {#dictgetuuid} - -## dictGetString {#dictgetstring} - -`dictGetT('dict_name', 'attr_name', id)` - -- 使用’id’键获取dict\_name字典中attr\_name属性的值。`dict_name`和`attr_name`是常量字符串。`id`必须是UInt64。 - 如果字典中没有`id`键,则返回字典描述中指定的默认值。 - -## dictGetTOrDefault {#ext_dict_functions-dictgettordefault} - -`dictGetTOrDefault('dict_name', 'attr_name', id, default)` - -与`dictGetT`函数相同,但默认值取自函数的最后一个参数。 - -## dictIsIn {#dictisin} - -`dictIsIn ('dict_name', child_id, ancestor_id)` - -- 对于’dict\_name’分层字典,查找’child\_id’键是否位于’ancestor\_id’内(或匹配’ancestor\_id’)。返回UInt8。 - -## dictGetHierarchy {#dictgethierarchy} - -`dictGetHierarchy('dict_name', id)` - -- 对于’dict\_name’分层字典,返回从’id’开始并沿父元素链继续的字典键数组。返回Array(UInt64) - -## dictHas {#dicthas} - -`dictHas('dict_name', id)` - -- 检查字典是否存在指定的`id`。如果不存在,则返回0;如果存在,则返回1。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/zh/query_language/functions/functions_for_nulls.md b/docs/zh/query_language/functions/functions_for_nulls.md deleted file mode 100644 index d6db2906e92..00000000000 --- a/docs/zh/query_language/functions/functions_for_nulls.md +++ /dev/null @@ -1,252 +0,0 @@ -# Nullable处理函数 {#nullablechu-li-han-shu} - -## isNull {#isnull} - -检查参数是否为[NULL](../syntax.md#null)。 - - isNull(x) - -**参数** - -- `x` — 一个非复合数据类型的值。 - -**返回值** - -- `1` 如果`x`为`NULL`。 -- `0` 如果`x`不为`NULL`。 - -**示例** - -存在以下内容的表 - - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - │ 2 │ 3 │ - └───┴──────┘ - -对其进行查询 - - :) SELECT x FROM t_null WHERE isNull(y) - - SELECT x - FROM t_null - WHERE isNull(y) - - ┌─x─┐ - │ 1 │ - └───┘ - - 1 rows in set. Elapsed: 0.010 sec. - -## isNotNull {#isnotnull} - -检查参数是否不为 [NULL](../syntax.md#null). - - isNotNull(x) - -**参数:** - -- `x` — 一个非复合数据类型的值。 - -**返回值** - -- `0` 如果`x`为`NULL`。 -- `1` 如果`x`不为`NULL`。 - -**示例** - -存在以下内容的表 - - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - │ 2 │ 3 │ - └───┴──────┘ - -对其进行查询 - - :) SELECT x FROM t_null WHERE isNotNull(y) - - SELECT x - FROM t_null - WHERE isNotNull(y) - - ┌─x─┐ - │ 2 │ - └───┘ - - 1 rows in set. Elapsed: 0.010 sec. - -## coalesce {#coalesce} - -检查从左到右是否传递了«NULL»参数并返回第一个非`'NULL`参数。 - - coalesce(x,...) 
- -**参数:** - -- 任何数量的非复合类型的参数。所有参数必须与数据类型兼容。 - -**返回值** - -- 第一个非’NULL\`参数。 -- `NULL`,如果所有参数都是’NULL\`。 - -**示例** - -考虑可以指定多种联系客户的方式的联系人列表。 - - ┌─name─────┬─mail─┬─phone─────┬──icq─┐ - │ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │ 123 │ - │ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ - └──────────┴──────┴───────────┴──────┘ - -`mail`和`phone`字段是String类型,但`icq`字段是`UInt32`,所以它需要转换为`String`。 - -从联系人列表中获取客户的第一个可用联系方式: - - :) SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook - - SELECT coalesce(mail, phone, CAST(icq, 'Nullable(String)')) - FROM aBook - - ┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐ - │ client 1 │ 123-45-67 │ - │ client 2 │ ᴺᵁᴸᴸ │ - └──────────┴──────────────────────────────────────────────────────┘ - - 2 rows in set. Elapsed: 0.006 sec. - -## ifNull {#ifnull} - -如果第一个参数为«NULL»,则返回第二个参数的值。 - - ifNull(x,alt) - -**参数:** - -- `x` — 要检查«NULL»的值。 -- `alt` — 如果`x`为’NULL\`,函数返回的值。 - -**返回值** - -- The value `x`, if `x` is not `NULL`. -- The value `alt`, if `x` is `NULL`. - -**示例** - - SELECT ifNull('a', 'b') - - ┌─ifNull('a', 'b')─┐ - │ a │ - └──────────────────┘ - - SELECT ifNull(NULL, 'b') - - ┌─ifNull(NULL, 'b')─┐ - │ b │ - └───────────────────┘ - -## nullIf {#nullif} - -如果参数相等,则返回`NULL`。 - - nullIf(x, y) - -**参数:** - -`x`, `y` — 用于比较的值。 它们必须是类型兼容的,否则将抛出异常。 - -**返回值** - -- 如果参数相等,则为`NULL`。 -- 如果参数不相等,则为`x`值。 - -**示例** - - SELECT nullIf(1, 1) - - ┌─nullIf(1, 1)─┐ - │ ᴺᵁᴸᴸ │ - └──────────────┘ - - SELECT nullIf(1, 2) - - ┌─nullIf(1, 2)─┐ - │ 1 │ - └──────────────┘ - -## assumeNotNull {#assumenotnull} - -将[Nullable](../../data_types/nullable.md)类型的值转换为非`Nullable`类型的值。 - - assumeNotNull(x) - -**参数:** - -- `x` — 原始值。 - -**返回值** - -- 如果`x`不为`NULL`,返回非`Nullable`类型的原始值。 -- 如果`x`为`NULL`,返回对应非`Nullable`类型的默认值。 - -**示例** - -存在如下`t_null`表。 - - SHOW CREATE TABLE t_null - - ┌─statement─────────────────────────────────────────────────────────────────┐ - │ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog │ - └───────────────────────────────────────────────────────────────────────────┘ - - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - │ 2 │ 3 │ - └───┴──────┘ - -将列`y`作为`assumeNotNull`函数的参数。 - - SELECT assumeNotNull(y) FROM t_null - - ┌─assumeNotNull(y)─┐ - │ 0 │ - │ 3 │ - └──────────────────┘ - - SELECT toTypeName(assumeNotNull(y)) FROM t_null - - ┌─toTypeName(assumeNotNull(y))─┐ - │ Int8 │ - │ Int8 │ - └──────────────────────────────┘ - -## toNullable {#tonullable} - -将参数的类型转换为`Nullable`。 - - toNullable(x) - -**参数:** - -- `x` — 任何非复合类型的值。 - -**返回值** - -- 输入的值,但其类型为`Nullable`。 - -**示例** - - SELECT toTypeName(10) - - ┌─toTypeName(10)─┐ - │ UInt8 │ - └────────────────┘ - - SELECT toTypeName(toNullable(10)) - - ┌─toTypeName(toNullable(10))─┐ - │ Nullable(UInt8) │ - └────────────────────────────┘ - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) diff --git a/docs/zh/query_language/functions/geo.md b/docs/zh/query_language/functions/geo.md deleted file mode 100644 index 3e6e6aa6b64..00000000000 --- a/docs/zh/query_language/functions/geo.md +++ /dev/null @@ -1,222 +0,0 @@ -# GEO函数 {#geohan-shu} - -## greatCircleDistance {#greatcircledistance} - -使用[great-circle distance公式](https://en.wikipedia.org/wiki/Great-circle_distance)计算地球表面两点之间的距离。 - -``` sql -greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg) -``` - -**输入参数** - -- `lon1Deg` — 第一个点的经度,单位:度,范围: `[-180°, 180°]`。 -- `lat1Deg` — 第一个点的纬度,单位:度,范围: `[-90°, 90°]`。 -- `lon2Deg` — 第二个点的经度,单位:度,范围: `[-180°, 180°]`。 -- `lat2Deg` — 第二个点的纬度,单位:度,范围: `[-90°, 90°]`。 - -正值对应北纬和东经,负值对应南纬和西经。 - 
-**返回值** - -地球表面的两点之间的距离,以米为单位。 - -当输入参数值超出规定的范围时将抛出异常。 - -**示例** - -``` sql -SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) -``` - -``` text -┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐ -│ 14132374.194975413 │ -└───────────────────────────────────────────────────────────────────┘ -``` - -## pointInEllipses {#pointinellipses} - -检查指定的点是否至少包含在指定的一个椭圆中。 -下述中的坐标是几何图形在笛卡尔坐标系中的位置。 - -``` sql -pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ) -``` - -**输入参数** - -- `x, y` — 平面上某个点的坐标。 -- `xᵢ, yᵢ` — 第i个椭圆的中心坐标。 -- `aᵢ, bᵢ` — 以x, y坐标为单位的第i个椭圆的轴。 - -输入参数的个数必须是`2+4⋅n`,其中`n`是椭圆的数量。 - -**返回值** - -如果该点至少包含在一个椭圆中,则返回`1`;否则,则返回`0`。 - -**示例** - -``` sql -SELECT pointInEllipses(55.755831, 37.617673, 55.755831, 37.617673, 1.0, 2.0) -``` - -``` text -┌─pointInEllipses(55.755831, 37.617673, 55.755831, 37.617673, 1., 2.)─┐ -│ 1 │ -└─────────────────────────────────────────────────────────────────────┘ -``` - -## pointInPolygon {#pointinpolygon} - -检查指定的点是否包含在指定的多边形中。 - -``` sql -pointInPolygon((x, y), [(a, b), (c, d) ...], ...) -``` - -**输入参数** - -- `(x, y)` — 平面上某个点的坐标。[Tuple](../../data_types/tuple.md)类型,包含坐标的两个数字。 -- `[(a, b), (c, d) ...]` — 多边形的顶点。[Array](../../data_types/array.md)类型。每个顶点由一对坐标`(a, b)`表示。顶点可以按顺时针或逆时针指定。顶点的个数应该大于等于3。同时只能是常量的。 -- 该函数还支持镂空的多边形(切除部分)。如果需要,可以使用函数的其他参数定义需要切除部分的多边形。(The function does not support non-simply-connected polygons.) - -**返回值** - -如果坐标点存在在多边形范围内,则返回`1`。否则返回`0`。 -如果坐标位于多边形的边界上,则该函数可能返回`1`,或可能返回`0`。 - -**示例** - -``` sql -SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## geohashEncode {#geohashencode} - -将经度和纬度编码为geohash-string,请参阅(http://geohash.org/,https://en.wikipedia.org/wiki/Geohash)。 - -``` sql -geohashEncode(longitude, latitude, [precision]) -``` - -**输入值** - -- longitude - 要编码的坐标的经度部分。其值应在`[-180°,180°]`范围内 -- latitude - 要编码的坐标的纬度部分。其值应在`[-90°,90°]`范围内 -- precision - 可选,生成的geohash-string的长度,默认为`12`。取值范围为`[1,12]`。任何小于`1`或大于`12`的值都会默认转换为`12`。 - -**返回值** - -- 坐标编码的字符串(使用base32编码的修改版本)。 - -**示例** - -``` sql -SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res -``` - -``` text -┌─res──────────┐ -│ ezs42d000000 │ -└──────────────┘ -``` - -## geohashDecode {#geohashdecode} - -将任何geohash编码的字符串解码为经度和纬度。 - -**输入值** - -- encoded string - geohash编码的字符串。 - -**返回值** - -- (longitude, latitude) - 经度和纬度的`Float64`值的2元组。 - -**示例** - -``` sql -SELECT geohashDecode('ezs42') AS res -``` - -``` text -┌─res─────────────────────────────┐ -│ (-5.60302734375,42.60498046875) │ -└─────────────────────────────────┘ -``` - -## geoToH3 {#geotoh3} - -计算指定的分辨率的[H3](https://uber.github.io/h3/#/documentation/overview/introduction)索引`(lon, lat)`。 - -``` sql -geoToH3(lon, lat, resolution) -``` - -**输入值** - -- `lon` — 经度。 [Float64](../../data_types/float.md)类型。 -- `lat` — 纬度。 [Float64](../../data_types/float.md)类型。 -- `resolution` — 索引的分辨率。 取值范围为: `[0, 15]`。 [UInt8](../../data_types/int_uint.md)类型。 - -**返回值** - -- H3中六边形的索引值。 -- 发生异常时返回0。 - -[UInt64](../../data_types/int_uint.md)类型。 - -**示例** - -``` sql -SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index -``` - -``` text -┌────────────h3Index─┐ -│ 644325524701193974 │ -└────────────────────┘ -``` - -## geohashesInBox {#geohashesinbox} - -计算在指定精度下计算最小包含指定的经纬范围的最小图形的geohash数组。 - -**输入值** - -- longitude\_min - 最小经度。其值应在`[-180°,180°]`范围内 -- latitude\_min - 最小纬度。其值应在`[-90°,90°]`范围内 -- longitude\_max - 最大经度。其值应在`[-180°,180°]`范围内 -- latitude\_max - 最大纬度。其值应在`[-90°,90°]`范围内 -- precision - geohash的精度。其值应在`[1, 
12]`内的`UInt8`类型的数字 - -请注意,上述所有的坐标参数必须同为`Float32`或`Float64`中的一种类型。 - -**返回值** - -- 包含指定范围内的指定精度的geohash字符串数组。注意,您不应该依赖返回数组中geohash的顺序。 -- \[\] - 当传入的最小经纬度大于最大经纬度时将返回一个空数组。 - -请注意,如果生成的数组长度超过10000时,则函数将抛出异常。 - -**示例** - -``` sql -SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos -``` - -``` text -┌─thasos──────────────────────────────────────┐ -│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │ -└─────────────────────────────────────────────┘ -``` - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/geo/) diff --git a/docs/zh/query_language/functions/hash_functions.md b/docs/zh/query_language/functions/hash_functions.md deleted file mode 100644 index 835da4a9204..00000000000 --- a/docs/zh/query_language/functions/hash_functions.md +++ /dev/null @@ -1,114 +0,0 @@ -# Hash函数 {#hashhan-shu} - -Hash函数可以用于将元素不可逆的伪随机打乱。 - -## halfMD5 {#halfmd5} - -计算字符串的MD5。然后获取结果的前8个字节并将它们作为UInt64(大端)返回。 -此函数相当低效(500万个短字符串/秒/核心)。 -如果您不需要一定使用MD5,请使用‘sipHash64’函数。 - -## MD5 {#md5} - -计算字符串的MD5并将结果放入FixedString(16)中返回。 -如果您只是需要一个128位的hash,同时不需要一定使用MD5,请使用‘sipHash128’函数。 -如果您要获得与md5sum程序相同的输出结果,请使用lower(hex(MD5(s)))。 - -## sipHash64 {#siphash64} - -计算字符串的SipHash。 -接受String类型的参数,返回UInt64。 -SipHash是一种加密哈希函数。它的处理性能至少比MD5快三倍。 -有关详细信息,请参阅链接:https://131002.net/siphash/ - -## sipHash128 {#hash_functions-siphash128} - -计算字符串的SipHash。 -接受String类型的参数,返回FixedString(16)。 -与sipHash64函数的不同在于它的最终计算结果为128位。 - -## cityHash64 {#cityhash64} - -计算任意数量字符串的CityHash64或使用特定实现的Hash函数计算任意数量其他类型的Hash。 -对于字符串,使用CityHash算法。 这是一个快速的非加密哈希函数,用于字符串。 -对于其他类型的参数,使用特定实现的Hash函数,这是一种快速的非加密的散列函数。 -如果传递了多个参数,则使用CityHash组合这些参数的Hash结果。 -例如,您可以计算整个表的checksum,其结果取决于行的顺序:`SELECT sum(cityHash64(*)) FROM table`。 - -## intHash32 {#inthash32} - -为任何类型的整数计算32位的哈希。 -这是相对高效的非加密Hash函数。 - -## intHash64 {#inthash64} - -从任何类型的整数计算64位哈希码。 -它的工作速度比intHash32函数快。 - -## SHA1 {#sha1} - -## SHA224 {#sha224} - -## SHA256 {#sha256} - -计算字符串的SHA-1,SHA-224或SHA-256,并将结果字节集返回为FixedString(20),FixedString(28)或FixedString(32)。 -该函数相当低效(SHA-1大约500万个短字符串/秒/核心,而SHA-224和SHA-256大约220万个短字符串/秒/核心)。 -我们建议仅在必须使用这些Hash函数且无法更改的情况下使用这些函数。 -即使在这些情况下,我们仍建议将函数采用在写入数据时使用预计算的方式将其计算完毕。而不是在SELECT中计算它们。 - -## URLHash(url\[, N\]) {#urlhashurl-n} - -一种快速的非加密哈希函数,用于规范化的从URL获得的字符串。 -`URLHash(s)` - 从一个字符串计算一个哈希,如果结尾存在尾随符号`/`,`?`或`#`则忽略。 -`URLHash(s,N)` - 计算URL层次结构中字符串到N级别的哈希值,如果末尾存在尾随符号`/`,`?`或`#`则忽略。 -URL的层级与URLHierarchy中的层级相同。 此函数被用于Yandex.Metrica。 - -## farmHash64 {#farmhash64} - -计算字符串的FarmHash64。 -接受一个String类型的参数。返回UInt64。 -有关详细信息,请参阅链接:[FarmHash64](https://github.com/google/farmhash) - -## javaHash {#hash_functions-javahash} - -计算字符串的JavaHash。 -接受一个String类型的参数。返回Int32。 -有关更多信息,请参阅链接:[JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) - -## hiveHash {#hivehash} - -计算字符串的HiveHash。 -接受一个String类型的参数。返回Int32。 -与[JavaHash](#hash_functions-javahash)相同,但不会返回负数。 - -## metroHash64 {#metrohash64} - -计算字符串的MetroHash。 -接受一个String类型的参数。返回UInt64。 -有关详细信息,请参阅链接:[MetroHash64](http://www.jandrewrogers.com/2015/05/27/metrohash/) - -## jumpConsistentHash {#jumpconsistenthash} - -计算UInt64的JumpConsistentHash。 -接受UInt64类型的参数。返回Int32。 -有关更多信息,请参见链接:[JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf) - -## murmurHash2\_32, murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} - -计算字符串的MurmurHash2。 -接受一个String类型的参数。返回UInt64或UInt32。 -有关更多信息,请参阅链接:[MurmurHash2](https://github.com/aappleby/smhasher) - -## murmurHash3\_32, murmurHash3\_64, murmurHash3\_128 {#murmurhash3-32-murmurhash3-64-murmurhash3-128} - -计算字符串的MurmurHash3。 
-接受一个String类型的参数。返回UInt64或UInt32或FixedString(16)。 -有关更多信息,请参阅链接:[MurmurHash3](https://github.com/aappleby/smhasher) - -## xxHash32, xxHash64 {#xxhash32-xxhash64} - -计算字符串的xxHash。 -接受一个String类型的参数。返回UInt64或UInt32。 -有关更多信息,请参见链接:[xxHash](http://cyan4973.github.io/xxHash/) - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/zh/query_language/functions/higher_order_functions.md b/docs/zh/query_language/functions/higher_order_functions.md deleted file mode 100644 index 9e84a73f0b8..00000000000 --- a/docs/zh/query_language/functions/higher_order_functions.md +++ /dev/null @@ -1,138 +0,0 @@ -# 高阶函数 {#gao-jie-han-shu} - -## `->` 运算符, lambda(params, expr) 函数 {#yun-suan-fu-lambdaparams-expr-han-shu} - -用于描述一个lambda函数用来传递给其他高阶函数。箭头的左侧有一个形式参数,它可以是一个标识符或多个标识符所组成的元祖。箭头的右侧是一个表达式,在这个表达式中可以使用形式参数列表中的任何一个标识符或表的任何一个列名。 - -示例: `x -> 2 * x, str -> str != Referer.` - -高阶函数只能接受lambda函数作为其参数。 - -高阶函数可以接受多个参数的lambda函数作为其参数,在这种情况下,高阶函数需要同时传递几个长度相等的数组,这些数组将被传递给lambda参数。 - -除了’arrayMap’和’arrayFilter’以外的所有其他函数,都可以省略第一个参数(lambda函数)。在这种情况下,默认返回数组元素本身。 - -### arrayMap(func, arr1, …) {#higher_order_functions-array-map} - -将arr -将从’func’函数的原始应用程序获得的数组返回到’arr’数组中的每个元素。 -Returns an array obtained from the original application of the ‘func’ function to each element in the ‘arr’ array. - -### arrayFilter(func, arr1, …) {#arrayfilterfunc-arr1} - -Returns an array containing only the elements in ‘arr1’ for which ‘func’ returns something other than 0. - -示例: - -``` sql -SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res -``` - - ┌─res───────────┐ - │ ['abc World'] │ - └───────────────┘ - -``` sql -SELECT - arrayFilter( - (i, x) -> x LIKE '%World%', - arrayEnumerate(arr), - ['Hello', 'abc World'] AS arr) - AS res -``` - - ┌─res─┐ - │ [2] │ - └─────┘ - -### arrayCount(\[func,\] arr1, …) {#arraycountfunc-arr1} - -返回数组arr中非零元素的数量,如果指定了‘func’,则通过‘func’的返回值确定元素是否为非零元素。 - -### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} - -返回数组‘arr’中是否存在非零元素,如果指定了‘func’,则使用‘func’的返回值确定元素是否为非零元素。 - -### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} - -返回数组‘arr’中是否存在为零的元素,如果指定了‘func’,则使用‘func’的返回值确定元素是否为零元素。 - -### arraySum(\[func,\] arr1, …) {#arraysumfunc-arr1} - -计算arr数组的总和,如果指定了‘func’,则通过‘func’的返回值计算数组的总和。 - -### arrayFirst(func, arr1, …) {#arrayfirstfunc-arr1} - -返回数组中第一个匹配的元素,函数使用‘func’匹配所有元素,直到找到第一个匹配的元素。 - -### arrayFirstIndex(func, arr1, …) {#arrayfirstindexfunc-arr1} - -返回数组中第一个匹配的元素的下标索引,函数使用‘func’匹配所有元素,直到找到第一个匹配的元素。 - -### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} - -返回源数组部分数据的总和,如果指定了`func`函数,则使用`func`的返回值计算总和。 - -示例: - -``` sql -SELECT arrayCumSum([1, 1, 1, 1]) AS res -``` - - ┌─res──────────┐ - │ [1, 2, 3, 4] │ - └──────────────┘ - -### arrayCumSumNonNegative(arr) {#arraycumsumnonnegativearr} - -与arrayCumSum相同,返回源数组部分数据的总和。不同于arrayCumSum,当返回值包含小于零的值时,该值替换为零,后续计算使用零继续计算。例如: - -``` sql -SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res -``` - - ┌─res───────┐ - │ [1,2,0,1] │ - └───────────┘ - -### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1} - -返回升序排序`arr1`的结果。如果指定了`func`函数,则排序顺序由`func`的结果决定。 - -[Schwartzian变换](https://en.wikipedia.org/wiki/Schwartzian_transform)用于提高排序效率。 - -示例: - -``` sql -SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]); -``` - - ┌─res────────────────┐ - │ ['world', 'hello'] │ - └────────────────────┘ - -请注意,NULL和NaN在最后(NaN在NULL之前)。例如: - -``` sql -SELECT arraySort([1, nan, 2, NULL, 3, nan, 4, NULL]) -``` - - ┌─arraySort([1, nan, 2, NULL, 3, nan, 4, NULL])─┐ - │ [1,2,3,4,nan,nan,NULL,NULL] │ - 
└───────────────────────────────────────────────┘ - -### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1} - -返回降序排序`arr1`的结果。如果指定了`func`函数,则排序顺序由`func`的结果决定。 - -请注意,NULL和NaN在最后(NaN在NULL之前)。例如: - -``` sql -SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, 4, NULL]) -``` - - ┌─arrayReverseSort([1, nan, 2, NULL, 3, nan, 4, NULL])─┐ - │ [4,3,2,1,nan,nan,NULL,NULL] │ - └──────────────────────────────────────────────────────┘ - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/) diff --git a/docs/zh/query_language/functions/in_functions.md b/docs/zh/query_language/functions/in_functions.md deleted file mode 100644 index 60df3e25db1..00000000000 --- a/docs/zh/query_language/functions/in_functions.md +++ /dev/null @@ -1,19 +0,0 @@ -# IN运算符相关函数 {#inyun-suan-fu-xiang-guan-han-shu} - -## in, notIn, globalIn, globalNotIn {#in-notin-globalin-globalnotin} - -请参阅[IN 运算符](../select.md#select-in-operators)部分。 - -## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y} - -函数用于对多个列进行分组。 -对于具有类型T1,T2,…的列,它返回包含这些列的元组(T1,T2,…)。 执行该函数没有任何成本。 -元组通常用作IN运算符的中间参数值,或用于创建lambda函数的形参列表。 元组不能写入表。 - -## tupleElement(tuple, n), operator x.N {#tupleelementtuple-n-operator-x-n} - -函数用于从元组中获取列。 -’N’是列索引,从1开始。N必须是常量正整数常数,并且不大于元组的大小。 -执行该函数没有任何成本。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/zh/query_language/functions/index.md b/docs/zh/query_language/functions/index.md deleted file mode 100644 index 8d178592e92..00000000000 --- a/docs/zh/query_language/functions/index.md +++ /dev/null @@ -1,66 +0,0 @@ -# 函数 {#han-shu} - -ClickHouse中至少存在两种类型的函数 - 常规函数(它们称之为«函数»)和聚合函数。 常规函数的工作就像分别为每一行执行一次函数计算一样(对于每一行,函数的结果不依赖于其他行)。 聚合函数则从各行累积一组值(即函数的结果以来整个结果集)。 - -在本节中,我们将讨论常规函数。 有关聚合函数,请参阅«聚合函数»一节。 - - \* - ’arrayJoin’函数与表函数均属于第三种类型的函数。 \* - -## 强类型 {#qiang-lei-xing} - -与标准SQL相比,ClickHouse具有强类型。 换句话说,它不会在类型之间进行隐式转换。 每个函数适用于特定的一组类型。 这意味着有时您需要使用类型转换函数。 - -## 常见的子表达式消除 {#chang-jian-de-zi-biao-da-shi-xiao-chu} - -查询中具有相同AST(相同语句或语法分析结果相同)的所有表达式都被视为具有相同的值。 这样的表达式被连接并执行一次。 通过这种方式也可以消除相同的子查询。 - -## 结果类型 {#jie-guo-lei-xing} - -所有函数都只能够返回一个返回值。 结果类型通常由参数的类型决定。 但tupleElement函数(a.N运算符)和toFixedString函数是例外的。 - -## 常量 {#chang-liang} - -为了简单起见,某些函数的某些参数只能是常量。 例如,LIKE运算符的右参数必须是常量。 -几乎所有函数都为常量参数返回常量。 除了用于生成随机数的函数。 -’now’函数为在不同时间运行的查询返回不同的值,但结果被视为常量,因为常量在单个查询中很重要。 -常量表达式也被视为常量(例如,LIKE运算符的右半部分可以由多个常量构造)。 - -对于常量和非常量参数,可以以不同方式实现函数(执行不同的代码)。 但是,对于包含相同数据的常量和非常量参数它们的结果应该是一致的。 - -## NULL值处理 {#nullzhi-chu-li} - -函数具有以下行为: - -- 如果函数的参数至少一个是«NULL»,则函数结果也是«NULL»。 -- 在每个函数的描述中单独指定的特殊行为。在ClickHouse源代码中,这些函数具有«UseDefaultImplementationForNulls = false»。 - -## 不可变性 {#bu-ke-bian-xing} - -函数不能更改其参数的值 - 任何更改都将作为结果返回。因此,计算单独函数的结果不依赖于在查询中写入函数的顺序。 - -## 错误处理 {#cuo-wu-chu-li} - -如果数据无效,某些函数可能会抛出异常。在这种情况下,将取消查询并将错误信息返回给客户端。对于分布式处理,当其中一个服务器发生异常时,其他服务器也会尝试中止查询。 - -## 表达式参数的计算 {#biao-da-shi-can-shu-de-ji-suan} - -在几乎所有编程语言中,某些函数可能无法预先计算其中一个参数。这通常是运算符`&&`,`||`和`? 
:`。 -但是在ClickHouse中,函数(运算符)的参数总是被预先计算。这是因为一次评估列的整个部分,而不是分别计算每一行。 - -## 执行分布式查询处理的功能 {#zhi-xing-fen-bu-shi-cha-xun-chu-li-de-gong-neng} - -对于分布式查询处理,在远程服务器上执行尽可能多的查询处理阶段,并且在请求者服务器上执行其余阶段(合并中间结果和之后的所有内容)。 - -这意味着可以在不同的服务器上执行功能。 -例如,在查询`SELECT f(sum(g(x)))FROM distributed_table GROUP BY h(y)中,` - -- 如果`distributed_table`至少有两个分片,则在远程服务器上执行函数’g’和’h’,并在请求服务器上执行函数’f’。 -- 如果`distributed_table`只有一个分片,则在该分片的服务器上执行所有’f’,’g’和’h’功能。 - -函数的结果通常不依赖于它在哪个服务器上执行。但是,有时这很重要。 -例如,使用字典的函数时将使用运行它们的服务器上存在的字典。 -另一个例子是`hostName`函数,它返回运行它的服务器的名称,以便在`SELECT`查询中对服务器进行`GROUP BY`。 - -如果查询中的函数在请求服务器上执行,但您需要在远程服务器上执行它,则可以将其包装在«any»聚合函数中,或将其添加到«GROUP BY»中。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/zh/query_language/functions/introspection.md b/docs/zh/query_language/functions/introspection.md deleted file mode 120000 index b1a487e9c77..00000000000 --- a/docs/zh/query_language/functions/introspection.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/functions/introspection.md \ No newline at end of file diff --git a/docs/zh/query_language/functions/ip_address_functions.md b/docs/zh/query_language/functions/ip_address_functions.md deleted file mode 100644 index 0e012f90f84..00000000000 --- a/docs/zh/query_language/functions/ip_address_functions.md +++ /dev/null @@ -1,217 +0,0 @@ -# IP函数 {#iphan-shu} - -## IPv4NumToString(num) {#ipv4numtostringnum} - -接受一个UInt32(大端)表示的IPv4的地址,返回相应IPv4的字符串表现形式,格式为A.B.C.D(以点分割的十进制数字)。 - -## IPv4StringToNum(s) {#ipv4stringtonums} - -与IPv4NumToString函数相反。如果IPv4地址格式无效,则返回0。 - -## IPv4NumToStringClassC(num) {#ipv4numtostringclasscnum} - -与IPv4NumToString类似,但使用xxx替换最后一个字节。 - -示例: - -``` sql -SELECT - IPv4NumToStringClassC(ClientIP) AS k, - count() AS c -FROM test.hits -GROUP BY k -ORDER BY c DESC -LIMIT 10 -``` - - ┌─k──────────────┬─────c─┐ - │ 83.149.9.xxx │ 26238 │ - │ 217.118.81.xxx │ 26074 │ - │ 213.87.129.xxx │ 25481 │ - │ 83.149.8.xxx │ 24984 │ - │ 217.118.83.xxx │ 22797 │ - │ 78.25.120.xxx │ 22354 │ - │ 213.87.131.xxx │ 21285 │ - │ 78.25.121.xxx │ 20887 │ - │ 188.162.65.xxx │ 19694 │ - │ 83.149.48.xxx │ 17406 │ - └────────────────┴───────┘ - -由于使用’xxx’是不规范的,因此将来可能会更改。我们建议您不要依赖此格式。 - -### IPv6NumToString(x) {#ipv6numtostringx} - -接受FixedString(16)类型的二进制格式的IPv6地址。以文本格式返回此地址的字符串。 -IPv6映射的IPv4地址以::ffff:111.222.33。例如: - -``` sql -SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr -``` - - ┌─addr─────────┐ - │ 2a02:6b8::11 │ - └──────────────┘ - -``` sql -SELECT - IPv6NumToString(ClientIP6 AS k), - count() AS c -FROM hits_all -WHERE EventDate = today() AND substring(ClientIP6, 1, 12) != unhex('00000000000000000000FFFF') -GROUP BY k -ORDER BY c DESC -LIMIT 10 -``` - - ┌─IPv6NumToString(ClientIP6)──────────────┬─────c─┐ - │ 2a02:2168:aaa:bbbb::2 │ 24695 │ - │ 2a02:2698:abcd:abcd:abcd:abcd:8888:5555 │ 22408 │ - │ 2a02:6b8:0:fff::ff │ 16389 │ - │ 2a01:4f8:111:6666::2 │ 16016 │ - │ 2a02:2168:888:222::1 │ 15896 │ - │ 2a01:7e00::ffff:ffff:ffff:222 │ 14774 │ - │ 2a02:8109:eee:ee:eeee:eeee:eeee:eeee │ 14443 │ - │ 2a02:810b:8888:888:8888:8888:8888:8888 │ 14345 │ - │ 2a02:6b8:0:444:4444:4444:4444:4444 │ 14279 │ - │ 2a01:7e00::ffff:ffff:ffff:ffff │ 13880 │ - └─────────────────────────────────────────┴───────┘ - -``` sql -SELECT - IPv6NumToString(ClientIP6 AS k), - count() AS c -FROM hits_all -WHERE EventDate = today() -GROUP BY k -ORDER BY c DESC -LIMIT 10 -``` - - ┌─IPv6NumToString(ClientIP6)─┬──────c─┐ - │ ::ffff:94.26.111.111 │ 747440 │ - │ ::ffff:37.143.222.4 │ 529483 │ - │ ::ffff:5.166.111.99 │ 317707 │ - │ 
::ffff:46.38.11.77 │ 263086 │ - │ ::ffff:79.105.111.111 │ 186611 │ - │ ::ffff:93.92.111.88 │ 176773 │ - │ ::ffff:84.53.111.33 │ 158709 │ - │ ::ffff:217.118.11.22 │ 154004 │ - │ ::ffff:217.118.11.33 │ 148449 │ - │ ::ffff:217.118.11.44 │ 148243 │ - └────────────────────────────┴────────┘ - -## IPv6StringToNum(s) {#ipv6stringtonums} - -与IPv6NumToString的相反。如果IPv6地址格式无效,则返回空字节字符串。 -十六进制可以是大写的或小写的。 - -## IPv4ToIPv6(x) {#ipv4toipv6x} - -接受一个UInt32类型的IPv4地址,返回FixedString(16)类型的IPv6地址。例如: - -``` sql -SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr -``` - - ┌─addr───────────────┐ - │ ::ffff:192.168.0.1 │ - └────────────────────┘ - -## cutIPv6(x, bitsToCutForIPv6, bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4} - -接受一个FixedString(16)类型的IPv6地址,返回一个String,这个String中包含了删除指定位之后的地址的文本格式。例如: - -``` sql -WITH - IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D') AS ipv6, - IPv4ToIPv6(IPv4StringToNum('192.168.0.1')) AS ipv4 -SELECT - cutIPv6(ipv6, 2, 0), - cutIPv6(ipv4, 0, 2) -``` - - ┌─cutIPv6(ipv6, 2, 0)─────────────────┬─cutIPv6(ipv4, 0, 2)─┐ - │ 2001:db8:ac10:fe01:feed:babe:cafe:0 │ ::ffff:192.168.0.0 │ - └─────────────────────────────────────┴─────────────────────┘ - -## IPv4CIDRToRange(ipv4, cidr), {#ipv4cidrtorangeipv4-cidr} - -接受一个IPv4地址以及一个UInt8类型的CIDR。返回包含子网最低范围以及最高范围的元组。 - -``` sql -SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) -``` - - ┌─IPv4CIDRToRange(toIPv4('192.168.5.2'), 16)─┐ - │ ('192.168.0.0','192.168.255.255') │ - └────────────────────────────────────────────┘ - -## IPv6CIDRToRange(ipv6, cidr), {#ipv6cidrtorangeipv6-cidr} - -接受一个IPv6地址以及一个UInt8类型的CIDR。返回包含子网最低范围以及最高范围的元组。 - -``` sql -SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); -``` - - ┌─IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32)─┐ - │ ('2001:db8::','2001:db8:ffff:ffff:ffff:ffff:ffff:ffff') │ - └────────────────────────────────────────────────────────────────────────┘ - -## toIPv4(string) {#toipv4string} - -`IPv4StringToNum()`的别名,它采用字符串形式的IPv4地址并返回[IPv4](../../data_types/domains/ipv4.md)类型的值,该二进制值等于`IPv4StringToNum()`返回的值。 - -``` sql -WITH - '171.225.130.45' as IPv4_string -SELECT - toTypeName(IPv4StringToNum(IPv4_string)), - toTypeName(toIPv4(IPv4_string)) -``` - - ┌─toTypeName(IPv4StringToNum(IPv4_string))─┬─toTypeName(toIPv4(IPv4_string))─┐ - │ UInt32 │ IPv4 │ - └──────────────────────────────────────────┴─────────────────────────────────┘ - -``` sql -WITH - '171.225.130.45' as IPv4_string -SELECT - hex(IPv4StringToNum(IPv4_string)), - hex(toIPv4(IPv4_string)) -``` - - ┌─hex(IPv4StringToNum(IPv4_string))─┬─hex(toIPv4(IPv4_string))─┐ - │ ABE1822D │ ABE1822D │ - └───────────────────────────────────┴──────────────────────────┘ - -## toIPv6(string) {#toipv6string} - -`IPv6StringToNum()`的别名,它采用字符串形式的IPv6地址并返回[IPv6](../../data_types/domains/ipv6.md)类型的值,该二进制值等于`IPv6StringToNum()`返回的值。 - -``` sql -WITH - '2001:438:ffff::407d:1bc1' as IPv6_string -SELECT - toTypeName(IPv6StringToNum(IPv6_string)), - toTypeName(toIPv6(IPv6_string)) -``` - - ┌─toTypeName(IPv6StringToNum(IPv6_string))─┬─toTypeName(toIPv6(IPv6_string))─┐ - │ FixedString(16) │ IPv6 │ - └──────────────────────────────────────────┴─────────────────────────────────┘ - -``` sql -WITH - '2001:438:ffff::407d:1bc1' as IPv6_string -SELECT - hex(IPv6StringToNum(IPv6_string)), - hex(toIPv6(IPv6_string)) -``` - - ┌─hex(IPv6StringToNum(IPv6_string))─┬─hex(toIPv6(IPv6_string))─────────┐ - │ 20010438FFFF000000000000407D1BC1 │ 20010438FFFF000000000000407D1BC1 │ - 
└───────────────────────────────────┴──────────────────────────────────┘ - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/zh/query_language/functions/json_functions.md b/docs/zh/query_language/functions/json_functions.md deleted file mode 100644 index 5203ae91291..00000000000 --- a/docs/zh/query_language/functions/json_functions.md +++ /dev/null @@ -1,174 +0,0 @@ -# JSON函数 {#jsonhan-shu} - -在Yandex.Metrica中,用户使用JSON作为访问参数。为了处理这些JSON,实现了一些函数。(尽管在大多数情况下,JSON是预先进行额外处理的,并将结果值放在单独的列中。)所有的这些函数都进行了尽可能的假设。以使函数能够尽快的完成工作。 - -我们对JSON格式做了如下假设: - -1. 字段名称(函数的参数)必须使常量。 -2. 字段名称必须使用规范的编码。例如:`visitParamHas('{"abc":"def"}', 'abc') = 1`,但是 `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` -3. 函数可以随意的在多层嵌套结构下查找字段。如果存在多个匹配字段,则返回第一个匹配字段。 -4. JSON除字符串文本外不存在空格字符。 - -## visitParamHas(params, name) {#visitparamhasparams-name} - -检查是否存在«name»名称的字段 - -## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name} - -将名为«name»的字段的值解析成UInt64。如果这是一个字符串字段,函数将尝试从字符串的开头解析一个数字。如果该字段不存在,或无法从它中解析到数字,则返回0。 - -## visitParamExtractInt(params, name) {#visitparamextractintparams-name} - -与visitParamExtractUInt相同,但返回Int64。 - -## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name} - -与visitParamExtractUInt相同,但返回Float64。 - -## visitParamExtractBool(params, name) {#visitparamextractboolparams-name} - -解析true/false值。其结果是UInt8类型的。 - -## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name} - -返回字段的值,包含空格符。 - -示例: - - visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' - visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' - -## visitParamExtractString(params, name) {#visitparamextractstringparams-name} - -使用双引号解析字符串。这个值没有进行转义。如果转义失败,它将返回一个空白字符串。 - -示例: - - visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' - visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺' - visitParamExtractString('{"abc":"\\u263"}', 'abc') = '' - visitParamExtractString('{"abc":"hello}', 'abc') = '' - -目前不支持`\uXXXX\uYYYY`这些字符编码,这些编码不在基本多文种平面中(它们被转化为CESU-8而不是UTF-8)。 - -以下函数基于[simdjson](https://github.com/lemire/simdjson),专为更复杂的JSON解析要求而设计。但上述假设2仍然适用。 - -## JSONHas(json\[, indices\_or\_keys\]…) {#jsonhasjson-indices-or-keys} - -如果JSON中存在该值,则返回`1`。 - -如果该值不存在,则返回`0`。 - -示例: - - select JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 1 - select JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0 - -`indices_or_keys`可以是零个或多个参数的列表,每个参数可以是字符串或整数。 - -- String = 按成员名称访问JSON对象成员。 -- 正整数 = 从头开始访问第n个成员/成员名称。 -- 负整数 = 从末尾访问第n个成员/成员名称。 - -您可以使用整数来访问JSON数组和JSON对象。 - -例如: - - select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'a' - select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2) = 'b' - select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1) = 'b' - select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a' - select JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello' - -## JSONLength(json\[, indices\_or\_keys\]…) {#jsonlengthjson-indices-or-keys} - -返回JSON数组或JSON对象的长度。 - -如果该值不存在或类型错误,将返回`0`。 - -示例: - - select JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3 - select JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2 - -## JSONType(json\[, indices\_or\_keys\]…) {#jsontypejson-indices-or-keys} - -返回JSON值的类型。 - -如果该值不存在,将返回`Null`。 - -示例: - - select JSONType('{"a": "hello", "b": [-100, 200.0, 300]}') = 'Object' - select JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 
'String' - select JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array' - -## JSONExtractUInt(json\[, indices\_or\_keys\]…) {#jsonextractuintjson-indices-or-keys} - -## JSONExtractInt(json\[, indices\_or\_keys\]…) {#jsonextractintjson-indices-or-keys} - -## JSONExtractFloat(json\[, indices\_or\_keys\]…) {#jsonextractfloatjson-indices-or-keys} - -## JSONExtractBool(json\[, indices\_or\_keys\]…) {#jsonextractbooljson-indices-or-keys} - -解析JSON并提取值。这些函数类似于`visitParam*`函数。 - -如果该值不存在或类型错误,将返回`0`。 - -示例: - - select JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1) = -100 - select JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200.0 - select JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300 - -## JSONExtractString(json\[, indices\_or\_keys\]…) {#jsonextractstringjson-indices-or-keys} - -解析JSON并提取字符串。此函数类似于`visitParamExtractString`函数。 - -如果该值不存在或类型错误,则返回空字符串。 - -该值未转义。如果unescaping失败,则返回一个空字符串。 - -示例: - - select JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'hello' - select JSONExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' - select JSONExtractString('{"abc":"\\u263a"}', 'abc') = '☺' - select JSONExtractString('{"abc":"\\u263"}', 'abc') = '' - select JSONExtractString('{"abc":"hello}', 'abc') = '' - -## JSONExtract(json\[, indices\_or\_keys…\], return\_type) {#jsonextractjson-indices-or-keys-return-type} - -解析JSON并提取给定ClickHouse数据类型的值。 - -这是以前的`JSONExtract函数的变体。 这意味着`JSONExtract(…, ‘String’)`返回与`JSONExtractString()`返回完全相同。`JSONExtract(…, ‘Float64’)`返回于`JSONExtractFloat()\`返回完全相同。 - -示例: - - SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))') = ('hello',[-100,200,300]) - SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)') = ([-100,200,300],'hello') - SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))') = [-100, NULL, NULL] - SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)') = NULL - SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8') = 1 - SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Thursday' - SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday' - -## JSONExtractKeysAndValues(json\[, indices\_or\_keys…\], value\_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type} - -从JSON中解析键值对,其中值是给定的ClickHouse数据类型。 - -示例: - - SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; - -## JSONExtractRaw(json\[, indices\_or\_keys\]…) {#jsonextractrawjson-indices-or-keys} - -返回JSON的部分。 - -如果部件不存在或类型错误,将返回空字符串。 - -示例: - - select JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/json_functions/) diff --git a/docs/zh/query_language/functions/logical_functions.md b/docs/zh/query_language/functions/logical_functions.md deleted file mode 100644 index 2f2a61f57a6..00000000000 --- a/docs/zh/query_language/functions/logical_functions.md +++ /dev/null @@ -1,15 +0,0 @@ -# 逻辑函数 {#luo-ji-han-shu} - -逻辑函数可以接受任何数字类型的参数,并返回UInt8类型的0或1。 - -当向函数传递零时,函数将判定为«false»,否则,任何其他非零的值都将被判定为«true»。 - -## and, AND operator {#and-and-operator} - 
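A brief sketch of the zero/non-zero rule described above (using the function form of the operator; the same convention applies to the other logical functions listed below):

``` sql
SELECT
    and(1, 5)   AS both_truthy, -- 1: both arguments are non-zero
    and(1, 0)   AS has_zero,    -- 0: any zero argument makes the result 0
    and(0.5, 3) AS floats_too   -- 1: non-zero floating-point values also count as «true»
```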
-## or, OR operator {#or-or-operator} - -## not, NOT operator {#not-not-operator} - -## xor {#xor} - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git a/docs/zh/query_language/functions/machine_learning_functions.md b/docs/zh/query_language/functions/machine_learning_functions.md deleted file mode 100644 index e9fe2622a57..00000000000 --- a/docs/zh/query_language/functions/machine_learning_functions.md +++ /dev/null @@ -1,15 +0,0 @@ -# 机器学习函数 {#ji-qi-xue-xi-han-shu} - -## evalMLMethod (prediction) {#machine_learning_methods-evalmlmethod} - -使用拟合回归模型的预测请使用`evalMLMethod`函数。 请参阅`linearRegression`中的链接。 - -## Stochastic Linear Regression {#stochastic-linear-regression} - -`stochasticLinearRegression`聚合函数使用线性模型和MSE损失函数实现随机梯度下降法。 使用`evalMLMethod`来预测新数据。 -请参阅示例和注释[此处](../agg_functions/reference.md#agg_functions-stochasticlinearregression)。 - -## Stochastic Logistic Regression {#stochastic-logistic-regression} - -`stochasticLogisticRegression`聚合函数实现了二元分类问题的随机梯度下降法。 使用`evalMLMethod`来预测新数据。 -请参阅示例和注释[此处](../agg_functions/reference.md#agg_functions-stochasticlogisticregression)。 diff --git a/docs/zh/query_language/functions/math_functions.md b/docs/zh/query_language/functions/math_functions.md deleted file mode 100644 index 38b3115e396..00000000000 --- a/docs/zh/query_language/functions/math_functions.md +++ /dev/null @@ -1,107 +0,0 @@ -# 数学函数 {#shu-xue-han-shu} - -以下所有的函数都返回一个Float64类型的数值。返回结果总是以尽可能最大精度返回,但还是可能与机器中可表示最接近该值的数字不同。 - -## e() {#e} - -返回一个接近数学常量e的Float64数字。 - -## pi() {#pi} - -返回一个接近数学常量π的Float64数字。 - -## exp(x) {#expx} - -接受一个数值类型的参数并返回它的指数。 - -## log(x), ln(x) {#logx-lnx} - -接受一个数值类型的参数并返回它的自然对数。 - -## exp2(x) {#exp2x} - -接受一个数值类型的参数并返回它的2的x次幂。 - -## log2(x) {#log2x} - -接受一个数值类型的参数并返回它的底2对数。 - -## exp10(x) {#exp10x} - -接受一个数值类型的参数并返回它的10的x次幂。 - -## log10(x) {#log10x} - -接受一个数值类型的参数并返回它的底10对数。 - -## sqrt(x) {#sqrtx} - -接受一个数值类型的参数并返回它的平方根。 - -## cbrt(x) {#cbrtx} - -接受一个数值类型的参数并返回它的立方根。 - -## erf(x) {#erfx} - -如果’x’是非负数,那么erf(x / σ√2)是具有正态分布且标准偏差为«σ»的随机变量的值与预期值之间的距离大于«x»。 - -示例 (三西格玛准则): - -``` sql -SELECT erf(3 / sqrt(2)) -``` - - ┌─erf(divide(3, sqrt(2)))─┐ - │ 0.9973002039367398 │ - └─────────────────────────┘ - -## erfc(x) {#erfcx} - -接受一个数值参数并返回一个接近1 - erf(x)的Float64数字,但不会丢失大«x»值的精度。 - -## lgamma(x) {#lgammax} - -返回x的绝对值的自然对数的伽玛函数。 - -## tgamma(x) {#tgammax} - -返回x的伽玛函数。 - -## sin(x) {#sinx} - -返回x的三角正弦值。 - -## cos(x) {#cosx} - -返回x的三角余弦值。 - -## tan(x) {#tanx} - -返回x的三角正切值。 - -## asin(x) {#asinx} - -返回x的反三角正弦值。 - -## acos(x) {#acosx} - -返回x的反三角余弦值。 - -## atan(x) {#atanx} - -返回x的反三角正切值。 - -## pow(x, y), power(x, y) {#powx-y-powerx-y} - -接受x和y两个参数。返回x的y次方。 - -## intExp2 {#intexp2} - -接受一个数值类型的参数并返回它的2的x次幂(UInt64)。 - -## intExp10 {#intexp10} - -接受一个数值类型的参数并返回它的10的x次幂(UInt64)。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) diff --git a/docs/zh/query_language/functions/other_functions.md b/docs/zh/query_language/functions/other_functions.md deleted file mode 100644 index 8383c57150c..00000000000 --- a/docs/zh/query_language/functions/other_functions.md +++ /dev/null @@ -1,537 +0,0 @@ -# 其他函数 {#qi-ta-han-shu} - -## hostName() {#hostname} - -返回一个字符串,其中包含执行此函数的主机的名称。 对于分布式处理,如果在远程服务器上执行此函数,则将返回远程服务器主机的名称。 - -## basename {#basename} - -在最后一个斜杠或反斜杠后的字符串文本。 此函数通常用于从路径中提取文件名。 - - basename( expr ) - -**参数** - -- `expr` — 任何一个返回[String](../../data_types/string.md)结果的表达式。[String](../../data_types/string.md) - -**返回值** - -一个String类型的值,其包含: - -- 在最后一个斜杠或反斜杠后的字符串文本内容。 - - 如果输入的字符串以斜杆或反斜杆结尾,例如:`/`或`c:\`,函数将返回一个空字符串。 - -- 
如果输入的字符串中不包含斜杆或反斜杠,函数返回输入字符串本身。 - -**示例** - -``` sql -SELECT 'some/long/path/to/file' AS a, basename(a) -``` - -``` text -┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ -│ some\long\path\to\file │ file │ -└────────────────────────┴────────────────────────────────────────┘ -``` - -``` sql -SELECT 'some\\long\\path\\to\\file' AS a, basename(a) -``` - -``` text -┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ -│ some\long\path\to\file │ file │ -└────────────────────────┴────────────────────────────────────────┘ -``` - -``` sql -SELECT 'some-file-name' AS a, basename(a) -``` - -``` text -┌─a──────────────┬─basename('some-file-name')─┐ -│ some-file-name │ some-file-name │ -└────────────────┴────────────────────────────┘ -``` - -## visibleWidth(x) {#visiblewidthx} - -以文本格式(以制表符分隔)向控制台输出值时,计算近似宽度。 -系统使用此函数实现Pretty格式。 -Calculates the approximate width when outputting values to the console in text format (tab-separated). -This function is used by the system for implementing Pretty formats. - -`NULL` is represented as a string corresponding to `NULL` in `Pretty` formats. - - SELECT visibleWidth(NULL) - - ┌─visibleWidth(NULL)─┐ - │ 4 │ - └────────────────────┘ - -## toTypeName(x) {#totypenamex} - -返回包含参数的类型名称的字符串。 - -如果将`NULL`作为参数传递给函数,那么它返回`Nullable(Nothing)`类型,它对应于ClickHouse中的内部`NULL`。 - -## blockSize() {#function-blocksize} - -获取Block的大小。 -在ClickHouse中,查询始终工作在Block(包含列的部分的集合)上。此函数允许您获取调用其的块的大小。 - -## materialize(x) {#materializex} - -将一个常量列变为一个非常量列。 -在ClickHouse中,非常量列和常量列在内存中的表示方式不同。尽管函数对于常量列和非常量总是返回相同的结果,但它们的工作方式可能完全不同(执行不同的代码)。此函数用于调试这种行为。 - -## ignore(…) {#ignore} - -接受任何参数,包括`NULL`。始终返回0。 -但是,函数的参数总是被计算的。该函数可以用于基准测试。 - -## sleep(seconds) {#sleepseconds} - -在每个Block上休眠’seconds’秒。可以是整数或浮点数。 - -## sleepEachRow(seconds) {#sleepeachrowseconds} - -在每行上休眠’seconds’秒。可以是整数或浮点数。 - -## currentDatabase() {#currentdatabase} - -返回当前数据库的名称。 -当您需要在CREATE TABLE中的表引擎参数中指定数据库,您可以使用此函数。 - -## isFinite(x) {#isfinitex} - -接受Float32或Float64类型的参数,如果参数不是infinite且不是NaN,则返回1,否则返回0。 - -## isInfinite(x) {#isinfinitex} - -接受Float32或Float64类型的参数,如果参数是infinite,则返回1,否则返回0。注意NaN返回0。 - -## isNaN(x) {#isnanx} - -接受Float32或Float64类型的参数,如果参数是Nan,则返回1,否则返回0。 - -## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column} - -Accepts constant strings: database name, table name, and column name. Returns a UInt8 constant expression equal to 1 if there is a column, otherwise 0. If the hostname parameter is set, the test will run on a remote server. -The function throws an exception if the table does not exist. -For elements in a nested data structure, the function checks for the existence of a column. For the nested data structure itself, the function returns 0. 
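A minimal self-check against the `system.numbers` table, which every server provides (a sketch; the commented values are the expected results):

``` sql
SELECT
    hasColumnInTable('system', 'numbers', 'number')      AS existing_column, -- 1: system.numbers has a 'number' column
    hasColumnInTable('system', 'numbers', 'no_such_col') AS missing_column   -- 0: this column does not exist
```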
- -## bar {#function-bar} - -使用unicode构建图表。 - -`bar(x, min, max, width)` 当`x = max`时, 绘制一个宽度与`(x - min)`成正比且等于`width`的字符带。 - -参数: - -- `x` — 要显示的尺寸。 -- `min, max` — 整数常量,该值必须是`Int64`。 -- `width` — 常量,可以是正整数或小数。 - -字符带的绘制精度是符号的八分之一。 - -示例: - -``` sql -SELECT - toHour(EventTime) AS h, - count() AS c, - bar(c, 0, 600000, 20) AS bar -FROM test.hits -GROUP BY h -ORDER BY h ASC -``` - - ┌──h─┬──────c─┬─bar────────────────┐ - │ 0 │ 292907 │ █████████▋ │ - │ 1 │ 180563 │ ██████ │ - │ 2 │ 114861 │ ███▋ │ - │ 3 │ 85069 │ ██▋ │ - │ 4 │ 68543 │ ██▎ │ - │ 5 │ 78116 │ ██▌ │ - │ 6 │ 113474 │ ███▋ │ - │ 7 │ 170678 │ █████▋ │ - │ 8 │ 278380 │ █████████▎ │ - │ 9 │ 391053 │ █████████████ │ - │ 10 │ 457681 │ ███████████████▎ │ - │ 11 │ 493667 │ ████████████████▍ │ - │ 12 │ 509641 │ ████████████████▊ │ - │ 13 │ 522947 │ █████████████████▍ │ - │ 14 │ 539954 │ █████████████████▊ │ - │ 15 │ 528460 │ █████████████████▌ │ - │ 16 │ 539201 │ █████████████████▊ │ - │ 17 │ 523539 │ █████████████████▍ │ - │ 18 │ 506467 │ ████████████████▊ │ - │ 19 │ 520915 │ █████████████████▎ │ - │ 20 │ 521665 │ █████████████████▍ │ - │ 21 │ 542078 │ ██████████████████ │ - │ 22 │ 493642 │ ████████████████▍ │ - │ 23 │ 400397 │ █████████████▎ │ - └────┴────────┴────────────────────┘ - -## transform {#transform} - -根据定义,将某些元素转换为其他元素。 -此函数有两种使用方式: - -1. `transform(x, array_from, array_to, default)` - -`x` – 要转换的值。 - -`array_from` – 用于转换的常量数组。 - -`array_to` – 将‘from’中的值转换为的常量数组。 - -`default` – 如果‘x’不等于‘from’中的任何值,则默认转换的值。 - -`array_from` 和 `array_to` – 拥有相同大小的数组。 - -类型约束: - -`transform(T, Array(T), Array(U), U) -> U` - -`T`和`U`可以是String,Date,DateTime或任意数值类型的。 -对于相同的字母(T或U),如果数值类型,那么它们不可不完全匹配的,只需要具备共同的类型即可。 -例如,第一个参数是Int64类型,第二个参数是Array(UInt16)类型。 - -如果’x’值等于’array\_from’数组中的一个元素,它将从’array\_to’数组返回一个对应的元素(下标相同)。否则,它返回’default’。如果’array\_from’匹配到了多个元素,则返回第一个匹配的元素。 - -示例: - -``` sql -SELECT - transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title, - count() AS c -FROM test.hits -WHERE SearchEngineID != 0 -GROUP BY title -ORDER BY c DESC -``` - - ┌─title─────┬──────c─┐ - │ Yandex │ 498635 │ - │ Google │ 229872 │ - │ Other │ 104472 │ - └───────────┴────────┘ - -1. 
`transform(x, array_from, array_to)` - -与第一种不同在于省略了’default’参数。 -如果’x’值等于’array\_from’数组中的一个元素,它将从’array\_to’数组返回相应的元素(下标相同)。 否则,它返回’x’。 - -类型约束: - -`transform(T, Array(T), Array(T)) -> T` - -示例: - -``` sql -SELECT - transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, - count() AS c -FROM test.hits -GROUP BY domain(Referer) -ORDER BY count() DESC -LIMIT 10 -``` - - ┌─s──────────────┬───────c─┐ - │ │ 2906259 │ - │ www.yandex │ 867767 │ - │ ███████.ru │ 313599 │ - │ mail.yandex.ru │ 107147 │ - │ ██████.ru │ 100355 │ - │ █████████.ru │ 65040 │ - │ news.yandex.ru │ 64515 │ - │ ██████.net │ 59141 │ - │ example.com │ 57316 │ - └────────────────┴─────────┘ - -## formatReadableSize(x) {#formatreadablesizex} - -接受大小(字节数)。返回带有后缀(KiB, MiB等)的字符串。 - -示例: - -``` sql -SELECT - arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes, - formatReadableSize(filesize_bytes) AS filesize -``` - - ┌─filesize_bytes─┬─filesize───┐ - │ 1 │ 1.00 B │ - │ 1024 │ 1.00 KiB │ - │ 1048576 │ 1.00 MiB │ - │ 192851925 │ 183.92 MiB │ - └────────────────┴────────────┘ - -## least(a, b) {#leasta-b} - -返回a和b中的最小值。 - -## greatest(a, b) {#greatesta-b} - -返回a和b的最大值。 - -## uptime() {#uptime} - -返回服务正常运行的秒数。 - -## version() {#version} - -以字符串形式返回服务器的版本。 - -## timezone() {#timezone} - -返回服务器的时区。 - -## blockNumber {#blocknumber} - -返回行所在的Block的序列号。 - -## rowNumberInBlock {#function-rownumberinblock} - -返回行所在Block中行的序列号。 针对不同的Block始终重新计算。 - -## rowNumberInAllBlocks() {#rownumberinallblocks} - -返回行所在结果集中的序列号。此函数仅考虑受影响的Block。 - -## runningDifference(x) {#other_functions-runningdifference} - -计算数据块中相邻行的值之间的差异。 -对于第一行返回0,并为每个后续行返回与前一行的差异。 - -函数的结果取决于受影响的Block和Block中的数据顺序。 -如果使用ORDER BY创建子查询并从子查询外部调用该函数,则可以获得预期结果。 - -示例: - -``` sql -SELECT - EventID, - EventTime, - runningDifference(EventTime) AS delta -FROM -( - SELECT - EventID, - EventTime - FROM events - WHERE EventDate = '2016-11-24' - ORDER BY EventTime ASC - LIMIT 5 -) -``` - - ┌─EventID─┬───────────EventTime─┬─delta─┐ - │ 1106 │ 2016-11-24 00:00:04 │ 0 │ - │ 1107 │ 2016-11-24 00:00:05 │ 1 │ - │ 1108 │ 2016-11-24 00:00:05 │ 0 │ - │ 1109 │ 2016-11-24 00:00:09 │ 4 │ - │ 1110 │ 2016-11-24 00:00:10 │ 1 │ - └─────────┴─────────────────────┴───────┘ - -## runningDifferenceStartingWithFirstValue {#runningdifferencestartingwithfirstvalue} - -与[runningDifference](./other_functions.md#other_functions-runningdifference)相同,区别在于第一行返回第一行的值,后续每个后续行返回与上一行的差值。 - -## MACNumToString(num) {#macnumtostringnum} - -接受一个UInt64类型的数字。 将其解释为big endian的MAC地址。 返回包含相应MAC地址的字符串,格式为AA:BB:CC:DD:EE:FF(以冒号分隔的十六进制形式的数字)。 - -## MACStringToNum(s) {#macstringtonums} - -与MACNumToString相反。 如果MAC地址格式无效,则返回0。 - -## MACStringToOUI(s) {#macstringtoouis} - -接受格式为AA:BB:CC:DD:EE:FF(十六进制形式的冒号分隔数字)的MAC地址。 返回前三个八位字节作为UInt64编号。 如果MAC地址格式无效,则返回0。 - -## getSizeOfEnumType {#getsizeofenumtype} - -返回[Enum](../../data_types/enum.md)中的枚举数量。 - - getSizeOfEnumType(value) - -**参数:** - -- `value` — `Enum`类型的值。 - -**返回值** - -- `Enum`的枚举数量。 -- 如果类型不是`Enum`,则抛出异常。 - -**示例** - - SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x - - ┌─x─┐ - │ 2 │ - └───┘ - -## toColumnTypeName {#tocolumntypename} - -返回在RAM中列的数据类型的名称。 - - toColumnTypeName(value) - -**参数:** - -- `value` — 任何类型的值。 - -**返回值** - -- 一个字符串,其内容是`value`在RAM中的类型名称。 - -**`toTypeName ' 与 ' toColumnTypeName`的区别示例** - - :) select toTypeName(cast('2018-01-01 01:02:03' AS DateTime)) - - SELECT toTypeName(CAST('2018-01-01 01:02:03', 'DateTime')) - - ┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ - │ DateTime │ - 
└─────────────────────────────────────────────────────┘ - - 1 rows in set. Elapsed: 0.008 sec. - - :) select toColumnTypeName(cast('2018-01-01 01:02:03' AS DateTime)) - - SELECT toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime')) - - ┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ - │ Const(UInt32) │ - └───────────────────────────────────────────────────────────┘ - -该示例显示`DateTime`数据类型作为`Const(UInt32)`存储在内存中。 - -## dumpColumnStructure {#dumpcolumnstructure} - -输出在RAM中的数据结果的详细信息。 - - dumpColumnStructure(value) - -**参数:** - -- `value` — 任何类型的值. - -**返回值** - -- 一个字符串,其内容是`value`在RAM中的数据结构的详细描述。 - -**示例** - - SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) - - ┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ - │ DateTime, Const(size = 1, UInt32(size = 1)) │ - └──────────────────────────────────────────────────────────────┘ - -## defaultValueOfArgumentType {#defaultvalueofargumenttype} - -输出数据类型的默认值。 - -不包括用户设置的自定义列的默认值。 - - defaultValueOfArgumentType(expression) - -**参数:** - -- `expression` — 任意类型的值或导致任意类型值的表达式。 - -**返回值** - -- 数值类型返回`0`。 -- 字符串类型返回空的字符串。 -- [Nullable](../../data_types/nullable.md)类型返回`ᴺᵁᴸᴸ`。 - -**示例** - - :) SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) - - SELECT defaultValueOfArgumentType(CAST(1, 'Int8')) - - ┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ - │ 0 │ - └─────────────────────────────────────────────┘ - - 1 rows in set. Elapsed: 0.002 sec. - - :) SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) - - SELECT defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)')) - - ┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ - │ ᴺᵁᴸᴸ │ - └───────────────────────────────────────────────────────┘ - - 1 rows in set. Elapsed: 0.002 sec. - -## replicate {#replicate} - -使用单个值填充一个数组。 - -用于[arrayJoin](array_join.md#functions_arrayjoin)的内部实现。 - - replicate(x, arr) - -**参数:** - -- `arr` — 原始数组。 ClickHouse创建一个与原始数据长度相同的新数组,并用值`x`填充它。 -- `x` — 生成的数组将被填充的值。 - -**输出** - -- 一个被`x`填充的数组。 - -**示例** - - SELECT replicate(1, ['a', 'b', 'c']) - - ┌─replicate(1, ['a', 'b', 'c'])─┐ - │ [1,1,1] │ - └───────────────────────────────┘ - -## filesystemAvailable {#filesystemavailable} - -返回磁盘的剩余空间信息(以字节为单位)。使用配置文件中的path配置评估此信息。 - -## filesystemCapacity {#filesystemcapacity} - -返回磁盘的容量信息,以字节为单位。使用配置文件中的path配置评估此信息。 - -## finalizeAggregation {#function-finalizeaggregation} - -获取聚合函数的状态。返回聚合结果(最终状态)。 - -## runningAccumulate {#function-runningaccumulate} - -获取聚合函数的状态并返回其具体的值。这是从第一行到当前行的所有行累计的结果。 - -例如,获取聚合函数的状态(示例runningAccumulate(uniqState(UserID))),对于数据块的每一行,返回所有先前行和当前行的状态合并后的聚合函数的结果。 -因此,函数的结果取决于分区中数据块的顺序以及数据块中行的顺序。 - -## joinGet(‘join\_storage\_table\_name’, ‘get\_column’, join\_key) {#joingetjoin-storage-table-name-get-column-join-key} - -使用指定的连接键从Join类型引擎的表中获取数据。 - -## modelEvaluate(model\_name, …) {#function-modelevaluate} - -使用外部模型计算。 -接受模型的名称以及模型的参数。返回Float64类型的值。 - -## throwIf(x) {#throwifx} - -如果参数不为零则抛出异常。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/zh/query_language/functions/random_functions.md b/docs/zh/query_language/functions/random_functions.md deleted file mode 100644 index 31283cce08a..00000000000 --- a/docs/zh/query_language/functions/random_functions.md +++ /dev/null @@ -1,21 +0,0 @@ -# 随机函数 {#sui-ji-han-shu} - -随机函数使用非加密方式生成伪随机数字。 - -所有随机函数都只接受一个参数或不接受任何参数。 -您可以向它传递任何类型的参数,但传递的参数将不会使用在任何随机数生成过程中。 -此参数的唯一目的是防止公共子表达式消除,以便在相同的查询中使用相同的随机函数生成不同的随机数。 - -## rand {#rand} - -返回一个UInt32类型的随机数字,所有UInt32类型的数字被生成的概率均相等。此函数线性同于的方式生成随机数。 - -## rand64 
{#rand64} - -返回一个UInt64类型的随机数字,所有UInt64类型的数字被生成的概率均相等。此函数线性同于的方式生成随机数。 - -## randConstant {#randconstant} - -返回一个UInt32类型的随机数字,该函数不同之处在于仅为每个数据块参数一个随机数。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/random_functions/) diff --git a/docs/zh/query_language/functions/rounding_functions.md b/docs/zh/query_language/functions/rounding_functions.md deleted file mode 100644 index fb421be3b28..00000000000 --- a/docs/zh/query_language/functions/rounding_functions.md +++ /dev/null @@ -1,86 +0,0 @@ -# 取整函数 {#qu-zheng-han-shu} - -## floor(x\[, N\]) {#floorx-n} - -返回小于或等于x的最大舍入数。该函数使用参数乘1/10N,如果1/10N不精确,则选择最接近的精确的适当数据类型的数。 -‘N’是一个整数常量,可选参数。默认为0,这意味着不对其进行舍入。 -‘N’可以是负数。 - -示例: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` - -`x`是任何数字类型。结果与其为相同类型。 -对于整数参数,使用负‘N’值进行舍入是有意义的(对于非负«N»,该函数不执行任何操作)。 -如果取整导致溢出(例如,floor(-128,-1)),则返回特定于实现的结果。 - -## ceil(x\[, N\]), ceiling(x\[, N\]) {#ceilx-n-ceilingx-n} - -返回大于或等于’x’的最小舍入数。在其他方面,它与’floor’功能相同(见上文)。 - -## round(x\[, N\]) {#rounding_functions-round} - -将值取整到指定的小数位数。 - -该函数按顺序返回最近的数字。如果给定数字包含多个最近数字,则函数返回其中最接近偶数的数字(银行的取整方式)。 - - round(expression [, decimal_places]) - -**参数:** - -- `expression` — 要进行取整的数字。可以是任何返回数字[类型](../../data_types/index.md#data_types)的[表达式](../syntax.md#syntax-expressions)。 -- `decimal-places` — 整数类型。 - - 如果`decimal-places > 0`,则该函数将值舍入小数点右侧。 - - 如果`decimal-places < 0`,则该函数将小数点左侧的值四舍五入。 - - 如果`decimal-places = 0`,则该函数将该值舍入为整数。在这种情况下,可以省略参数。 - -**返回值:** - -与输入数字相同类型的取整后的数字。 - -### 示例 {#shi-li} - -**使用示例** - -``` sql -SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3 -``` - - ┌───x─┬─round(divide(number, 2))─┐ - │ 0 │ 0 │ - │ 0.5 │ 0 │ - │ 1 │ 1 │ - └─────┴──────────────────────────┘ - -**取整的示例** - -取整到最近的数字。 - - round(3.2, 0) = 3 - round(4.1267, 2) = 4.13 - round(22,-1) = 20 - round(467,-2) = 500 - round(-467,-2) = -500 - -银行的取整。 - - round(3.5) = 4 - round(4.5) = 4 - round(3.55, 1) = 3.6 - round(3.65, 1) = 3.6 - -## roundToExp2(num) {#roundtoexp2num} - -接受一个数字。如果数字小于1,则返回0。否则,它将数字向下舍入到最接近的(整个非负)2的x次幂。 - -## roundDuration(num) {#rounddurationnum} - -接受一个数字。如果数字小于1,则返回0。否则,它将数字向下舍入为集合中的数字:1,10,30,60,120,180,240,300,600,1200,1800,3600,7200,18000,36000。此函数用于Yandex.Metrica报表中计算会话的持续时长。 - -## roundAge(num) {#roundagenum} - -接受一个数字。如果数字小于18,则返回0。否则,它将数字向下舍入为集合中的数字:18,25,35,45,55。此函数用于Yandex.Metrica报表中用户年龄的计算。 - -## roundDown(num, arr) {#rounddownnum-arr} - -接受一个数字,将其向下舍入到指定数组中的元素。如果该值小于数组中的最低边界,则返回最低边界。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/) diff --git a/docs/zh/query_language/functions/splitting_merging_functions.md b/docs/zh/query_language/functions/splitting_merging_functions.md deleted file mode 100644 index 7477e89441e..00000000000 --- a/docs/zh/query_language/functions/splitting_merging_functions.md +++ /dev/null @@ -1,29 +0,0 @@ -# 字符串拆分合并函数 {#zi-fu-chuan-chai-fen-he-bing-han-shu} - -## splitByChar(separator, s) {#splitbycharseparator-s} - -将字符串以‘separator’拆分成多个子串。‘separator’必须为仅包含一个字符的字符串常量。 -返回拆分后的子串的数组。 如果分隔符出现在字符串的开头或结尾,或者如果有多个连续的分隔符,则将在对应位置填充空的子串。 - -## splitByString(separator, s) {#splitbystringseparator-s} - -与上面相同,但它使用多个字符的字符串作为分隔符。 该字符串必须为非空。 - -## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator} - -使用separator将数组中列出的字符串拼接起来。‘separator’是一个可选参数:一个常量字符串,默认情况下设置为空字符串。 -返回拼接后的字符串。 - -## alphaTokens(s) {#alphatokenss} - -从范围a-z和A-Z中选择连续字节的子字符串。返回子字符串数组。 - -**示例:** - - SELECT alphaTokens('abca1abc') - - ┌─alphaTokens('abca1abc')─┐ - │ ['abca','abc'] │ - └─────────────────────────┘ - 
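To round out the examples above, a short sketch of the split-and-join behavior described in this section:

``` sql
SELECT
    splitByChar(',', 'a,b,,c')              AS parts,  -- ['a','b','','c']: consecutive separators produce empty substrings
    arrayStringConcat(['a', 'b', 'c'], '-') AS joined  -- 'a-b-c'
```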
-[来源文章](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) diff --git a/docs/zh/query_language/functions/string_functions.md b/docs/zh/query_language/functions/string_functions.md deleted file mode 100644 index a2b5355ae8c..00000000000 --- a/docs/zh/query_language/functions/string_functions.md +++ /dev/null @@ -1,169 +0,0 @@ -# 字符串函数 {#zi-fu-chuan-han-shu} - -## empty {#string-functions-empty} - -对于空字符串返回1,对于非空字符串返回0。 -结果类型是UInt8。 -如果字符串包含至少一个字节,则该字符串被视为非空字符串,即使这是一个空格或空字符。 -该函数也适用于数组。 - -## notEmpty {#notempty} - -对于空字符串返回0,对于非空字符串返回1。 -结果类型是UInt8。 -该函数也适用于数组。 - -## length {#length} - -返回字符串的字节长度。 -结果类型是UInt64。 -该函数也适用于数组。 - -## lengthUTF8 {#lengthutf8} - -假定字符串以UTF-8编码组成的文本,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。 -结果类型是UInt64。 - -## char\_length, CHAR\_LENGTH {#char-length-char-length} - -假定字符串以UTF-8编码组成的文本,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。 -结果类型是UInt64。 - -## character\_length, CHARACTER\_LENGTH {#character-length-character-length} - -假定字符串以UTF-8编码组成的文本,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。 -结果类型是UInt64。 - -## lower, lcase {#lower-lcase} - -将字符串中的ASCII转换为小写。 - -## upper, ucase {#upper-ucase} - -将字符串中的ASCII转换为大写。 - -## lowerUTF8 {#lowerutf8} - -将字符串转换为小写,函数假设字符串是以UTF-8编码文本的字符集。 -同时函数不检测语言。因此对土耳其人来说,结果可能不完全正确。 -如果UTF-8字节序列的长度对于代码点的大写和小写不同,则该代码点的结果可能不正确。 -如果字符串包含一组非UTF-8的字节,则将引发未定义行为。 - -## upperUTF8 {#upperutf8} - -将字符串转换为大写,函数假设字符串是以UTF-8编码文本的字符集。 -同时函数不检测语言。因此对土耳其人来说,结果可能不完全正确。 -如果UTF-8字节序列的长度对于代码点的大写和小写不同,则该代码点的结果可能不正确。 -如果字符串包含一组非UTF-8的字节,则将引发未定义行为。 - -## isValidUTF8 {#isvalidutf8} - -检查字符串是否为有效的UTF-8编码,是则返回1,否则返回0。 - -## toValidUTF8 {#tovalidutf8} - -用`�`(U+FFFD)字符替换无效的UTF-8字符。所有连续的无效字符都会被替换为一个替换字符。 - - toValidUTF8( input_string ) - -参数: - -- input\_string — 任何一个[String](../../data_types/string.md)类型的对象。 - -返回值: 有效的UTF-8字符串。 - -### 示例 {#shi-li} - -``` sql -SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') -``` - -``` text -┌─toValidUTF8('a����b')─┐ -│ a�b │ -└───────────────────────┘ -``` - -## reverse {#reverse} - -反转字符串。 - -## reverseUTF8 {#reverseutf8} - -以Unicode字符为单位反转UTF-8编码的字符串。如果字符串不是UTF-8编码,则可能获取到一个非预期的结果(不会抛出异常)。 - -## format(pattern, s0, s1, …) {#formatpattern-s0-s1} - -使用常量字符串`pattern`格式化其他参数。`pattern`字符串中包含由大括号`{}`包围的«替换字段»。 未被包含在大括号中的任何内容都被视为文本内容,它将原样保留在返回值中。 如果你需要在文本内容中包含一个大括号字符,它可以通过加倍来转义:`{{ '{{' }}`和`{{ '{{' }} '}}' }}`。 字段名称可以是数字(从零开始)或空(然后将它们视为连续数字) - -``` sql -SELECT format('{1} {0} {1}', 'World', 'Hello') - -┌─format('{1} {0} {1}', 'World', 'Hello')─┐ -│ Hello World Hello │ -└─────────────────────────────────────────┘ - -SELECT format('{} {}', 'Hello', 'World') - -┌─format('{} {}', 'Hello', 'World')─┐ -│ Hello World │ -└───────────────────────────────────┘ -``` - -## concat(s1, s2, …) {#concats1-s2} - -将参数中的多个字符串拼接,不带分隔符。 - -## concatAssumeInjective(s1, s2, …) {#concatassumeinjectives1-s2} - -与[concat](./string_functions.md#concat-s1-s2)相同,区别在于,你需要保证concat(s1, s2, s3) -\> s4是单射的,它将用于GROUP BY的优化。 - -## substring(s, offset, length), mid(s, offset, length), substr(s, offset, length) {#substrings-offset-length-mids-offset-length-substrs-offset-length} - -以字节为单位截取指定位置字符串,返回以‘offset’位置为开头,长度为‘length’的子串。‘offset’从1开始(与标准SQL相同)。‘offset’和‘length’参数必须是常量。 - -## substringUTF8(s, offset, length) {#substringutf8s-offset-length} - -与‘substring’相同,但其操作单位为Unicode字符,函数假设字符串是以UTF-8进行编码的文本。如果不是则可能返回一个预期外的结果(不会抛出异常)。 - -## appendTrailingCharIfAbsent(s, c) {#appendtrailingcharifabsents-c} - -如果‘s’字符串非空并且末尾不包含‘c’字符,则将‘c’字符附加到末尾。 - -## convertCharset(s, from, to) 
{#convertcharsets-from-to} - -返回从‘from’中的编码转换为‘to’中的编码的字符串‘s’。 - -## base64Encode(s) {#base64encodes} - -将字符串‘s’编码成base64。 - -## base64Decode(s) {#base64decodes} - -使用base64将字符串解码成原始字符串。如果失败则抛出异常。 - -## tryBase64Decode(s) {#trybase64decodes} - -使用base64将字符串解码成原始字符串。但如果出现错误,将返回空字符串。 - -## endsWith(s, suffix) {#endswiths-suffix} - -返回是否以指定的后缀结尾。如果字符串以指定的后缀结束,则返回1,否则返回0。 - -## startsWith(s, prefix) {#startswiths-prefix} - -返回是否以指定的前缀开头。如果字符串以指定的前缀开头,则返回1,否则返回0。 - -## trimLeft(s) {#trimlefts} - -返回一个删除了左侧连续空白字符的字符串。 - -## trimRight(s) {#trimrights} - -返回一个删除了右侧连续空白字符的字符串。 - -## trimBoth(s) {#trimboths} - -返回一个删除了两侧连续空白字符的字符串。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) diff --git a/docs/zh/query_language/functions/string_replace_functions.md b/docs/zh/query_language/functions/string_replace_functions.md deleted file mode 100644 index e70dcade3a0..00000000000 --- a/docs/zh/query_language/functions/string_replace_functions.md +++ /dev/null @@ -1,79 +0,0 @@ -# 字符串替换函数 {#zi-fu-chuan-ti-huan-han-shu} - -## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement} - -用‘replacement’子串替换‘haystack’中与‘pattern’子串第一个匹配的匹配项(如果存在)。 -‘pattern’和‘replacement’必须是常量。 - -## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement} - -用‘replacement’子串替换‘haystack’中出现的所有‘pattern’子串。 - -## replaceRegexpOne(haystack, pattern, replacement) {#replaceregexponehaystack-pattern-replacement} - -使用‘pattern’正则表达式替换。 ‘pattern’可以是任意一个有效的re2正则表达式。 -如果存在与正则表达式匹配的匹配项,仅替换第一个匹配项。 -同时‘replacement’可以指定为包含替换模板的字符串,模板中可以包含`\0`-`\9`。 -其中`\0`将被替换为正则表达式的整个匹配项,`\1`-`\9`将被替换为与之对应的子模式匹配项。要在模板中使用`\`字符,请使用`\\`将其转义。 -另外还请记住,字符串字面量需要额外的转义。 - -示例1.将日期转换为美国格式: - -``` sql -SELECT DISTINCT - EventDate, - replaceRegexpOne(toString(EventDate), '(\\d{4})-(\\d{2})-(\\d{2})', '\\2/\\3/\\1') AS res -FROM test.hits -LIMIT 7 -FORMAT TabSeparated -``` - - 2014-03-17 03/17/2014 - 2014-03-18 03/18/2014 - 2014-03-19 03/19/2014 - 2014-03-20 03/20/2014 - 2014-03-21 03/21/2014 - 2014-03-22 03/22/2014 - 2014-03-23 03/23/2014 - -示例2.复制字符串十次: - -``` sql -SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') AS res -``` - - ┌─res────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ - │ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! │ - └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ - -## replaceRegexpAll(haystack, pattern, replacement) {#replaceregexpallhaystack-pattern-replacement} - -与replaceRegexpOne相同,但会替换所有出现的匹配项。例如: - -``` sql -SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res -``` - - ┌─res────────────────────────┐ - │ HHeelllloo,, WWoorrlldd!! │ - └────────────────────────────┘ - -例外的是,如果正则表达式匹配到了空子串,则仅会进行一次替换。 -示例: - -``` sql -SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res -``` - - ┌─res─────────────────┐ - │ here: Hello, World! 
│ - └─────────────────────┘ - -## regexpQuoteMeta(s) {#regexpquotemetas} - -该函数用于在字符串中的某些预定义字符之前添加反斜杠。 -预定义字符:`\0`、`\\`、`|`、`(`、`)`、`^`、`$`、`.`、`[`、`]`、`?`、`*`、`+`、`{`、`:`、`-`。 -这个实现与re2::RE2::QuoteMeta略有不同。它以`\0`而不是`\x00`转义零字节,并且它只转义所需的字符。 -有关详细信息,请参阅链接:[RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473) - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/) diff --git a/docs/zh/query_language/functions/string_search_functions.md b/docs/zh/query_language/functions/string_search_functions.md deleted file mode 100644 index 8a27c460966..00000000000 --- a/docs/zh/query_language/functions/string_search_functions.md +++ /dev/null @@ -1,122 +0,0 @@ -# 字符串搜索函数 {#zi-fu-chuan-sou-suo-han-shu} - -下列所有函数在默认的情况下区分大小写。对于不区分大小写的搜索,存在单独的变体。 - -## position(haystack, needle), locate(haystack, needle) {#positionhaystack-needle-locatehaystack-needle} - -在字符串`haystack`中搜索子串`needle`。 -返回子串的位置(以字节为单位),从1开始,如果未找到子串,则返回0。 - -对于不区分大小写的搜索,请使用函数`positionCaseInsensitive`。 - -## positionUTF8(haystack, needle) {#positionutf8haystack-needle} - -与`position`相同,但位置以Unicode字符返回。此函数工作在UTF-8编码的文本字符集中。如果字符串不是UTF-8编码,则可能返回非预期的结果(它不会抛出异常)。 - -对于不区分大小写的搜索,请使用函数`positionCaseInsensitiveUTF8`。 - -## multiSearchAllPositions(haystack, \[needle1, needle2, …, needlen\]) {#multisearchallpositionshaystack-needle1-needle2-needlen} - -与`position`相同,但函数返回一个数组,其中包含所有匹配needlei的位置。 - -对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchAllPositionsCaseInsensitive,multiSearchAllPositionsUTF8,multiSearchAllPositionsCaseInsensitiveUTF8`。 - -## multiSearchFirstPosition(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstpositionhaystack-needle1-needle2-needlen} - -与`position`相同,但返回在`haystack`中与needles字符串匹配的最左偏移。 - -对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchFirstPositionCaseInsensitive,multiSearchFirstPositionUTF8,multiSearchFirstPositionCaseInsensitiveUTF8`。 - -## multiSearchFirstIndex(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} - -返回在字符串`haystack`中最先查找到的needlei的索引`i`(从1开始),没有找到任何匹配项则返回0。 - -对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchFirstIndexCaseInsensitive,multiSearchFirstIndexUTF8,multiSearchFirstIndexCaseInsensitiveUTF8`。 - -## multiSearchAny(haystack, \[needle1, needle2, …, needlen\]) {#multisearchanyhaystack-needle1-needle2-needlen} - -如果`haystack`中至少存在一个needlei匹配则返回1,否则返回0。 - -对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchAnyCaseInsensitive,multiSearchAnyUTF8,multiSearchAnyCaseInsensitiveUTF8`。 - -!!! note "注意" - 在所有`multiSearch*`函数中,由于实现规范,needles的数量应小于2^8。 - -## match(haystack, pattern) {#matchhaystack-pattern} - -检查字符串是否与`pattern`正则表达式匹配。`pattern`可以是一个任意的`re2`正则表达式。 `re2`正则表达式的[语法](https://github.com/google/re2/wiki/Syntax)比Perl正则表达式的语法存在更多限制。 - -如果不匹配返回0,否则返回1。 - -请注意,反斜杠符号(`\`)用于在正则表达式中转义。由于字符串字面量中采用相同的符号进行转义,因此为了在正则表达式中转义符号,必须在字符串字面量中写入两个反斜杠(\\)。 - -正则表达式与字符串一起使用,就像它是一组字节一样。正则表达式中不能包含空字节。 -对于在字符串中搜索子字符串的模式,最好使用LIKE或«position»,因为它们更加高效。 - -## multiMatchAny(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} - -与`match`相同,但如果所有正则表达式都不匹配,则返回0;如果任何模式匹配,则返回1。它使用[hyperscan](https://github.com/intel/hyperscan)库。对于在字符串中搜索子字符串的模式,最好使用«multiSearchAny»,因为它更高效。 - -!!! 
note "注意" - 任何`haystack`字符串的长度必须小于2^32字节,否则抛出异常。这种限制是因为hyperscan API而产生的。 - -## multiMatchAnyIndex(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} - -与`multiMatchAny`相同,但返回与haystack匹配的任一模式的索引位置。 - -## multiFuzzyMatchAny(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} - -与`multiMatchAny`相同,但如果任一模式能够在指定的[编辑距离](https://en.wikipedia.org/wiki/Edit_distance)内与haystack匹配,则返回1。此功能处于实验模式,可能非常慢。有关更多信息,请参阅[hyperscan文档](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching)。 - -## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} - -与`multiFuzzyMatchAny`相同,但返回在指定编辑距离内与haystack匹配的任一模式的索引位置。 - -!!! note "注意" - `multiFuzzyMatch*`函数不支持UTF-8正则表达式,由于hyperscan限制,这些表达式被按字节解析。 - -!!! note "注意" - 如要关闭所有hyperscan函数的使用,请设置`SET allow_hyperscan = 0;`。 - -## extract(haystack, pattern) {#extracthaystack-pattern} - -使用正则表达式截取字符串。如果‘haystack’与‘pattern’不匹配,则返回空字符串。如果正则表达式中不包含子模式,它将获取与整个正则表达式匹配的子串。否则,它将获取与第一个子模式匹配的子串。 - -## extractAll(haystack, pattern) {#extractallhaystack-pattern} - -使用正则表达式提取字符串的所有片段。如果‘haystack’与‘pattern’正则表达式不匹配,则返回一个空字符串。否则返回所有与正则表达式匹配的字符串数组。通常,行为与‘extract’函数相同(它采用第一个子模式,如果没有子模式,则采用整个表达式)。 - -## like(haystack, pattern), haystack LIKE pattern operator {#likehaystack-pattern-haystack-like-pattern-operator} - -检查字符串是否与简单正则表达式匹配。 -正则表达式可以包含的元符号有`%`和`_`。 - -`%` 表示任何字节数(包括零字符)。 - -`_` 表示任何一个字节。 - -可以使用反斜杠(`\`)来对元符号进行转义。请参阅«match»函数说明中有关转义的说明。 - -对于像`%needle%`这样的正则表达式,该函数与`position`函数一样快。 -对于其他正则表达式,函数与‘match’函数相同。 - -## notLike(haystack, pattern), haystack NOT LIKE pattern operator {#notlikehaystack-pattern-haystack-not-like-pattern-operator} - -与‘like’函数返回相反的结果。 - -## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle} - -基于4-gram计算`haystack`和`needle`之间的距离:计算两个4-gram集合之间的对称差异,并用它们的基数和对其进行归一化。返回0到1之间的浮点数 – 越接近0表示字符串彼此越相似。如果常量的`needle`或`haystack`超过32KB,函数将抛出异常。如果非常量的`haystack`或`needle`字符串超过32KB,则距离始终为1。 - -对于不区分大小写的搜索或/和UTF-8格式,使用函数`ngramDistanceCaseInsensitive,ngramDistanceUTF8,ngramDistanceCaseInsensitiveUTF8`。 - -## ngramSearch(haystack, needle) {#ngramsearchhaystack-needle} - -与`ngramDistance`相同,但计算的是`needle`和`haystack`之间的非对称差异:`needle`的n-gram数量减去二者共有的n-gram数量,再按`needle`的n-gram数量归一化。可用于模糊字符串搜索。 - -对于不区分大小写的搜索或/和UTF-8格式,使用函数`ngramSearchCaseInsensitive,ngramSearchUTF8,ngramSearchCaseInsensitiveUTF8`。 - -!!! 
note "注意" - 对于UTF-8,我们使用3-gram。所有这些都不是完全公平的n-gram距离。我们使用2字节哈希来散列n-gram,然后计算这些哈希表之间的(非)对称差异 - 可能会发生冲突。对于UTF-8不区分大小写的格式,我们不使用公平的`tolower`函数 - 我们将每个Unicode字符字节的第5位(从零开始)和字节的第一位归零 - 这适用于拉丁语,主要用于所有西里尔字母。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/zh/query_language/functions/type_conversion_functions.md b/docs/zh/query_language/functions/type_conversion_functions.md deleted file mode 100644 index a9c97589c9f..00000000000 --- a/docs/zh/query_language/functions/type_conversion_functions.md +++ /dev/null @@ -1,169 +0,0 @@ -# 类型转换函数 {#lei-xing-zhuan-huan-han-shu} - -## toUInt8, toUInt16, toUInt32, toUInt64 {#touint8-touint16-touint32-touint64} - -## toInt8, toInt16, toInt32, toInt64 {#toint8-toint16-toint32-toint64} - -## toFloat32, toFloat64 {#tofloat32-tofloat64} - -## toDate, toDateTime {#todate-todatetime} - -## toUInt8OrZero, toUInt16OrZero, toUInt32OrZero, toUInt64OrZero, toInt8OrZero, toInt16OrZero, toInt32OrZero, toInt64OrZero, toFloat32OrZero, toFloat64OrZero, toDateOrZero, toDateTimeOrZero {#touint8orzero-touint16orzero-touint32orzero-touint64orzero-toint8orzero-toint16orzero-toint32orzero-toint64orzero-tofloat32orzero-tofloat64orzero-todateorzero-todatetimeorzero} - -## toUInt8OrNull, toUInt16OrNull, toUInt32OrNull, toUInt64OrNull, toInt8OrNull, toInt16OrNull, toInt32OrNull, toInt64OrNull, toFloat32OrNull, toFloat64OrNull, toDateOrNull, toDateTimeOrNull {#touint8ornull-touint16ornull-touint32ornull-touint64ornull-toint8ornull-toint16ornull-toint32ornull-toint64ornull-tofloat32ornull-tofloat64ornull-todateornull-todatetimeornull} - -## toString {#tostring} - -这些函数用于在数字、字符串(不包含FixedString)、Date以及DateTime之间互相转换。 -所有的函数都接受一个参数。 - -当将其他类型转换到字符串或从字符串转换到其他类型时,使用与TabSeparated格式相同的规则对字符串的值进行格式化或解析。如果无法解析字符串则抛出异常并取消查询。 - -当将Date转换为数字或反之,Date对应Unix时间戳的天数。 -将DateTime转换为数字或反之,DateTime对应Unix时间戳的秒数。 - -toDate/toDateTime函数的日期和日期时间格式定义如下: - - YYYY-MM-DD - YYYY-MM-DD hh:mm:ss - -例外的是,如果将UInt32、Int32、UInt64或Int64类型的数值转换为Date类型,并且其对应的值大于等于65536,则该数值将被解析成unix时间戳(而不是对应的天数)。这意味着允许写入‘toDate(unix_timestamp)’这种常见情况,否则就会出错,并且需要编写更加繁琐的‘toDate(toDateTime(unix_timestamp))’。 - -Date与DateTime之间的转换以更为自然的方式进行:通过添加空的time或删除time。 - -数值类型之间的转换遵循与C++中不同数值类型之间赋值相同的规则。 - -此外,DateTime参数的toString函数可以在第二个参数中包含时区名称。 例如:`Asia/Yekaterinburg`。在这种情况下,时间根据指定的时区进行格式化。 - -``` sql -SELECT - now() AS now_local, - toString(now(), 'Asia/Yekaterinburg') AS now_yekat -``` - - ┌───────────now_local─┬─now_yekat───────────┐ - │ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │ - └─────────────────────┴─────────────────────┘ - -另请参阅`toUnixTimestamp`函数。 - -## toDecimal32(value, S), toDecimal64(value, S), toDecimal128(value, S) {#todecimal32value-s-todecimal64value-s-todecimal128value-s} - -将`value`转换为精度为`S`的[Decimal](../../data_types/decimal.md)。`value`可以是数字或字符串。`S`参数为指定的小数位数。 - -## toFixedString(s, N) {#tofixedstrings-n} - -将String类型的参数转换为FixedString(N)类型的值(具有固定长度N的字符串)。N必须是一个常量。 -如果字符串的字节数少于N,则向右填充空字节。如果字符串的字节数多于N,则抛出异常。 - -## toStringCutToZero(s) {#tostringcuttozeros} - -接受String或FixedString参数。返回String,其内容在找到的第一个零字节处被截断。 - -示例: - -``` sql -SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut -``` - - ┌─s─────────────┬─s_cut─┐ - │ foo\0\0\0\0\0 │ foo │ - └───────────────┴───────┘ - -``` sql -SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut -``` - - ┌─s──────────┬─s_cut─┐ - │ foo\0bar\0 │ foo │ - └────────────┴───────┘ - -## reinterpretAsUInt8, reinterpretAsUInt16, reinterpretAsUInt32, reinterpretAsUInt64 
{#reinterpretasuint8-reinterpretasuint16-reinterpretasuint32-reinterpretasuint64} - -## reinterpretAsInt8, reinterpretAsInt16, reinterpretAsInt32, reinterpretAsInt64 {#reinterpretasint8-reinterpretasint16-reinterpretasint32-reinterpretasint64} - -## reinterpretAsFloat32, reinterpretAsFloat64 {#reinterpretasfloat32-reinterpretasfloat64} - -## reinterpretAsDate, reinterpretAsDateTime {#reinterpretasdate-reinterpretasdatetime} - -这些函数接受一个字符串,并将放在字符串开头的字节解释为主机顺序中的数字(little endian)。如果字符串不够长,则函数就像使用必要数量的空字节填充字符串一样。如果字符串比需要的长,则忽略额外的字节。Date被解释为Unix时间戳的天数,DateTime被解释为Unix时间戳。 - -## reinterpretAsString {#reinterpretasstring} - -此函数接受数字、Date或DateTime,并返回一个字符串,其中包含表示主机顺序(小端)的相应值的字节。从末尾删除空字节。例如,UInt32类型值255是一个字节长的字符串。 - -## reinterpretAsFixedString {#reinterpretasfixedstring} - -此函数接受数字、Date或DateTime,并返回包含表示主机顺序(小端)的相应值的字节的FixedString。从末尾删除空字节。例如,UInt32类型值255是一个长度为一个字节的FixedString。 - -## CAST(x, t) {#type_conversion_function-cast} - -将‘x’转换为‘t’数据类型。还支持语法CAST(x AS t) - -示例: - -``` sql -SELECT - '2016-06-15 23:00:00' AS timestamp, - CAST(timestamp AS DateTime) AS datetime, - CAST(timestamp AS Date) AS date, - CAST(timestamp, 'String') AS string, - CAST(timestamp, 'FixedString(22)') AS fixed_string -``` - - ┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐ - │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │ - └─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘ - -将参数转换为FixedString(N),仅适用于String或FixedString(N)类型的参数。 - -支持将数据转换为[Nullable](../../data_types/nullable.md)。例如: - - SELECT toTypeName(x) FROM t_null - - ┌─toTypeName(x)─┐ - │ Int8 │ - │ Int8 │ - └───────────────┘ - - SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null - - ┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐ - │ Nullable(UInt16) │ - │ Nullable(UInt16) │ - └─────────────────────────────────────────┘ - -## toIntervalYear, toIntervalQuarter, toIntervalMonth, toIntervalWeek, toIntervalDay, toIntervalHour, toIntervalMinute, toIntervalSecond {#function-tointerval} - -将数字类型参数转换为Interval类型(时间区间)。 -Interval类型实际上是非常有用的,您可以使用此类型的数据直接与Date或DateTime执行算术运算。同时,ClickHouse为Interval类型数据的声明提供了更方便的语法。例如: - -``` sql -WITH - toDate('2019-01-01') AS date, - INTERVAL 1 WEEK AS interval_week, - toIntervalWeek(1) AS interval_to_week -SELECT - date + interval_week, - date + interval_to_week -``` - - ┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐ - │ 2019-01-08 │ 2019-01-08 │ - └───────────────────────────┴──────────────────────────────┘ - -## parseDateTimeBestEffort {#type_conversion_functions-parsedatetimebesteffort} - -将数字类型参数解析为Date或DateTime类型。 -与toDate和toDateTime不同,parseDateTimeBestEffort可以进行更复杂的日期格式。 -有关详细信息,请参阅链接:[复杂日期格式](https://xkcd.com/1179/)。 - -## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} - -与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回null。 - -## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} - -与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回零Date或零DateTime。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/zh/query_language/functions/url_functions.md b/docs/zh/query_language/functions/url_functions.md deleted file mode 100644 index df8b1cb69c4..00000000000 --- a/docs/zh/query_language/functions/url_functions.md +++ /dev/null @@ -1,118 +0,0 @@ -# URL函数 {#urlhan-shu} 
- -所有这些功能都不遵循RFC。它们被最大程度简化以提高性能。 - -## URL截取函数 {#urljie-qu-han-shu} - -如果URL中没有要截取的内容则返回空字符串。 - -### protocol {#protocol} - -返回URL的协议。例如: http、ftp、mailto、magnet… - -### domain {#domain} - -获取域名。 - -### domainWithoutWWW {#domainwithoutwww} - -返回域名并删除第一个‘www.’。 - -### topLevelDomain {#topleveldomain} - -返回顶级域名。例如:.ru。 - -### firstSignificantSubdomain {#firstsignificantsubdomain} - -返回«第一个有效子域名»。这并不是一个标准概念,仅用于Yandex.Metrica。如果顶级域名为‘com’,‘net’,‘org’或者‘co’则第一个有效子域名为二级域名。否则则返回三级域名。例如,firstSignificantSubdomain('https://news.yandex.ru/') = 'yandex',firstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex'。一些实现细节在未来可能会进行改变。 - -### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} - -返回包含顶级域名与第一个有效子域名之间的内容(请参阅上面的内容)。 - -例如, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`. - -### path {#path} - -返回URL路径。例如:`/top/news.html`,不包含请求参数。 - -### pathFull {#pathfull} - -与上面相同,但包括请求参数和fragment。例如:/top/news.html?page=2#comments - -### queryString {#querystring} - -返回请求参数。例如:page=1&lr=213。请求参数不包含问号、#以及#之后的所有内容。 - -### fragment {#fragment} - -返回URL的fragment标识。fragment不包含#。 - -### queryStringAndFragment {#querystringandfragment} - -返回请求参数和fragment标识。例如:page=1#29390。 - -### extractURLParameter(URL, name) {#extracturlparameterurl-name} - -返回URL请求参数中名称为‘name’的参数。如果不存在则返回一个空字符串。如果存在多个匹配项则返回第一个相匹配的。此函数假设参数名称与参数值在url中的编码方式相同。 - -### extractURLParameters(URL) {#extracturlparametersurl} - -返回一个数组,其中以name=value的字符串形式返回url的所有请求参数。不以任何编码解析任何内容。 - -### extractURLParameterNames(URL) {#extracturlparameternamesurl} - -返回一个数组,其中包含url的所有请求参数的名称。不以任何编码解析任何内容。 - -### URLHierarchy(URL) {#urlhierarchyurl} - -返回一个数组,其中包含以/分割的URL的所有内容。?将被包含在URL路径以及请求参数中。连续的分割符号被记为一个。 - -### URLPathHierarchy(URL) {#urlpathhierarchyurl} - -与上面相同,但结果不包含协议和host部分。 /element(root)不包括在内。该函数用于在Yandex.Metrica中实现导出URL的树形结构。 - - URLPathHierarchy('https://example.com/browse/CONV-6788') = - [ - '/browse/', - '/browse/CONV-6788' - ] - -### decodeURLComponent(URL) {#decodeurlcomponenturl} - -返回已经解码的URL。 -例如: - -``` sql -SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS DecodedURL; -``` - - ┌─DecodedURL─────────────────────────────┐ - │ http://127.0.0.1:8123/?query=SELECT 1; │ - └────────────────────────────────────────┘ - -## 删除URL中的部分内容 {#shan-chu-urlzhong-de-bu-fen-nei-rong} - -如果URL中不包含指定的部分,则URL不变。 - -### cutWWW {#cutwww} - -删除开始的第一个’www.’。 - -### cutQueryString {#cutquerystring} - -删除请求参数。问号也将被删除。 - -### cutFragment {#cutfragment} - -删除fragment标识。#同样也会被删除。 - -### cutQueryStringAndFragment {#cutquerystringandfragment} - -删除请求参数以及fragment标识。问号以及#也会被删除。 - -### cutURLParameter(URL, name) {#cuturlparameterurl-name} - -删除URL中名称为‘name’的参数。该函数假设参数名称与参数值采用与URL中相同的方式编码。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/zh/query_language/functions/uuid_functions.md b/docs/zh/query_language/functions/uuid_functions.md deleted file mode 100644 index 2cb2ff30872..00000000000 --- a/docs/zh/query_language/functions/uuid_functions.md +++ /dev/null @@ -1,107 +0,0 @@ -# UUID函数 {#uuidhan-shu} - -下面列出了所有UUID的相关函数。 - -## generateUUIDv4 {#uuid-function-generate} - -生成一个UUID([版本4](https://tools.ietf.org/html/rfc4122#section-4.4))。 - -``` sql -generateUUIDv4() -``` - -**返回值** - -UUID类型的值。 - -**使用示例** - -此示例演示如何在表中创建UUID类型的列,并对其写入数据。 - -``` sql -:) CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog - -:) INSERT INTO t_uuid SELECT generateUUIDv4() - -:) SELECT * FROM t_uuid - -┌────────────────────────────────────x─┐ -│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 
│ -└──────────────────────────────────────┘ -``` - -## toUUID (x) {#touuid-x} - -将String类型的值转换为UUID类型的值。 - -``` sql -toUUID(String) -``` - -**返回值** - -UUID类型的值 - -**使用示例** - -``` sql -:) SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid - -┌─────────────────────────────────uuid─┐ -│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │ -└──────────────────────────────────────┘ -``` - -## UUIDStringToNum {#uuidstringtonum} - -接受一个String类型的值,其中包含36个字符且格式为`xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`,将其转换为UUID的数值并以[FixedString(16)](../../data_types/fixedstring.md)将其返回。 - -``` sql -UUIDStringToNum(String) -``` - -**返回值** - -FixedString(16) - -**使用示例** - -``` sql -:) SELECT - '612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid, - UUIDStringToNum(uuid) AS bytes - -┌─uuid─────────────────────────────────┬─bytes────────────┐ -│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │ -└──────────────────────────────────────┴──────────────────┘ -``` - -## UUIDNumToString {#uuidnumtostring} - -接受一个[FixedString(16)](../../data_types/fixedstring.md)类型的值,返回其对应的String表现形式。 - -``` sql -UUIDNumToString(FixedString(16)) -``` - -**返回值** - -String. - -**使用示例** - -``` sql -SELECT - 'a/<@];!~p{jTj={)' AS bytes, - UUIDNumToString(toFixedString(bytes, 16)) AS uuid - -┌─bytes────────────┬─uuid─────────────────────────────────┐ -│ a/<@];!~p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ -└──────────────────┴──────────────────────────────────────┘ -``` - -## 另请参阅 {#ling-qing-can-yue} - -- [dictGetUUID](ext_dict_functions.md) - -[来源文章](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/zh/query_language/functions/ym_dict_functions.md b/docs/zh/query_language/functions/ym_dict_functions.md deleted file mode 100644 index 6d03ae228e8..00000000000 --- a/docs/zh/query_language/functions/ym_dict_functions.md +++ /dev/null @@ -1,120 +0,0 @@ -# Functions for working with Yandex.Metrica dictionaries {#functions-for-working-with-yandex-metrica-dictionaries} - -In order for the functions below to work, the server config must specify the paths and addresses for getting all the Yandex.Metrica dictionaries. The dictionaries are loaded at the first call of any of these functions. If the reference lists can’t be loaded, an exception is thrown. - -For information about creating reference lists, see the section «Dictionaries». - -## Multiple geobases {#multiple-geobases} - -ClickHouse supports working with multiple alternative geobases (regional hierarchies) simultaneously, in order to support various perspectives on which countries certain regions belong to. - -The ‘clickhouse-server’ config specifies the file with the regional hierarchy::`/opt/geo/regions_hierarchy.txt` - -Besides this file, it also searches for files nearby that have the \_ symbol and any suffix appended to the name (before the file extension). -For example, it will also find the file `/opt/geo/regions_hierarchy_ua.txt`, if present. - -`ua` is called the dictionary key. For a dictionary without a suffix, the key is an empty string. - -All the dictionaries are re-loaded in runtime (once every certain number of seconds, as defined in the builtin\_dictionaries\_reload\_interval config parameter, or once an hour by default). However, the list of available dictionaries is defined one time, when the server starts. - -All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase. 
-Example: - - regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt - regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt - regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt - -### regionToCity(id\[, geobase\]) {#regiontocityid-geobase} - -Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0. - -### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase} - -Converts a region to an area (type 5 in the geobase). In every other way, this function is the same as ‘regionToCity’. - -``` sql -SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua')) -FROM system.numbers -LIMIT 15 -``` - - ┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐ - │ │ - │ Moscow and Moscow region │ - │ St. Petersburg and Leningrad region │ - │ Belgorod region │ - │ Ivanovsk region │ - │ Kaluga region │ - │ Kostroma region │ - │ Kursk region │ - │ Lipetsk region │ - │ Orlov region │ - │ Ryazan region │ - │ Smolensk region │ - │ Tambov region │ - │ Tver region │ - │ Tula region │ - └──────────────────────────────────────────────────────┘ - -### regionToDistrict(id\[, geobase\]) {#regiontodistrictid-geobase} - -Converts a region to a federal district (type 4 in the geobase). In every other way, this function is the same as ‘regionToCity’. - -``` sql -SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua')) -FROM system.numbers -LIMIT 15 -``` - - ┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐ - │ │ - │ Central federal district │ - │ Northwest federal district │ - │ South federal district │ - │ North Caucases federal district │ - │ Privolga federal district │ - │ Ural federal district │ - │ Siberian federal district │ - │ Far East federal district │ - │ Scotland │ - │ Faroe Islands │ - │ Flemish region │ - │ Brussels capital region │ - │ Wallonia │ - │ Federation of Bosnia and Herzegovina │ - └──────────────────────────────────────────────────────────┘ - -### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase} - -Converts a region to a country. In every other way, this function is the same as ‘regionToCity’. -Example: `regionToCountry(toUInt32(213)) = 225` converts Moscow (213) to Russia (225). - -### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase} - -Converts a region to a continent. In every other way, this function is the same as ‘regionToCity’. -Example: `regionToContinent(toUInt32(213)) = 10001` converts Moscow (213) to Eurasia (10001). - -### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase} - -Gets the population for a region. -The population can be recorded in files with the geobase. See the section «External dictionaries». -If the population is not recorded for the region, it returns 0. -In the Yandex geobase, the population might be recorded for child regions, but not for parent regions. - -### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase} - -Checks whether a ‘lhs’ region belongs to a ‘rhs’ region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it doesn’t belong. -The relationship is reflexive – any region also belongs to itself. - -### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase} - -Accepts a UInt32 number – the region ID from the Yandex geobase. 
Returns an array of region IDs consisting of the passed region and all parents along the chain. -Example: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. - -### regionToName(id\[, lang\]) {#regiontonameid-lang} - -Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn’t exist, an empty string is returned. - -`ua` and `uk` both mean Ukrainian. - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) diff --git a/docs/zh/query_language/index.md b/docs/zh/query_language/index.md deleted file mode 120000 index 44dfff9bb18..00000000000 --- a/docs/zh/query_language/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/index.md \ No newline at end of file diff --git a/docs/zh/query_language/insert_into.md b/docs/zh/query_language/insert_into.md deleted file mode 100644 index b271f62bb03..00000000000 --- a/docs/zh/query_language/insert_into.md +++ /dev/null @@ -1,67 +0,0 @@ -## INSERT {#insert} - -INSERT查询主要用于向系统中添加数据。 - -查询的基本格式: - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... -``` - -您可以在查询中指定插入的列的列表,如:`[(c1, c2, c3)]`。对于存在于表结构中但不存在于插入列表中的列,它们将会按照如下方式填充数据: - -- 如果存在`DEFAULT`表达式,根据`DEFAULT`表达式计算被填充的值。 -- 如果没有定义`DEFAULT`表达式,则填充零或空字符串。 - -如果 [strict_insert_defaults=1](../operations/settings/settings.md),你必须在查询中列出所有没有定义`DEFAULT`表达式的列。 - -数据可以以ClickHouse支持的任何 [输入输出格式](../interfaces/formats.md#formats) 传递给INSERT。格式的名称必须显式地指定在查询中: - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set -``` - -例如,下面的查询所使用的输入格式就与上面INSERT ... VALUES中使用的输入格式相同: - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ... -``` - -ClickHouse会删除数据前所有的空白字符与一个换行符(如果有的话)。所以在进行查询时,我们建议您将数据放入到输入输出格式名称后的新的一行中去(如果数据是以空白字符开始的,这将非常重要)。 - -示例: - -``` sql -INSERT INTO t FORMAT TabSeparated -11 Hello, world! -22 Qwerty -``` - -在使用命令行客户端或HTTP客户端时,你可以将具体的查询语句与数据分开发送。更多具体信息,请参考«[客户端](../interfaces/index.md#interfaces)»部分。 - -### 使用`SELECT`的结果写入 {#insert_query_insert-select} - -``` sql -INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... -``` - -写入列与SELECT列之间按位置进行对应,尽管它们在SELECT表达式与INSERT中的名称可能是不同的。如果需要,会对它们执行对应的类型转换。 - -除了VALUES格式之外,其他格式中的数据都不允许出现诸如`now()`,`1 + 2`等表达式。VALUES格式允许您有限度的使用这些表达式,但是不建议您这么做,因为执行这些表达式总是低效的。 - -系统不支持其他用于修改数据的查询:`UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`。 -但是,您可以使用 `ALTER TABLE ... 
DROP PARTITION`查询来删除一些旧的数据。 - -### 性能的注意事项 {#xing-neng-de-zhu-yi-shi-xiang} - -在进行`INSERT`时将会对写入的数据进行一些处理,按照主键排序,按照月份对数据进行分区等。所以如果在您的写入数据中包含多个月份的混合数据时,将会显著的降低`INSERT`的性能。为了避免这种情况: - -- 数据总是以尽量大的batch进行写入,如每次写入100,000行。 -- 数据在写入ClickHouse前预先的对数据进行分组。 - -在以下的情况下,性能不会下降: - -- 数据总是被实时的写入。 -- 写入的数据已经按照时间排序。 - -[来源文章](https://clickhouse.tech/docs/en/query_language/insert_into/) diff --git a/docs/zh/query_language/misc.md b/docs/zh/query_language/misc.md deleted file mode 120000 index 3bd814f3568..00000000000 --- a/docs/zh/query_language/misc.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/misc.md \ No newline at end of file diff --git a/docs/zh/query_language/operators.md b/docs/zh/query_language/operators.md deleted file mode 100644 index df136a5a1fc..00000000000 --- a/docs/zh/query_language/operators.md +++ /dev/null @@ -1,263 +0,0 @@ -# 操作符 {#cao-zuo-fu} - -所有的操作符(运算符)都会在查询时依据他们的优先级及其结合顺序在被解析时转换为对应的函数。下面按优先级从高到低列出各组运算符及其对应的函数: - -## 下标运算符 {#xia-biao-yun-suan-fu} - -`a[N]` – 数组中的第N个元素; 对应函数 `arrayElement(a, N)` - -`a.N` – 元组中第N个元素; 对应函数 `tupleElement(a, N)` - -## 负号 {#fu-hao} - -`-a` – 对应函数 `negate(a)` - -## 乘号、除号和取余 {#cheng-hao-chu-hao-he-qu-yu} - -`a * b` – 对应函数 `multiply(a, b)` - -`a / b` – 对应函数 `divide(a, b)` - -`a % b` – 对应函数 `modulo(a, b)` - -## 加号和减号 {#jia-hao-he-jian-hao} - -`a + b` – 对应函数 `plus(a, b)` - -`a - b` – 对应函数 `minus(a, b)` - -## 关系运算符 {#guan-xi-yun-suan-fu} - -`a = b` – 对应函数 `equals(a, b)` - -`a == b` – 对应函数 `equals(a, b)` - -`a != b` – 对应函数 `notEquals(a, b)` - -`a <> b` – 对应函数 `notEquals(a, b)` - -`a <= b` – 对应函数 `lessOrEquals(a, b)` - -`a >= b` – 对应函数 `greaterOrEquals(a, b)` - -`a < b` – 对应函数 `less(a, b)` - -`a > b` – 对应函数 `greater(a, b)` - -`a LIKE s` – 对应函数 `like(a, b)` - -`a NOT LIKE s` – 对应函数 `notLike(a, b)` - -`a BETWEEN b AND c` – 等价于 `a >= b AND a <= c` - -## 集合关系运算符 {#ji-he-guan-xi-yun-suan-fu} - -*详见此节 [IN 相关操作符](select.md#select-in-operators) 。* - -`a IN ...` – 对应函数 `in(a, b)` - -`a NOT IN ...` – 对应函数 `notIn(a, b)` - -`a GLOBAL IN ...` – 对应函数 `globalIn(a, b)` - -`a GLOBAL NOT IN ...` – 对应函数 `globalNotIn(a, b)` - -## 逻辑非 {#luo-ji-fei} - -`NOT a` – 对应函数 `not(a)` - -## 逻辑与 {#luo-ji-yu} - -`a AND b` – 对应函数`and(a, b)` - -## 逻辑或 {#luo-ji-huo} - -`a OR b` – 对应函数 `or(a, b)` - -## 条件运算符 {#tiao-jian-yun-suan-fu} - -`a ? b : c` – 对应函数 `if(a, b, c)` - -注意: - -条件运算符会先计算表达式b和表达式c的值,再根据表达式a的真假,返回相应的值。如果表达式b和表达式c是 [arrayJoin()](functions/array_join.md#functions_arrayjoin) 函数,则不管表达式a是真是假,每行都会被复制展开。 - -## Operators for Working with Dates and Times {#operators-datetime} - -### EXTRACT {#operator-extract} - -``` sql -EXTRACT(part FROM date); -``` - -Extracts a part from a given date. For example, you can retrieve a month from a given date, or a second from a time. - -The `part` parameter specifies which part of the date to retrieve. The following values are available: - -- `DAY` — The day of the month. Possible values: 1–31. -- `MONTH` — The number of a month. Possible values: 1–12. -- `YEAR` — The year. -- `SECOND` — The second. Possible values: 0–59. -- `MINUTE` — The minute. Possible values: 0–59. -- `HOUR` — The hour. Possible values: 0–23. - -The `part` parameter is case-insensitive. - -The `date` parameter specifies the date or the time to process. Either [Date](../data_types/date.md) or [DateTime](../data_types/datetime.md) type is supported. 
- -Examples: - -``` sql -SELECT EXTRACT(DAY FROM toDate('2017-06-15')); -SELECT EXTRACT(MONTH FROM toDate('2017-06-15')); -SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); -``` - -In the following example we create a table and insert into it a value with the `DateTime` type. - -``` sql -CREATE TABLE test.Orders -( - OrderId UInt64, - OrderName String, - OrderDate DateTime -) -ENGINE = Log; -``` - -``` sql -INSERT INTO test.Orders VALUES (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44')); -``` - -``` sql -SELECT - toYear(OrderDate) AS OrderYear, - toMonth(OrderDate) AS OrderMonth, - toDayOfMonth(OrderDate) AS OrderDay, - toHour(OrderDate) AS OrderHour, - toMinute(OrderDate) AS OrderMinute, - toSecond(OrderDate) AS OrderSecond -FROM test.Orders; -``` - -``` text -┌─OrderYear─┬─OrderMonth─┬─OrderDay─┬─OrderHour─┬─OrderMinute─┬─OrderSecond─┐ -│ 2008 │ 10 │ 11 │ 13 │ 23 │ 44 │ -└───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ -``` - -You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00619_extract.sql). - -### INTERVAL {#operator-interval} - -Creates an [Interval](../data_types/special_data_types/interval.md)-type value that should be used in arithmetical operations with [Date](../data_types/date.md) and [DateTime](../data_types/datetime.md)-type values. - -Example: - -``` sql -SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR -``` - -``` text -┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ -│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ -└─────────────────────┴────────────────────────────────────────────────────────┘ -``` - -**See Also** - -- [Interval](../data_types/special_data_types/interval.md) data type -- [toInterval](functions/type_conversion_functions.md#function-tointerval) type convertion functions - -## CASE条件表达式 {#operator_case} - -``` sql -CASE [x] - WHEN a THEN b - [WHEN ... THEN ...] - [ELSE c] -END -``` - -如果指定了 `x` ,该表达式会转换为 `transform(x, [a, ...], [b, ...], c)` 函数。否则转换为 `multiIf(a, b, ..., c)` - -如果该表达式中没有 `ELSE c` 子句,则默认值就是 `NULL` - -但 `transform` 函数不支持 `NULL` - -## 连接运算符 {#lian-jie-yun-suan-fu} - -`s1 || s2` – 对应函数 `concat(s1, s2)` - -## 创建 Lambda 函数 {#chuang-jian-lambda-han-shu} - -`x -> expr` – 对应函数 `lambda(x, expr)` - -接下来的这些操作符因为其本身是括号没有优先级: - -## 创建数组 {#chuang-jian-shu-zu} - -`[x1, ...]` – 对应函数 `array(x1, ...)` - -## 创建元组 {#chuang-jian-yuan-zu} - -`(x1, x2, ...)` – 对应函数 `tuple(x2, x2, ...)` - -## 结合方式 {#jie-he-fang-shi} - -所有的同级操作符从左到右结合。例如, `1 + 2 + 3` 会转换成 `plus(plus(1, 2), 3)`。 -所以,有时他们会跟我们预期的不太一样。例如, `SELECT 4 > 2 > 3` 的结果是0。 - -为了高效, `and` 和 `or` 函数支持任意多参数,一连串的 `AND` 和 `OR` 运算符会转换成其对应的单个函数。 - -## 判断是否为 `NULL` {#pan-duan-shi-fou-wei-null} - -ClickHouse 支持 `IS NULL` 和 `IS NOT NULL` 。 - -### IS NULL {#operator-is-null} - -- 对于 [Nullable](../data_types/nullable.md) 类型的值, `IS NULL` 会返回: - - `1` 值为 `NULL` - - `0` 否则 -- 对于其他类型的值, `IS NULL` 总会返回 `0` - - - -``` bash -:) SELECT x+100 FROM t_null WHERE y IS NULL - -SELECT x + 100 -FROM t_null -WHERE isNull(y) - -┌─plus(x, 100)─┐ -│ 101 │ -└──────────────┘ - -1 rows in set. Elapsed: 0.002 sec. -``` - -### IS NOT NULL {#is-not-null} - -- 对于 [Nullable](../data_types/nullable.md) 类型的值, `IS NOT NULL` 会返回: - - `0` 值为 `NULL` - - `1` 否则 -- 对于其他类型的值,`IS NOT NULL` 总会返回 `1` - - - -``` bash -:) SELECT * FROM t_null WHERE y IS NOT NULL - -SELECT * -FROM t_null -WHERE isNotNull(y) - -┌─x─┬─y─┐ -│ 2 │ 3 │ -└───┴───┘ - -1 rows in set. Elapsed: 0.002 sec. 
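-- 补充(草稿,假设所用版本提供 isNotNull 函数):isNotNull(y) 与 y IS NOT NULL 等价,
-- 也可以直接作为选择列使用,例如:SELECT x, isNotNull(y) FROM t_null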
-``` - -[来源文章](https://clickhouse.tech/docs/en/query_language/operators/) diff --git a/docs/zh/query_language/select.md b/docs/zh/query_language/select.md deleted file mode 100644 index 8400e963f3c..00000000000 --- a/docs/zh/query_language/select.md +++ /dev/null @@ -1,936 +0,0 @@ -# SELECT 查询语法 {#select-cha-xun-yu-fa} - -`SELECT` 语句用于执行数据的检索。 - -``` sql -SELECT [DISTINCT] expr_list - [FROM [db.]table | (subquery) | table_function] [FINAL] - [SAMPLE sample_coeff] - [ARRAY JOIN ...] - [GLOBAL] ANY|ALL INNER|LEFT JOIN (subquery)|table USING columns_list - [PREWHERE expr] - [WHERE expr] - [GROUP BY expr_list] [WITH TOTALS] - [HAVING expr] - [ORDER BY expr_list] - [LIMIT n BY columns] - [LIMIT [n, ]m] - [UNION ALL ...] - [INTO OUTFILE filename] - [FORMAT format] -``` - -所有的子句都是可选的,除了SELECT之后的表达式列表(expr\_list)。 -下面将按照查询运行的顺序逐一对各个子句进行说明。 - -如果查询中不包含`DISTINCT`,`GROUP BY`,`ORDER BY`子句以及`IN`和`JOIN`子查询,那么它将仅使用O(1)数量的内存来完全流式的处理查询 -否则,这个查询将消耗大量的内存,除非你指定了这些系统配置:`max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`。它们规定了可以使用外部排序(将临时表存储到磁盘中)以及外部聚合,`目前系统不存在关于Join的配置`,更多关于它们的信息,可以参见«配置»部分。 - -### FROM 子句 {#select-from} - -如果查询中不包含FROM子句,那么将读取`system.one`。 -`system.one`中仅包含一行数据(此表实现了与其他数据库管理系统中的DUAL相同的功能)。 - -FROM子句规定了将从哪个表、或子查询、或表函数中读取数据;同时ARRAY JOIN子句和JOIN子句也可以出现在这里(见后文)。 - -可以使用包含在括号里的子查询来替代表。 -在这种情况下,子查询的处理将会构建在外部的查询内部。 -不同于SQL标准,子查询后无需指定别名。为了兼容,你可以在子查询后添加‘AS 别名’,但是指定的名字不能被使用在任何地方。 - -也可以使用表函数来代替表,有关信息,参见«表函数»。 - -执行查询时,在查询中列出的所有列都将从对应的表中提取数据;如果你使用的是子查询的方式,则任何在外部查询中没有使用的列,子查询将从查询中忽略它们; -如果你的查询没有列出任何的列(例如,SELECT count() FROM t),则将额外的从表中提取一些列(最好的情况下是最小的列),以便计算行数。 - -最后的FINAL修饰符仅能够被使用在SELECT from CollapsingMergeTree场景中。当你为FROM指定了FINAL修饰符时,你的查询结果将会在查询过程中被聚合。需要注意的是,在这种情况下,查询将在单个流中读取所有相关的主键列,同时对需要的数据进行合并。这意味着,当使用FINAL修饰符时,查询将会处理的更慢。在大多数情况下,你应该避免使用FINAL修饰符。更多信息,请参阅«CollapsingMergeTree引擎»部分。 - -### SAMPLE 子句 {#select-sample-clause} - -通过SAMPLE子句用户可以进行近似查询处理,近似查询处理仅能工作在MergeTree\*类型的表中,并且在创建表时需要您指定采样表达式(参见«MergeTree 引擎»部分)。 - -`SAMPLE`子句可以使用`SAMPLE k`来表示,其中k可以是0到1的小数值,或者是一个足够大的正整数值。 - -当k为0到1的小数时,查询将使用’k’作为百分比选取数据。例如,`SAMPLE 0.1`查询只会检索数据总量的10%。 -当k为一个足够大的正整数时,查询将使用’k’作为最大样本数。例如, `SAMPLE 10000000`查询只会检索最多10,000,000行数据。 - -Example: - -``` sql -SELECT - Title, - count() * 10 AS PageViews -FROM hits_distributed -SAMPLE 0.1 -WHERE - CounterID = 34 - AND toDate(EventDate) >= toDate('2013-01-29') - AND toDate(EventDate) <= toDate('2013-02-04') - AND NOT DontCountHits - AND NOT Refresh - AND Title != '' -GROUP BY Title -ORDER BY PageViews DESC LIMIT 1000 -``` - -在这个例子中,查询将检索数据总量的0.1 (10%)的数据。值得注意的是,查询不会自动校正聚合函数最终的结果,所以为了得到更加精确的结果,需要将`count()`的结果手动乘以10。 - -当使用像`SAMPLE 10000000`这样的方式进行近似查询时,由于没有了任何关于将会处理了哪些数据或聚合函数应该被乘以几的信息,所以这种方式不适合在这种场景下使用。 - -使用相同的采样率得到的结果总是一致的:如果我们能够看到所有可能存在在表中的数据,那么相同的采样率总是能够得到相同的结果(在建表时使用相同的采样表达式),换句话说,系统在不同的时间,不同的服务器,不同表上总以相同的方式对数据进行采样。 - -例如,我们可以使用采样的方式获取到与不进行采样相同的用户ID的列表。这将表明,你可以在IN子查询中使用采样,或者使用采样的结果与其他查询进行关联。 - -### ARRAY JOIN 子句 {#select-array-join-clause} - -ARRAY JOIN子句可以帮助查询进行与数组和nested数据类型的连接。它有点类似arrayJoin函数,但它的功能更广泛。 - -`ARRAY JOIN` 本质上等同于`INNERT JOIN`数组。 例如: - - :) CREATE TABLE arrays_test (s String, arr Array(UInt8)) ENGINE = Memory - - CREATE TABLE arrays_test - ( - s String, - arr Array(UInt8) - ) ENGINE = Memory - - Ok. - - 0 rows in set. Elapsed: 0.001 sec. 
- - :) INSERT INTO arrays_test VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []) - - INSERT INTO arrays_test VALUES - - Ok. - - 3 rows in set. Elapsed: 0.001 sec. - - :) SELECT * FROM arrays_test - - SELECT * - FROM arrays_test - - ┌─s───────┬─arr─────┐ - │ Hello │ [1,2] │ - │ World │ [3,4,5] │ - │ Goodbye │ [] │ - └─────────┴─────────┘ - - 3 rows in set. Elapsed: 0.001 sec. - - :) SELECT s, arr FROM arrays_test ARRAY JOIN arr - - SELECT s, arr - FROM arrays_test - ARRAY JOIN arr - - ┌─s─────┬─arr─┐ - │ Hello │ 1 │ - │ Hello │ 2 │ - │ World │ 3 │ - │ World │ 4 │ - │ World │ 5 │ - └───────┴─────┘ - - 5 rows in set. Elapsed: 0.001 sec. - -你还可以为ARRAY JOIN子句指定一个别名,这时你可以通过这个别名来访问数组中的数据,但是数据本身仍然可以通过原来的名称进行访问。例如: - - :) SELECT s, arr, a FROM arrays_test ARRAY JOIN arr AS a - - SELECT s, arr, a - FROM arrays_test - ARRAY JOIN arr AS a - - ┌─s─────┬─arr─────┬─a─┐ - │ Hello │ [1,2] │ 1 │ - │ Hello │ [1,2] │ 2 │ - │ World │ [3,4,5] │ 3 │ - │ World │ [3,4,5] │ 4 │ - │ World │ [3,4,5] │ 5 │ - └───────┴─────────┴───┘ - - 5 rows in set. Elapsed: 0.001 sec. - -当多个具有相同大小的数组使用逗号分割出现在ARRAY JOIN子句中时,ARRAY JOIN会将它们同时执行(直接合并,而不是它们的笛卡尔积)。例如: - - :) SELECT s, arr, a, num, mapped FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped - - SELECT s, arr, a, num, mapped - FROM arrays_test - ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(lambda(tuple(x), plus(x, 1)), arr) AS mapped - - ┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ - │ Hello │ [1,2] │ 1 │ 1 │ 2 │ - │ Hello │ [1,2] │ 2 │ 2 │ 3 │ - │ World │ [3,4,5] │ 3 │ 1 │ 4 │ - │ World │ [3,4,5] │ 4 │ 2 │ 5 │ - │ World │ [3,4,5] │ 5 │ 3 │ 6 │ - └───────┴─────────┴───┴─────┴────────┘ - - 5 rows in set. Elapsed: 0.002 sec. - - :) SELECT s, arr, a, num, arrayEnumerate(arr) FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num - - SELECT s, arr, a, num, arrayEnumerate(arr) - FROM arrays_test - ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num - - ┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ - │ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ - │ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ - │ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ - │ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ - │ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ - └───────┴─────────┴───┴─────┴─────────────────────┘ - - 5 rows in set. Elapsed: 0.002 sec. - -另外ARRAY JOIN也可以工作在nested数据结构上。例如: - - :) CREATE TABLE nested_test (s String, nest Nested(x UInt8, y UInt32)) ENGINE = Memory - - CREATE TABLE nested_test - ( - s String, - nest Nested( - x UInt8, - y UInt32) - ) ENGINE = Memory - - Ok. - - 0 rows in set. Elapsed: 0.006 sec. - - :) INSERT INTO nested_test VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []) - - INSERT INTO nested_test VALUES - - Ok. - - 3 rows in set. Elapsed: 0.001 sec. - - :) SELECT * FROM nested_test - - SELECT * - FROM nested_test - - ┌─s───────┬─nest.x──┬─nest.y─────┐ - │ Hello │ [1,2] │ [10,20] │ - │ World │ [3,4,5] │ [30,40,50] │ - │ Goodbye │ [] │ [] │ - └─────────┴─────────┴────────────┘ - - 3 rows in set. Elapsed: 0.001 sec. - - :) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest - - SELECT s, `nest.x`, `nest.y` - FROM nested_test - ARRAY JOIN nest - - ┌─s─────┬─nest.x─┬─nest.y─┐ - │ Hello │ 1 │ 10 │ - │ Hello │ 2 │ 20 │ - │ World │ 3 │ 30 │ - │ World │ 4 │ 40 │ - │ World │ 5 │ 50 │ - └───────┴────────┴────────┘ - - 5 rows in set. Elapsed: 0.001 sec. 
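顺带补充一个示例草稿:默认的 ARRAY JOIN 会丢弃空数组所在的行(如上面 arrays_test 中的 'Goodbye')。假设所用版本支持 LEFT ARRAY JOIN,则可以保留这些行,数组列将填充对应类型的默认值:

``` sql
SELECT s, arr
FROM arrays_test
LEFT ARRAY JOIN arr
-- 'Goodbye' 行将被保留,arr 填充为 0(UInt8 的默认值)
```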
- -当你在ARRAY JOIN指定nested数据类型的名称时,其作用与包含所有数组元素的ARRAY JOIN相同,例如: - - :) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x, nest.y - - SELECT s, `nest.x`, `nest.y` - FROM nested_test - ARRAY JOIN `nest.x`, `nest.y` - - ┌─s─────┬─nest.x─┬─nest.y─┐ - │ Hello │ 1 │ 10 │ - │ Hello │ 2 │ 20 │ - │ World │ 3 │ 30 │ - │ World │ 4 │ 40 │ - │ World │ 5 │ 50 │ - └───────┴────────┴────────┘ - - 5 rows in set. Elapsed: 0.001 sec. - -这种方式也是可以运行的: - - :) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x - - SELECT s, `nest.x`, `nest.y` - FROM nested_test - ARRAY JOIN `nest.x` - - ┌─s─────┬─nest.x─┬─nest.y─────┐ - │ Hello │ 1 │ [10,20] │ - │ Hello │ 2 │ [10,20] │ - │ World │ 3 │ [30,40,50] │ - │ World │ 4 │ [30,40,50] │ - │ World │ 5 │ [30,40,50] │ - └───────┴────────┴────────────┘ - - 5 rows in set. Elapsed: 0.001 sec. - -为了方便使用原来的nested类型的数组,你可以为nested类型定义一个别名。例如: - - :) SELECT s, n.x, n.y, nest.x, nest.y FROM nested_test ARRAY JOIN nest AS n - - SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` - FROM nested_test - ARRAY JOIN nest AS n - - ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ - │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ - │ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ - │ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ - │ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ - │ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ - └───────┴─────┴─────┴─────────┴────────────┘ - - 5 rows in set. Elapsed: 0.001 sec. - -使用arrayEnumerate函数的示例: - - :) SELECT s, n.x, n.y, nest.x, nest.y, num FROM nested_test ARRAY JOIN nest AS n, arrayEnumerate(nest.x) AS num - - SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num - FROM nested_test - ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num - - ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ - │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ - │ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ - │ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ - │ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ - │ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ - └───────┴─────┴─────┴─────────┴────────────┴─────┘ - - 5 rows in set. Elapsed: 0.002 sec. - -在一个查询中只能出现一个ARRAY JOIN子句。 - -如果在WHERE/PREWHERE子句中使用了ARRAY JOIN子句的结果,它将优先于WHERE/PREWHERE子句执行,否则它将在WHERE/PRWHERE子句之后执行,以便减少计算。 - -### JOIN 子句 {#select-join} - -JOIN子句用于连接数据,作用与[SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL))的定义相同。 - -!!! info "注意" - 与 [ARRAY JOIN](#select-array-join-clause) 没有关系. - -``` sql -SELECT <expr_list> -FROM <left_subquery> -[GLOBAL] [ANY|ALL] INNER|LEFT|RIGHT|FULL|CROSS [OUTER] JOIN <right_subquery> -(ON <expr_list>)|(USING <column_list>) ... -``` - -可以使用具体的表名来代替`<left_subquery>`与`<right_subquery>`。但这与使用`SELECT * FROM table`子查询的方式相同。除非你的表是[Join](../operations/table_engines/join.md)引擎的表。 - -**支持的`JOIN`类型** - -- `INNER JOIN` -- `LEFT OUTER JOIN` -- `RIGHT OUTER JOIN` -- `FULL OUTER JOIN` -- `CROSS JOIN` - -你可以省略默认的`OUTER`关键字。 - -**`ANY` 与 `ALL`** - -在使用`ALL`修饰符对JOIN进行修饰时,如果右表中存在多个与左表关联的数据,那么系统则将右表中所有可以与左表关联的数据全部返回在结果中。这与SQL标准的JOIN行为相同。 -在使用`ANY`修饰符对JOIN进行修饰时,如果右表中存在多个与左表关联的数据,那么系统仅返回第一个与左表匹配的结果。如果左表与右表一一对应,不存在多余的行时,`ANY`与`ALL`的结果相同。 - -你可以在会话中通过设置 [join_default_strictness](../operations/settings/settings.md) 来指定默认的JOIN修饰符。 - -**`GLOBAL` distribution** - -当使用普通的`JOIN`时,查询将被发送给远程的服务器,并在这些远程服务器上生成右表并与之关联。换句话说,右表来自于各个服务器本身。 - -当使用`GLOBAL ... 
JOIN`,首先会在请求服务器上计算右表并以临时表的方式将其发送到所有服务器。这时每台服务器将直接使用它进行计算。 - -使用`GLOBAL`时需要小心。更多信息,参阅 [Distributed subqueries](#select-distributed-subqueries) 部分。 - -**使用建议** - -从子查询中删除所有`JOIN`不需要的列。 - -当执行`JOIN`查询时,因为与其他阶段相比没有进行执行顺序的优化:JOIN优先于WHERE与聚合执行。因此,为了显式地指定执行顺序,我们推荐你使用子查询的方式执行`JOIN`。 - -示例: - -``` sql -SELECT - CounterID, - hits, - visits -FROM -( - SELECT - CounterID, - count() AS hits - FROM test.hits - GROUP BY CounterID -) ANY LEFT JOIN -( - SELECT - CounterID, - sum(Sign) AS visits - FROM test.visits - GROUP BY CounterID -) USING CounterID -ORDER BY hits DESC -LIMIT 10 -``` - - ┌─CounterID─┬───hits─┬─visits─┐ - │ 1143050 │ 523264 │ 13665 │ - │ 731962 │ 475698 │ 102716 │ - │ 722545 │ 337212 │ 108187 │ - │ 722889 │ 252197 │ 10547 │ - │ 2237260 │ 196036 │ 9522 │ - │ 23057320 │ 147211 │ 7689 │ - │ 722818 │ 90109 │ 17847 │ - │ 48221 │ 85379 │ 4652 │ - │ 19762435 │ 77807 │ 7026 │ - │ 722884 │ 77492 │ 11056 │ - └───────────┴────────┴────────┘ - -子查询不允许您设置别名或在其他地方引用它们。 -`USING`中指定的列必须在两个子查询中具有相同的名称,而其他列必须具有不同的名称。您可以通过使用别名的方式来更改子查询中的列名(示例中就分别使用了’hits’与’visits’别名)。 - -`USING`子句用于指定要进行链接的一个或多个列,系统会将这些列在两张表中相等的值连接起来。如果列是一个列表,不需要使用括号包裹。同时JOIN不支持其他更复杂的Join方式。 - -右表(子查询的结果)将会保存在内存中。如果没有足够的内存,则无法运行`JOIN`。 - -只能在查询中指定一个`JOIN`。若要运行多个`JOIN`,你可以将它们放入子查询中。 - -每次运行相同的`JOIN`查询,总是会再次计算 - 没有缓存结果。 为了避免这种情况,可以使用‘Join’引擎,它是一个预处理的Join数据结构,总是保存在内存中。更多信息,参见«Join引擎»部分。 - -在一些场景下,使用`IN`代替`JOIN`将会得到更高的效率。在各种类型的JOIN中,最高效的是`ANY LEFT JOIN`,然后是`ANY INNER JOIN`,效率最差的是`ALL LEFT JOIN`以及`ALL INNER JOIN`。 - -如果你需要使用`JOIN`来关联一些维度表(包含维度属性的一些相对比较小的表,例如广告活动的名称),那么`JOIN`可能不是好的选择,因为语法复杂,并且每次查询都将重新访问这些表。对于这种情况,您应该使用«外部字典»的功能来替换`JOIN`。更多信息,参见 [外部字典](dicts/external_dicts.md) 部分。 - -#### Null的处理 {#nullde-chu-li} - -JOIN的行为受 [join_use_nulls](../operations/settings/settings.md) 的影响。当`join_use_nulls=1`时,`JOIN`的工作与SQL标准相同。 - -如果JOIN的key是 [Nullable](../data_types/nullable.md) 类型的字段,则其中至少一个存在 [NULL](syntax.md) 值的key不会被关联。 - -### WHERE 子句 {#select-where} - -如果存在WHERE子句, 则在该子句中必须包含一个UInt8类型的表达式。这个表达式通常是一个带有比较和逻辑运算的表达式。 -这个表达式将会在所有数据转换前用来过滤数据。 - -对于支持索引的表引擎,系统会评估这个表达式能否使用索引。 - -### PREWHERE 子句 {#prewhere-zi-ju} - -这个子句与WHERE子句的意思相同。主要的不同之处在于表数据的读取。 -当使用PREWHERE时,首先只读取PREWHERE表达式中需要的列。然后再根据PREWHERE执行的结果读取其他需要的列。 - -如果过滤条件中有少量不适合索引过滤、但又能提供很强过滤能力的列,这时使用PREWHERE是有意义的,因为它将帮助减少数据的读取。 - -例如,在一个需要提取大量列的查询中为少部分列编写PREWHERE是很有作用的。 - -PREWHERE 仅支持`*MergeTree`系列引擎。 - -在一个查询中可以同时指定PREWHERE和WHERE,在这种情况下,PREWHERE优先于WHERE执行。 - -值得注意的是,PREWHERE不适合用于已经存在于索引中的列,因为当列已经存在于索引中的情况下,只有满足索引的数据块才会被读取。 - -如果将’optimize_move_to_prewhere’设置为1,并且在查询中不包含PREWHERE,则系统将自动的把适合PREWHERE表达式的部分从WHERE中抽离到PREWHERE中。 - -### GROUP BY 子句 {#select-group-by-clause} - -这是列式数据库管理系统中最重要的一部分。 - -如果存在GROUP BY子句,则在该子句中必须包含一个表达式列表。其中每个表达式将会被称之为«key»。 -SELECT,HAVING,ORDER BY子句中的表达式列表必须来自于这些«key»或聚合函数。简而言之,被选择的列中不能包含非聚合函数或key之外的其他列。 - -如果查询表达式列表中仅包含聚合函数,则可以省略GROUP BY子句,这时会假定将所有数据聚合成一组空«key»。 - -Example: - -``` sql -SELECT - count(), - median(FetchTiming > 60 ? 60 : FetchTiming), - count() - sum(Refresh) -FROM hits -``` - -与SQL标准不同的是,如果表中不存在任何数据(可能表本身中就不存在任何数据,或者由于被WHERE条件过滤掉了),将返回一个空结果,而不是一个包含聚合函数初始值的结果。 - -与MySQL不同的是(实际上这是符合SQL标准的),你不能够获得一个不在key中的非聚合函数列(除了常量表达式)。但是你可以使用‘any’(返回遇到的第一个值)、max、min等聚合函数使它工作。 - -Example: - -``` sql -SELECT - domainWithoutWWW(URL) AS domain, - count(), - any(Title) AS title -- getting the first occurred page header for each domain. 
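    -- 补充注释(草稿):domain 是 GROUP BY 键;Title 不在键中,因此必须包裹在 any() 这类聚合函数里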
-FROM hits -GROUP BY domain -``` - -GROUP BY子句会为遇到的每一个不同的key计算一组聚合函数的值。 - -在GROUP BY子句中不支持使用Array类型的列。 - -常量不能作为聚合函数的参数传入聚合函数中。例如: sum(1)。这种情况下你可以省略常量。例如:`count()`。 - -#### NULL 处理 {#null-chu-li} - -对于GROUP BY子句,ClickHouse将 [NULL](syntax.md) 解释为一个值,并且支持`NULL=NULL`。 - -下面这个例子将说明这将意味着什么。 - -假设你有这样一张表: - - ┌─x─┬────y─┐ - │ 1 │ 2 │ - │ 2 │ ᴺᵁᴸᴸ │ - │ 3 │ 2 │ - │ 3 │ 3 │ - │ 3 │ ᴺᵁᴸᴸ │ - └───┴──────┘ - -运行`SELECT sum(x), y FROM t_null_big GROUP BY y`你将得到如下结果: - - ┌─sum(x)─┬────y─┐ - │ 4 │ 2 │ - │ 3 │ 3 │ - │ 5 │ ᴺᵁᴸᴸ │ - └────────┴──────┘ - -你可以看到GROUP BY为`y=NULL`的行聚合了x。 - -如果你向`GROUP BY`中放入多个key,结果将列出所有可能的组合,就像`NULL`是一个具体的值一样。 - -#### WITH TOTALS 修饰符 {#with-totals-xiu-shi-fu} - -如果你指定了WITH TOTALS修饰符,你将会在结果中得到一个被额外计算出的行。在这一行中将包含所有key的默认值(零或者空值),以及所有聚合函数对所有被选择数据行的聚合结果。 - -该行仅在JSON*、TabSeparated*、Pretty*输出格式中与其他行分开输出。 - -在JSON*输出格式中,这行将出现在Json的‘totals’字段中。在TabSeparated*输出格式中,这行将位于其他结果之后,同时与其他结果使用空白行分隔。在Pretty*输出格式中,这行将作为单独的表在所有结果之后输出。 - -当`WITH TOTALS`与HAVING子句同时存在时,它的行为受‘totals_mode’配置的影响。 -默认情况下,`totals_mode = 'before_having'`,这时`WITH TOTALS`将会在HAVING前计算最多不超过`max_rows_to_group_by`行的数据。 - -在`group_by_overflow_mode = 'any'`并指定了`max_rows_to_group_by`的情况下,`WITH TOTALS`的行为受`totals_mode`的影响。 - -`after_having_exclusive` - 在HAVING后进行计算,计算不超过`max_rows_to_group_by`行的数据。 - -`after_having_inclusive` - 在HAVING后进行计算,计算不少于`max_rows_to_group_by`行的数据。 - -`after_having_auto` - 在HAVING后进行计算,统计通过HAVING的行数:若其超过一定比例(默认为50%),则结果包含所有行,否则排除这些行。 - -`totals_auto_threshold` - 默认 0.5,是`after_having_auto`的参数。 - -如果`group_by_overflow_mode != 'any'`并没有指定`max_rows_to_group_by`情况下,所有的模式都与`after_having`相同。 - -你可以在子查询,包含子查询的JOIN子句中使用WITH TOTALS(在这种情况下,它们各自的总值会被组合在一起)。 - -#### GROUP BY 使用外部存储设备 {#select-group-by-in-external-memory} - -你可以在GROUP BY中允许将临时数据转存到磁盘上,以限制对内存的使用。 -`max_bytes_before_external_group_by`这个配置确定了在GROUP BY中启动将临时数据转存到磁盘上的内存阈值。如果你将它设置为0(这是默认值),这项功能将被禁用。 - -当使用`max_bytes_before_external_group_by`时,我们建议将max_memory_usage设置为它的两倍。这是因为一个聚合需要两个阶段来完成:(1)读取数据并形成中间数据 (2)合并中间数据。临时数据的转存只会发生在第一个阶段。如果没有发生临时文件的转存,那么阶段二将最多消耗与1阶段相同的内存大小。 - -例如:如果将`max_memory_usage`设置为10000000000并且你想要开启外部聚合,那么你需要将`max_bytes_before_external_group_by`设置为10000000000的同时将`max_memory_usage`设置为20000000000。当外部聚合被触发时(如果刚好只形成了一份临时数据),它的内存使用量将会稍高于`max_bytes_before_external_group_by`。 - -在分布式查询处理中,外部聚合将会在远程的服务器中执行。为了使请求服务器只使用较少的内存,可以设置`distributed_aggregation_memory_efficient`为1。 - -当合并被刷到磁盘的临时数据以及合并远程的服务器返回的结果时,如果在启动`distributed_aggregation_memory_efficient`的情况下,将会消耗1/256 × 线程数的总内存大小。 - -当启动外部聚合时,如果数据的大小小于`max_bytes_before_external_group_by`设置的值(数据没有被刷到磁盘中),那么数据的聚合速度将会和没有启动外部聚合时一样快。如果有临时数据被刷到了磁盘中,那么这个查询的运行时间将会被延长几倍(大约是3倍)。 - -如果GROUP BY后面存在ORDER BY子句,并且ORDER BY后面存在一个限制较小的LIMIT,那么ORDER BY子句将不会使用太多内存。 -否则请不要忘记启动外部排序(`max_bytes_before_external_sort`)。 - -### LIMIT N BY 子句 {#limit-n-by-zi-ju} - -LIMIT N BY子句和LIMIT没有关系, LIMIT N BY COLUMNS 子句可以用来在每一个COLUMNS分组中选出前N行数据。我们可以将它们同时用在一个查询中。LIMIT N BY子句中可以包含任意多个分组字段表达式列表。 - -示例: - -``` sql -SELECT - domainWithoutWWW(URL) AS domain, - domainWithoutWWW(REFERRER_URL) AS referrer, - device_type, - count() cnt -FROM hits -GROUP BY domain, referrer, device_type -ORDER BY cnt DESC -LIMIT 5 BY domain, device_type -LIMIT 100 -``` - -查询将会为每个`domain, device_type`的组合选出前5个访问最多的数据,但是结果最多将不超过100行(`LIMIT n BY + LIMIT`)。 - -### HAVING 子句 {#having-zi-ju} - -HAVING子句可以用来过滤GROUP BY之后的数据,类似于WHERE子句。 -WHERE与HAVING不同之处在于WHERE在聚合前(GROUP BY)执行,HAVING在聚合后执行。 -如果不存在聚合,则不能使用HAVING。 - -### ORDER BY 子句 {#select-order-by} - -如果存在ORDER BY 
子句,则该子句中必须存在一个表达式列表,表达式列表中每一个表达式都可以分配一个DESC或ASC(排序的方向)。如果没有指明排序的方向,将假定以ASC的方式进行排序。其中ASC表示按照升序排序,DESC按照降序排序。示例:`ORDER BY Visits DESC, SearchPhrase` - -对于字符串的排序来讲,你可以为其指定一个排序规则,在指定排序规则时,排序总是不会区分大小写。并且如果与ASC或DESC同时出现时,排序规则必须在它们的后面指定。例如:`ORDER BY SearchPhrase COLLATE 'tr'` - 使用土耳其字母表对它进行升序排序,同时排序时不会区分大小写,并假定字符串按照UTF-8字符集进行编码。 - -我们推荐只在少量的数据集中使用COLLATE,因为COLLATE的效率远低于正常的字节排序。 - -针对排序表达式中相同值的行将以任意的顺序进行输出,这是不确定的(每次都可能不同)。 -如果省略ORDER BY子句,则结果的顺序也是不固定的。 - -`NaN` 和 `NULL` 的排序规则: - -- 当使用`NULLS FIRST`修饰符时,将会先输出`NULL`,然后是`NaN`,最后才是其他值。 -- 当使用`NULLS LAST`修饰符时,将会先输出其他值,然后是`NaN`,最后才是`NULL`。 -- 默认情况下与使用`NULLS LAST`修饰符相同。 - -示例: - -假设存在如下一张表 - - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - │ 2 │ 2 │ - │ 1 │ nan │ - │ 2 │ 2 │ - │ 3 │ 4 │ - │ 5 │ 6 │ - │ 6 │ nan │ - │ 7 │ ᴺᵁᴸᴸ │ - │ 6 │ 7 │ - │ 8 │ 9 │ - └───┴──────┘ - -运行查询 `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST` 将获得如下结果: - - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - │ 7 │ ᴺᵁᴸᴸ │ - │ 1 │ nan │ - │ 6 │ nan │ - │ 2 │ 2 │ - │ 2 │ 2 │ - │ 3 │ 4 │ - │ 5 │ 6 │ - │ 6 │ 7 │ - │ 8 │ 9 │ - └───┴──────┘ - -当使用浮点类型的数值进行排序时,不管排序的顺序如何,NaNs总是出现在所有值的后面。换句话说,当你使用升序排列一个浮点数值列时,NaNs好像比所有值都要大。反之,当你使用降序排列一个浮点数值列时,NaNs好像比所有值都小。 - -如果你在ORDER BY子句后面存在LIMIT并给定了较小的数值,则将会使用较少的内存。否则,内存的使用量将与需要排序的数据成正比。对于分布式查询,如果省略了GROUP BY,则在远程服务器上执行部分排序,最后在请求服务器上合并排序结果。这意味这对于分布式查询而言,要排序的数据量可以大于单台服务器的内存。 - -如果没有足够的内存,可以使用外部排序(在磁盘中创建一些临时文件)。可以使用`max_bytes_before_external_sort`来设置外部排序,如果你将它设置为0(默认),则表示禁用外部排序功能。如果启用该功能,当要排序的数据量达到所指定的字节数时,当前排序的结果会被转存到一个临时文件中去。当全部数据读取完毕后,所有的临时文件将会合并成最终输出结果。这些临时文件将会写到config文件配置的/var/lib/clickhouse/tmp/目录中(默认值,你可以通过修改’tmp_path’配置调整该目录的位置)。 - -查询运行时实际使用的内存可能高于‘max_bytes_before_external_sort’,因此这个配置必须要远远小于‘max_memory_usage’配置的值。例如,如果你的服务器有128GB的内存去运行一个查询,那么推荐你将‘max_memory_usage’设置为100GB,‘max_bytes_before_external_sort’设置为80GB。 - -外部排序效率要远低于在内存中排序。 - -### SELECT 子句 {#select-zi-ju} - -在完成上述列出的所有子句后,将对SELECT子句中的表达式进行分析。 -具体来讲,如果在存在聚合函数的情况下,将对聚合函数之前的表达式进行分析。 -聚合函数与聚合函数之前的表达式都将在聚合期间完成计算(GROUP BY)。 -就像它们本身就已经存在于结果上一样。 - -### DISTINCT 子句 {#select-distinct} - -如果存在DISTINCT子句,则会对结果中的完全相同的行进行去重。 -其作用与GROUP BY中不包含聚合函数、且全部SELECT列都包含在GROUP BY中时相同。但该子句还是与GROUP BY子句存在以下几点不同: - -- 可以与GROUP BY配合使用。 -- 当不存在ORDER BY子句并存在LIMIT子句时,查询将在同时满足DISTINCT与LIMIT的情况下立即停止查询。 -- 在处理数据的同时输出结果,并不是等待整个查询全部完成。 - -在SELECT表达式中存在Array类型的列时,不能使用DISTINCT。 - -`DISTINCT`可以与 [NULL](syntax.md)一起工作,就好像`NULL`仅是一个特殊的值一样,并且`NULL=NULL`。换而言之,在`DISTINCT`的结果中,包含`NULL`的不同组合仅会出现一次。 - -### LIMIT 子句 {#limit-zi-ju} - -LIMIT m 用于在查询结果中选择前m行数据。 -LIMIT n, m 用于在查询结果中选择从n行开始的m行数据。 - -‘n’与‘m’必须是正整数。 - -如果没有指定ORDER BY子句,则结果可能是任意的顺序,并且是不确定的。 - -### UNION ALL 子句 {#union-all-zi-ju} - -UNION ALL子句可以组合任意数量的查询,例如: - -``` sql -SELECT CounterID, 1 AS table, toInt64(count()) AS c - FROM test.hits - GROUP BY CounterID - -UNION ALL - -SELECT CounterID, 2 AS table, sum(Sign) AS c - FROM test.visits - GROUP BY CounterID - HAVING c > 0 -``` - -仅支持UNION ALL,不支持其他UNION规则(UNION DISTINCT)。如果你需要UNION DISTINCT,你可以使用UNION ALL中包含SELECT DISTINCT的子查询的方式。 - -UNION ALL中的查询可以同时运行,它们的结果将被混合到一起。 - -这些查询的结果结构必须相同(列的数量和类型)。列名可以是不同的。在这种情况下,最终结果的列名将从第一个查询中获取。UNION会为查询之间进行类型转换。例如,如果组合的两个查询中包含相同的字段,并且是类型兼容的`Nullable`和non-`Nullable`,则结果将会将该字段转换为`Nullable`类型的字段。 - -作为UNION ALL查询的部分不能包含在括号内。ORDER BY与LIMIT子句应该被应用在每个查询中,而不是最终的查询中。如果你需要做最终结果转换,你可以将UNION ALL作为一个子查询包含在FROM子句中。 - -### INTO OUTFILE 子句 {#into-outfile-zi-ju} - -`INTO OUTFILE filename` 子句用于将查询结果重定向输出到指定文件中(filename是一个字符串类型的值)。 -与MySQL不同,执行的结果文件将在客户端建立,如果文件已存在,查询将会失败。 -此命令可以工作在命令行客户端与clickhouse-local中(通过HTTP接口发送将会失败)。 - -默认的输出格式是TabSeparated(与命令行客户端的批处理模式相同)。 - -### FORMAT 子句 {#format-zi-ju} - -‘FORMAT format’ 子句用于指定返回数据的格式。 
-你可以使用它方便地转换数据或创建数据的转储。 -更多信息,参见«输入输出格式»部分。 -如果不存在FORMAT子句,则使用默认的格式,这将取决于DB的配置以及所使用的客户端。对于批量模式的HTTP客户端和命令行客户端而言,默认的格式是TabSeparated。对于交互模式下的命令行客户端,默认的格式是PrettyCompact(它有更加美观的格式)。 - -当使用命令行客户端时,数据以内部高效的格式在服务器和客户端之间进行传递。客户端将单独地解析FORMAT子句,以帮助数据格式的转换(这将减轻网络和服务器的负载)。 - -### IN 运算符 {#select-in-operators} - -`IN`、`NOT IN`、`GLOBAL IN`、`GLOBAL NOT IN`操作符是分别实现的,因为它们的功能非常丰富。 - -运算符的左侧是单列或列的元组。 - -示例: - -``` sql -SELECT UserID IN (123, 456) FROM ... -SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ... -``` - -如果左侧是单个列并且是一个索引,并且右侧是一组常量时,系统将使用索引来处理查询。 - -不要在列表中显式列出太多的值(如上百万个)。如果数据集很大,请将它们放入临时表中(可以参考«查询处理的外部数据»部分),然后使用子查询。 - -右侧可以是一个由常量表达式组成的元组列表(像上面的例子一样),或者是一个数据库中的表的名称,或是一个包含在括号中的子查询。 - -如果右侧是一个表的名字(例如,`UserID IN users`),这相当于`UserID IN (SELECT * FROM users)`。在查询与外部数据表组合使用时可以使用该方法。例如,查询与包含user ID的‘users’临时表一起被发送的同时需要对结果进行过滤时。 - -如果操作符的右侧是一个Set引擎的表时(数据总是在内存中准备好),则不会每次都为查询创建新的数据集。 - -子查询可以指定一个列以上的元组来进行过滤。 -示例: - -``` sql -SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ... -``` - -IN操作符的左右两侧应具有相同的类型。 - -IN操作符的子查询中可以出现任意子句,包含聚合函数与lambda函数。 -示例: - -``` sql -SELECT - EventDate, - avg(UserID IN - ( - SELECT UserID - FROM test.hits - WHERE EventDate = toDate('2014-03-17') - )) AS ratio -FROM test.hits -GROUP BY EventDate -ORDER BY EventDate ASC -``` - - ┌──EventDate─┬────ratio─┐ - │ 2014-03-17 │ 1 │ - │ 2014-03-18 │ 0.807696 │ - │ 2014-03-19 │ 0.755406 │ - │ 2014-03-20 │ 0.723218 │ - │ 2014-03-21 │ 0.697021 │ - │ 2014-03-22 │ 0.647851 │ - │ 2014-03-23 │ 0.648416 │ - └────────────┴──────────┘ - -为3月17日之后的每一天计算与3月17日访问该网站的用户浏览网页的百分比。 -IN子句中的子查询仅在单个服务器上运行一次。不能够是相关子查询。 - -#### NULL 处理 {#null-chu-li-1} - -在查询处理期间,IN操作符总是假定与 [NULL](syntax.md) 值运算的结果等于`0`,而不管`NULL`位于左侧还是右侧。`NULL`值不应该包含在任何数据集中,它们彼此之间互不对应,并且无法进行比较。 - -下面的示例中有一个`t_null`表: - - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - │ 2 │ 3 │ - └───┴──────┘ - -运行查询`SELECT x FROM t_null WHERE y IN (NULL,3)`将得到如下结果: - - ┌─x─┐ - │ 2 │ - └───┘ - -你可以看到在查询结果中不存在`y = NULL`的结果。这是因为ClickHouse无法确定`NULL`是否包含在`(NULL,3)`数据集中,对于这次比较操作返回了`0`,并且在`SELECT`的最终输出中排除了这行。 - - SELECT y IN (NULL, 3) - FROM t_null - - ┌─in(y, tuple(NULL, 3))─┐ - │ 0 │ - │ 1 │ - └───────────────────────┘ - -#### 分布式子查询 {#select-distributed-subqueries} - -对于带有子查询的IN(与JOIN类似),有两种选择:普通的`IN`/`JOIN`与`GLOBAL IN` / `GLOBAL JOIN`。它们在分布式查询的处理方式上是不同的。 - -!!! 
-    请记住,下面描述的算法可能会根据 [settings](../operations/settings/settings.md) 配置的不同而有所不同。
-
-当使用普通的IN时,查询总是被发送到远程的服务器,并且在每个服务器中运行«IN»或«JOIN»子句中的子查询。
-
-当使用`GLOBAL IN` / `GLOBAL JOIN`时,首先会为`GLOBAL IN` / `GLOBAL JOIN`运行所有子查询,并将结果收集到临时表中,再将临时表发送到每个远程服务器,并使用该临时表运行查询。
-
-对于非分布式查询,请使用普通的`IN` / `JOIN`。
-
-在分布式查询的`IN` / `JOIN`子句中使用子查询需要小心。
-
-让我们来看一些例子。假设集群中的每个服务器上都存在一个本地表**local\_table**,以及一个分布式表**distributed\_table**。
-
-对于所有针对**distributed\_table**的查询,查询会被发送到所有的远程服务器并使用**local\_table**表运行。
-
-例如,查询
-
-``` sql
-SELECT uniq(UserID) FROM distributed_table
-```
-
-将发送如下查询到所有远程服务器
-
-``` sql
-SELECT uniq(UserID) FROM local_table
-```
-
-这些查询将被并行执行,直到达到可以组合数据的中间结果状态。然后中间结果将返回到请求服务器并在请求服务器上进行合并,最终将结果发送给客户端。
-
-现在让我们运行一个带有IN的查询:
-
-``` sql
-SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34)
-```
-
-- 计算两个站点的用户交集。
-
-此查询将被发送给所有的远程服务器
-
-``` sql
-SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34)
-```
-
-换句话说,IN子句中的数据集将在每台服务器上被独立地收集,仅针对每台服务器本地存储的数据计算交集。
-
-如果您已经将数据分散到了集群的每台服务器上,并且单个UserID的数据完全位于单个服务器上,那么这将是正确且最佳的查询方式。在这种情况下,所有需要的数据都可以在每台服务器的本地进行获取。否则,结果将是不准确的。我们将这种查询称为«local IN»。
-
-为了在数据随机分布的集群中得到正确的结果,你可以在子查询中使用**distributed\_table**。查询将更改为这样:
-
-``` sql
-SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
-```
-
-此查询将被发送给所有的远程服务器
-
-``` sql
-SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
-```
-
-子查询将在每个远程服务器上执行。因为子查询使用分布式表,所以每个远程服务器会将子查询再次发送给所有的远程服务器
-
-``` sql
-SELECT UserID FROM local_table WHERE CounterID = 34
-```
-
-例如,如果你拥有100台服务器的集群,执行整个查询将需要10,000次请求,这通常被认为是不可接受的。
-
-在这种情况下,你应该使用GLOBAL IN来替代IN。让我们看一下它是如何工作的。
-
-``` sql
-SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
-```
-
-在请求服务器上运行子查询
-
-``` sql
-SELECT UserID FROM distributed_table WHERE CounterID = 34
-```
-
-将结果放入内存中的临时表中。然后将请求发送到每一台远程服务器
-
-``` sql
-SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL IN _data1
-```
-
-临时表`_data1`也会随着查询一起发送到每一台远程服务器(临时表的名称由具体实现定义)。
-
-这比使用普通的IN更加理想,但是,请注意以下几点:
-
-1. 创建临时表时,数据不会被去重。为了减少通过网络传输的数据量,请在子查询中使用DISTINCT(你不需要在普通的IN中这么做)。
-2. 临时表将发送到所有远程服务器,传输时不考虑网络的拓扑结构。例如,如果10个远程服务器位于距离请求服务器非常远的数据中心中,则数据将通过通往远程数据中心的通道被发送10次。使用GLOBAL IN时应避免大数据集。
-3. 当向远程服务器发送数据时,网络带宽的限制是不可配置的,这可能会使网络过载。
-4. 尝试将数据跨服务器分布,这样你将不需要使用GLOBAL IN。
-5. 如果你需要经常使用GLOBAL IN,请规划你的ClickHouse集群位置,以便单个副本组不跨数据中心分布,并且它们之间具有快速的网络交换能力,使查询可以完全在一个数据中心内完成。
-
-另外,在`GLOBAL IN`子句中使用本地表也是有用的,比如,本地表仅在请求服务器上可用,并且您希望在远程服务器上使用来自本地表的数据。
-
-### Extreme Values {#extreme-values}
-
-除了结果外,你还可以获得结果列的最大值与最小值,将**extremes**配置设置成1即可做到这一点。最大值与最小值仅针对数字类型、日期与日期时间类型的列进行计算;对于其他列,将会输出默认值。
-
-系统会额外计算两行结果,即最大值与最小值。这两行额外的结果仅在JSON\*、TabSeparated\*与Pretty\*格式中输出,并与其他行分开;其他输出格式不支持该功能。
-
-在JSON\*格式中,Extreme值在单独的’extremes’字段中。在TabSeparated\*格式中,在其他结果与’totals’之后输出,并使用空行与其分隔。在Pretty\*格式中,将在其他结果与’totals’后以单独的表格输出。
-
-如果查询包含LIMIT,extremes的计算结果将包含被offset跳过的行。在流式请求中,结果还可能包含少量超出LIMIT的行。
-
-### 注意事项 {#zhu-yi-shi-xiang}
-
-不同于MySQL, `GROUP BY`与`ORDER BY`子句不支持使用列的位置信息作为参数,但这实际上是符合SQL标准的。
-例如,`GROUP BY 1, 2`将被解释为按照常量进行分组(即,所有的行将会被聚合成一行)。
-
-可以在查询的任何部分使用AS。
-
-可以在查询的任何部分添加星号,而不仅仅是表达式。在分析查询时,星号被替换为所有的列(不包含`MATERIALIZED`与`ALIAS`的列)。
-只有少数情况下使用星号是合理的:
-
-- 创建表转储时。
-- 对于仅包含几个列的表,如系统表。
-- 获取表中的列信息。在这种情况下应该使用`LIMIT 1`。但是,更好的办法是使用`DESC TABLE`。 -- 当使用`PREWHERE`在少数的几个列上做强过滤时。 -- 在子查询中(因为外部查询不需要的列被排除在子查询之外)。 - -在所有的其他情况下,我们不建议使用星号,因为它是列式数据库的缺点而不是优点。 - -[来源文章](https://clickhouse.tech/docs/zh/query_language/select/) diff --git a/docs/zh/query_language/show.md b/docs/zh/query_language/show.md deleted file mode 120000 index 4c2f4cf2c4f..00000000000 --- a/docs/zh/query_language/show.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/show.md \ No newline at end of file diff --git a/docs/zh/query_language/syntax.md b/docs/zh/query_language/syntax.md deleted file mode 120000 index 5307fd51ae8..00000000000 --- a/docs/zh/query_language/syntax.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/syntax.md \ No newline at end of file diff --git a/docs/zh/query_language/system.md b/docs/zh/query_language/system.md deleted file mode 120000 index 6061858c3f2..00000000000 --- a/docs/zh/query_language/system.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/system.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/file.md b/docs/zh/query_language/table_functions/file.md deleted file mode 120000 index a514547109a..00000000000 --- a/docs/zh/query_language/table_functions/file.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/file.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/generate.md b/docs/zh/query_language/table_functions/generate.md deleted file mode 120000 index 141c05da1e3..00000000000 --- a/docs/zh/query_language/table_functions/generate.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/generate.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/hdfs.md b/docs/zh/query_language/table_functions/hdfs.md deleted file mode 120000 index 2616e737eb6..00000000000 --- a/docs/zh/query_language/table_functions/hdfs.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/hdfs.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/index.md b/docs/zh/query_language/table_functions/index.md deleted file mode 120000 index 89b22522859..00000000000 --- a/docs/zh/query_language/table_functions/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/index.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/input.md b/docs/zh/query_language/table_functions/input.md deleted file mode 120000 index f23cc8ee673..00000000000 --- a/docs/zh/query_language/table_functions/input.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/input.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/jdbc.md b/docs/zh/query_language/table_functions/jdbc.md deleted file mode 120000 index 73bec80ca58..00000000000 --- a/docs/zh/query_language/table_functions/jdbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/jdbc.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/merge.md b/docs/zh/query_language/table_functions/merge.md deleted file mode 120000 index 383f6c88331..00000000000 --- a/docs/zh/query_language/table_functions/merge.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/merge.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/mysql.md b/docs/zh/query_language/table_functions/mysql.md deleted file mode 120000 index 75c032cc63f..00000000000 --- a/docs/zh/query_language/table_functions/mysql.md 
+++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/mysql.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/numbers.md b/docs/zh/query_language/table_functions/numbers.md deleted file mode 120000 index a679b915669..00000000000 --- a/docs/zh/query_language/table_functions/numbers.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/numbers.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/odbc.md b/docs/zh/query_language/table_functions/odbc.md deleted file mode 120000 index 7620f920494..00000000000 --- a/docs/zh/query_language/table_functions/odbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/odbc.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/remote.md b/docs/zh/query_language/table_functions/remote.md deleted file mode 120000 index b157c4076d3..00000000000 --- a/docs/zh/query_language/table_functions/remote.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/remote.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/url.md b/docs/zh/query_language/table_functions/url.md deleted file mode 120000 index 038e08f7ba9..00000000000 --- a/docs/zh/query_language/table_functions/url.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/url.md \ No newline at end of file diff --git a/docs/zh/roadmap.md b/docs/zh/roadmap.md deleted file mode 100644 index 49532c046f5..00000000000 --- a/docs/zh/roadmap.md +++ /dev/null @@ -1,9 +0,0 @@ -# 规划 {#gui-hua} - -## Q1 2020 {#q1-2020} - -- 更精确的用户资源池,可以在用户之间合理分配集群资源 -- 细粒度的授权管理 -- 与外部认证服务集成 - -[来源文章](https://clickhouse.tech/docs/en/roadmap/) diff --git a/docs/zh/security_changelog.md b/docs/zh/security_changelog.md deleted file mode 100644 index e35d6a7c632..00000000000 --- a/docs/zh/security_changelog.md +++ /dev/null @@ -1,41 +0,0 @@ -## 修复于 ClickHouse Release 18.12.13, 2018-09-10 {#xiu-fu-yu-clickhouse-release-18-12-13-2018-09-10} - -### CVE-2018-14672 {#cve-2018-14672} - -加载CatBoost模型的功能,允许遍历路径并通过错误消息读取任意文件。 - -来源: Yandex信息安全团队的Andrey Krasichkov - -## 修复于 ClickHouse Release 18.10.3, 2018-08-13 {#xiu-fu-yu-clickhouse-release-18-10-3-2018-08-13} - -### CVE-2018-14671 {#cve-2018-14671} - -unixODBC允许从文件系统加载任意共享对象,从而导致«远程执行代码»漏洞。 - -来源:Yandex信息安全团队的Andrey Krasichkov和Evgeny Sidorov - -## 修复于 ClickHouse Release 1.1.54388, 2018-06-28 {#xiu-fu-yu-clickhouse-release-1-1-54388-2018-06-28} - -### CVE-2018-14668 {#cve-2018-14668} - -远程表函数功能允许在 «user», «password» 及 «default\_database» 字段中使用任意符号,从而导致跨协议请求伪造攻击。 - -来源:Yandex信息安全团队的Andrey Krasichkov - -## 修复于 ClickHouse Release 1.1.54390, 2018-07-06 {#xiu-fu-yu-clickhouse-release-1-1-54390-2018-07-06} - -### CVE-2018-14669 {#cve-2018-14669} - -ClickHouse MySQL客户端启用了 «LOAD DATA LOCAL INFILE» 功能,该功能允许恶意MySQL数据库从连接的ClickHouse服务器读取任意文件。 - -来源:Yandex信息安全团队的Andrey Krasichkov和Evgeny Sidorov - -## 修复于 ClickHouse Release 1.1.54131, 2017-01-10 {#xiu-fu-yu-clickhouse-release-1-1-54131-2017-01-10} - -### CVE-2018-14670 {#cve-2018-14670} - -deb软件包中的错误配置可能导致使用未经授权的数据库。 - -来源:英国国家网络安全中心(NCSC) - -[来源文章](https://clickhouse.tech/docs/en/security_changelog/) diff --git a/docs/zh/sql_reference/aggregate_functions/combinators.md b/docs/zh/sql_reference/aggregate_functions/combinators.md new file mode 100644 index 00000000000..a8be457ab23 --- /dev/null +++ b/docs/zh/sql_reference/aggregate_functions/combinators.md @@ -0,0 +1,166 @@ +--- +machine_translated: true +machine_translated_rev: 
b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 37 +toc_title: "\u805A\u5408\u51FD\u6570\u7EC4\u5408\u5668" +--- + +# 聚合函数组合器 {#aggregate_functions_combinators} + +聚合函数的名称可以附加一个后缀。 这改变了聚合函数的工作方式。 + +## -如果 {#agg-functions-combinator-if} + +The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (Uint8 type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings). + +例: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` 等等。 + +使用条件聚合函数,您可以一次计算多个条件的聚合,而无需使用子查询和 `JOIN`例如,在Yandex的。Metrica,条件聚合函数用于实现段比较功能。 + +## -阵列 {#agg-functions-combinator-array} + +-Array后缀可以附加到任何聚合函数。 在这种情况下,聚合函数采用的参数 ‘Array(T)’ 类型(数组)而不是 ‘T’ 类型参数。 如果聚合函数接受多个参数,则它必须是长度相等的数组。 在处理数组时,聚合函数的工作方式与所有数组元素的原始聚合函数类似。 + +示例1: `sumArray(arr)` -总计所有的所有元素 ‘arr’ 阵列。 在这个例子中,它可以更简单地编写: `sum(arraySum(arr))`. + +示例2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ 阵列。 这可以做一个更简单的方法: `uniq(arrayJoin(arr))`,但它并不总是可以添加 ‘arrayJoin’ 到查询。 + +-如果和-阵列可以组合。 然而, ‘Array’ 必须先来,然后 ‘If’. 例: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. 由于这个顺序,该 ‘cond’ 参数不会是数组。 + +## -州 {#agg-functions-combinator-state} + +如果应用此combinator,则聚合函数不会返回结果值(例如唯一值的数量 [uniq](reference.md#agg_function-uniq) 函数),但聚合的中间状态(用于 `uniq`,这是用于计算唯一值的数量的散列表)。 这是一个 `AggregateFunction(...)` 可用于进一步处理或存储在表中以完成聚合。 + +要使用这些状态,请使用: + +- [AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) 表引擎。 +- [最后聚会](../../sql_reference/functions/other_functions.md#function-finalizeaggregation) 功能。 +- [跑累积](../../sql_reference/functions/other_functions.md#function-runningaccumulate) 功能。 +- [-合并](#aggregate_functions_combinators_merge) combinator +- [-MergeState](#aggregate_functions_combinators_mergestate) combinator + +## -合并 {#aggregate_functions_combinators-merge} + +如果应用此组合器,则聚合函数将中间聚合状态作为参数,组合状态以完成聚合,并返回结果值。 + +## -MergeState {#aggregate_functions_combinators-mergestate} + +以与-Merge combinator相同的方式合并中间聚合状态。 但是,它不会返回结果值,而是返回中间聚合状态,类似于-State combinator。 + +## -ForEach {#agg-functions-combinator-foreach} + +将表的聚合函数转换为聚合相应数组项并返回结果数组的数组的聚合函数。 例如, `sumForEach` 对于数组 `[1, 2]`, `[3, 4, 5]`和`[6, 7]`返回结果 `[10, 13, 5]` 之后将相应的数组项添加在一起。 + +## -OrDefault {#agg-functions-combinator-ordefault} + +如果没有要聚合的内容,则填充聚合函数的返回类型的默认值。 + +``` sql +SELECT avg(number), avgOrDefault(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrDefault(number)─┐ +│ nan │ 0 │ +└─────────────┴──────────────────────┘ +``` + +## -OrNull {#agg-functions-combinator-ornull} + +填充 `null` 如果没有什么聚合。 返回列将为空。 + +``` sql +SELECT avg(number), avgOrNull(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrNull(number)─┐ +│ nan │ ᴺᵁᴸᴸ │ +└─────────────┴───────────────────┘ +``` + +-OrDefault和-OrNull可以与其他组合器相结合。 当聚合函数不接受空输入时,它很有用。 + +``` sql +SELECT avgOrNullIf(x, x > 10) +FROM +( + SELECT toDecimal32(1.23, 2) AS x +) +``` + +``` text +┌─avgOrNullIf(x, greater(x, 10))─┐ +│ ᴺᵁᴸᴸ │ +└────────────────────────────────┘ +``` + +## -重新采样 {#agg-functions-combinator-resample} + +允许您将数据划分为组,然后单独聚合这些组中的数据。 通过将一列中的值拆分为间隔来创建组。 + +``` sql +Resample(start, end, step)(, resampling_key) +``` + +**参数** + +- `start` — Starting value of the whole required interval for `resampling_key` 值。 +- `stop` — Ending value of the whole required interval for `resampling_key` 值。 
整个时间间隔不包括 `stop` 值,即区间为 `[start, stop)`。
+- `step` — Step for separating the whole interval into subintervals. The `aggFunction` 在每个子区间上独立执行。
+- `resampling_key` — Column whose values are used for separating data into intervals.
+- `aggFunction_params` — `aggFunction` 参数。
+
+**返回值**
+
+- 由 `aggFunction` 在每个子区间上的结果组成的数组。
+
+**示例**
+
+考虑具有以下数据的 `people` 表:
+
+``` text
+┌─name───┬─age─┬─wage─┐
+│ John   │  16 │   10 │
+│ Alice  │  30 │   15 │
+│ Mary   │  35 │    8 │
+│ Evelyn │  48 │ 11.5 │
+│ David  │  62 │  9.9 │
+│ Brian  │  60 │   16 │
+└────────┴─────┴──────┘
+```
+
+让我们获取年龄位于 `[30,60)` 和 `[60,75)` 区间内的人的名字。由于我们使用整数表示年龄,实际得到的是 `[30, 59]` 和 `[60,74]` 区间。
+
+要在数组中聚合名称,我们使用 [groupArray](reference.md#agg_function-grouparray) 聚合函数。它需要一个参数,在我们的例子中是 `name` 列。`groupArrayResample` 函数应该使用 `age` 列来按年龄聚合名称。为了定义所需的区间,我们将 `30, 75, 30` 参数传递给 `groupArrayResample` 函数。
+
+``` sql
+SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
+```
+
+``` text
+┌─groupArrayResample(30, 75, 30)(name, age)─────┐
+│ [['Alice','Mary','Evelyn'],['David','Brian']] │
+└───────────────────────────────────────────────┘
+```
+
+考虑结果。
+
+`John` 不在结果中,因为他太年轻了。其他人按照指定的年龄区间进行分配。
+
+现在让我们计算指定年龄区间内的总人数和平均工资。
+
+``` sql
+SELECT
+    countResample(30, 75, 30)(name, age) AS amount,
+    avgResample(30, 75, 30)(wage, age) AS avg_wage
+FROM people
+```
+
+``` text
+┌─amount─┬─avg_wage──────────────────┐
+│ [3,2]  │ [11.5,12.949999809265137] │
+└────────┴───────────────────────────┘
+```
+
+[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/)
diff --git a/docs/zh/sql_reference/aggregate_functions/index.md b/docs/zh/sql_reference/aggregate_functions/index.md
new file mode 100644
index 00000000000..7e53c8c8c53
--- /dev/null
+++ b/docs/zh/sql_reference/aggregate_functions/index.md
@@ -0,0 +1,62 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u805A\u5408\u51FD\u6570"
+toc_priority: 33
+toc_title: "\u5BFC\u8A00"
+---
+
+# 聚合函数 {#aggregate-functions}
+
+聚合函数以数据库专家所期望的[正常](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial)方式工作。
+
+ClickHouse还支持:
+
+- [参数聚合函数](parametric_functions.md#aggregate_functions_parametric),它接受除列之外的其他参数。
+- [组合器](combinators.md#aggregate_functions_combinators),这改变了聚合函数的行为。
+
+## 空处理 {#null-processing}
+
+在聚合过程中,所有 `NULL` 值都会被跳过。
+
+**例:**
+
+考虑这个表:
+
+``` text
+┌─x─┬────y─┐
+│ 1 │    2 │
+│ 2 │ ᴺᵁᴸᴸ │
+│ 3 │    2 │
+│ 3 │    3 │
+│ 3 │ ᴺᵁᴸᴸ │
+└───┴──────┘
+```
+
+比方说,你需要对 `y` 列的值求和:
+
+``` sql
+SELECT sum(y) FROM t_null_big
+```
+
+    ┌─sum(y)─┐
+    │      7 │
+    └────────┘
+
+`sum` 函数将 `NULL` 解释为 `0`。特别是,这意味着,如果函数的输入中所有的值均为 `NULL`,那么结果将是 `0`,而不是 `NULL`。
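+
+作为补充,下面是一个简要的示意性查询(并非文档原有示例),仍使用上文的 `t_null_big` 表,用于对比 `sum` 与带有 `-OrNull` 组合器(见上文组合器文档)的 `sumOrNull` 在所有输入均为 `NULL` 时的行为:
+
+``` sql
+-- 只保留 y 为 NULL 的行:没有任何值参与聚合,
+-- 此时 sum 返回 0,而 sumOrNull 返回 NULL
+SELECT sum(y), sumOrNull(y) FROM t_null_big WHERE y IS NULL
+```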
+ +现在你可以使用 `groupArray` 函数从创建一个数组 `y` 列: + +``` sql +SELECT groupArray(y) FROM t_null_big +``` + +``` text +┌─groupArray(y)─┐ +│ [2,2,3] │ +└───────────────┘ +``` + +`groupArray` 不包括 `NULL` 在生成的数组中。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/zh/sql_reference/aggregate_functions/parametric_functions.md b/docs/zh/sql_reference/aggregate_functions/parametric_functions.md new file mode 100644 index 00000000000..18adcd93487 --- /dev/null +++ b/docs/zh/sql_reference/aggregate_functions/parametric_functions.md @@ -0,0 +1,499 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 38 +toc_title: "\u53C2\u6570\u805A\u5408\u51FD\u6570" +--- + +# 参数聚合函数 {#aggregate_functions_parametric} + +Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. + +## 直方图 {#histogram} + +计算自适应直方图。 它不能保证精确的结果。 + +``` sql +histogram(number_of_bins)(values) +``` + +该函数使用 [流式并行决策树算法](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). 当新数据输入函数时,hist图分区的边界将被调整。 在通常情况下,箱的宽度不相等。 + +**参数** + +`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. +`values` — [表达式](../syntax.md#syntax-expressions) 导致输入值。 + +**返回值** + +- [阵列](../../sql_reference/data_types/array.md) 的 [元组](../../sql_reference/data_types/tuple.md) 下面的格式: + + ``` + [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] + ``` + + - `lower` — Lower bound of the bin. + - `upper` — Upper bound of the bin. + - `height` — Calculated height of the bin. + +**示例** + +``` sql +SELECT histogram(5)(number + 1) +FROM ( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─histogram(5)(plus(number, 1))───────────────────────────────────────────┐ +│ [(1,4.5,4),(4.5,8.5,4),(8.5,12.75,4.125),(12.75,17,4.625),(17,20,3.25)] │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +您可以使用 [酒吧](../../sql_reference/functions/other_functions.md#function-bar) 功能,例如: + +``` sql +WITH histogram(5)(rand() % 100) AS hist +SELECT + arrayJoin(hist).3 AS height, + bar(height, 0, 6, 5) AS bar +FROM +( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─height─┬─bar───┐ +│ 2.125 │ █▋ │ +│ 3.25 │ ██▌ │ +│ 5.625 │ ████▏ │ +│ 5.625 │ ████▏ │ +│ 3.375 │ ██▌ │ +└────────┴───────┘ +``` + +在这种情况下,您应该记住您不知道直方图bin边界。 + +## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} + +检查序列是否包含与模式匹配的事件链。 + +``` sql +sequenceMatch(pattern)(timestamp, cond1, cond2, ...) +``` + +!!! warning "警告" + 在同一秒钟发生的事件可能以未定义的顺序排列在序列中,影响结果。 + +**参数** + +- `pattern` — Pattern string. See [模式语法](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` 和 `DateTime`. 您还可以使用任何支持的 [UInt](../../sql_reference/data_types/int_uint.md) 数据类型。 + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. 最多可以传递32个条件参数。 该函数只考虑这些条件中描述的事件。 如果序列包含未在条件中描述的数据,则函数将跳过这些数据。 + +**返回值** + +- 1,如果模式匹配。 +- 0,如果模式不匹配。 + +类型: `UInt8`. + + +**模式语法** + +- `(?N)` — Matches the condition argument at position `N`. 条件在编号 `[1, 32]` 范围。 例如, `(?1)` 匹配传递给 `cond1` 参数。 + +- `.*` — Matches any number of events. 
You don't need conditional arguments to match this element of the pattern. + +- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` 匹配彼此发生超过1800秒的事件。 这些事件之间可以存在任意数量的任何事件。 您可以使用 `>=`, `>`, `<`, `<=` 运营商。 + +**例** + +考虑在数据 `t` 表: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +└──────┴────────┘ +``` + +执行查询: + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 1 │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +该函数找到了数字2跟随数字1的事件链。 它跳过了它们之间的数字3,因为该数字没有被描述为事件。 如果我们想在搜索示例中给出的事件链时考虑这个数字,我们应该为它创建一个条件。 + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 3))─┐ +│ 0 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +在这种情况下,函数找不到与模式匹配的事件链,因为数字3的事件发生在1和2之间。 如果在相同的情况下,我们检查了数字4的条件,则序列将与模式匹配。 + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 4))─┐ +│ 1 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**另请参阅** + +- [sequenceCount](#function-sequencecount) + +## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} + +计数与模式匹配的事件链的数量。 该函数搜索不重叠的事件链。 当前链匹配后,它开始搜索下一个链。 + +!!! warning "警告" + 在同一秒钟发生的事件可能以未定义的顺序排列在序列中,影响结果。 + +``` sql +sequenceCount(pattern)(timestamp, cond1, cond2, ...) +``` + +**参数** + +- `pattern` — Pattern string. See [模式语法](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` 和 `DateTime`. 您还可以使用任何支持的 [UInt](../../sql_reference/data_types/int_uint.md) 数据类型。 + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. 最多可以传递32个条件参数。 该函数只考虑这些条件中描述的事件。 如果序列包含未在条件中描述的数据,则函数将跳过这些数据。 + +**返回值** + +- 匹配的非重叠事件链数。 + +类型: `UInt64`. + +**示例** + +考虑在数据 `t` 表: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +│ 4 │ 1 │ +│ 5 │ 3 │ +│ 6 │ 2 │ +└──────┴────────┘ +``` + +计算数字2在数字1之后出现的次数以及它们之间的任何其他数字: + +``` sql +SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceCount('(?1).*(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 2 │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +**另请参阅** + +- [sequenceMatch](#function-sequencematch) + +## windowFunnel {#windowfunnel} + +搜索滑动时间窗中的事件链,并计算从链中发生的最大事件数。 + +该函数根据算法工作: + +- 该函数搜索触发链中的第一个条件并将事件计数器设置为1的数据。 这是滑动窗口启动的时刻。 + +- 如果来自链的事件在窗口内顺序发生,则计数器将递增。 如果事件序列中断,则计数器不会增加。 + +- 如果数据在不同的完成点具有多个事件链,则该函数将仅输出最长链的大小。 + +**语法** + +``` sql +windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) +``` + +**参数** + +- `window` — Length of the sliding window in seconds. +- `mode` -这是一个可选的参数。 + - `'strict'` -当 `'strict'` 设置时,windowFunnel()仅对唯一值应用条件。 +- `timestamp` — Name of the column containing the timestamp. Data types supported: [日期](../../sql_reference/data_types/date.md), [日期时间](../../sql_reference/data_types/datetime.md#data_type-datetime) 和其他无符号整数类型(请注意,即使时间戳支持 `UInt64` 类型,它的值不能超过Int64最大值,即2^63-1)。 +- `cond` — Conditions or data describing the chain of events. 
[UInt8](../../sql_reference/data_types/int_uint.md). + +**返回值** + +滑动时间窗口内连续触发条件链的最大数目。 +对选择中的所有链进行了分析。 + +类型: `Integer`. + +**示例** + +确定设定的时间段是否足以让用户选择手机并在在线商店中购买两次。 + +设置以下事件链: + +1. 用户登录到其在应用商店中的帐户 (`eventID = 1003`). +2. 用户搜索手机 (`eventID = 1007, product = 'phone'`). +3. 用户下了订单 (`eventID = 1009`). +4. 用户再次下订单 (`eventID = 1010`). + +输入表: + +``` text +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-28 │ 1 │ 2019-01-29 10:00:00 │ 1003 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-31 │ 1 │ 2019-01-31 09:00:00 │ 1007 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-30 │ 1 │ 2019-01-30 08:00:00 │ 1009 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-02-01 │ 1 │ 2019-02-01 08:00:00 │ 1010 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +``` + +了解用户有多远 `user_id` 可以在2019的1-2月期间通过链条。 + +查询: + +``` sql +SELECT + level, + count() AS c +FROM +( + SELECT + user_id, + windowFunnel(6048000000000000)(timestamp, eventID = 1003, eventID = 1009, eventID = 1007, eventID = 1010) AS level + FROM trend + WHERE (event_date >= '2019-01-01') AND (event_date <= '2019-02-02') + GROUP BY user_id +) +GROUP BY level +ORDER BY level ASC +``` + +结果: + +``` text +┌─level─┬─c─┐ +│ 4 │ 1 │ +└───────┴───┘ +``` + +## 保留 {#retention} + +该函数将一组条件作为参数,类型为1到32个参数 `UInt8` 表示事件是否满足特定条件。 +任何条件都可以指定为参数(如 [WHERE](../../sql_reference/statements/select.md#select-where)). + +除了第一个以外,条件成对适用:如果第一个和第二个是真的,第二个结果将是真的,如果第一个和fird是真的,第三个结果将是真的,等等。 + +**语法** + +``` sql +retention(cond1, cond2, ..., cond32); +``` + +**参数** + +- `cond` — an expression that returns a `UInt8` 结果(1或0)。 + +**返回值** + +数组为1或0。 + +- 1 — condition was met for the event. +- 0 — condition wasn't met for the event. + +类型: `UInt8`. + +**示例** + +让我们考虑计算的一个例子 `retention` 功能,以确定网站流量。 + +**1.** Сreate a table to illustrate an example. 
+ +``` sql +CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory; + +INSERT INTO retention_test SELECT '2020-01-01', number FROM numbers(5); +INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10); +INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15); +``` + +输入表: + +查询: + +``` sql +SELECT * FROM retention_test +``` + +结果: + +``` text +┌───────date─┬─uid─┐ +│ 2020-01-01 │ 0 │ +│ 2020-01-01 │ 1 │ +│ 2020-01-01 │ 2 │ +│ 2020-01-01 │ 3 │ +│ 2020-01-01 │ 4 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-02 │ 0 │ +│ 2020-01-02 │ 1 │ +│ 2020-01-02 │ 2 │ +│ 2020-01-02 │ 3 │ +│ 2020-01-02 │ 4 │ +│ 2020-01-02 │ 5 │ +│ 2020-01-02 │ 6 │ +│ 2020-01-02 │ 7 │ +│ 2020-01-02 │ 8 │ +│ 2020-01-02 │ 9 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-03 │ 0 │ +│ 2020-01-03 │ 1 │ +│ 2020-01-03 │ 2 │ +│ 2020-01-03 │ 3 │ +│ 2020-01-03 │ 4 │ +│ 2020-01-03 │ 5 │ +│ 2020-01-03 │ 6 │ +│ 2020-01-03 │ 7 │ +│ 2020-01-03 │ 8 │ +│ 2020-01-03 │ 9 │ +│ 2020-01-03 │ 10 │ +│ 2020-01-03 │ 11 │ +│ 2020-01-03 │ 12 │ +│ 2020-01-03 │ 13 │ +│ 2020-01-03 │ 14 │ +└────────────┴─────┘ +``` + +**2.** 按唯一ID对用户进行分组 `uid` 使用 `retention` 功能。 + +查询: + +``` sql +SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r +FROM retention_test +WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') +GROUP BY uid +ORDER BY uid ASC +``` + +结果: + +``` text +┌─uid─┬─r───────┐ +│ 0 │ [1,1,1] │ +│ 1 │ [1,1,1] │ +│ 2 │ [1,1,1] │ +│ 3 │ [1,1,1] │ +│ 4 │ [1,1,1] │ +│ 5 │ [0,0,0] │ +│ 6 │ [0,0,0] │ +│ 7 │ [0,0,0] │ +│ 8 │ [0,0,0] │ +│ 9 │ [0,0,0] │ +│ 10 │ [0,0,0] │ +│ 11 │ [0,0,0] │ +│ 12 │ [0,0,0] │ +│ 13 │ [0,0,0] │ +│ 14 │ [0,0,0] │ +└─────┴─────────┘ +``` + +**3.** 计算每天的现场访问总数。 + +查询: + +``` sql +SELECT + sum(r[1]) AS r1, + sum(r[2]) AS r2, + sum(r[3]) AS r3 +FROM +( + SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r + FROM retention_test + WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') + GROUP BY uid +) +``` + +结果: + +``` text +┌─r1─┬─r2─┬─r3─┐ +│ 5 │ 5 │ 5 │ +└────┴────┴────┘ +``` + +哪里: + +- `r1`-2020-01-01期间访问该网站的独立访问者数量( `cond1` 条件)。 +- `r2`-在2020-01-01和2020-01-02之间的特定时间段内访问该网站的唯一访问者的数量 (`cond1` 和 `cond2` 条件)。 +- `r3`-在2020-01-01和2020-01-03之间的特定时间段内访问该网站的唯一访问者的数量 (`cond1` 和 `cond3` 条件)。 + +## uniqUpTo(N)(x) {#uniquptonx} + +Calculates the number of different argument values ​​if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1. + +建议使用小Ns,高达10。 N的最大值为100。 + +对于聚合函数的状态,它使用的内存量等于1+N\*一个字节值的大小。 +对于字符串,它存储8个字节的非加密哈希。 也就是说,计算是近似的字符串。 + +该函数也适用于多个参数。 + +它的工作速度尽可能快,除了使用较大的N值并且唯一值的数量略小于N的情况。 + +用法示例: + +``` text +Problem: Generate a report that shows only keywords that produced at least 5 unique users. 
+Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) + +## sumMapFiltered(keys\_to\_keep)(键值) {#summapfilteredkeys-to-keepkeys-values} + +同样的行为 [sumMap](reference.md#agg_functions-summap) 除了一个键数组作为参数传递。 这在使用高基数密钥时尤其有用。 diff --git a/docs/zh/sql_reference/aggregate_functions/reference.md b/docs/zh/sql_reference/aggregate_functions/reference.md new file mode 100644 index 00000000000..b8071860d41 --- /dev/null +++ b/docs/zh/sql_reference/aggregate_functions/reference.md @@ -0,0 +1,1878 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 36 +toc_title: "\u53C2\u8003\u8D44\u6599" +--- + +# 函数参考 {#function-reference} + +## 计数 {#agg_function-count} + +计数行数或非空值。 + +ClickHouse支持以下语法 `count`: +- `count(expr)` 或 `COUNT(DISTINCT expr)`. +- `count()` 或 `COUNT(*)`. 该 `count()` 语法是ClickHouse特定的。 + +**参数** + +该功能可以采取: + +- 零参数。 +- 一 [表达式](../syntax.md#syntax-expressions). + +**返回值** + +- 如果没有参数调用函数,它会计算行数。 +- 如果 [表达式](../syntax.md#syntax-expressions) 被传递,则该函数计数此表达式返回的次数非null。 如果表达式返回 [可为空](../../sql_reference/data_types/nullable.md)-键入值,然后结果 `count` 保持不 `Nullable`. 如果返回表达式,则该函数返回0 `NULL` 对于所有的行。 + +在这两种情况下,返回值的类型为 [UInt64](../../sql_reference/data_types/int_uint.md). + +**详细信息** + +ClickHouse支持 `COUNT(DISTINCT ...)` 语法 这种结构的行为取决于 [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) 设置。 它定义了其中的 [uniq\*](#agg_function-uniq) 函数用于执行操作。 默认值为 [uniqExact](#agg_function-uniqexact) 功能。 + +该 `SELECT count() FROM table` 查询未被优化,因为表中的条目数没有单独存储。 它从表中选择一个小列并计算其中的值数。 + +**例** + +示例1: + +``` sql +SELECT count() FROM t +``` + +``` text +┌─count()─┐ +│ 5 │ +└─────────┘ +``` + +示例2: + +``` sql +SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' +``` + +``` text +┌─name──────────────────────────┬─value─────┐ +│ count_distinct_implementation │ uniqExact │ +└───────────────────────────────┴───────────┘ +``` + +``` sql +SELECT count(DISTINCT num) FROM t +``` + +``` text +┌─uniqExact(num)─┐ +│ 3 │ +└────────────────┘ +``` + +这个例子表明 `count(DISTINCT num)` 由执行 `uniqExact` 根据功能 `count_distinct_implementation` 设定值。 + +## 任何(x) {#agg_function-any} + +选择第一个遇到的值。 +查询可以以任何顺序执行,甚至每次都以不同的顺序执行,因此此函数的结果是不确定的。 +要获得确定的结果,您可以使用 ‘min’ 或 ‘max’ 功能,而不是 ‘any’. + +在某些情况下,可以依靠执行的顺序。 这适用于SELECT来自使用ORDER BY的子查询的情况。 + +当一个 `SELECT` 查询具有 `GROUP BY` 子句或至少一个聚合函数,ClickHouse(相对于MySQL)要求在所有表达式 `SELECT`, `HAVING`,和 `ORDER BY` 子句可以从键或聚合函数计算。 换句话说,从表中选择的每个列必须在键或聚合函数内使用。 要获得像MySQL这样的行为,您可以将其他列放在 `any` 聚合函数。 + +## anyHeavy(x) {#anyheavyx} + +使用选择一个频繁出现的值 [重打者](http://www.cs.umd.edu/~samir/498/karp.pdf) 算法。 如果某个值在查询的每个执行线程中出现的情况超过一半,则返回此值。 通常情况下,结果是不确定的。 + +``` sql +anyHeavy(column) +``` + +**参数** + +- `column` – The column name. 
+ +**示例** + +就拿 [时间](../../getting_started/example_datasets/ontime.md) 数据集,并选择在任何频繁出现的值 `AirlineID` 列。 + +``` sql +SELECT anyHeavy(AirlineID) AS res +FROM ontime +``` + +``` text +┌───res─┐ +│ 19690 │ +└───────┘ +``` + +## anyLast(x) {#anylastx} + +选择遇到的最后一个值。 +其结果是一样不确定的 `any` 功能。 + +## 集团比特 {#groupbitand} + +按位应用 `AND` 对于一系列的数字。 + +``` sql +groupBitAnd(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt*` 类型。 + +**示例** + +测试数据: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +查询: + +``` sql +SELECT groupBitAnd(num) FROM t +``` + +哪里 `num` 是包含测试数据的列。 + +结果: + +``` text +binary decimal +00000100 = 4 +``` + +## groupBitOr {#groupbitor} + +按位应用 `OR` 对于一系列的数字。 + +``` sql +groupBitOr(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt*` 类型。 + +**示例** + +测试数据: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +查询: + +``` sql +SELECT groupBitOr(num) FROM t +``` + +哪里 `num` 是包含测试数据的列。 + +结果: + +``` text +binary decimal +01111101 = 125 +``` + +## groupBitXor {#groupbitxor} + +按位应用 `XOR` 对于一系列的数字。 + +``` sql +groupBitXor(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt*` 类型。 + +**示例** + +测试数据: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +查询: + +``` sql +SELECT groupBitXor(num) FROM t +``` + +哪里 `num` 是包含测试数据的列。 + +结果: + +``` text +binary decimal +01101000 = 104 +``` + +## groupBitmap {#groupbitmap} + +从无符号整数列的位图或聚合计算,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). + +``` sql +groupBitmap(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt64` 类型。 + +**示例** + +测试数据: + +``` text +UserID +1 +1 +2 +3 +``` + +查询: + +``` sql +SELECT groupBitmap(UserID) as num FROM t +``` + +结果: + +``` text +num +3 +``` + +## min(x) {#agg_function-min} + +计算最小值。 + +## max(x) {#agg_function-max} + +计算最大值。 + +## argMin(arg,val) {#agg-function-argmin} + +计算 ‘arg’ 最小值的值 ‘val’ 价值。 如果有几个不同的值 ‘arg’ 对于最小值 ‘val’,遇到的第一个值是输出。 + +**示例:** + +``` text +┌─user─────┬─salary─┐ +│ director │ 5000 │ +│ manager │ 3000 │ +│ worker │ 1000 │ +└──────────┴────────┘ +``` + +``` sql +SELECT argMin(user, salary) FROM salary +``` + +``` text +┌─argMin(user, salary)─┐ +│ worker │ +└──────────────────────┘ +``` + +## argMax(arg,val) {#agg-function-argmax} + +计算 ‘arg’ 最大值 ‘val’ 价值。 如果有几个不同的值 ‘arg’ 对于最大值 ‘val’,遇到的第一个值是输出。 + +## sum(x) {#agg_function-sum} + +计算总和。 +只适用于数字。 + +## sumWithOverflow(x) {#sumwithoverflowx} + +使用与输入参数相同的数据类型计算数字的总和。 如果总和超过此数据类型的最大值,则函数返回错误。 + +只适用于数字。 + +## sumMap(key,value) {#agg_functions-summap} + +总计 ‘value’ 数组根据在指定的键 ‘key’ 阵列。 +元素的数量 ‘key’ 和 ‘value’ 总计的每一行必须相同。 +Returns a tuple of two arrays: keys in sorted order, and values ​​summed for the corresponding keys. 
+ +示例: + +``` sql +CREATE TABLE sum_map( + date Date, + timeslot DateTime, + statusMap Nested( + status UInt16, + requests UInt64 + ) +) ENGINE = Log; +INSERT INTO sum_map VALUES + ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); +SELECT + timeslot, + sumMap(statusMap.status, statusMap.requests) +FROM sum_map +GROUP BY timeslot +``` + +``` text +┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐ +│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │ +│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │ +└─────────────────────┴──────────────────────────────────────────────┘ +``` + +## skewPop {#skewpop} + +计算 [歪斜](https://en.wikipedia.org/wiki/Skewness) 的序列。 + +``` sql +skewPop(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**示例** + +``` sql +SELECT skewPop(value) FROM series_with_value_column +``` + +## skewSamp {#skewsamp} + +计算 [样品偏度](https://en.wikipedia.org/wiki/Skewness) 的序列。 + +它表示随机变量的偏度的无偏估计,如果传递的值形成其样本。 + +``` sql +skewSamp(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). 如果 `n <= 1` (`n` 是样本的大小),则该函数返回 `nan`. + +**示例** + +``` sql +SELECT skewSamp(value) FROM series_with_value_column +``` + +## kurtPop {#kurtpop} + +计算 [峰度](https://en.wikipedia.org/wiki/Kurtosis) 的序列。 + +``` sql +kurtPop(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**示例** + +``` sql +SELECT kurtPop(value) FROM series_with_value_column +``` + +## kurtSamp {#kurtsamp} + +计算 [峰度样本](https://en.wikipedia.org/wiki/Kurtosis) 的序列。 + +它表示随机变量峰度的无偏估计,如果传递的值形成其样本。 + +``` sql +kurtSamp(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). 如果 `n <= 1` (`n` 是样本的大小),则该函数返回 `nan`. + +**示例** + +``` sql +SELECT kurtSamp(value) FROM series_with_value_column +``` + +## timeSeriesGroupSum(uid,timestamp,value) {#agg-function-timeseriesgroupsum} + +`timeSeriesGroupSum` 可以聚合不同的时间序列,即采样时间戳不对齐。 +它将在两个采样时间戳之间使用线性插值,然后将时间序列和在一起。 + +- `uid` 是时间序列唯一id, `UInt64`. 
+- `timestamp` 是Int64型,以支持毫秒或微秒。 +- `value` 是指标。 + +函数返回元组数组 `(timestamp, aggregated_value)` 对。 + +在使用此功能之前,请确保 `timestamp` 按升序排列 + +示例: + +``` text +┌─uid─┬─timestamp─┬─value─┐ +│ 1 │ 2 │ 0.2 │ +│ 1 │ 7 │ 0.7 │ +│ 1 │ 12 │ 1.2 │ +│ 1 │ 17 │ 1.7 │ +│ 1 │ 25 │ 2.5 │ +│ 2 │ 3 │ 0.6 │ +│ 2 │ 8 │ 1.6 │ +│ 2 │ 12 │ 2.4 │ +│ 2 │ 18 │ 3.6 │ +│ 2 │ 24 │ 4.8 │ +└─────┴───────────┴───────┘ +``` + +``` sql +CREATE TABLE time_series( + uid UInt64, + timestamp Int64, + value Float64 +) ENGINE = Memory; +INSERT INTO time_series VALUES + (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5), + (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8); + +SELECT timeSeriesGroupSum(uid, timestamp, value) +FROM ( + SELECT * FROM time_series order by timestamp ASC +); +``` + +其结果将是: + +``` text +[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)] +``` + +## timeSeriesGroupRateSum(uid,ts,val) {#agg-function-timeseriesgroupratesum} + +同样,timeSeriesGroupRateSum,timeSeriesGroupRateSum将计算时间序列的速率,然后将速率总和在一起。 +此外,使用此函数之前,时间戳应该是上升顺序。 + +使用此函数,上述情况下的结果将是: + +``` text +[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)] +``` + +## avg(x) {#agg_function-avg} + +计算平均值。 +只适用于数字。 +结果总是Float64。 + +## 平均加权 {#avgweighted} + +计算 [加权算术平均值](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean). + +**语法** + +``` sql +avgWeighted(x, weight) +``` + +**参数** + +- `x` — Values. [整数](../data_types/int_uint.md) 或 [浮点](../data_types/float.md). +- `weight` — Weights of the values. [整数](../data_types/int_uint.md) 或 [浮点](../data_types/float.md). + +类型 `x` 和 `weight` 一定是一样的 + +**返回值** + +- 加权平均值。 +- `NaN`. 如果所有的权重都等于0。 + +类型: [Float64](../data_types/float.md). + +**示例** + +查询: + +``` sql +SELECT avgWeighted(x, w) +FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2)) +``` + +结果: + +``` text +┌─avgWeighted(x, weight)─┐ +│ 8 │ +└────────────────────────┘ +``` + +## uniq {#agg_function-uniq} + +计算参数的不同值的近似数量。 + +``` sql +uniq(x[, ...]) +``` + +**参数** + +该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。 + +**返回值** + +- A [UInt64](../../sql_reference/data_types/int_uint.md)-键入号码。 + +**实施细节** + +功能: + +- 计算聚合中所有参数的哈希值,然后在计算中使用它。 + +- 使用自适应采样算法。 对于计算状态,该函数使用最多65536个元素哈希值的样本。 + + This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions. + +- 确定性地提供结果(它不依赖于查询处理顺序)。 + +我们建议在几乎所有情况下使用此功能。 + +**另请参阅** + +- [uniqCombined](#agg_function-uniqcombined) +- [uniqCombined64](#agg_function-uniqcombined64) +- [uniqHLL12](#agg_function-uniqhll12) +- [uniqExact](#agg_function-uniqexact) + +## uniqCombined {#agg_function-uniqcombined} + +计算不同参数值的近似数量。 + +``` sql +uniqCombined(HLL_precision)(x[, ...]) +``` + +该 `uniqCombined` 函数是计算不同数值数量的不错选择。 + +**参数** + +该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。 + +`HLL_precision` 是以2为底的单元格数的对数 [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). 可选,您可以将该函数用作 `uniqCombined(x[, ...])`. 默认值 `HLL_precision` 是17,这是有效的96KiB的空间(2^17个单元,每个6比特)。 + +**返回值** + +- 一个数字 [UInt64](../../sql_reference/data_types/int_uint.md)-键入号码。 + +**实施细节** + +功能: + +- 计算散列(64位散列 `String` 否则32位)对于聚合中的所有参数,然后在计算中使用它。 + +- 使用三种算法的组合:数组、哈希表和HyperLogLog与error错表。 + + For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory. + +- 确定性地提供结果(它不依赖于查询处理顺序)。 + +!!! 
note "注" + 因为它使用32位散列非-`String` 类型,结果将有非常高的误差基数显着大于 `UINT_MAX` (错误将在几百亿不同值之后迅速提高),因此在这种情况下,您应该使用 [uniqCombined64](#agg_function-uniqcombined64) + +相比于 [uniq](#agg_function-uniq) 功能,该 `uniqCombined`: + +- 消耗少几倍的内存。 +- 计算精度高出几倍。 +- 通常具有略低的性能。 在某些情况下, `uniqCombined` 可以表现得比 `uniq`,例如,使用通过网络传输大量聚合状态的分布式查询。 + +**另请参阅** + +- [uniq](#agg_function-uniq) +- [uniqCombined64](#agg_function-uniqcombined64) +- [uniqHLL12](#agg_function-uniqhll12) +- [uniqExact](#agg_function-uniqexact) + +## uniqCombined64 {#agg_function-uniqcombined64} + +和 [uniqCombined](#agg_function-uniqcombined),但对所有数据类型使用64位哈希。 + +## uniqHLL12 {#agg_function-uniqhll12} + +计算不同参数值的近似数量,使用 [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) 算法。 + +``` sql +uniqHLL12(x[, ...]) +``` + +**参数** + +该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。 + +**返回值** + +- A [UInt64](../../sql_reference/data_types/int_uint.md)-键入号码。 + +**实施细节** + +功能: + +- 计算聚合中所有参数的哈希值,然后在计算中使用它。 + +- 使用HyperLogLog算法来近似不同参数值的数量。 + + 212 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements). + +- 提供确定结果(它不依赖于查询处理顺序)。 + +我们不建议使用此功能。 在大多数情况下,使用 [uniq](#agg_function-uniq) 或 [uniqCombined](#agg_function-uniqcombined) 功能。 + +**另请参阅** + +- [uniq](#agg_function-uniq) +- [uniqCombined](#agg_function-uniqcombined) +- [uniqExact](#agg_function-uniqexact) + +## uniqExact {#agg_function-uniqexact} + +计算不同参数值的准确数目。 + +``` sql +uniqExact(x[, ...]) +``` + +使用 `uniqExact` 功能,如果你绝对需要一个确切的结果。 否则使用 [uniq](#agg_function-uniq) 功能。 + +该 `uniqExact` 功能使用更多的内存比 `uniq`,因为状态的大小随着不同值的数量的增加而无界增长。 + +**参数** + +该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。 + +**另请参阅** + +- [uniq](#agg_function-uniq) +- [uniqCombined](#agg_function-uniqcombined) +- [uniqHLL12](#agg_function-uniqhll12) + +## 群交(x),群交(max\_size)(x) {#agg_function-grouparray} + +创建参数值的数组。 +值可以按任何(不确定)顺序添加到数组中。 + +第二个版本(与 `max_size` 参数)将结果数组的大小限制为 `max_size` 元素。 +例如, `groupArray (1) (x)` 相当于 `[any (x)]`. + +在某些情况下,您仍然可以依靠执行的顺序。 这适用于以下情况 `SELECT` 来自使用 `ORDER BY`. + +## groupArrayInsertAt(值,位置) {#grouparrayinsertatvalue-position} + +将值插入到数组中的指定位置中。 + +!!! note "注" + 此函数使用从零开始的位置,与传统SQL数组的从一开始的位置相反。 + +Accepts the value and position as input. If several values ​​are inserted into the same position, any of them might end up in the resulting array (the first one will be used in the case of single-threaded execution). If no value is inserted into a position, the position is assigned the default value. + +可选参数: + +- 在空位置替换的默认值。 +- 生成数组的长度。 这允许您接收所有聚合键的相同大小的数组。 使用此参数时,必须指定默认值。 + +## groupArrayMovingSum {#agg_function-grouparraymovingsum} + +计算输入值的移动和。 + +``` sql +groupArrayMovingSum(numbers_for_summing) +groupArrayMovingSum(window_size)(numbers_for_summing) +``` + +该函数可以将窗口大小作为参数。 如果未指定,则该函数的窗口大小等于列中的行数。 + +**参数** + +- `numbers_for_summing` — [表达式](../syntax.md#syntax-expressions) 生成数值数据类型值。 +- `window_size` — Size of the calculation window. 
+
+**返回值**
+
+- 与输入数据大小和类型相同的数组。
+
+**示例**
+
+样品表:
+
+``` sql
+CREATE TABLE t
+(
+    `int` UInt8,
+    `float` Float32,
+    `dec` Decimal32(2)
+)
+ENGINE = TinyLog
+```
+
+``` text
+┌─int─┬─float─┬──dec─┐
+│   1 │   1.1 │ 1.10 │
+│   2 │   2.2 │ 2.20 │
+│   4 │   4.4 │ 4.40 │
+│   7 │  7.77 │ 7.77 │
+└─────┴───────┴──────┘
+```
+
+查询:
+
+``` sql
+SELECT
+    groupArrayMovingSum(int) AS I,
+    groupArrayMovingSum(float) AS F,
+    groupArrayMovingSum(dec) AS D
+FROM t
+```
+
+``` text
+┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
+│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │
+└────────────┴─────────────────────────────────┴────────────────────────┘
+```
+
+``` sql
+SELECT
+    groupArrayMovingSum(2)(int) AS I,
+    groupArrayMovingSum(2)(float) AS F,
+    groupArrayMovingSum(2)(dec) AS D
+FROM t
+```
+
+``` text
+┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
+│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │
+└────────────┴─────────────────────────────────┴────────────────────────┘
+```
+
+## groupArrayMovingAvg {#agg_function-grouparraymovingavg}
+
+计算输入值的移动平均值。
+
+``` sql
+groupArrayMovingAvg(numbers_for_summing)
+groupArrayMovingAvg(window_size)(numbers_for_summing)
+```
+
+该函数可以将窗口大小作为参数。 如果未指定,则该函数的窗口大小等于列中的行数。
+
+**参数**
+
+- `numbers_for_summing` — [表达式](../syntax.md#syntax-expressions) 生成数值数据类型值。
+- `window_size` — Size of the calculation window.
+
+**返回值**
+
+- 与输入数据大小和类型相同的数组。
+
+该函数使用 [四舍五入到零](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). 它截断结果数据类型的小数位数。
+
+**示例**
+
+样品表 `b`:
+
+``` sql
+CREATE TABLE t
+(
+    `int` UInt8,
+    `float` Float32,
+    `dec` Decimal32(2)
+)
+ENGINE = TinyLog
+```
+
+``` text
+┌─int─┬─float─┬──dec─┐
+│   1 │   1.1 │ 1.10 │
+│   2 │   2.2 │ 2.20 │
+│   4 │   4.4 │ 4.40 │
+│   7 │  7.77 │ 7.77 │
+└─────┴───────┴──────┘
+```
+
+查询:
+
+``` sql
+SELECT
+    groupArrayMovingAvg(int) AS I,
+    groupArrayMovingAvg(float) AS F,
+    groupArrayMovingAvg(dec) AS D
+FROM t
+```
+
+``` text
+┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐
+│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │
+└───────────┴─────────────────────────────────────┴───────────────────────┘
+```
+
+``` sql
+SELECT
+    groupArrayMovingAvg(2)(int) AS I,
+    groupArrayMovingAvg(2)(float) AS F,
+    groupArrayMovingAvg(2)(dec) AS D
+FROM t
+```
+
+``` text
+┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐
+│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │
+└───────────┴──────────────────────────────────┴───────────────────────┘
+```
+
+## groupUniqArray(x), groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex}
+
+从不同的参数值创建一个数组。 内存消耗与 `uniqExact` 函数相同。
+
+第二个版本(带 `max_size` 参数)将结果数组的大小限制为 `max_size` 个元素。
+例如, `groupUniqArray(1)(x)` 相当于 `[any(x)]`.
+
+## 分位数 {#quantile}
+
+计算数字数据序列的近似[分位数](https://en.wikipedia.org/wiki/Quantile)。
+
+此功能采用[油藏采样](https://en.wikipedia.org/wiki/Reservoir_sampling),储存器大小最多为8192,并使用随机数发生器进行采样。 结果是非确定性的。 要获得精确的分位数,请使用 [quantileExact](#quantileexact) 功能。
+
+当在查询中使用多个具有不同级别的 `quantile*` 函数时,内部状态不会被组合(即查询的效率低于理论上可达到的水平)。 在这种情况下,请使用 [分位数](#quantiles) 功能。
+
+**语法**
+
+``` sql
+quantile(level)(expr)
+```
+
+别名: `median`.
+
+**参数**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). + +**返回值** + +- 指定电平的近似分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +输入表: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +查询: + +``` sql +SELECT quantile(val) FROM t +``` + +结果: + +``` text +┌─quantile(val)─┐ +│ 1.5 │ +└───────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 量化确定 {#quantiledeterministic} + +计算近似值 [分位数](https://en.wikipedia.org/wiki/Quantile) 的数字数据序列。 + +此功能适用 [油藏采样](https://en.wikipedia.org/wiki/Reservoir_sampling) 与储层大小高达8192和采样的确定性算法。 结果是确定性的。 要获得精确的分位数,请使用 [quantileExact](#quantileexact) 功能。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileDeterministic(level)(expr, determinator) +``` + +别名: `medianDeterministic`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). +- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occures too often, the function works incorrectly. + +**返回值** + +- 指定电平的近似分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +输入表: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +查询: + +``` sql +SELECT quantileDeterministic(val, 1) FROM t +``` + +结果: + +``` text +┌─quantileDeterministic(val, 1)─┐ +│ 1.5 │ +└───────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## quantileExact {#quantileexact} + +正是计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 的数字数据序列。 + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` 内存,其中 `n` 是传递的多个值。 然而,对于少量的值,该函数是非常有效的。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileExact(level)(expr) +``` + +别名: `medianExact`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). 
+ +**返回值** + +- 指定电平的分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +查询: + +``` sql +SELECT quantileExact(number) FROM numbers(10) +``` + +结果: + +``` text +┌─quantileExact(number)─┐ +│ 5 │ +└───────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 分位数加权 {#quantileexactweighted} + +正是计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 数值数据序列,考虑到每个元素的权重。 + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values ​​are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). 您可以使用此功能,而不是 `quantileExact` 并指定重量1。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileExactWeighted(level)(expr, weight) +``` + +别名: `medianExactWeighted`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). +- `weight` — Column with weights of sequence members. Weight is a number of value occurrences. + +**返回值** + +- 指定电平的分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +输入表: + +``` text +┌─n─┬─val─┐ +│ 0 │ 3 │ +│ 1 │ 2 │ +│ 2 │ 1 │ +│ 5 │ 4 │ +└───┴─────┘ +``` + +查询: + +``` sql +SELECT quantileExactWeighted(n, val) FROM t +``` + +结果: + +``` text +┌─quantileExactWeighted(n, val)─┐ +│ 1 │ +└───────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 分位定时 {#quantiletiming} + +随着确定的精度计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 的数字数据序列。 + +结果是确定性的(它不依赖于查询处理顺序)。 该函数针对描述加载网页时间或后端响应时间等分布的序列进行了优化。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileTiming(level)(expr) +``` + +别名: `medianTiming`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). + +- `expr` — [表达式](../syntax.md#syntax-expressions) 在一个列值返回 [浮动\*](../../sql_reference/data_types/float.md)-键入号码。 + + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +**精度** + +计算是准确的,如果: + +- 值的总数不超过5670。 +- 总数值超过5670,但页面加载时间小于1024ms。 + +否则,计算结果将四舍五入到16毫秒的最接近倍数。 + +!!! note "注" + 对于计算页面加载时间分位数,此函数比 [分位数](#quantile). + +**返回值** + +- 指定电平的分位数。 + +类型: `Float32`. + +!!! 
note "注" + 如果没有值传递给函数(当使用 `quantileTimingIf`), [阿南](../../sql_reference/data_types/float.md#data_type-float-nan-inf) 被返回。 这样做的目的是将这些案例与导致零的案例区分开来。 看 [按条款订购](../statements/select.md#select-order-by) 对于排序注意事项 `NaN` 值。 + +**示例** + +输入表: + +``` text +┌─response_time─┐ +│ 72 │ +│ 112 │ +│ 126 │ +│ 145 │ +│ 104 │ +│ 242 │ +│ 313 │ +│ 168 │ +│ 108 │ +└───────────────┘ +``` + +查询: + +``` sql +SELECT quantileTiming(response_time) FROM t +``` + +结果: + +``` text +┌─quantileTiming(response_time)─┐ +│ 126 │ +└───────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 分位时间加权 {#quantiletimingweighted} + +随着确定的精度计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 根据每个序列成员的权重对数字数据序列进行处理。 + +结果是确定性的(它不依赖于查询处理顺序)。 该函数针对描述加载网页时间或后端响应时间等分布的序列进行了优化。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileTimingWeighted(level)(expr, weight) +``` + +别名: `medianTimingWeighted`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). + +- `expr` — [表达式](../syntax.md#syntax-expressions) 在一个列值返回 [浮动\*](../../sql_reference/data_types/float.md)-键入号码。 + + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. + +**精度** + +计算是准确的,如果: + +- 值的总数不超过5670。 +- 总数值超过5670,但页面加载时间小于1024ms。 + +否则,计算结果将四舍五入到16毫秒的最接近倍数。 + +!!! note "注" + 对于计算页面加载时间分位数,此函数比 [分位数](#quantile). + +**返回值** + +- 指定电平的分位数。 + +类型: `Float32`. + +!!! note "注" + 如果没有值传递给函数(当使用 `quantileTimingIf`), [阿南](../../sql_reference/data_types/float.md#data_type-float-nan-inf) 被返回。 这样做的目的是将这些案例与导致零的案例区分开来。 看 [按条款订购](../statements/select.md#select-order-by) 对于排序注意事项 `NaN` 值。 + +**示例** + +输入表: + +``` text +┌─response_time─┬─weight─┐ +│ 68 │ 1 │ +│ 104 │ 2 │ +│ 112 │ 3 │ +│ 126 │ 2 │ +│ 138 │ 1 │ +│ 162 │ 1 │ +└───────────────┴────────┘ +``` + +查询: + +``` sql +SELECT quantileTimingWeighted(response_time, weight) FROM t +``` + +结果: + +``` text +┌─quantileTimingWeighted(response_time, weight)─┐ +│ 112 │ +└───────────────────────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## quantileTDigest {#quantiletdigest} + +计算近似值 [分位数](https://en.wikipedia.org/wiki/Quantile) 使用的数字数据序列 [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) 算法。 + +最大误差为1%。 内存消耗 `log(n)`,哪里 `n` 是多个值。 结果取决于运行查询的顺序,并且是不确定的。 + +该功能的性能低于性能 [分位数](#quantile) 或 [分位定时](#quantiletiming). 在状态大小与精度的比率方面,这个函数比 `quantile`. + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileTDigest(level)(expr) +``` + +别名: `medianTDigest`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). 
+
+**返回值**
+
+- 指定级别的近似分位数。
+
+类型:
+
+- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。
+- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。
+- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。
+
+**示例**
+
+查询:
+
+``` sql
+SELECT quantileTDigest(number) FROM numbers(10)
+```
+
+结果:
+
+``` text
+┌─quantileTDigest(number)─┐
+│                     4.5 │
+└─────────────────────────┘
+```
+
+**另请参阅**
+
+- [中位数](#median)
+- [分位数](#quantiles)
+
+## quantileTDigestWeighted {#quantiletdigestweighted}
+
+使用 [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) 算法计算数字数据序列的近似 [分位数](https://en.wikipedia.org/wiki/Quantile)。该函数会考虑每个序列成员的权重。最大误差为 1%。内存消耗为 `log(n)`,其中 `n` 是值的数量。
+
+该函数的性能低于 [分位数](#quantile) 或 [分位定时](#quantiletiming)。但就状态大小与精度的比率而言,此函数优于 `quantile`。
+
+结果取决于查询的运行顺序,是非确定性的。
+
+当在一个查询中使用多个不同级别的 `quantile*` 函数时,它们的内部状态不会被合并(也就是说,查询的效率低于理论上可达到的水平)。在这种情况下,请使用 [分位数](#quantiles) 函数。
+
+**语法**
+
+``` sql
+quantileTDigestWeighted(level)(expr, weight)
+```
+
+别名: `medianTDigestWeighted`.
+
+**参数**
+
+- `level` — 分位数级别。可选参数。取值为 0 到 1 之间的浮点常量。建议使用 `[0.01, 0.99]` 范围内的 `level` 值。默认值:0.5。当 `level=0.5` 时,该函数计算 [中位数](https://en.wikipedia.org/wiki/Median)。
+- `expr` — 作用于列值的表达式,结果为数字 [数据类型](../../sql_reference/data_types/index.md#data_types)、[日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md) 类型。
+- `weight` — 序列元素权重所在的列。权重表示值出现的次数。
+
+**返回值**
+
+- 指定级别的近似分位数。
+
+类型:
+
+- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。
+- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。
+- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。
+
+**示例**
+
+查询:
+
+``` sql
+SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
+```
+
+结果:
+
+``` text
+┌─quantileTDigestWeighted(number, 1)─┐
+│                                4.5 │
+└────────────────────────────────────┘
+```
+
+**另请参阅**
+
+- [中位数](#median)
+- [分位数](#quantiles)
+
+## 中位数 {#median}
+
+`median*` 函数是相应 `quantile*` 函数的别名,用于计算数字数据样本的中位数。
+
+函数:
+
+- `median` — [分位数](#quantile) 的别名。
+- `medianDeterministic` — [量化确定](#quantiledeterministic) 的别名。
+- `medianExact` — [quantileExact](#quantileexact) 的别名。
+- `medianExactWeighted` — [分位数加权](#quantileexactweighted) 的别名。
+- `medianTiming` — [分位定时](#quantiletiming) 的别名。
+- `medianTimingWeighted` — [分位时间加权](#quantiletimingweighted) 的别名。
+- `medianTDigest` — [quantileTDigest](#quantiletdigest) 的别名。
+- `medianTDigestWeighted` — [quantileTDigestWeighted](#quantiletdigestweighted) 的别名。
+
+**示例**
+
+输入表:
+
+``` text
+┌─val─┐
+│   1 │
+│   1 │
+│   2 │
+│   3 │
+└─────┘
+```
+
+查询:
+
+``` sql
+SELECT medianDeterministic(val, 1) FROM t
+```
+
+结果:
+
+``` text
+┌─medianDeterministic(val, 1)─┐
+│                         1.5 │
+└─────────────────────────────┘
+```
+
+## quantiles(level1, level2, …)(x) {#quantiles}
+
+所有 `quantile*` 函数都有对应的 `quantiles*` 函数: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. 这些函数在一次遍历中计算所列级别的所有分位数,并返回结果值的数组。
+
+## varSamp(x) {#varsampx}
+
+计算 `Σ((x - x̅)^2) / (n - 1)` 的值,其中 `n` 是样本大小,`x̅` 是 `x` 的平均值。
+
+如果传入的值构成随机变量的一个样本,则该结果是此随机变量方差的无偏估计。
+
+返回 `Float64`。当 `n <= 1` 时,返回 `+∞`。
+
+## varPop(x) {#varpopx}
+
+计算 `Σ((x - x̅)^2) / n` 的值,其中 `n` 是样本大小,`x̅` 是 `x` 的平均值。
+
+换句话说,即一组值的总体方差。返回 `Float64`。
+
+## stddevSamp(x) {#stddevsampx}
+
+结果等于 `varSamp(x)` 的平方根。
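+
+作为补充,下面给出一个演示性查询(仅为示意,使用 `numbers` 表函数生成 0 到 9 的序列),用于验证 `varSamp`、`varPop` 与 `stddevSamp` 之间的关系:
+
+``` sql
+SELECT
+    varSamp(number) AS var_s,       -- Σ((x - x̅)^2) / (n - 1),对 0..9 约为 9.17
+    stddevSamp(number) AS stddev_s, -- sqrt(varSamp(number)),约为 3.03
+    varPop(number) AS var_p         -- Σ((x - x̅)^2) / n,对 0..9 为 8.25
+FROM numbers(10)
+```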
+ +## stddevPop(x) {#stddevpopx} + +结果等于平方根 `varPop(x)`. + +## topK(N)(x) {#topknx} + +返回指定列中近似最常见值的数组。 生成的数组按值的近似频率降序排序(而不是值本身)。 + +实现了 [过滤节省空间](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) 基于reduce-and-combine算法的TopK分析算法 [并行节省空间](https://arxiv.org/pdf/1401.0702.pdf). + +``` sql +topK(N)(column) +``` + +此函数不提供保证的结果。 在某些情况下,可能会发生错误,并且可能会返回不是最常见值的常见值。 + +我们建议使用 `N < 10` 值;性能降低了大 `N` 值。 的最大值 `N = 65536`. + +**参数** + +- ‘N’ 是要返回的元素数。 + +如果省略该参数,则使用默认值10。 + +**参数** + +- ' x ' – The value to calculate frequency. + +**示例** + +就拿 [时间](../../getting_started/example_datasets/ontime.md) 数据集,并选择在三个最频繁出现的值 `AirlineID` 列。 + +``` sql +SELECT topK(3)(AirlineID) AS res +FROM ontime +``` + +``` text +┌─res─────────────────┐ +│ [19393,19790,19805] │ +└─────────────────────┘ +``` + +## topKWeighted {#topkweighted} + +类似于 `topK` 但需要一个整数类型的附加参数 - `weight`. 每一价值是占 `weight` 次频率计算。 + +**语法** + +``` sql +topKWeighted(N)(x, weight) +``` + +**参数** + +- `N` — The number of elements to return. + +**参数** + +- `x` – The value. +- `weight` — The weight. [UInt8](../../sql_reference/data_types/int_uint.md). + +**返回值** + +返回具有最大近似权重总和的值数组。 + +**示例** + +查询: + +``` sql +SELECT topKWeighted(10)(number, number) FROM numbers(1000) +``` + +结果: + +``` text +┌─topKWeighted(10)(number, number)──────────┐ +│ [999,998,997,996,995,994,993,992,991,990] │ +└───────────────────────────────────────────┘ +``` + +## covarSamp(x,y) {#covarsampx-y} + +计算的值 `Σ((x - x̅)(y - y̅)) / (n - 1)`. + +返回Float64。 当 `n <= 1`, returns +∞. + +## covarPop(x,y) {#covarpopx-y} + +计算的值 `Σ((x - x̅)(y - y̅)) / n`. + +## corr(x,y) {#corrx-y} + +计算Pearson相关系数: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`. + +## categoricalInformationValue {#categoricalinformationvalue} + +计算的值 `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` 对于每个类别。 + +``` sql +categoricalInformationValue(category1, category2, ..., tag) +``` + +结果指示离散(分类)要素如何使用 `[category1, category2, ...]` 有助于预测的价值的学习模型 `tag`. + +## simpleLinearRegression {#simplelinearregression} + +执行简单(一维)线性回归。 + +``` sql +simpleLinearRegression(x, y) +``` + +参数: + +- `x` — Column with dependent variable values. +- `y` — Column with explanatory variable values. + +返回值: + +常量 `(a, b)` 结果行的 `y = a*x + b`. + +**例** + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐ +│ (1,0) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐ +│ (1,3) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## 随机指标线上回归 {#agg_functions-stochasticlinearregression} + +该函数实现随机线性回归。 它支持自定义参数的学习率,L2正则化系数,迷你批量大小,并具有更新权重的方法很少 ([亚当](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (默认使用), [简单SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [动量](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). + +### 参数 {#agg_functions-stochasticlinearregression-parameters} + +有4个可自定义的参数。 它们按顺序传递给函数,但是没有必要传递所有四个默认值将被使用,但是好的模型需要一些参数调整。 + +``` text +stochasticLinearRegression(1.0, 1.0, 10, 'SGD') +``` + +1. `learning rate` 当执行梯度下降步骤时,步长上的系数。 过大的学习率可能会导致模型的权重无限大。 默认值为 `0.00001`. +2. `l2 regularization coefficient` 这可能有助于防止过度拟合。 默认值为 `0.1`. +3. 
`mini-batch size` 设置元素的数量,这些元素将被计算和求和以执行梯度下降的一个步骤。 纯随机下降使用一个元素,但是具有小批量(约10个元素)使梯度步骤更稳定。 默认值为 `15`. +4. `method for updating weights` 他们是: `Adam` (默认情况下), `SGD`, `Momentum`, `Nesterov`. `Momentum` 和 `Nesterov` 需要更多的计算和内存,但是它们恰好在收敛速度和随机梯度方法的稳定性方面是有用的。 + +### 用途 {#agg_functions-stochasticlinearregression-usage} + +`stochasticLinearRegression` 用于两个步骤:拟合模型和预测新数据。 为了拟合模型并保存其状态以供以后使用,我们使用 `-State` combinator,它基本上保存了状态(模型权重等)。 +为了预测我们使用函数 [evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod),这需要一个状态作为参数以及特征来预测。 + + + +**1.** 适合 + +可以使用这种查询。 + +``` sql +CREATE TABLE IF NOT EXISTS train_data +( + param1 Float64, + param2 Float64, + target Float64 +) ENGINE = Memory; + +CREATE TABLE your_model ENGINE = Memory AS SELECT +stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) +AS state FROM train_data; +``` + +在这里,我们还需要将数据插入到 `train_data` 桌子 参数的数量不是固定的,它只取决于参数的数量,传递到 `linearRegressionState`. 它们都必须是数值。 +请注意,带有目标值的列(我们想要学习预测)被插入作为第一个参数。 + +**2.** 预测 + +在将状态保存到表中之后,我们可以多次使用它进行预测,甚至与其他状态合并并创建新的更好的模型。 + +``` sql +WITH (SELECT state FROM your_model) AS model SELECT +evalMLMethod(model, param1, param2) FROM test_data +``` + +查询将返回一列预测值。 请注意,第一个参数 `evalMLMethod` 是 `AggregateFunctionState` 对象,接下来是要素列。 + +`test_data` 是一个像表 `train_data` 但可能不包含目标值。 + +### 注 {#agg_functions-stochasticlinearregression-notes} + +1. 要合并两个模型,用户可以创建这样的查询: + `sql SELECT state1 + state2 FROM your_models` + 哪里 `your_models` 表包含这两个模型。 此查询将返回new `AggregateFunctionState` 对象。 + +2. 如果没有,用户可以获取创建的模型的权重用于自己的目的,而不保存模型 `-State` 使用combinator。 + `sql SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data` + 这种查询将拟合模型并返回其权重-首先是权重,它对应于模型的参数,最后一个是偏差。 所以在上面的例子中,查询将返回一个具有3个值的列。 + +**另请参阅** + +- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression) +- [线性回归和逻辑回归之间的区别](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression) + +## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression} + +该函数实现随机逻辑回归。 它可以用于二进制分类问题,支持与stochasticLinearRegression相同的自定义参数,并以相同的方式工作。 + +### 参数 {#agg_functions-stochasticlogisticregression-parameters} + +参数与stochasticLinearRegression中的参数完全相同: +`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`. +欲了解更多信息,请参阅 [参数](#agg_functions-stochasticlinearregression-parameters). + +``` text +stochasticLogisticRegression(1.0, 1.0, 10, 'SGD') +``` + +1. 适合 + + + + See the `Fitting` section in the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description. + + Predicted labels have to be in \[-1, 1\]. + +1. 预测 + + + + Using saved state we can predict probability of object having label `1`. + + ``` sql + WITH (SELECT state FROM your_model) AS model SELECT + evalMLMethod(model, param1, param2) FROM test_data + ``` + + The query will return a column of probabilities. Note that first argument of `evalMLMethod` is `AggregateFunctionState` object, next are columns of features. + + We can also set a bound of probability, which assigns elements to different labels. + + ``` sql + SELECT ans < 1.1 AND ans > 0.5 FROM + (WITH (SELECT state FROM your_model) AS model SELECT + evalMLMethod(model, param1, param2) AS ans FROM test_data) + ``` + + Then the result will be labels. + + `test_data` is a table like `train_data` but may not contain target value. 
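+
+作为上述拟合与预测步骤的补充,下面给出一个拟合示意查询(假设存在与 stochasticLinearRegression 示例中结构相同的 `train_data` 表,其中 `target` 取值为 -1 或 1;表名与参数值仅为演示用途,`-State` combinator 同样适用于 `stochasticLogisticRegression`):
+
+``` sql
+CREATE TABLE your_model ENGINE = Memory AS SELECT
+stochasticLogisticRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+AS state FROM train_data;
+```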
+ +**另请参阅** + +- [随机指标线上回归](#agg_functions-stochasticlinearregression) +- [线性回归和逻辑回归之间的差异。](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression) + +## groupBitmapAnd {#groupbitmapand} + +计算位图列的AND,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). + +``` sql +groupBitmapAnd(expr) +``` + +**参数** + +`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` 类型。 + +**返回值** + +的价值 `UInt64` 类型。 + +**示例** + +``` sql +DROP TABLE IF EXISTS bitmap_column_expr_test2; +CREATE TABLE bitmap_column_expr_test2 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +ORDER BY tag_id; + +INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32)))); + +SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─groupBitmapAnd(z)─┐ +│ 3 │ +└───────────────────┘ + +SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐ +│ [6,8,10] │ +└──────────────────────────────────────────────────┘ +``` + +## groupBitmapOr {#groupbitmapor} + +计算位图列的OR,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). 这相当于 `groupBitmapMerge`. + +``` sql +groupBitmapOr(expr) +``` + +**参数** + +`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` 类型。 + +**返回值** + +的价值 `UInt64` 类型。 + +**示例** + +``` sql +DROP TABLE IF EXISTS bitmap_column_expr_test2; +CREATE TABLE bitmap_column_expr_test2 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +ORDER BY tag_id; + +INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32)))); + +SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─groupBitmapOr(z)─┐ +│ 15 │ +└──────────────────┘ + +SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐ +│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] │ +└─────────────────────────────────────────────────┘ +``` + +## groupBitmapXor {#groupbitmapxor} + +计算位图列的XOR,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). 
+
+``` sql
+groupBitmapXor(expr)
+```
+
+**参数**
+
+`expr` – 结果为 `AggregateFunction(groupBitmap, UInt*)` 类型的表达式。
+
+**返回值**
+
+`UInt64` 类型的值。
+
+**示例**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapXor(z)─┐
+│                10 │
+└───────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
+│ [1,3,5,6,8,10,11,13,14,15]                       │
+└──────────────────────────────────────────────────┘
+```
+
+[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/)
diff --git a/docs/zh/sql_reference/data_types/aggregatefunction.md b/docs/zh/sql_reference/data_types/aggregatefunction.md
new file mode 100644
index 00000000000..e1fb7b1d133
--- /dev/null
+++ b/docs/zh/sql_reference/data_types/aggregatefunction.md
@@ -0,0 +1,64 @@
+
+# AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction}
+
+聚合函数的中间状态,可以通过聚合函数名称加`-State`后缀的形式得到它。与此同时,当您需要访问该类型的最终状态数据时,您需要以相同的聚合函数名加`-Merge`后缀的形式来得到最终状态数据。
+
+`AggregateFunction` — 参数化的数据类型。
+
+**参数**
+
+- 聚合函数名
+
+      如果函数具备多个参数列表,请在此处指定其他参数列表中的值。
+
+- 聚合函数参数的类型
+
+**示例**
+
+``` sql
+CREATE TABLE t
+(
+    column1 AggregateFunction(uniq, UInt64),
+    column2 AggregateFunction(anyIf, String, UInt8),
+    column3 AggregateFunction(quantiles(0.5, 0.9), UInt64)
+) ENGINE = ...
+``` + +上述中的[uniq](../../sql_reference/data_types/aggregatefunction.md#agg_function-uniq), anyIf ([任何](../../sql_reference/data_types/aggregatefunction.md#agg_function-any)+[如果](../../sql_reference/data_types/aggregatefunction.md#agg-functions-combinator-if)) 以及 [分位数](../../sql_reference/data_types/aggregatefunction.md) 都为ClickHouse中支持的聚合函数。 + +## 使用指南 {#shi-yong-zhi-nan} + +### 数据写入 {#shu-ju-xie-ru} + +当需要写入数据时,您需要将数据包含在`INSERT SELECT`语句中,同时对于`AggregateFunction`类型的数据,您需要使用对应的以`-State`为后缀的函数进行处理。 + +**函数使用示例** + +``` sql +uniqState(UserID) +quantilesState(0.5, 0.9)(SendTiming) +``` + +不同于`uniq`和`quantiles`函数返回聚合结果的最终值,以`-State`后缀的函数总是返回`AggregateFunction`类型的数据的中间状态。 + +对于`SELECT`而言,`AggregateFunction`类型总是以特定的二进制形式展现在所有的输出格式中。例如,您可以使用`SELECT`语句将函数的状态数据转储为`TabSeparated`格式的同时使用`INSERT`语句将数据转储回去。 + +### 数据查询 {#shu-ju-cha-xun} + +当从`AggregatingMergeTree`表中查询数据时,对于`AggregateFunction`类型的字段,您需要使用以`-Merge`为后缀的相同聚合函数来聚合数据。对于非`AggregateFunction`类型的字段,请将它们包含在`GROUP BY`子句中。 + +以`-Merge`为后缀的聚合函数,可以将多个`AggregateFunction`类型的中间状态组合计算为最终的聚合结果。 + +例如,如下的两个查询返回的结果总是一致: + +``` sql +SELECT uniq(UserID) FROM table + +SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP BY RegionID) +``` + +## 使用示例 {#shi-yong-shi-li} + +请参阅 [AggregatingMergeTree](../../sql_reference/data_types/aggregatefunction.md) 的说明 + +[来源文章](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/zh/sql_reference/data_types/array.md b/docs/zh/sql_reference/data_types/array.md new file mode 100644 index 00000000000..7a35647d20e --- /dev/null +++ b/docs/zh/sql_reference/data_types/array.md @@ -0,0 +1,73 @@ + +# 阵列(T) {#data-type-array} + +由 `T` 类型元素组成的数组。 + +`T` 可以是任意类型,包含数组类型。 但不推荐使用多维数组,ClickHouse 对多维数组的支持有限。例如,不能存储在 `MergeTree` 表中存储多维数组。 + +## 创建数组 {#chuang-jian-shu-zu} + +您可以使用array函数来创建数组: + + array(T) + +您也可以使用方括号: + + [] + +创建数组示例: + + :) SELECT array(1, 2) AS x, toTypeName(x) + + SELECT + [1, 2] AS x, + toTypeName(x) + + ┌─x─────┬─toTypeName(array(1, 2))─┐ + │ [1,2] │ Array(UInt8) │ + └───────┴─────────────────────────┘ + + 1 rows in set. Elapsed: 0.002 sec. + + :) SELECT [1, 2] AS x, toTypeName(x) + + SELECT + [1, 2] AS x, + toTypeName(x) + + ┌─x─────┬─toTypeName([1, 2])─┐ + │ [1,2] │ Array(UInt8) │ + └───────┴────────────────────┘ + + 1 rows in set. Elapsed: 0.002 sec. + +## 使用数据类型 {#shi-yong-shu-ju-lei-xing} + +ClickHouse会自动检测数组元素,并根据元素计算出存储这些元素最小的数据类型。如果在元素中存在 [NULL](../../sql_reference/data_types/array.md#null-literal) 或存在 [可为空](nullable.md#data_type-nullable) 类型元素,那么数组的元素类型将会变成 [可为空](nullable.md)。 + +如果 ClickHouse 无法确定数据类型,它将产生异常。当尝试同时创建一个包含字符串和数字的数组时会发生这种情况 (`SELECT array(1, 'a')`)。 + +自动数据类型检测示例: + + :) SELECT array(1, 2, NULL) AS x, toTypeName(x) + + SELECT + [1, 2, NULL] AS x, + toTypeName(x) + + ┌─x──────────┬─toTypeName(array(1, 2, NULL))─┐ + │ [1,2,NULL] │ Array(Nullable(UInt8)) │ + └────────────┴───────────────────────────────┘ + + 1 rows in set. Elapsed: 0.002 sec. + +如果您尝试创建不兼容的数据类型数组,ClickHouse 将引发异常: + + :) SELECT array(1, 'a') + + SELECT [1, 'a'] + + Received exception from server (version 1.1.54388): + Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. + + 0 rows in set. Elapsed: 0.246 sec. 
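+
+作为补充,下面的示意查询展示了数组元素的下标访问(ClickHouse 中数组下标从 1 开始)以及 `length` 函数的用法:
+
+    SELECT
+        [10, 20, 30] AS arr,
+        arr[1] AS first_element, -- 下标从 1 开始,结果为 10
+        length(arr) AS len       -- 结果为 3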
diff --git a/docs/zh/sql_reference/data_types/boolean.md b/docs/zh/sql_reference/data_types/boolean.md new file mode 100644 index 00000000000..26c8ac5cdd5 --- /dev/null +++ b/docs/zh/sql_reference/data_types/boolean.md @@ -0,0 +1,4 @@ + +# 布尔值 {#boolean-values} + +没有单独的类型来存储布尔值。可以使用 UInt8 类型,取值限制为 0 或 1。 diff --git a/docs/zh/sql_reference/data_types/date.md b/docs/zh/sql_reference/data_types/date.md new file mode 100644 index 00000000000..18bdb507f37 --- /dev/null +++ b/docs/zh/sql_reference/data_types/date.md @@ -0,0 +1,6 @@ + +# 日期 {#date} + +日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2106年,但最终完全支持的年份为2105)。最小值输出为0000-00-00。 + +日期中没有存储时区信息。 diff --git a/docs/zh/sql_reference/data_types/datetime.md b/docs/zh/sql_reference/data_types/datetime.md new file mode 100644 index 00000000000..1122131614b --- /dev/null +++ b/docs/zh/sql_reference/data_types/datetime.md @@ -0,0 +1,12 @@ + +# 日期时间 {#data_type-datetime} + +时间戳类型。用四个字节(无符号的)存储 Unix 时间戳)。允许存储与日期类型相同的范围内的值。最小值为 0000-00-00 00:00:00。时间戳类型值精确到秒(没有闰秒)。 + +## 时区 {#shi-qu} + +使用启动客户端或服务器时的系统时区,时间戳是从文本(分解为组件)转换为二进制并返回。在文本格式中,有关夏令时的信息会丢失。 + +默认情况下,客户端连接到服务的时候会使用服务端时区。您可以通过启用客户端命令行选项 `--use_client_time_zone` 来设置使用客户端时间。 + +因此,在处理文本日期时(例如,在保存文本转储时),请记住在夏令时更改期间可能存在歧义,如果时区发生更改,则可能存在匹配数据的问题。 diff --git a/docs/zh/sql_reference/data_types/datetime64.md b/docs/zh/sql_reference/data_types/datetime64.md new file mode 100644 index 00000000000..dd87486cee5 --- /dev/null +++ b/docs/zh/sql_reference/data_types/datetime64.md @@ -0,0 +1,104 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 49 +toc_title: DateTime64 +--- + +# Datetime64 {#data_type-datetime64} + +允许存储时间instant间,可以表示为日历日期和一天中的时间,具有定义的亚秒精度 + +刻度尺寸(精度):10-精度 秒 + +语法: + +``` sql +DateTime64(precision, [timezone]) +``` + +在内部,存储数据作为一些 ‘ticks’ 自纪元开始(1970-01-01 00:00:00UTC)作为Int64. 刻度分辨率由precision参数确定。 此外,该 `DateTime64` 类型可以存储时区是相同的整个列,影响如何的值 `DateTime64` 类型值以文本格式显示,以及如何解析指定为字符串的值 (‘2020-01-01 05:00:01.000’). 时区不存储在表的行中(或resultset中),而是存储在列元数据中。 查看详细信息 [日期时间](datetime.md). + +## 例 {#examples} + +**1.** 创建一个表 `DateTime64`-输入列并将数据插入其中: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime64(3, 'Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +``` + +``` sql +SELECT * FROM dt +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00.000 │ 1 │ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +- 将日期时间作为整数插入时,将其视为适当缩放的Unix时间戳(UTC)。 `1546300800000` (精度为3)表示 `'2019-01-01 00:00:00'` UTC. 然而,作为 `timestamp` 列有 `Europe/Moscow` (UTC+3)指定的时区,当输出为字符串时,该值将显示为 `'2019-01-01 03:00:00'` +- 当插入字符串值作为日期时间时,它被视为处于列时区。 `'2019-01-01 00:00:00'` 将被视为 `Europe/Moscow` 时区并存储为 `1546290000000`. 
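+
+作为对上述两点的补充,可以用 `toDateTime64` 直接观察同一整数时间戳在指定时区下的显示结果(示意查询;此处数值参数按 Unix 秒解释,`1546300800` 对应 UTC 时间 2019-01-01 00:00:00):
+
+``` sql
+SELECT toDateTime64(1546300800, 3, 'Europe/Moscow') AS moscow_time
+-- 在 Europe/Moscow(UTC+3)时区下,预期显示为 2019-01-01 03:00:00.000
+```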
+ +**2.** 过滤 `DateTime64` 值 + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +不像 `DateTime`, `DateTime64` 值不转换为 `String` 自动 + +**3.** 获取一个时区 `DateTime64`-类型值: + +``` sql +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────────column─┬─x──────────────────────────────┐ +│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ +└─────────────────────────┴────────────────────────────────┘ +``` + +**4.** 时区转换 + +``` sql +SELECT +toDateTime64(timestamp, 3, 'Europe/London') as lon_time, +toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────────lon_time──┬────────────────mos_time─┐ +│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ +│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ +└─────────────────────────┴─────────────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [类型转换函数](../../sql_reference/functions/type_conversion_functions.md) +- [用于处理日期和时间的函数](../../sql_reference/functions/date_time_functions.md) +- [用于处理数组的函数](../../sql_reference/functions/array_functions.md) +- [该 `date_time_input_format` 设置](../../operations/settings/settings.md#settings-date_time_input_format) +- [该 `timezone` 服务器配置参数](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [使用日期和时间的操作员](../../sql_reference/operators.md#operators-datetime) +- [`Date` 数据类型](date.md) +- [`DateTime` 数据类型](datetime.md) diff --git a/docs/zh/sql_reference/data_types/decimal.md b/docs/zh/sql_reference/data_types/decimal.md new file mode 100644 index 00000000000..24bc1f70415 --- /dev/null +++ b/docs/zh/sql_reference/data_types/decimal.md @@ -0,0 +1,81 @@ + +# Decimal(P,S),Decimal32(S),Decimal64(S),Decimal128(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} + +有符号的定点数,可在加、减和乘法运算过程中保持精度。对于除法,最低有效数字会被丢弃(不舍入)。 + +## 参数 {#can-shu} + +- P - 精度。有效范围:\[1:38\],决定可以有多少个十进制数字(包括分数)。 +- S - 规模。有效范围:\[0:P\],决定数字的小数部分中包含的小数位数。 + +对于不同的 P 参数值 Decimal 表示,以下例子都是同义的: +-P从\[1:9\]-对于Decimal32(S) +-P从\[10:18\]-对于Decimal64(小号) +-P从\[19:38\]-对于Decimal128(S) + +## 十进制值范围 {#shi-jin-zhi-zhi-fan-wei} + +- Decimal32(S) - ( -1 \* 10^(9 - S),1\*10^(9-S) ) +- Decimal64(S) - ( -1 \* 10^(18 - S),1\*10^(18-S) ) +- Decimal128(S) - ( -1 \* 10^(38 - S),1\*10^(38-S) ) + +例如,Decimal32(4) 可以表示 -99999.9999 至 99999.9999 的数值,步长为0.0001。 + +## 内部表示方式 {#nei-bu-biao-shi-fang-shi} + +数据采用与自身位宽相同的有符号整数存储。这个数在内存中实际范围会高于上述范围,从 String 转换到十进制数的时候会做对应的检查。 + +由于现代CPU不支持128位数字,因此 Decimal128 上的操作由软件模拟。所以 Decimal128 的运算速度明显慢于 Decimal32/Decimal64。 + +## 运算和结果类型 {#yun-suan-he-jie-guo-lei-xing} + +对Decimal的二进制运算导致更宽的结果类型(无论参数的顺序如何)。 + +- Decimal64(S1) Decimal32(S2)-\>Decimal64(S) +- Decimal128(S1) Decimal32(S2)-\>Decimal128(S) +- Decimal128(S1) Decimal64(S2)-\>Decimal128(S) + +精度变化的规则: + +- 加法,减法:S = max(S1, S2)。 +- 乘法:S = S1 + S2。 +- 除法:S = S1。 + +对于 Decimal 和整数之间的类似操作,结果是与参数大小相同的十进制。 + +未定义Decimal和Float32/Float64之间的函数。要执行此类操作,您可以使用:toDecimal32、toDecimal64、toDecimal128 或 toFloat32,toFloat64,需要显式地转换其中一个参数。注意,结果将失去精度,类型转换是昂贵的操作。 + +Decimal上的一些函数返回结果为Float64(例如,var或stddev)。对于其中一些,中间计算发生在Decimal中。对于此类函数,尽管结果类型相同,但Float64和Decimal中相同数据的结果可能不同。 + +## 溢出检查 {#yi-chu-jian-cha} + +在对 Decimal 类型执行操作时,数值可能会发生溢出。分数中的过多数字被丢弃(不是舍入的)。整数中的过多数字将导致异常。 + + SELECT toDecimal32(2, 4) AS x, x / 3 + + ┌──────x─┬─divide(toDecimal32(2, 4), 3)─┐ + │ 2.0000 │ 0.6666 │ + 
└────────┴──────────────────────────────┘ + + SELECT toDecimal32(4.2, 8) AS x, x * x + + DB::Exception: Scale is out of bounds. + + SELECT toDecimal32(4.2, 8) AS x, 6 * x + + DB::Exception: Decimal math overflow. + +检查溢出会导致计算变慢。如果已知溢出不可能,则可以通过设置`decimal_check_overflow`来禁用溢出检查,在这种情况下,溢出将导致结果不正确: + + SET decimal_check_overflow = 0; + SELECT toDecimal32(4.2, 8) AS x, 6 * x + + ┌──────────x─┬─multiply(6, toDecimal32(4.2, 8))─┐ + │ 4.20000000 │ -17.74967296 │ + └────────────┴──────────────────────────────────┘ + +溢出检查不仅发生在算术运算上,还发生在比较运算上: + + SELECT toDecimal32(1, 8) < 100 + + DB::Exception: Can't compare. diff --git a/docs/zh/sql_reference/data_types/domains/index.md b/docs/zh/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..7df13d51e54 --- /dev/null +++ b/docs/zh/sql_reference/data_types/domains/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u57DF" +toc_priority: 56 +--- + + diff --git a/docs/zh/sql_reference/data_types/domains/ipv4.md b/docs/zh/sql_reference/data_types/domains/ipv4.md new file mode 100644 index 00000000000..26ed4d84922 --- /dev/null +++ b/docs/zh/sql_reference/data_types/domains/ipv4.md @@ -0,0 +1,69 @@ + +## IPv4 {#ipv4} + +`IPv4`是与`UInt32`类型保持二进制兼容的Domain类型,其用于存储IPv4地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 + +### 基本使用 {#ji-ben-shi-yong} + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + + ┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ + │ url │ String │ │ │ │ │ + │ from │ IPv4 │ │ │ │ │ + └──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ + +同时您也可以使用`IPv4`类型的列作为主键: + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; +``` + +在写入与查询时,`IPv4`类型能够识别可读性更加友好的输入输出格式: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); + +SELECT * FROM hits; +``` + + ┌─url────────────────────────────────┬───────────from─┐ + │ https://clickhouse.tech/docs/en/ │ 116.106.34.242 │ + │ https://wikipedia.org │ 116.253.40.133 │ + │ https://clickhouse.tech │ 183.247.232.58 │ + └────────────────────────────────────┴────────────────┘ + +同时它提供更为紧凑的二进制存储格式: + +``` sql +SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; +``` + + ┌─toTypeName(from)─┬─hex(from)─┐ + │ IPv4 │ B7F7E83A │ + └──────────────────┴───────────┘ + +不可隐式转换为除`UInt32`以外的其他类型类型。如果要将`IPv4`类型的值转换成字符串,你可以使用`IPv4NumToString()`显示的进行转换: + +``` sql +SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; +``` + + ┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ + │ String │ 183.247.232.58 │ + └───────────────────────────────────┴────────────────┘ + +或可以使用`CAST`将它转换为`UInt32`类型: + +``` sql +SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; +``` + + ┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ + │ UInt32 │ 3086477370 │ + └──────────────────────────────────┴────────────┘ + +[来源文章](https://clickhouse.tech/docs/en/data_types/domains/ipv4) diff --git a/docs/zh/sql_reference/data_types/domains/ipv6.md b/docs/zh/sql_reference/data_types/domains/ipv6.md new file mode 100644 index 00000000000..b147fb6db84 --- /dev/null +++ b/docs/zh/sql_reference/data_types/domains/ipv6.md @@ -0,0 +1,69 @@ + +## IPv6 {#ipv6} + 
+`IPv6`是与`FixedString(16)`类型保持二进制兼容的Domain类型,其用于存储IPv6地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 + +### 基本用法 {#ji-ben-yong-fa} + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + + ┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ + │ url │ String │ │ │ │ │ + │ from │ IPv6 │ │ │ │ │ + └──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ + +同时您也可以使用`IPv6`类型的列作为主键: + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; +``` + +在写入与查询时,`IPv6`类型能够识别可读性更加友好的输入输出格式: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); + +SELECT * FROM hits; +``` + + ┌─url────────────────────────────────┬─from──────────────────────────┐ + │ https://clickhouse.tech │ 2001:44c8:129:2632:33:0:252:2 │ + │ https://clickhouse.tech/docs/en/ │ 2a02:e980:1e::1 │ + │ https://wikipedia.org │ 2a02:aa08:e000:3100::2 │ + └────────────────────────────────────┴───────────────────────────────┘ + +同时它提供更为紧凑的二进制存储格式: + +``` sql +SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; +``` + + ┌─toTypeName(from)─┬─hex(from)────────────────────────┐ + │ IPv6 │ 200144C8012926320033000002520002 │ + └──────────────────┴──────────────────────────────────┘ + +不可隐式转换为除`FixedString(16)`以外的其他类型类型。如果要将`IPv6`类型的值转换成字符串,你可以使用`IPv6NumToString()`显示的进行转换: + +``` sql +SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; +``` + + ┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ + │ String │ 2001:44c8:129:2632:33:0:252:2 │ + └───────────────────────────────────┴───────────────────────────────┘ + +或使用`CAST`将其转换为`FixedString(16)`: + +``` sql +SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; +``` + + ┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ + │ FixedString(16) │ ��� │ + └───────────────────────────────────────────┴─────────┘ + +[来源文章](https://clickhouse.tech/docs/en/data_types/domains/ipv6) diff --git a/docs/zh/sql_reference/data_types/domains/overview.md b/docs/zh/sql_reference/data_types/domains/overview.md new file mode 100644 index 00000000000..b330bad18c0 --- /dev/null +++ b/docs/zh/sql_reference/data_types/domains/overview.md @@ -0,0 +1,27 @@ + +# 域 {#domains} + +Domain类型是特定实现的类型,它总是与某个现存的基础类型保持二进制兼容的同时添加一些额外的特性,以能够在维持磁盘数据不变的情况下使用这些额外的特性。目前ClickHouse暂不支持自定义domain类型。 + +如果你可以在一个地方使用与Domain类型二进制兼容的基础类型,那么在相同的地方您也可以使用Domain类型,例如: + +- 使用Domain类型作为表中列的类型 +- 对Domain类型的列进行读/写数据 +- 如果与Domain二进制兼容的基础类型可以作为索引,那么Domain类型也可以作为索引 +- 将Domain类型作为参数传递给函数使用 +- 其他 + +### Domains的额外特性 {#domainsde-e-wai-te-xing} + +- 在执行SHOW CREATE TABLE 或 DESCRIBE TABLE时,其对应的列总是展示为Domain类型的名称 +- 在INSERT INTO domain\_table(domain\_column) VALUES(…)中输入数据总是以更人性化的格式进行输入 +- 在SELECT domain\_column FROM domain\_table中数据总是以更人性化的格式输出 +- 在INSERT INTO domain\_table FORMAT CSV …中,实现外部源数据以更人性化的格式载入 + +### Domains类型的限制 {#domainslei-xing-de-xian-zhi} + +- 无法通过`ALTER TABLE`将基础类型的索引转换为Domain类型的索引。 +- 当从其他列或表插入数据时,无法将string类型的值隐式地转换为Domain类型的值。 +- 无法对存储为Domain类型的值添加约束。 + +[来源文章](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/zh/sql_reference/data_types/enum.md b/docs/zh/sql_reference/data_types/enum.md new file mode 100644 index 00000000000..87ada143638 --- /dev/null +++ b/docs/zh/sql_reference/data_types/enum.md @@ -0,0 +1,101 @@ + +# Enum8,Enum16 {#enum8-enum16} + +包括 
`Enum8` 和 `Enum16` 类型。`Enum` 保存 `'string'= integer` 的对应关系。在 ClickHouse 中,尽管用户使用的是字符串常量,但所有含有 `Enum` 数据类型的操作都是按照包含整数的值来执行。这在性能方面比使用 `String` 数据类型更有效。 + +- `Enum8` 用 `'String'= Int8` 对描述。 +- `Enum16` 用 `'String'= Int16` 对描述。 + +## 用法示例 {#yong-fa-shi-li} + +创建一个带有一个枚举 `Enum8('hello' = 1, 'world' = 2)` 类型的列: + + CREATE TABLE t_enum + ( + x Enum8('hello' = 1, 'world' = 2) + ) + ENGINE = TinyLog + +这个 `x` 列只能存储类型定义中列出的值:`'hello'`或`'world'`。如果您尝试保存任何其他值,ClickHouse 抛出异常。 + + :) INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello') + + INSERT INTO t_enum VALUES + + Ok. + + 3 rows in set. Elapsed: 0.002 sec. + + :) insert into t_enum values('a') + + INSERT INTO t_enum VALUES + + + Exception on client: + Code: 49. DB::Exception: Unknown element 'a' for type Enum8('hello' = 1, 'world' = 2) + +当您从表中查询数据时,ClickHouse 从 `Enum` 中输出字符串值。 + + SELECT * FROM t_enum + + ┌─x─────┐ + │ hello │ + │ world │ + │ hello │ + └───────┘ + +如果需要看到对应行的数值,则必须将 `Enum` 值转换为整数类型。 + + SELECT CAST(x, 'Int8') FROM t_enum + + ┌─CAST(x, 'Int8')─┐ + │ 1 │ + │ 2 │ + │ 1 │ + └─────────────────┘ + +在查询中创建枚举值,您还需要使用 `CAST`。 + + SELECT toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')) + + ┌─toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)'))─┐ + │ Enum8('a' = 1, 'b' = 2) │ + └──────────────────────────────────────────────────────┘ + +## 规则及用法 {#gui-ze-ji-yong-fa} + +`Enum8` 类型的每个值范围是 `-128 ... 127`,`Enum16` 类型的每个值范围是 `-32768 ... 32767`。所有的字符串或者数字都必须是不一样的。允许存在空字符串。如果某个 Enum 类型被指定了(在表定义的时候),数字可以是任意顺序。然而,顺序并不重要。 + +`Enum` 中的字符串和数值都不能是 [NULL](../../sql_reference/data_types/enum.md)。 + +`Enum` 包含在 [可为空](nullable.md) 类型中。因此,如果您使用此查询创建一个表 + + CREATE TABLE t_enum_nullable + ( + x Nullable( Enum8('hello' = 1, 'world' = 2) ) + ) + ENGINE = TinyLog + +不仅可以存储 `'hello'` 和 `'world'` ,还可以存储 `NULL`。 + + INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) + +在内存中,`Enum` 列的存储方式与相应数值的 `Int8` 或 `Int16` 相同。 + +当以文本方式读取的时候,ClickHouse 将值解析成字符串然后去枚举值的集合中搜索对应字符串。如果没有找到,会抛出异常。当读取文本格式的时候,会根据读取到的字符串去找对应的数值。如果没有找到,会抛出异常。 + +当以文本形式写入时,ClickHouse 将值解析成字符串写入。如果列数据包含垃圾数据(不是来自有效集合的数字),则抛出异常。Enum 类型以二进制读取和写入的方式与 `Int8` 和 `Int16` 类型一样的。 + +隐式默认值是数值最小的值。 + +在 `ORDER BY`,`GROUP BY`,`IN`,`DISTINCT` 等等中,Enum 的行为与相应的数字相同。例如,按数字排序。对于等式运算符和比较运算符,Enum 的工作机制与它们在底层数值上的工作机制相同。 + +枚举值不能与数字进行比较。枚举可以与常量字符串进行比较。如果与之比较的字符串不是有效Enum值,则将引发异常。可以使用 IN 运算符来判断一个 Enum 是否存在于某个 Enum 集合中,其中集合中的 Enum 需要用字符串表示。 + +大多数具有数字和字符串的运算并不适用于Enums;例如,Enum 类型不能和一个数值相加。但是,Enum有一个原生的 `toString` 函数,它返回它的字符串值。 + +Enum 值使用 `toT` 函数可以转换成数值类型,其中 T 是一个数值类型。若 `T` 恰好对应 Enum 的底层数值类型,这个转换是零消耗的。 + +Enum 类型可以被 `ALTER` 无成本地修改对应集合的值。可以通过 `ALTER` 操作来增加或删除 Enum 的成员(只要表没有用到该值,删除都是安全的)。作为安全保障,改变之前使用过的 Enum 成员将抛出异常。 + +通过 `ALTER` 操作,可以将 `Enum8` 转成 `Enum16`,反之亦然,就像 `Int8` 转 `Int16`一样。 diff --git a/docs/zh/sql_reference/data_types/fixedstring.md b/docs/zh/sql_reference/data_types/fixedstring.md new file mode 100644 index 00000000000..c8e71e69303 --- /dev/null +++ b/docs/zh/sql_reference/data_types/fixedstring.md @@ -0,0 +1,57 @@ + +# 固定字符串 {#fixedstring} + +固定长度 N 的字符串(N 必须是严格的正自然数)。 + +您可以使用下面的语法对列声明为`FixedString`类型: + +``` sql + FixedString(N) +``` + +其中`N`表示自然数。 + +当数据的长度恰好为N个字节时,`FixedString`类型是高效的。 在其他情况下,这可能会降低效率。 + +可以有效存储在`FixedString`类型的列中的值的示例: + +- 二进制表示的IP地址(IPv6使用`FixedString(16)`) +- 语言代码(ru\_RU, en\_US … ) +- 货币代码(USD, RUB … ) +- 二进制表示的哈希值(MD5使用`FixedString(16)`,SHA256使用`FixedString(32)`) + +请使用[UUID](uuid.md)数据类型来存储UUID值,。 + +当向ClickHouse中插入数据时, + +- 如果字符串包含的字节数少于\`N',将对字符串末尾进行空字节填充。 +- 如果字符串包含的字节数大于`N`,将抛出`Too large value for FixedString(N)`异常。 + +当做数据查询时,ClickHouse不会删除字符串末尾的空字节。 
如果使用`WHERE`子句,则须要手动添加空字节以匹配`FixedString`的值。 以下示例阐明了如何将`WHERE`子句与`FixedString`一起使用。 + +考虑带有`FixedString(2)`列的表: + +``` text +┌─name──┐ +│ b │ +└───────┘ +``` + +查询语句`SELECT * FROM FixedStringTable WHERE a = 'b'` 不会返回任何结果。请使用空字节来填充筛选条件。 + +``` sql +SELECT * FROM FixedStringTable +WHERE a = 'b\0' +``` + +``` text +┌─a─┐ +│ b │ +└───┘ +``` + +这种方式与MySQL的`CHAR`类型的方式不同(MySQL中使用空格填充字符串,并在输出时删除空格)。 + +请注意,`FixedString(N)`的长度是个常量。仅由空字符组成的字符串,函数[长度](../../sql_reference/data_types/fixedstring.md#array_functions-length)返回值为`N`,而函数[空](../../sql_reference/data_types/fixedstring.md#string_functions-empty)的返回值为`1`。 + +[来源文章](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/zh/sql_reference/data_types/float.md b/docs/zh/sql_reference/data_types/float.md new file mode 100644 index 00000000000..bdc8093a9a9 --- /dev/null +++ b/docs/zh/sql_reference/data_types/float.md @@ -0,0 +1,71 @@ + +# Float32,Float64 {#float32-float64} + +[浮点数](https://en.wikipedia.org/wiki/IEEE_754)。 + +类型与以下 C 语言中类型是相同的: + +- `Float32` - `float` +- `Float64` - `double` + +我们建议您尽可能以整数形式存储数据。例如,将固定精度的数字转换为整数值,例如货币数量或页面加载时间用毫秒为单位表示 + +## 使用浮点数 {#shi-yong-fu-dian-shu} + +- 对浮点数进行计算可能引起四舍五入的误差。 + + + +``` sql +SELECT 1 - 0.9 +``` + + ┌───────minus(1, 0.9)─┐ + │ 0.09999999999999998 │ + └─────────────────────┘ + +- 计算的结果取决于计算方法(计算机系统的处理器类型和体系结构) + +- 浮点计算结果可能是诸如无穷大(`INF`)和«非数字»(`NaN`)。对浮点数计算的时候应该考虑到这点。 + +- 当一行行阅读浮点数的时候,浮点数的结果可能不是机器最近显示的数值。 + +## 南和Inf {#data_type-float-nan-inf} + +与标准SQL相比,ClickHouse 支持以下类别的浮点数: + +- `Inf` – 正无穷 + + + +``` sql +SELECT 0.5 / 0 +``` + + ┌─divide(0.5, 0)─┐ + │ inf │ + └────────────────┘ + +- `-Inf` – 负无穷 + + + +``` sql +SELECT -0.5 / 0 +``` + + ┌─divide(-0.5, 0)─┐ + │ -inf │ + └─────────────────┘ + +- `NaN` – 非数字 + + + + SELECT 0 / 0 + + ┌─divide(0, 0)─┐ + │ nan │ + └──────────────┘ + +可以在 [ORDER BY 子句](../../sql_reference/data_types/float.md) 查看更多关于 `NaN` 排序的规则。 diff --git a/docs/zh/sql_reference/data_types/index.md b/docs/zh/sql_reference/data_types/index.md new file mode 100644 index 00000000000..8df3911ab36 --- /dev/null +++ b/docs/zh/sql_reference/data_types/index.md @@ -0,0 +1,6 @@ + +# 数据类型 {#data_types} + +ClickHouse 可以在数据表中存储多种数据类型。 + +本节描述 ClickHouse 支持的数据类型,以及使用或者实现它们时(如果有的话)的注意事项。 diff --git a/docs/zh/sql_reference/data_types/int_uint.md b/docs/zh/sql_reference/data_types/int_uint.md new file mode 100644 index 00000000000..b74bbcf178f --- /dev/null +++ b/docs/zh/sql_reference/data_types/int_uint.md @@ -0,0 +1,18 @@ + +# UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} + +固定长度的整型,包括有符号整型或无符号整型。 + +## 整型范围 {#zheng-xing-fan-wei} + +- Int8-\[-128:127\] +- Int16-\[-32768:32767\] +- Int32-\[-2147483648:2147483647\] +- Int64-\[-9223372036854775808:9223372036854775807\] + +## 无符号整型范围 {#wu-fu-hao-zheng-xing-fan-wei} + +- UInt8-\[0:255\] +- UInt16-\[0:65535\] +- UInt32-\[0:4294967295\] +- UInt64-\[0:18446744073709551615\] diff --git a/docs/zh/sql_reference/data_types/nested_data_structures/index.md b/docs/zh/sql_reference/data_types/nested_data_structures/index.md new file mode 100644 index 00000000000..fdeb9fe6ac5 --- /dev/null +++ b/docs/zh/sql_reference/data_types/nested_data_structures/index.md @@ -0,0 +1,2 @@ + +# 嵌套数据结构 {#qian-tao-shu-ju-jie-gou} diff --git a/docs/zh/sql_reference/data_types/nested_data_structures/nested.md b/docs/zh/sql_reference/data_types/nested_data_structures/nested.md new file mode 100644 index 00000000000..6ac26c0eeba --- /dev/null +++ 
b/docs/zh/sql_reference/data_types/nested_data_structures/nested.md @@ -0,0 +1,98 @@ + +# Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} + +嵌套数据结构类似于嵌套表。嵌套数据结构的参数(列名和类型)与 CREATE 查询类似。每个表可以包含任意多行嵌套数据结构。 + +示例: + +``` sql +CREATE TABLE test.visits +( + CounterID UInt32, + StartDate Date, + Sign Int8, + IsNew UInt8, + VisitID UInt64, + UserID UInt64, + ... + Goals Nested + ( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32 + ), + ... +) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign) +``` + +上述示例声明了 `Goals` 这种嵌套数据结构,它包含访客转化相关的数据(访客达到的目标)。在 ‘visits’ 表中每一行都可以对应零个或者任意个转化数据。 + +只支持一级嵌套。嵌套结构的列中,若列的类型是数组类型,那么该列其实和多维数组是相同的,所以目前嵌套层级的支持很局限(MergeTree 引擎中不支持存储这样的列) + +大多数情况下,处理嵌套数据结构时,会指定一个单独的列。为了这样实现,列的名称会与点号连接起来。这些列构成了一组匹配类型。在同一条嵌套数据中,所有的列都具有相同的长度。 + +示例: + +``` sql +SELECT + Goals.ID, + Goals.EventTime +FROM test.visits +WHERE CounterID = 101500 AND length(Goals.ID) < 5 +LIMIT 10 +``` + +``` text +┌─Goals.ID───────────────────────┬─Goals.EventTime───────────────────────────────────────────────────────────────────────────┐ +│ [1073752,591325,591325] │ ['2014-03-17 16:38:10','2014-03-17 16:38:48','2014-03-17 16:42:27'] │ +│ [1073752] │ ['2014-03-17 00:28:25'] │ +│ [1073752] │ ['2014-03-17 10:46:20'] │ +│ [1073752,591325,591325,591325] │ ['2014-03-17 13:59:20','2014-03-17 22:17:55','2014-03-17 22:18:07','2014-03-17 22:18:51'] │ +│ [] │ [] │ +│ [1073752,591325,591325] │ ['2014-03-17 11:37:06','2014-03-17 14:07:47','2014-03-17 14:36:21'] │ +│ [] │ [] │ +│ [] │ [] │ +│ [591325,1073752] │ ['2014-03-17 00:46:05','2014-03-17 00:46:05'] │ +│ [1073752,591325,591325,591325] │ ['2014-03-17 13:28:33','2014-03-17 13:30:26','2014-03-17 18:51:21','2014-03-17 18:51:45'] │ +└────────────────────────────────┴───────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +所以可以简单地把嵌套数据结构当做是所有列都是相同长度的多列数组。 + +SELECT 查询只有在使用 ARRAY JOIN 的时候才可以指定整个嵌套数据结构的名称。更多信息,参考 «ARRAY JOIN 子句»。示例: + +``` sql +SELECT + Goal.ID, + Goal.EventTime +FROM test.visits +ARRAY JOIN Goals AS Goal +WHERE CounterID = 101500 AND length(Goals.ID) < 5 +LIMIT 10 +``` + +``` text +┌─Goal.ID─┬──────Goal.EventTime─┐ +│ 1073752 │ 2014-03-17 16:38:10 │ +│ 591325 │ 2014-03-17 16:38:48 │ +│ 591325 │ 2014-03-17 16:42:27 │ +│ 1073752 │ 2014-03-17 00:28:25 │ +│ 1073752 │ 2014-03-17 10:46:20 │ +│ 1073752 │ 2014-03-17 13:59:20 │ +│ 591325 │ 2014-03-17 22:17:55 │ +│ 591325 │ 2014-03-17 22:18:07 │ +│ 591325 │ 2014-03-17 22:18:51 │ +│ 1073752 │ 2014-03-17 11:37:06 │ +└─────────┴─────────────────────┘ +``` + +不能对整个嵌套数据结构执行 SELECT。只能明确列出属于它一部分列。 + +对于 INSERT 查询,可以单独地传入所有嵌套数据结构中的列数组(假如它们是单独的列数组)。在插入过程中,系统会检查它们是否有相同的长度。 + +对于 DESCRIBE 查询,嵌套数据结构中的列会以相同的方式分别列出来。 + +ALTER 查询对嵌套数据结构的操作非常有限。 diff --git a/docs/zh/sql_reference/data_types/nullable.md b/docs/zh/sql_reference/data_types/nullable.md new file mode 100644 index 00000000000..6ece5f3c178 --- /dev/null +++ b/docs/zh/sql_reference/data_types/nullable.md @@ -0,0 +1,42 @@ + +# 可为空(类型名称) {#data_type-nullable} + +允许用特殊标记 ([NULL](../../sql_reference/data_types/nullable.md)) 表示«缺失值»,可以与 `TypeName` 的正常值存放一起。例如,`Nullable(Int8)` 类型的列可以存储 `Int8` 类型值,而没有值的行将存储 `NULL`。 + +对于 `TypeName`,不能使用复合数据类型 [阵列](array.md) 和 [元组](tuple.md)。复合数据类型可以包含 `Nullable` 类型值,例如`Array(Nullable(Int8))`。 + +`Nullable` 类型字段不能包含在表索引中。 + +除非在 ClickHouse 服务器配置中另有说明,否则 `NULL` 是任何 `Nullable` 类型的默认值。 + +## 存储特性 {#cun-chu-te-xing} + +要在表的列中存储 `Nullable` 类型值,ClickHouse 
除了使用带有值的普通文件外,还使用带有 `NULL` 掩码的单独文件。 掩码文件中的条目允许 ClickHouse 区分每个表行的 `NULL` 和相应数据类型的默认值。 由于附加了新文件,`Nullable` 列与类似的普通文件相比消耗额外的存储空间。 + +!!! 注意点 "注意点" + 使用 `Nullable` 几乎总是对性能产生负面影响,在设计数据库时请记住这一点 + +掩码文件中的条目允许ClickHouse区分每个表行的对应数据类型的«NULL»和默认值由于有额外的文件,«Nullable»列比普通列消耗更多的存储空间 + +## 用法示例 {#yong-fa-shi-li} + +``` sql +CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog +``` + +``` sql +INSERT INTO t_null VALUES (1, NULL), (2, 3) +``` + +``` sql +SELECT x + y FROM t_null +``` + +``` text +┌─plus(x, y)─┐ +│ ᴺᵁᴸᴸ │ +│ 5 │ +└────────────┘ +``` + +[来源文章](https://clickhouse.tech/docs/en/data_types/nullable/) diff --git a/docs/zh/sql_reference/data_types/special_data_types/expression.md b/docs/zh/sql_reference/data_types/special_data_types/expression.md new file mode 100644 index 00000000000..d4fb3257f60 --- /dev/null +++ b/docs/zh/sql_reference/data_types/special_data_types/expression.md @@ -0,0 +1,4 @@ + +# 表达式 {#expression} + +用于表示高阶函数中的Lambd表达式。 diff --git a/docs/zh/sql_reference/data_types/special_data_types/index.md b/docs/zh/sql_reference/data_types/special_data_types/index.md new file mode 100644 index 00000000000..64d93783cb9 --- /dev/null +++ b/docs/zh/sql_reference/data_types/special_data_types/index.md @@ -0,0 +1,4 @@ + +# 特殊数据类型 {#special-data-types} + +特殊数据类型的值既不能存在表中也不能在结果中输出,但可用于查询的中间结果。 diff --git a/docs/zh/sql_reference/data_types/special_data_types/interval.md b/docs/zh/sql_reference/data_types/special_data_types/interval.md new file mode 100644 index 00000000000..7a7ac888775 --- /dev/null +++ b/docs/zh/sql_reference/data_types/special_data_types/interval.md @@ -0,0 +1,85 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 61 +toc_title: "\u95F4\u9694" +--- + +# 间隔 {#data-type-interval} + +表示时间和日期间隔的数据类型族。 由此产生的类型 [INTERVAL](../../../sql_reference/operators.md#operator-interval) 接线员 + +!!! warning "警告" + `Interval` 数据类型值不能存储在表中。 + +结构: + +- 时间间隔作为无符号整数值。 +- 间隔的类型。 + +支持的时间间隔类型: + +- `SECOND` +- `MINUTE` +- `HOUR` +- `DAY` +- `WEEK` +- `MONTH` +- `QUARTER` +- `YEAR` + +对于每个间隔类型,都有一个单独的数据类型。 例如, `DAY` 间隔对应于 `IntervalDay` 数据类型: + +``` sql +SELECT toTypeName(INTERVAL 4 DAY) +``` + +``` text +┌─toTypeName(toIntervalDay(4))─┐ +│ IntervalDay │ +└──────────────────────────────┘ +``` + +## 使用说明 {#data-type-interval-usage-remarks} + +您可以使用 `Interval`-在算术运算类型值 [日期](../../../sql_reference/data_types/date.md) 和 [日期时间](../../../sql_reference/data_types/datetime.md)-类型值。 例如,您可以将4天添加到当前时间: + +``` sql +SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY +``` + +``` text +┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐ +│ 2019-10-23 10:58:45 │ 2019-10-27 10:58:45 │ +└─────────────────────┴───────────────────────────────┘ +``` + +不同类型的间隔不能合并。 你不能使用间隔,如 `4 DAY 1 HOUR`. 以小于或等于间隔的最小单位的单位指定间隔,例如,间隔 `1 day and an hour` 间隔可以表示为 `25 HOUR` 或 `90000 SECOND`. + +你不能执行算术运算 `Interval`-类型值,但你可以添加不同类型的时间间隔,因此值 `Date` 或 `DateTime` 数据类型。 例如: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +以下查询将导致异常: + +``` sql +select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) +``` + +``` text +Received exception from server (version 19.14.1): +Code: 43. DB::Exception: Received from localhost:9000. 
DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. +``` + +## 另请参阅 {#see-also} + +- [INTERVAL](../../../sql_reference/operators.md#operator-interval) 接线员 +- [toInterval](../../../sql_reference/functions/type_conversion_functions.md#function-tointerval) 类型转换函数 diff --git a/docs/zh/sql_reference/data_types/special_data_types/nothing.md b/docs/zh/sql_reference/data_types/special_data_types/nothing.md new file mode 100644 index 00000000000..ebc2b572983 --- /dev/null +++ b/docs/zh/sql_reference/data_types/special_data_types/nothing.md @@ -0,0 +1,20 @@ + +# 没什么 {#nothing} + +此数据类型的唯一目的是表示不是期望值的情况。 所以不能创建一个 `Nothing` 类型的值。 + +例如,文本 [NULL](../../../sql_reference/data_types/special_data_types/nothing.md#null-literal) 的类型为 `Nullable(Nothing)`。详情请见 [可为空](../../../sql_reference/data_types/special_data_types/nothing.md)。 + +`Nothing` 类型也可以用来表示空数组: + +``` bash +:) SELECT toTypeName(array()) + +SELECT toTypeName([]) + +┌─toTypeName(array())─┐ +│ Array(Nothing) │ +└─────────────────────┘ + +1 rows in set. Elapsed: 0.062 sec. +``` diff --git a/docs/zh/sql_reference/data_types/special_data_types/set.md b/docs/zh/sql_reference/data_types/special_data_types/set.md new file mode 100644 index 00000000000..0e1f9c6cc35 --- /dev/null +++ b/docs/zh/sql_reference/data_types/special_data_types/set.md @@ -0,0 +1,4 @@ + +# 设置 {#set} + +可以用在 IN 表达式的右半部分。 diff --git a/docs/zh/sql_reference/data_types/string.md b/docs/zh/sql_reference/data_types/string.md new file mode 100644 index 00000000000..3c9226787a2 --- /dev/null +++ b/docs/zh/sql_reference/data_types/string.md @@ -0,0 +1,11 @@ + +# 字符串 {#string} + +字符串可以任意长度的。它可以包含任意的字节集,包含空字节。因此,字符串类型可以代替其他 DBMSs 中的 VARCHAR、BLOB、CLOB 等类型。 + +## 编码 {#bian-ma} + +ClickHouse 没有编码的概念。字符串可以是任意的字节集,按它们原本的方式进行存储和输出。 +若需存储文本,我们建议使用 UTF-8 编码。至少,如果你的终端使用UTF-8(推荐),这样读写就不需要进行任何的转换了。 +同样,对不同的编码文本 ClickHouse 会有不同处理字符串的函数。 +比如,`length` 函数可以计算字符串包含的字节数组的长度,然而 `lengthUTF8` 函数是假设字符串以 UTF-8 编码,计算的是字符串包含的 Unicode 字符的长度。 diff --git a/docs/zh/sql_reference/data_types/tuple.md b/docs/zh/sql_reference/data_types/tuple.md new file mode 100644 index 00000000000..e3520722c97 --- /dev/null +++ b/docs/zh/sql_reference/data_types/tuple.md @@ -0,0 +1,46 @@ + +# Tuple(T1, T2, …) {#tuplet1-t2} + +元组,其中每个元素都有单独的 [类型](index.md#data_types)。 + +不能在表中存储元组(除了内存表)。它们可以用于临时列分组。在查询中,IN 表达式和带特定参数的 lambda 函数可以来对临时列进行分组。更多信息,请参阅 [IN 操作符](../../sql_reference/data_types/tuple.md) 和 [高阶函数](../../sql_reference/data_types/tuple.md)。 + +元组可以是查询的结果。在这种情况下,对于JSON以外的文本格式,括号中的值是逗号分隔的。在JSON格式中,元组作为数组输出(在方括号中)。 + +## 创建元组 {#chuang-jian-yuan-zu} + +可以使用函数来创建元组: + + tuple(T1, T2, ...) + +创建元组的示例: + + :) SELECT tuple(1,'a') AS x, toTypeName(x) + + SELECT + (1, 'a') AS x, + toTypeName(x) + + ┌─x───────┬─toTypeName(tuple(1, 'a'))─┐ + │ (1,'a') │ Tuple(UInt8, String) │ + └─────────┴───────────────────────────┘ + + 1 rows in set. Elapsed: 0.021 sec. + +## 元组中的数据类型 {#yuan-zu-zhong-de-shu-ju-lei-xing} + +在动态创建元组时,ClickHouse 会自动为元组的每一个参数赋予最小可表达的类型。如果参数为 [NULL](../../sql_reference/data_types/tuple.md#null-literal),那这个元组对应元素是 [可为空](nullable.md)。 + +自动数据类型检测示例: + + SELECT tuple(1, NULL) AS x, toTypeName(x) + + SELECT + (1, NULL) AS x, + toTypeName(x) + + ┌─x────────┬─toTypeName(tuple(1, NULL))──────┐ + │ (1,NULL) │ Tuple(UInt8, Nullable(Nothing)) │ + └──────────┴─────────────────────────────────┘ + + 1 rows in set. Elapsed: 0.002 sec. 
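+
+作为补充,可以使用 `tupleElement` 函数访问元组中的单个元素(下标从 1 开始),示意查询如下:
+
+    SELECT
+        tuple(1, 'a') AS t,
+        tupleElement(t, 1) AS first,  -- 返回 1
+        tupleElement(t, 2) AS second  -- 返回 'a'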
diff --git a/docs/zh/sql_reference/data_types/uuid.md b/docs/zh/sql_reference/data_types/uuid.md new file mode 100644 index 00000000000..4c35fcf2d9c --- /dev/null +++ b/docs/zh/sql_reference/data_types/uuid.md @@ -0,0 +1,77 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 46 +toc_title: UUID +--- + +# UUID {#uuid-data-type} + +通用唯一标识符(UUID)是用于标识记录的16字节数。 有关UUID的详细信息,请参阅 [维基百科](https://en.wikipedia.org/wiki/Universally_unique_identifier). + +UUID类型值的示例如下所示: + +``` text +61f0c404-5cb3-11e7-907b-a6006ad3dba0 +``` + +如果在插入新记录时未指定UUID列值,则UUID值将用零填充: + +``` text +00000000-0000-0000-0000-000000000000 +``` + +## 如何生成 {#how-to-generate} + +要生成UUID值,ClickHouse提供了 [generateuidv4](../../sql_reference/functions/uuid_functions.md) 功能。 + +## 用法示例 {#usage-example} + +**示例1** + +此示例演示如何创建具有UUID类型列的表并将值插入到表中。 + +``` sql +CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog +``` + +``` sql +INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +└──────────────────────────────────────┴───────────┘ +``` + +**示例2** + +在此示例中,插入新记录时未指定UUID列值。 + +``` sql +INSERT INTO t_uuid (y) VALUES ('Example 2') +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ +└──────────────────────────────────────┴───────────┘ +``` + +## 限制 {#restrictions} + +UUID数据类型仅支持以下功能 [字符串](string.md) 数据类型也支持(例如, [min](../../sql_reference/aggregate_functions/reference.md#agg_function-min), [max](../../sql_reference/aggregate_functions/reference.md#agg_function-max),和 [计数](../../sql_reference/aggregate_functions/reference.md#agg_function-count)). + +算术运算不支持UUID数据类型(例如, [abs](../../sql_reference/functions/arithmetic_functions.md#arithm_func-abs))或聚合函数,例如 [sum](../../sql_reference/aggregate_functions/reference.md#agg_function-sum) 和 [avg](../../sql_reference/aggregate_functions/reference.md#agg_function-avg). + +[原始文章](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts.md new file mode 100644 index 00000000000..afbdd082576 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts.md @@ -0,0 +1,56 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 39 +toc_title: "\u6982\u8FF0" +--- + +# 外部字典 {#dicts-external-dicts} + +您可以从各种数据源添加自己的字典。 字典的数据源可以是本地文本或可执行文件、HTTP(s)资源或其他DBMS。 有关详细信息,请参阅 “[外部字典的来源](external_dicts_dict_sources.md)”. + +ClickHouse: + +- 完全或部分存储在RAM中的字典。 +- 定期更新字典并动态加载缺失的值。 换句话说,字典可以动态加载。 +- 允许创建外部字典与xml文件或 [DDL查询](../../statements/create.md#create-dictionary-query). + +外部字典的配置可以位于一个或多个xml文件中。 配置的路径在指定 [dictionaries\_config](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_config) 参数。 + +字典可以在服务器启动或首次使用时加载,具体取决于 [dictionaries\_lazy\_load](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) 设置。 + +字典配置文件具有以下格式: + +``` xml + + An optional element with any content. Ignored by the ClickHouse server. 
+ + + /etc/metrika.xml + + + + + + + + +``` + +你可以 [配置](external_dicts_dict.md) 同一文件中的任意数量的字典。 + +[字典的DDL查询](../../statements/create.md#create-dictionary-query) 在服务器配置中不需要任何其他记录。 它们允许使用字典作为一流的实体,如表或视图。 + +!!! attention "注意" + 您可以通过在一个小字典中描述它来转换小字典的值 `SELECT` 查询(见 [变换](../../../sql_reference/functions/other_functions.md) 功能)。 此功能与外部字典无关。 + +## 另请参阅 {#ext-dicts-see-also} + +- [配置外部字典](external_dicts_dict.md) +- [在内存中存储字典](external_dicts_dict_layout.md) +- [字典更新](external_dicts_dict_lifetime.md) +- [外部字典的来源](external_dicts_dict_sources.md) +- [字典键和字段](external_dicts_dict_structure.md) +- [使用外部字典的函数](../../../sql_reference/functions/ext_dict_functions.md) + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md new file mode 100644 index 00000000000..df64d31d2a9 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -0,0 +1,53 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 40 +toc_title: "\u914D\u7F6E\u5916\u90E8\u5B57\u5178" +--- + +# 配置外部字典 {#dicts-external-dicts-dict} + +如果使用xml文件配置字典,则比字典配置具有以下结构: + +``` xml + + dict_name + + + + + + + + + + + + + + + + + +``` + +相应的 [DDL-查询](../../statements/create.md#create-dictionary-query) 具有以下结构: + +``` sql +CREATE DICTIONARY dict_name +( + ... -- attributes +) +PRIMARY KEY ... -- complex or single key configuration +SOURCE(...) -- Source configuration +LAYOUT(...) -- Memory layout configuration +LIFETIME(...) -- Lifetime of dictionary in memory +``` + +- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`. +- [来源](external_dicts_dict_sources.md) — Source of the dictionary. +- [布局](external_dicts_dict_layout.md) — Dictionary layout in memory. +- [结构](external_dicts_dict_structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. +- [使用寿命](external_dicts_dict_lifetime.md) — Frequency of dictionary updates. + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md new file mode 100644 index 00000000000..925e5f6c8f4 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -0,0 +1,70 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 45 +toc_title: "\u5206\u5C42\u5B57\u5178" +--- + +# 分层字典 {#hierarchical-dictionaries} + +ClickHouse支持分层字典与 [数字键](external_dicts_dict_structure.md#ext_dict-numeric-key). 
+ +看看下面的层次结构: + +``` text +0 (Common parent) +│ +├── 1 (Russia) +│ │ +│ └── 2 (Moscow) +│ │ +│ └── 3 (Center) +│ +└── 4 (Great Britain) + │ + └── 5 (London) +``` + +这种层次结构可以表示为下面的字典表。 + +| region\_id | parent\_region | region\_name | +|------------|----------------|--------------| +| 1 | 0 | 俄罗斯 | +| 2 | 1 | 莫斯科 | +| 3 | 2 | 中心 | +| 4 | 0 | 英国 | +| 5 | 4 | 伦敦 | + +此表包含一列 `parent_region` 包含该元素的最近父项的键。 + +ClickHouse支持 [等级](external_dicts_dict_structure.md#hierarchical-dict-attr) 属性为 [外部字典](index.md) 属性。 此属性允许您配置类似于上述的分层字典。 + +该 [独裁主义](../../../sql_reference/functions/ext_dict_functions.md#dictgethierarchy) 函数允许您获取元素的父链。 + +对于我们的例子,dictionary的结构可以是以下内容: + +``` xml + + + + region_id + + + + parent_region + UInt64 + 0 + true + + + + region_name + String + + + + + +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md new file mode 100644 index 00000000000..4dcf5f4c1b0 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -0,0 +1,373 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 41 +toc_title: "\u5728\u5185\u5B58\u4E2D\u5B58\u50A8\u5B57\u5178" +--- + +# 在内存中存储字典 {#dicts-external-dicts-dict-layout} + +有多种方法可以将字典存储在内存中。 + +我们建议 [平](#flat), [散列](#dicts-external_dicts_dict_layout-hashed) 和 [complex\_key\_hashed](#complex-key-hashed). 其提供最佳的处理速度。 + +不建议使用缓存,因为性能可能较差,并且难以选择最佳参数。 阅读更多的部分 “[缓存](#cache)”. + +有几种方法可以提高字典性能: + +- 调用该函数以使用后的字典 `GROUP BY`. +- 将要提取的属性标记为"注射"。 如果不同的属性值对应于不同的键,则称为注射属性。 所以当 `GROUP BY` 使用由键获取属性值的函数,此函数会自动取出 `GROUP BY`. + +ClickHouse为字典中的错误生成异常。 错误示例: + +- 无法加载正在访问的字典。 +- 查询错误 `cached` 字典 + +您可以查看外部字典的列表及其状态 `system.dictionaries` 桌子 + +配置如下所示: + +``` xml + + + ... + + + + + + ... + + +``` + +相应的 [DDL-查询](../../statements/create.md#create-dictionary-query): + +``` sql +CREATE DICTIONARY (...) +... +LAYOUT(LAYOUT_TYPE(param value)) -- layout settings +... +``` + +## 在内存中存储字典的方法 {#ways-to-store-dictionaries-in-memory} + +- [平](#flat) +- [散列](#dicts-external_dicts_dict_layout-hashed) +- [sparse\_hashed](#dicts-external_dicts_dict_layout-sparse_hashed) +- [缓存](#cache) +- [range\_hashed](#range-hashed) +- [complex\_key\_hashed](#complex-key-hashed) +- [complex\_key\_cache](#complex-key-cache) +- [ip\_trie](#ip-trie) + +### 平 {#flat} + +字典以平面数组的形式完全存储在内存中。 字典使用多少内存? 量与最大键的大小(在使用的空间中)成正比。 + +字典键具有 `UInt64` 类型和值限制为500,000。 如果在创建字典时发现较大的键,ClickHouse将引发异常,不会创建字典。 + +支持所有类型的来源。 更新时,数据(来自文件或表)将完整读取。 + +此方法在存储字典的所有可用方法中提供了最佳性能。 + +配置示例: + +``` xml + + + +``` + +或 + +``` sql +LAYOUT(FLAT()) +``` + +### 散列 {#dicts-external_dicts_dict_layout-hashed} + +该字典以哈希表的形式完全存储在内存中。 字典中可以包含任意数量的带有任意标识符的元素,在实践中,键的数量可以达到数千万项。 + +支持所有类型的来源。 更新时,数据(来自文件或表)将完整读取。 + +配置示例: + +``` xml + + + +``` + +或 + +``` sql +LAYOUT(HASHED()) +``` + +### sparse\_hashed {#dicts-external_dicts_dict_layout-sparse_hashed} + +类似于 `hashed`,但使用更少的内存,有利于更多的CPU使用率。 + +配置示例: + +``` xml + + + +``` + +``` sql +LAYOUT(SPARSE_HASHED()) +``` + +### complex\_key\_hashed {#complex-key-hashed} + +这种类型的存储是用于复合 [键](external_dicts_dict_structure.md). 类似于 `hashed`. 
+ +配置示例: + +``` xml + + + +``` + +``` sql +LAYOUT(COMPLEX_KEY_HASHED()) +``` + +### range\_hashed {#range-hashed} + +字典以哈希表的形式存储在内存中,其中包含有序范围及其相应值的数组。 + +此存储方法的工作方式与散列方式相同,除了键之外,还允许使用日期/时间(任意数字类型)范围。 + +示例:该表格包含每个广告客户的折扣,格式为: + +``` text ++---------|-------------|-------------|------+ +| advertiser id | discount start date | discount end date | amount | ++===============+=====================+===================+========+ +| 123 | 2015-01-01 | 2015-01-15 | 0.15 | ++---------|-------------|-------------|------+ +| 123 | 2015-01-16 | 2015-01-31 | 0.25 | ++---------|-------------|-------------|------+ +| 456 | 2015-01-01 | 2015-01-15 | 0.05 | ++---------|-------------|-------------|------+ +``` + +要对日期范围使用示例,请定义 `range_min` 和 `range_max` 中的元素 [结构](external_dicts_dict_structure.md). 这些元素必须包含元素 `name` 和`type` (如果 `type` 如果没有指定,则默认类型将使用-Date)。 `type` 可以是任何数字类型(Date/DateTime/UInt64/Int32/others)。 + +示例: + +``` xml + + + Id + + + first + Date + + + last + Date + + ... +``` + +或 + +``` sql +CREATE DICTIONARY somedict ( + id UInt64, + first Date, + last Date +) +PRIMARY KEY id +LAYOUT(RANGE_HASHED()) +RANGE(MIN first MAX last) +``` + +要使用这些字典,您需要将附加参数传递给 `dictGetT` 函数,为其选择一个范围: + +``` sql +dictGetT('dict_name', 'attr_name', id, date) +``` + +此函数返回指定的值 `id`s和包含传递日期的日期范围。 + +算法的详细信息: + +- 如果 `id` 未找到或范围未找到 `id`,它返回字典的默认值。 +- 如果存在重叠范围,则可以使用任意范围。 +- 如果范围分隔符是 `NULL` 或无效日期(如1900-01-01或2039-01-01),范围保持打开状态。 范围可以在两侧打开。 + +配置示例: + +``` xml + + + + ... + + + + + + + + Abcdef + + + StartTimeStamp + UInt64 + + + EndTimeStamp + UInt64 + + + XXXType + String + + + + + + +``` + +或 + +``` sql +CREATE DICTIONARY somedict( + Abcdef UInt64, + StartTimeStamp UInt64, + EndTimeStamp UInt64, + XXXType String DEFAULT '' +) +PRIMARY KEY Abcdef +RANGE(MIN StartTimeStamp MAX EndTimeStamp) +``` + +### 缓存 {#cache} + +字典存储在具有固定数量的单元格的缓存中。 这些单元格包含经常使用的元素。 + +搜索字典时,首先搜索缓存。 对于每个数据块,所有在缓存中找不到或过期的密钥都从源请求,使用 `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. 然后将接收到的数据写入高速缓存。 + +对于缓存字典,过期 [使用寿命](external_dicts_dict_lifetime.md) 可以设置高速缓存中的数据。 如果更多的时间比 `lifetime` 自从在单元格中加载数据以来,单元格的值不被使用,并且在下次需要使用时重新请求它。 +这是存储字典的所有方法中最不有效的。 缓存的速度在很大程度上取决于正确的设置和使用场景。 缓存类型字典只有在命中率足够高(推荐99%或更高)时才能表现良好。 您可以查看平均命中率 `system.dictionaries` 桌子 + +要提高缓存性能,请使用以下子查询 `LIMIT`,并从外部调用字典函数。 + +支持 [来源](external_dicts_dict_sources.md):MySQL的,ClickHouse的,可执行文件,HTTP. + +设置示例: + +``` xml + + + + 1000000000 + + +``` + +或 + +``` sql +LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) +``` + +设置足够大的缓存大小。 你需要尝试选择细胞的数量: + +1. 设置一些值。 +2. 运行查询,直到缓存完全满。 +3. 使用评估内存消耗 `system.dictionaries` 桌子 +4. 增加或减少单元数,直到达到所需的内存消耗。 + +!!! warning "警告" + 不要使用ClickHouse作为源,因为处理随机读取的查询速度很慢。 + +### complex\_key\_cache {#complex-key-cache} + +这种类型的存储是用于复合 [键](external_dicts_dict_structure.md). 类似于 `cache`. + +### ip\_trie {#ip-trie} + +这种类型的存储用于将网络前缀(IP地址)映射到ASN等元数据。 + +示例:该表包含网络前缀及其对应的AS号码和国家代码: + +``` text + +-----------|-----|------+ + | prefix | asn | cca2 | + +=================+=======+========+ + | 202.79.32.0/20 | 17501 | NP | + +-----------|-----|------+ + | 2620:0:870::/48 | 3856 | US | + +-----------|-----|------+ + | 2a02:6b8:1::/48 | 13238 | RU | + +-----------|-----|------+ + | 2001:db8::/32 | 65536 | ZZ | + +-----------|-----|------+ +``` + +使用此类布局时,结构必须具有复合键。 + +示例: + +``` xml + + + + prefix + String + + + + asn + UInt32 + + + + cca2 + String + ?? + + ... +``` + +或 + +``` sql +CREATE DICTIONARY somedict ( + prefix String, + asn UInt32, + cca2 String DEFAULT '??' 
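+ -- 注:prefix 是唯一的键属性,类型必须为包含允许的 IP 前缀的 String(见下文说明)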
+) +PRIMARY KEY prefix +``` + +该键必须只有一个包含允许的IP前缀的字符串类型属性。 还不支持其他类型。 + +对于查询,必须使用相同的函数 (`dictGetT` 与元组)至于具有复合键的字典: + +``` sql +dictGetT('dict_name', 'attr_name', tuple(ip)) +``` + +该函数采用任一 `UInt32` 对于IPv4,或 `FixedString(16)` 碌莽禄Ipv6拢IPv6: + +``` sql +dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) +``` + +还不支持其他类型。 该函数返回与此IP地址对应的前缀的属性。 如果有重叠的前缀,则返回最具体的前缀。 + +数据存储在一个 `trie`. 它必须完全适合RAM。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md new file mode 100644 index 00000000000..66ff7124ba1 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -0,0 +1,86 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 42 +toc_title: "\u5B57\u5178\u66F4\u65B0" +--- + +# 字典更新 {#dictionary-updates} + +ClickHouse定期更新字典。 完全下载字典的更新间隔和缓存字典的无效间隔在 `` 在几秒钟内标记。 + +字典更新(除首次使用的加载之外)不会阻止查询。 在更新期间,将使用旧版本的字典。 如果在更新过程中发生错误,则将错误写入服务器日志,并使用旧版本的字典继续查询。 + +设置示例: + +``` xml + + ... + 300 + ... + +``` + +``` sql +CREATE DICTIONARY (...) +... +LIFETIME(300) +... +``` + +设置 `0` (`LIFETIME(0)`)防止字典更新。 + +您可以设置升级的时间间隔,ClickHouse将在此范围内选择一个统一的随机时间。 为了在大量服务器上升级时分配字典源上的负载,这是必要的。 + +设置示例: + +``` xml + + ... + + 300 + 360 + + ... + +``` + +或 + +``` sql +LIFETIME(MIN 300 MAX 360) +``` + +升级字典时,ClickHouse服务器根据字典的类型应用不同的逻辑 [来源](external_dicts_dict_sources.md): + +- 对于文本文件,它检查修改的时间。 如果时间与先前记录的时间不同,则更新字典。 +- 对于MyISAM表,修改的时间使用检查 `SHOW TABLE STATUS` 查询。 +- 默认情况下,每次都会更新来自其他来源的字典。 + +对于MySQL(InnoDB),ODBC和ClickHouse源代码,您可以设置一个查询,只有在字典真正改变时才会更新字典,而不是每次都更新。 为此,请按照下列步骤操作: + +- 字典表必须具有在源数据更新时始终更改的字段。 +- 源的设置必须指定检索更改字段的查询。 ClickHouse服务器将查询结果解释为一行,如果此行相对于其以前的状态发生了更改,则更新字典。 指定查询 `` 字段中的设置 [来源](external_dicts_dict_sources.md). + +设置示例: + +``` xml + + ... + + ... + SELECT update_time FROM dictionary_source where id = 1 + + ... + +``` + +或 + +``` sql +... +SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) +... +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md new file mode 100644 index 00000000000..c8173749b33 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -0,0 +1,608 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 43 +toc_title: "\u5916\u90E8\u5B57\u5178\u7684\u6765\u6E90" +--- + +# 外部字典的来源 {#dicts-external-dicts-dict-sources} + +外部字典可以从许多不同的来源连接。 + +如果使用xml-file配置字典,则配置如下所示: + +``` xml + + + ... + + + + + + ... + + ... + +``` + +在情况下 [DDL-查询](../../statements/create.md#create-dictionary-query),相等的配置将看起来像: + +``` sql +CREATE DICTIONARY dict_name (...) +... +SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration +... 
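+-- 例如,本地文件来源可写作 SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')),详见下文"本地文件"一节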
+``` + +源配置在 `source` 科。 + +来源类型 (`source_type`): + +- [本地文件](#dicts-external_dicts_dict_sources-local_file) +- [可执行文件](#dicts-external_dicts_dict_sources-executable) +- [HTTP(s)](#dicts-external_dicts_dict_sources-http) +- DBMS + - [ODBC](#dicts-external_dicts_dict_sources-odbc) + - [MySQL](#dicts-external_dicts_dict_sources-mysql) + - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) + - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) + - [Redis](#dicts-external_dicts_dict_sources-redis) + +## 本地文件 {#dicts-external_dicts_dict_sources-local_file} + +设置示例: + +``` xml + + + /opt/dictionaries/os.tsv + TabSeparated + + +``` + +或 + +``` sql +SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +设置字段: + +- `path` – The absolute path to the file. +- `format` – The file format. All the formats described in “[格式](../../../interfaces/formats.md#formats)” 支持。 + +## 可执行文件 {#dicts-external_dicts_dict_sources-executable} + +使用可执行文件取决于 [字典如何存储在内存中](external_dicts_dict_layout.md). 如果字典存储使用 `cache` 和 `complex_key_cache`,ClickHouse通过向可执行文件的STDIN发送请求来请求必要的密钥。 否则,ClickHouse将启动可执行文件并将其输出视为字典数据。 + +设置示例: + +``` xml + + + cat /opt/dictionaries/os.tsv + TabSeparated + + +``` + +或 + +``` sql +SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +设置字段: + +- `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). +- `format` – The file format. All the formats described in “[格式](../../../interfaces/formats.md#formats)” 支持。 + +## Http(s) {#dicts-external_dicts_dict_sources-http} + +使用HTTP(s)服务器取决于 [字典如何存储在内存中](external_dicts_dict_layout.md). 如果字典存储使用 `cache` 和 `complex_key_cache`,ClickHouse通过通过发送请求请求必要的密钥 `POST` 方法。 + +设置示例: + +``` xml + + + http://[::1]/os.tsv + TabSeparated + + user + password + + +
    + API-KEY + key +
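+ <!-- 以上 name/value(API-KEY/key)定义了一个自定义 HTTP 头,等价于下文 SQL 写法中的 headers(header(name 'API-KEY' value 'key')) -->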
    +
    +
    + +``` + +或 + +``` sql +SOURCE(HTTP( + url 'http://[::1]/os.tsv' + format 'TabSeparated' + credentials(user 'user' password 'password') + headers(header(name 'API-KEY' value 'key')) +)) +``` + +为了让ClickHouse访问HTTPS资源,您必须 [配置openSSL](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-openssl) 在服务器配置中。 + +设置字段: + +- `url` – The source URL. +- `format` – The file format. All the formats described in “[格式](../../../interfaces/formats.md#formats)” 支持。 +- `credentials` – Basic HTTP authentication. Optional parameter. + - `user` – Username required for the authentication. + - `password` – Password required for the authentication. +- `headers` – All custom HTTP headers entries used for the HTTP request. Optional parameter. + - `header` – Single HTTP header entry. + - `name` – Identifiant name used for the header send on the request. + - `value` – Value set for a specific identifiant name. + +## ODBC {#dicts-external_dicts_dict_sources-odbc} + +您可以使用此方法连接具有ODBC驱动程序的任何数据库。 + +设置示例: + +``` xml + + + DatabaseName + ShemaName.TableName
    + DSN=some_parameters + SQL_QUERY +
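+ <!-- 其中 DSN=some_parameters 为 connection_string,SQL_QUERY 为 invalidate_query(用于检查字典状态的查询),与下方 SQL 写法中的同名参数对应 -->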
    + +``` + +或 + +``` sql +SOURCE(ODBC( + db 'DatabaseName' + table 'SchemaName.TableName' + connection_string 'DSN=some_parameters' + invalidate_query 'SQL_QUERY' +)) +``` + +设置字段: + +- `db` – Name of the database. Omit it if the database name is set in the `` 参数。 +- `table` – Name of the table and schema if exists. +- `connection_string` – Connection string. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [更新字典](external_dicts_dict_lifetime.md). + +ClickHouse接收来自ODBC-driver的引用符号,并将查询中的所有设置引用到driver,因此有必要根据数据库中的表名大小写设置表名。 + +如果您在使用Oracle时遇到编码问题,请参阅相应的 [FAQ](../../../faq/general.md#oracle-odbc-encodings) 文章. + +### ODBC字典功能的已知漏洞 {#known-vulnerability-of-the-odbc-dictionary-functionality} + +!!! attention "注意" + 通过ODBC驱动程序连接参数连接到数据库时 `Servername` 可以取代。 在这种情况下,值 `USERNAME` 和 `PASSWORD` 从 `odbc.ini` 被发送到远程服务器,并且可能会受到损害。 + +**不安全使用示例** + +让我们为PostgreSQL配置unixODBC。 的内容 `/etc/odbc.ini`: + +``` text +[gregtest] +Driver = /usr/lib/psqlodbca.so +Servername = localhost +PORT = 5432 +DATABASE = test_db +#OPTION = 3 +USERNAME = test +PASSWORD = test +``` + +如果然后进行查询,例如 + +``` sql +SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); +``` + +ODBC驱动程序将发送的值 `USERNAME` 和 `PASSWORD` 从 `odbc.ini` 到 `some-server.com`. + +### 连接Postgresql的示例 {#example-of-connecting-postgresql} + +Ubuntu操作系统。 + +为PostgreSQL安装unixODBC和ODBC驱动程序: + +``` bash +$ sudo apt-get install -y unixodbc odbcinst odbc-postgresql +``` + +配置 `/etc/odbc.ini` (或 `~/.odbc.ini`): + +``` text + [DEFAULT] + Driver = myconnection + + [myconnection] + Description = PostgreSQL connection to my_db + Driver = PostgreSQL Unicode + Database = my_db + Servername = 127.0.0.1 + UserName = username + Password = password + Port = 5432 + Protocol = 9.3 + ReadOnly = No + RowVersioning = No + ShowSystemTables = No + ConnSettings = +``` + +ClickHouse中的字典配置: + +``` xml + + + table_name + + + + + DSN=myconnection + postgresql_table
    +
    + + + 300 + 360 + + + + + + + id + + + some_column + UInt64 + 0 + + +
    +
    +``` + +或 + +``` sql +CREATE DICTIONARY table_name ( + id UInt64, + some_column UInt64 DEFAULT 0 +) +PRIMARY KEY id +SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table')) +LAYOUT(HASHED()) +LIFETIME(MIN 300 MAX 360) +``` + +您可能需要编辑 `odbc.ini` 使用驱动程序指定库的完整路径 `DRIVER=/usr/local/lib/psqlodbcw.so`. + +### 连接MS SQL Server的示例 {#example-of-connecting-ms-sql-server} + +Ubuntu操作系统。 + +安装驱动程序: : + +``` bash +$ sudo apt-get install tdsodbc freetds-bin sqsh +``` + +配置驱动程序: + +``` bash + $ cat /etc/freetds/freetds.conf + ... + + [MSSQL] + host = 192.168.56.101 + port = 1433 + tds version = 7.0 + client charset = UTF-8 + + $ cat /etc/odbcinst.ini + ... + + [FreeTDS] + Description = FreeTDS + Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so + Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so + FileUsage = 1 + UsageCount = 5 + + $ cat ~/.odbc.ini + ... + + [MSSQL] + Description = FreeTDS + Driver = FreeTDS + Servername = MSSQL + Database = test + UID = test + PWD = test + Port = 1433 +``` + +在ClickHouse中配置字典: + +``` xml + + + test + + + dict
    + DSN=MSSQL;UID=test;PWD=test +
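+ <!-- DSN=MSSQL;UID=test;PWD=test 即 ODBC 的 connection_string,引用上文 ~/.odbc.ini 中定义的 [MSSQL] 数据源 -->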
    + + + + 300 + 360 + + + + + + + + + k + + + s + String + + + +
    +
    +``` + +或 + +``` sql +CREATE DICTIONARY test ( + k UInt64, + s String DEFAULT '' +) +PRIMARY KEY k +SOURCE(ODBC(table 'dict' connection_string 'DSN=MSSQL;UID=test;PWD=test')) +LAYOUT(FLAT()) +LIFETIME(MIN 300 MAX 360) +``` + +## DBMS {#dbms} + +### Mysql {#dicts-external_dicts_dict_sources-mysql} + +设置示例: + +``` xml + + + 3306 + clickhouse + qwerty + + example01-1 + 1 + + + example01-2 + 1 + + db_name + table_name
    + id=10 + SQL_QUERY +
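+ <!-- 其中 id=10 为 where 选择条件,SQL_QUERY 为 invalidate_query,含义见下方设置字段说明 -->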
    + +``` + +或 + +``` sql +SOURCE(MYSQL( + port 3306 + user 'clickhouse' + password 'qwerty' + replica(host 'example01-1' priority 1) + replica(host 'example01-2' priority 1) + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +设置字段: + +- `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). + +- `user` – Name of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `password` – Password of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `replica` – Section of replica configurations. There can be multiple sections. + + - `replica/host` – The MySQL host. + - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. + +- `db` – Name of the database. + +- `table` – Name of the table. + +- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` 例如,mysql中的子句, `id > 10 AND id < 20`. 可选参数。 + +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [更新字典](external_dicts_dict_lifetime.md). + +MySQL可以通过套接字在本地主机上连接。 要做到这一点,设置 `host` 和 `socket`. + +设置示例: + +``` xml + + + localhost + /path/to/socket/file.sock + clickhouse + qwerty + db_name + table_name
    + id=10 + SQL_QUERY +
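+ <!-- 同上:id=10 为 where 条件,SQL_QUERY 为 invalidate_query -->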
    + +``` + +或 + +``` sql +SOURCE(MYSQL( + host 'localhost' + socket '/path/to/socket/file.sock' + user 'clickhouse' + password 'qwerty' + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} + +设置示例: + +``` xml + + + example01-01-1 + 9000 + default + + default + ids
    + id=10 +
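+ <!-- 其中 id=10 为 where 选择条件(可省略),见下方设置字段说明 -->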
    + +``` + +或 + +``` sql +SOURCE(CLICKHOUSE( + host 'example01-01-1' + port 9000 + user 'default' + password '' + db 'default' + table 'ids' + where 'id=10' +)) +``` + +设置字段: + +- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [分布](../../../engines/table_engines/special/distributed.md) 表并在后续配置中输入它。 +- `port` – The port on the ClickHouse server. +- `user` – Name of the ClickHouse user. +- `password` – Password of the ClickHouse user. +- `db` – Name of the database. +- `table` – Name of the table. +- `where` – The selection criteria. May be omitted. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [更新字典](external_dicts_dict_lifetime.md). + +### Mongodb {#dicts-external_dicts_dict_sources-mongodb} + +设置示例: + +``` xml + + + localhost + 27017 + + + test + dictionary_source + + +``` + +或 + +``` sql +SOURCE(MONGO( + host 'localhost' + port 27017 + user '' + password '' + db 'test' + collection 'dictionary_source' +)) +``` + +设置字段: + +- `host` – The MongoDB host. +- `port` – The port on the MongoDB server. +- `user` – Name of the MongoDB user. +- `password` – Password of the MongoDB user. +- `db` – Name of the database. +- `collection` – Name of the collection. + +### Redis {#dicts-external_dicts_dict_sources-redis} + +设置示例: + +``` xml + + + localhost + 6379 + simple + 0 + + +``` + +或 + +``` sql +SOURCE(REDIS( + host 'localhost' + port 6379 + storage_type 'simple' + db_index 0 +)) +``` + +设置字段: + +- `host` – The Redis host. +- `port` – The port on the Redis server. +- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` 适用于简单源和散列单键源, `hash_map` 用于具有两个键的散列源。 不支持具有复杂键的范围源和缓存源。 可以省略,默认值为 `simple`. +- `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md new file mode 100644 index 00000000000..0ac0226aa50 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md @@ -0,0 +1,175 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 44 +toc_title: "\u5B57\u5178\u952E\u548C\u5B57\u6BB5" +--- + +# 字典键和字段 {#dictionary-key-and-fields} + +该 `` 子句描述可用于查询的字典键和字段。 + +XML描述: + +``` xml + + + + Id + + + + + + + ... + + + +``` + +属性在元素中描述: + +- `` — [键列](external_dicts_dict_structure.md#ext_dict_structure-key). +- `` — [数据列](external_dicts_dict_structure.md#ext_dict_structure-attributes). 可以有多个属性。 + +DDL查询: + +``` sql +CREATE DICTIONARY dict_name ( + Id UInt64, + -- attributes +) +PRIMARY KEY Id +... +``` + +查询正文中描述了属性: + +- `PRIMARY KEY` — [键列](external_dicts_dict_structure.md#ext_dict_structure-key) +- `AttrName AttrType` — [数据列](external_dicts_dict_structure.md#ext_dict_structure-attributes). 可以有多个属性。 + +## 键 {#ext_dict_structure-key} + +ClickHouse支持以下类型的键: + +- 数字键。 `UInt64`. 在定义 `` 标记或使用 `PRIMARY KEY` 关键字。 +- 复合密钥。 组不同类型的值。 在标签中定义 `` 或 `PRIMARY KEY` 关键字。 + +Xml结构可以包含 `` 或 ``. DDL-查询必须包含单个 `PRIMARY KEY`. + +!!! warning "警告" + 不能将键描述为属性。 + +### 数字键 {#ext_dict-numeric-key} + +类型: `UInt64`. 
+ +配置示例: + +``` xml + + Id + +``` + +配置字段: + +- `name` – The name of the column with keys. + +对于DDL-查询: + +``` sql +CREATE DICTIONARY ( + Id UInt64, + ... +) +PRIMARY KEY Id +... +``` + +- `PRIMARY KEY` – The name of the column with keys. + +### 复合密钥 {#composite-key} + +关键可以是一个 `tuple` 从任何类型的字段。 该 [布局](external_dicts_dict_layout.md) 在这种情况下,必须是 `complex_key_hashed` 或 `complex_key_cache`. + +!!! tip "提示" + 复合键可以由单个元素组成。 例如,这使得可以使用字符串作为键。 + +键结构在元素中设置 ``. 键字段的格式与字典的格式相同 [属性](external_dicts_dict_structure.md). 示例: + +``` xml + + + + field1 + String + + + field2 + UInt32 + + ... + +... +``` + +或 + +``` sql +CREATE DICTIONARY ( + field1 String, + field2 String + ... +) +PRIMARY KEY field1, field2 +... +``` + +对于查询 `dictGet*` 函数中,一个元组作为键传递。 示例: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`. + +## 属性 {#ext_dict_structure-attributes} + +配置示例: + +``` xml + + ... + + Name + ClickHouseDataType + + rand64() + true + true + true + + +``` + +或 + +``` sql +CREATE DICTIONARY somename ( + Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID +) +``` + +配置字段: + +| 标签 | 产品描述 | 必填项 | +|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------| +| `name` | 列名称。 | 是 | +| `type` | ClickHouse数据类型。
ClickHouse尝试将字典中的值转换为指定的数据类型。例如,在MySQL源表中该字段可能是 `TEXT`、`VARCHAR` 或 `BLOB` 类型,但在ClickHouse中可以按 `String` 类型上传。
不支持 [可为空](../../../sql_reference/data_types/nullable.md) 类型。 | 是 |
| `null_value` | 不存在的元素的默认值。
在示例中,它是一个空字符串。此字段中不能使用 `NULL`。 | 是 |
| `expression` | ClickHouse 对该值执行的[表达式](../../syntax.md#syntax-expressions)。
    表达式可以是远程SQL数据库中的列名。 因此,您可以使用它为远程列创建别名。

默认值:无表达式。 | 否 |
| `hierarchical` | 如果为 `true`,该属性包含当前键的父键值。参见 [分层字典](external_dicts_dict_hierarchical.md)。

默认值:`false`。 | 否 |
| `injective` | 表示 `id -> attribute` 映射是否为[单射](https://en.wikipedia.org/wiki/Injective_function)的标志。
如果为 `true`,ClickHouse 可以自动把对字典的请求放到 `GROUP BY` 子句之后执行。通常这会显著减少此类请求的数量。

默认值:`false`。 | 否 |
| `is_object_id` | 表示是否通过 `ObjectID` 对 MongoDB 文档执行查询的标志。

    默认值: `false`. | 非也。 | + +## 另请参阅 {#see-also} + +- [使用外部字典的函数](../../../sql_reference/functions/ext_dict_functions.md). + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/index.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/index.md new file mode 100644 index 00000000000..25d86ecda96 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u5916\u90E8\u5B57\u5178" +toc_priority: 37 +--- + + diff --git a/docs/zh/sql_reference/dictionaries/index.md b/docs/zh/sql_reference/dictionaries/index.md new file mode 100644 index 00000000000..9c9817ad0ad --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/index.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u5B57\u5178" +toc_priority: 35 +toc_title: "\u5BFC\u8A00" +--- + +# 字典 {#dictionaries} + +字典是一个映射 (`key -> attributes`)这是方便各种类型的参考清单。 + +ClickHouse支持使用可用于查询的字典的特殊功能。 这是更容易和更有效地使用字典与功能比 `JOIN` 与参考表。 + +[NULL](../syntax.md#null) 值不能存储在字典中。 + +ClickHouse支持: + +- [内置字典](internal_dicts.md#internal_dicts) 具有特定的 [功能集](../../sql_reference/functions/ym_dict_functions.md). +- [插件(外部)字典](external_dictionaries/external_dicts.md) 用一个 [职能净额](../../sql_reference/functions/ext_dict_functions.md). + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/zh/sql_reference/dictionaries/internal_dicts.md b/docs/zh/sql_reference/dictionaries/internal_dicts.md new file mode 100644 index 00000000000..bcede3c14ad --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/internal_dicts.md @@ -0,0 +1,55 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 39 +toc_title: "\u5185\u90E8\u5B57\u5178" +--- + +# 内部字典 {#internal_dicts} + +ClickHouse包含用于处理地理数据库的内置功能。 + +这使您可以: + +- 使用区域的ID以所需语言获取其名称。 +- 使用区域ID获取城市、地区、联邦区、国家或大陆的ID。 +- 检查一个区域是否属于另一个区域。 +- 获取父区域链。 + +所有功能支持 “translocality,” 能够同时使用不同的角度对区域所有权。 有关详细信息,请参阅部分 “Functions for working with Yandex.Metrica dictionaries”. + +在默认包中禁用内部字典。 +要启用它们,请取消注释参数 `path_to_regions_hierarchy_file` 和 `path_to_regions_names_files` 在服务器配置文件中。 + +Geobase从文本文件加载。 + +将 `regions_hierarchy*.txt` 文件到 `path_to_regions_hierarchy_file` 目录。 此配置参数必须包含指向 `regions_hierarchy.txt` 文件(默认区域层次结构)和其他文件 (`regions_hierarchy_ua.txt`)必须位于同一目录中。 + +把 `regions_names_*.txt` 在文件 `path_to_regions_names_files` 目录。 + +您也可以自己创建这些文件。 文件格式如下: + +`regions_hierarchy*.txt`:TabSeparated(无标题),列: + +- 地区ID (`UInt32`) +- 父区域ID (`UInt32`) +- 区域类型 (`UInt8`):1-大陆,3-国家,4-联邦区,5-地区,6-城市;其他类型没有价值 +- 人口 (`UInt32`) — optional column + +`regions_names_*.txt`:TabSeparated(无标题),列: + +- 地区ID (`UInt32`) +- 地区名称 (`String`) — Can't contain tabs or line feeds, even escaped ones. 
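+
+作为示意,`regions_hierarchy.txt` 中的一行可能如下(制表符分隔,无标题行;其中的 ID、区域类型与人口均为假设的示例数据):
+
+``` text
+3	1	5	1000000
+```
+
+对应地,`regions_names_*.txt` 中的一行形如 `3	示例地区名`。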
+ +平面阵列用于存储在RAM中。 出于这个原因,Id不应该超过一百万。 + +字典可以在不重新启动服务器的情况下更新。 但是,不会更新可用字典集。 +对于更新,将检查文件修改时间。 如果文件已更改,则更新字典。 +检查更改的时间间隔在 `builtin_dictionaries_reload_interval` 参数。 +字典更新(首次使用时加载除外)不会阻止查询。 在更新期间,查询使用旧版本的字典。 如果在更新过程中发生错误,则将错误写入服务器日志,并使用旧版本的字典继续查询。 + +我们建议定期使用geobase更新字典。 在更新期间,生成新文件并将其写入单独的位置。 一切准备就绪后,将其重命名为服务器使用的文件。 + +还有与操作系统标识符和Yandex的工作功能。Metrica搜索引擎,但他们不应该被使用。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/zh/sql_reference/functions/arithmetic_functions.md b/docs/zh/sql_reference/functions/arithmetic_functions.md new file mode 100644 index 00000000000..66bd42ec63a --- /dev/null +++ b/docs/zh/sql_reference/functions/arithmetic_functions.md @@ -0,0 +1,77 @@ + +# 算术函数 {#suan-zhu-han-shu} + +对于所有算术函数,结果类型为结果适合的最小数字类型(如果存在这样的类型)。最小数字类型是根据数字的位数,是否有符号以及是否是浮点类型而同时进行的。如果没有足够的位,则采用最高位类型。 + +例如: + +``` sql +SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 + 0 + 0) +``` + + ┌─toTypeName(0)─┬─toTypeName(plus(0, 0))─┬─toTypeName(plus(plus(0, 0), 0))─┬─toTypeName(plus(plus(plus(0, 0), 0), 0))─┐ + │ UInt8 │ UInt16 │ UInt32 │ UInt64 │ + └───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘ + +算术函数适用于UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64,Float32或Float64中的任何类型。 + +溢出的产生方式与C++相同。 + +## 加(a,b),a+b {#plusa-b-a-b} + +计算数字的总和。 +您还可以将Date或DateTime与整数进行相加。在Date的情况下,添加的整数意味着添加相应的天数。对于DateTime,这意味这添加相应的描述。 + +## 减(a,b),a-b {#minusa-b-a-b} + +计算数字之间的差,结果总是有符号的。 + +您还可以将Date或DateTime与整数进行相减。见上面的'plus'。 + +## 乘(a,b),a\*b {#multiplya-b-a-b} + +计算数字的乘积。 + +## 除以(a,b),a/b {#dividea-b-a-b} + +计算数字的商。结果类型始终是浮点类型。 +它不是整数除法。对于整数除法,请使用'intDiv'函数。 +当除以零时,你得到'inf',‘- inf’或’nan’。 + +## intDiv(a,b) {#intdiva-b} + +计算整数数字的商,向下舍入(按绝对值)。 +除以零或将最小负数除以-1时抛出异常。 + +## intDivOrZero(a,b) {#intdivorzeroa-b} + +与'intDiv'的不同之处在于它在除以零或将最小负数除以-1时返回零。 + +## 模(a,b),a%b {#moduloa-b-a-b} + +计算除法后的余数。 +如果参数是浮点数,则通过删除小数部分将它们预转换为整数。 +其余部分与C++中的含义相同。截断除法用于负数。 +除以零或将最小负数除以-1时抛出异常。 + +## 否定(a),-a {#negatea-a} + +计算一个数字的 +用反转符号计算一个数字。结果始终是签名的。 +计算具有反向符号的数字。 结果始终签名。 + +## abs(a) {#arithm_func-abs} + +计算数字(a)的绝对值。也就是说,如果a &lt; 0,它返回-a。对于无符号类型,它不执行任何操作。对于有符号整数类型,它返回无符号数。 + +## gcd(a,b) {#gcda-b} + +返回数字的最大公约数。 +除以零或将最小负数除以-1时抛出异常。 + +## lcm(a,b) {#lcma-b} + +返回数字的最小公倍数。 +除以零或将最小负数除以-1时抛出异常。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/) diff --git a/docs/zh/sql_reference/functions/array_functions.md b/docs/zh/sql_reference/functions/array_functions.md new file mode 100644 index 00000000000..cb8f7347b72 --- /dev/null +++ b/docs/zh/sql_reference/functions/array_functions.md @@ -0,0 +1,666 @@ + +# 数组函数 {#shu-zu-han-shu} + +## 空 {#empty} + +对于空数组返回1,对于非空数组返回0。 +结果类型是UInt8。 +该函数也适用于字符串。 + +## notEmpty {#notempty} + +对于空数组返回0,对于非空数组返回1。 +结果类型是UInt8。 +该函数也适用于字符串。 + +## 长度 {#array_functions-length} + +返回数组中的元素个数。 +结果类型是UInt64。 +该函数也适用于字符串。 + +## emptyArrayUInt8,emptyArrayUInt16,emptyArrayUInt32,emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} + +## emptyArrayInt8,emptyArrayInt16,emptyArrayInt32,emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} + +## emptyArrayFloat32,emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} + +## 空空漫步,空空漫步时间 {#emptyarraydate-emptyarraydatetime} + +## 空字符串 {#emptyarraystring} + +不接受任何参数并返回适当类型的空数组。 + +## emptyArrayToSingle {#emptyarraytosingle} + +接受一个空数组并返回一个仅包含一个默认值元素的数组。 + +## 范围(N) {#rangen} + +返回从0到N-1的数字数组。 +以防万一,如果在数据块中创建总长度超过100,000,000个元素的数组,则抛出异常。 + +## 
array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} + +使用函数的参数作为数组元素创建一个数组。 +参数必须是常量,并且具有最小公共类型的类型。必须至少传递一个参数,否则将不清楚要创建哪种类型的数组。也就是说,你不能使用这个函数来创建一个空数组(为此,使用上面描述的'emptyArray  \*'函数)。 +返回'Array(T)'类型的结果,其中'T'是传递的参数中最小的公共类型。 + +## arrayConcat {#arrayconcat} + +合并参数中传递的所有数组。 + + arrayConcat(arrays) + +**参数** + +- `arrays` – 任意数量的[阵列](../../sql_reference/functions/array_functions.md)类型的参数. + **示例** + + + +``` sql +SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res +``` + + ┌─res───────────┐ + │ [1,2,3,4,5,6] │ + └───────────────┘ + +## arrayElement(arr,n),运算符arr\[n\] {#arrayelementarr-n-operator-arrn} + +从数组`arr`中获取索引为«n»的元素。 `n`必须是任何整数类型。 +数组中的索引从一开始。 +支持负索引。在这种情况下,它选择从末尾开始编号的相应元素。例如,`arr [-1]`是数组中的最后一项。 + +如果索引超出数组的边界,则返回默认值(数字为0,字符串为空字符串等)。 + +## 有(arr,elem) {#hasarr-elem} + +检查'arr'数组是否具有'elem'元素。 +如果元素不在数组中,则返回0;如果在,则返回1。 + +`NULL` 值的处理。 + + SELECT has([1, 2, NULL], NULL) + + ┌─has([1, 2, NULL], NULL)─┐ + │ 1 │ + └─────────────────────────┘ + +## hasAll {#hasall} + +检查一个数组是否是另一个数组的子集。 + + hasAll(set, subset) + +**参数** + +- `set` – 具有一组元素的任何类型的数组。 +- `subset` – 任何类型的数组,其元素应该被测试为`set`的子集。 + +**返回值** + +- `1`, 如果`set`包含`subset`中的所有元素。 +- `0`, 否则。 + +**特殊的定义** + +- 空数组是任何数组的子集。 +- «Null»作为数组中的元素值进行处理。 +- 忽略两个数组中的元素值的顺序。 + +**示例** + +`SELECT hasAll([], [])` 返回1。 + +`SELECT hasAll([1, Null], [Null])` 返回1。 + +`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` 返回1。 + +`SELECT hasAll(['a', 'b'], ['a'])` 返回1。 + +`SELECT hasAll([1], ['a'])` 返回0。 + +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` 返回0。 + +## hasAny {#hasany} + +检查两个数组是否存在交集。 + + hasAny(array1, array2) + +**参数** + +- `array1` – 具有一组元素的任何类型的数组。 +- `array2` – 具有一组元素的任何类型的数组。 + +**返回值** + +- `1`, 如果`array1`和`array2`存在交集。 +- `0`, 否则。 + +**特殊的定义** + +- «Null»作为数组中的元素值进行处理。 +- 忽略两个数组中的元素值的顺序。 + +**示例** + +`SELECT hasAny([1], [])` 返回 `0`. + +`SELECT hasAny([Null], [Null, 1])` 返回 `1`. + +`SELECT hasAny([-128, 1., 512], [1])` 返回 `1`. + +`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` 返回 `0`. + +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` 返回 `1`. 
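+
+作为补充,下面的查询在一条 `SELECT` 中同时演示这两个函数的行为(数组取值仅为示意):
+
+``` sql
+SELECT
+    hasAny([1, 2, 3], [3, 5]) AS any_match, -- 两个数组存在公共元素 3,返回 1
+    hasAll([1, 2, 3], [3, 5]) AS all_match -- 元素 5 不在第一个数组中,返回 0
+```
+
+    ┌─any_match─┬─all_match─┐
+    │         1 │         0 │
+    └───────────┴───────────┘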
+ +## indexOf(arr,x) {#indexofarr-x} + +返回数组中第一个'x'元素的索引(从1开始),如果'x'元素不存在在数组中,则返回0。 + +示例: + + :) SELECT indexOf([1,3,NULL,NULL],NULL) + + SELECT indexOf([1, 3, NULL, NULL], NULL) + + ┌─indexOf([1, 3, NULL, NULL], NULL)─┐ + │ 3 │ + └───────────────────────────────────┘ + +设置为«NULL»的元素将作为普通的元素值处理。 + +## countEqual(arr,x) {#countequalarr-x} + +返回数组中等于x的元素的个数。相当于arrayCount(elem - \> elem = x,arr)。 + +`NULL`值将作为单独的元素值处理。 + +示例: + + SELECT countEqual([1, 2, NULL, NULL], NULL) + + ┌─countEqual([1, 2, NULL, NULL], NULL)─┐ + │ 2 │ + └──────────────────────────────────────┘ + +## ツ暗ェツ氾环催ツ団ツ法ツ人) {#array_functions-arrayenumerate} + +返回 Array \[1, 2, 3, …, length (arr) \] + +此功能通常与ARRAY JOIN一起使用。它允许在应用ARRAY JOIN后为每个数组计算一次。例如: + +``` sql +SELECT + count() AS Reaches, + countIf(num = 1) AS Hits +FROM test.hits +ARRAY JOIN + GoalsReached, + arrayEnumerate(GoalsReached) AS num +WHERE CounterID = 160656 +LIMIT 10 +``` + + ┌─Reaches─┬──Hits─┐ + │ 95606 │ 31406 │ + └─────────┴───────┘ + +在此示例中,Reaches是转换次数(应用ARRAY JOIN后接收的字符串),Hits是浏览量(ARRAY JOIN之前的字符串)。在这种特殊情况下,您可以更轻松地获得相同的结果: + +``` sql +SELECT + sum(length(GoalsReached)) AS Reaches, + count() AS Hits +FROM test.hits +WHERE (CounterID = 160656) AND notEmpty(GoalsReached) +``` + + ┌─Reaches─┬──Hits─┐ + │ 95606 │ 31406 │ + └─────────┴───────┘ + +此功能也可用于高阶函数。例如,您可以使用它来获取与条件匹配的元素的数组索引。 + +## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr} + +返回与源数组大小相同的数组,其中每个元素表示与其下标对应的源数组元素在源数组中出现的次数。 +例如:arrayEnumerateUniq( \[10,20,10,30 \])=  \[1,1,2,1 \]。 + +使用ARRAY JOIN和数组元素的聚合时,此函数很有用。 + +示例: + +``` sql +SELECT + Goals.ID AS GoalID, + sum(Sign) AS Reaches, + sumIf(Sign, num = 1) AS Visits +FROM test.visits +ARRAY JOIN + Goals, + arrayEnumerateUniq(Goals.ID) AS num +WHERE CounterID = 160656 +GROUP BY GoalID +ORDER BY Reaches DESC +LIMIT 10 +``` + + ┌──GoalID─┬─Reaches─┬─Visits─┐ + │ 53225 │ 3214 │ 1097 │ + │ 2825062 │ 3188 │ 1097 │ + │ 56600 │ 2803 │ 488 │ + │ 1989037 │ 2401 │ 365 │ + │ 2830064 │ 2396 │ 910 │ + │ 1113562 │ 2372 │ 373 │ + │ 3270895 │ 2262 │ 812 │ + │ 1084657 │ 2262 │ 345 │ + │ 56599 │ 2260 │ 799 │ + │ 3271094 │ 2256 │ 812 │ + └─────────┴─────────┴────────┘ + +在此示例中,每个GoalID都计算转换次数(目标嵌套数据结构中的每个元素都是达到的目标,我们称之为转换)和会话数。如果没有ARRAY JOIN,我们会将会话数计为总和(Sign)。但在这种特殊情况下,行乘以嵌套的Goals结构,因此为了在此之后计算每个会话一次,我们将一个条件应用于arrayEnumerateUniq(Goals.ID)函数的值。 + +arrayEnumerateUniq函数可以使用与参数大小相同的多个数组。在这种情况下,对于所有阵列中相同位置的元素元组,考虑唯一性。 + +``` sql +SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res +``` + + ┌─res───────────┐ + │ [1,2,1,1,2,1] │ + └───────────────┘ + +当使用带有嵌套数据结构的ARRAY JOIN并在此结构中跨多个元素进一步聚合时,这是必需的。 + +## arrayPopBack {#arraypopback} + +从数组中删除最后一项。 + + arrayPopBack(array) + +**参数** + +- `array` – 数组。 + +**示例** + +``` sql +SELECT arrayPopBack([1, 2, 3]) AS res +``` + + ┌─res───┐ + │ [1,2] │ + └───────┘ + +## arrayPopFront {#arraypopfront} + +从数组中删除第一项。 + + arrayPopFront(array) + +**参数** + +- `array` – 数组。 + +**示例** + +``` sql +SELECT arrayPopFront([1, 2, 3]) AS res +``` + + ┌─res───┐ + │ [2,3] │ + └───────┘ + +## arrayPushBack {#arraypushback} + +添加一个元素到数组的末尾。 + + arrayPushBack(array, single_value) + +**参数** + +- `array` – 数组。 +- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../sql_reference/functions/array_functions.md#data_types)»。可以是'NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 + +**示例** + +``` sql +SELECT arrayPushBack(['a'], 'b') AS res +``` + + ┌─res───────┐ + │ ['a','b'] │ + └───────────┘ + +## arrayPushFront {#arraypushfront} + +将一个元素添加到数组的开头。 + + 
arrayPushFront(array, single_value) + +**参数** + +- `array` – 数组。 +- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../sql_reference/functions/array_functions.md#data_types)»。可以是'NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 + +**示例** + +``` sql +SELECT arrayPushFront(['b'], 'a') AS res +``` + + ┌─res───────┐ + │ ['a','b'] │ + └───────────┘ + +## arrayResize {#arrayresize} + +更改数组的长度。 + + arrayResize(array, size[, extender]) + +**参数:** + +- `array` — 数组. +- `size` — 数组所需的长度。 + - 如果`size`小于数组的原始大小,则数组将从右侧截断。 +- 如果`size`大于数组的初始大小,则使用`extender`值或数组项的数据类型的默认值将数组扩展到右侧。 +- `extender` — 扩展数组的值。可以是'NULL\`。 + +**返回值:** + +一个`size`长度的数组。 + +**调用示例** + + SELECT arrayResize([1], 3) + + ┌─arrayResize([1], 3)─┐ + │ [1,0,0] │ + └─────────────────────┘ + + SELECT arrayResize([1], 3, NULL) + + ┌─arrayResize([1], 3, NULL)─┐ + │ [1,NULL,NULL] │ + └───────────────────────────┘ + +## arraySlice {#arrayslice} + +返回一个子数组,包含从指定位置的指定长度的元素。 + + arraySlice(array, offset[, length]) + +**参数** + +- `array` – 数组。 +- `offset` – 数组的偏移。正值表示左侧的偏移量,负值表示右侧的缩进值。数组下标从1开始。 +- `length` - 子数组的长度。如果指定负值,则该函数返回`[offset,array_length - length`。如果省略该值,则该函数返回`[offset,the_end_of_array]`。 + +**示例** + +``` sql +SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res +``` + + ┌─res────────┐ + │ [2,NULL,4] │ + └────────────┘ + +设置为«NULL»的数组元素作为普通的数组元素值处理。 + +## arraySort(\[func,\] arr, …) {#array_functions-reverse-sort} + +以升序对`arr`数组的元素进行排序。如果指定了`func`函数,则排序顺序由`func`函数的调用结果决定。如果`func`接受多个参数,那么`arraySort`函数也将解析与`func`函数参数相同数量的数组参数。更详细的示例在`arraySort`的末尾。 + +整数排序示例: + +``` sql +SELECT arraySort([1, 3, 3, 0]); +``` + + ┌─arraySort([1, 3, 3, 0])─┐ + │ [0,1,3,3] │ + └─────────────────────────┘ + +字符串排序示例: + +``` sql +SELECT arraySort(['hello', 'world', '!']); +``` + + ┌─arraySort(['hello', 'world', '!'])─┐ + │ ['!','hello','world'] │ + └────────────────────────────────────┘ + +`NULL`,`NaN`和`Inf`的排序顺序: + +``` sql +SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); +``` + + ┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ + │ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ + └───────────────────────────────────────────────────────────┘ + +- `-Inf` 是数组中的第一个。 +- `NULL` 是数组中的最后一个。 +- `NaN` 在`NULL`的前面。 +- `Inf` 在`NaN`的前面。 + +注意:`arraySort`是[高阶函数](higher_order_functions.md)。您可以将lambda函数作为第一个参数传递给它。在这种情况下,排序顺序由lambda函数的调用结果决定。 + +让我们来看一下如下示例: + +``` sql +SELECT arraySort((x) -> -x, [1, 2, 3]) as res; +``` + + ┌─res─────┐ + │ [3,2,1] │ + └─────────┘ + +对于源数组的每个元素,lambda函数返回排序键,即\[1 -\> -1, 2 -\> -2, 3 -\> -3\]。由于`arraySort`函数按升序对键进行排序,因此结果为\[3,2,1\]。因此,`(x) -> -x` lambda函数将排序设置为[降序](#array_functions-reverse-sort)。 + +lambda函数可以接受多个参数。在这种情况下,您需要为`arraySort`传递与lambda参数个数相同的数组。函数使用第一个输入的数组中的元素组成返回结果;使用接下来传入的数组作为排序键。例如: + +``` sql +SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + + ┌─res────────────────┐ + │ ['world', 'hello'] │ + └────────────────────┘ + +这里,在第二个数组(\[2, 1\])中定义了第一个数组(\[‘hello’,‘world’\])的相应元素的排序键,即\[‘hello’ -\> 2,‘world’ -\> 1\]。 由于lambda函数中没有使用`x`,因此源数组中的实际值不会影响结果的顺序。所以,'world'将是结果中的第一个元素,'hello'将是结果中的第二个元素。 + +其他示例如下所示。 + +``` sql +SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; +``` + +``` sql +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +``` sql +SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; +``` + +``` sql +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +!!! 
注意 "注意" + 为了提高排序效率, 使用了[施瓦茨变换](https://en.wikipedia.org/wiki/Schwartzian_transform)。 + +## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} + +以降序对`arr`数组的元素进行排序。如果指定了`func`函数,则排序顺序由`func`函数的调用结果决定。如果`func`接受多个参数,那么`arrayReverseSort`函数也将解析与`func`函数参数相同数量的数组作为参数。更详细的示例在`arrayReverseSort`的末尾。 + +整数排序示例: + +``` sql +SELECT arrayReverseSort([1, 3, 3, 0]); +``` + + ┌─arrayReverseSort([1, 3, 3, 0])─┐ + │ [3,3,1,0] │ + └────────────────────────────────┘ + +字符串排序示例: + +``` sql +SELECT arrayReverseSort(['hello', 'world', '!']); +``` + + ┌─arrayReverseSort(['hello', 'world', '!'])─┐ + │ ['world','hello','!'] │ + └───────────────────────────────────────────┘ + +`NULL`,`NaN`和`Inf`的排序顺序: + +``` sql +SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; +``` + +``` sql +┌─res───────────────────────────────────┐ +│ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────┘ +``` + +- `Inf` 是数组中的第一个。 +- `NULL` 是数组中的最后一个。 +- `NaN` 在`NULL`的前面。 +- `-Inf` 在`NaN`的前面。 + +注意:`arraySort`是[高阶函数](higher_order_functions.md)。您可以将lambda函数作为第一个参数传递给它。如下示例所示。 + +``` sql +SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; +``` + + ┌─res─────┐ + │ [1,2,3] │ + └─────────┘ + +数组按以下方式排序: +数组按以下方式排序: + +1. 首先,根据lambda函数的调用结果对源数组(\[1, 2, 3\])进行排序。 结果是\[3, 2, 1\]。 +2. 反转上一步获得的数组。 所以,最终的结果是\[1, 2, 3\]。 + +lambda函数可以接受多个参数。在这种情况下,您需要为`arrayReverseSort`传递与lambda参数个数相同的数组。函数使用第一个输入的数组中的元素组成返回结果;使用接下来传入的数组作为排序键。例如: + +``` sql +SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` sql +┌─res───────────────┐ +│ ['hello','world'] │ +└───────────────────┘ +``` + +在这个例子中,数组按以下方式排序: + +1. 首先,根据lambda函数的调用结果对源数组(\[‘hello’,‘world’\])进行排序。 其中,在第二个数组(\[2,1\])中定义了源数组中相应元素的排序键。 所以,排序结果\[‘world’,‘hello’\]。 +2. 
反转上一步骤中获得的排序数组。 所以,最终的结果是\[‘hello’,‘world’\]。 + +其他示例如下所示。 + +``` sql +SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; +``` + +``` sql +┌─res─────┐ +│ [5,3,4] │ +└─────────┘ +``` + +``` sql +SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; +``` + +``` sql +┌─res─────┐ +│ [4,3,5] │ +└─────────┘ +``` + +## arrayUniq(arr, …) {#arrayuniqarr} + +如果传递一个参数,则计算数组中不同元素的数量。 +如果传递了多个参数,则它计算多个数组中相应位置的不同元素元组的数量。 + +如果要获取数组中唯一项的列表,可以使用arrayReduce(‘groupUniqArray’,arr)。 + +## arryjoin(arr) {#array-functions-join} + +一个特殊的功能。请参见[«ArrayJoin函数»](array_join.md#functions_arrayjoin)部分。 + +## arrayDifference(arr) {#arraydifferencearr} + +返回一个数组,其中包含所有相邻元素对之间的差值。例如: + +``` sql +SELECT arrayDifference([1, 2, 3, 4]) +``` + + ┌─arrayDifference([1, 2, 3, 4])─┐ + │ [0,1,1,1] │ + └───────────────────────────────┘ + +## arrayDistinct(arr) {#arraydistinctarr} + +返回一个包含所有数组中不同元素的数组。例如: + +``` sql +SELECT arrayDistinct([1, 2, 2, 3, 1]) +``` + + ┌─arrayDistinct([1, 2, 2, 3, 1])─┐ + │ [1,2,3] │ + └────────────────────────────────┘ + +## arrayEnumerateDense(arr) {#arrayenumeratedensearr} + +返回与源数组大小相同的数组,指示每个元素首次出现在源数组中的位置。例如:arrayEnumerateDense(\[10,20,10,30\])= \[1,2,1,3\]。 + +## arrayIntersect(arr) {#arrayintersectarr} + +返回所有数组元素的交集。例如: + +``` sql +SELECT + arrayIntersect([1, 2], [1, 3], [2, 3]) AS no_intersect, + arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect +``` + + ┌─no_intersect─┬─intersect─┐ + │ [] │ [1] │ + └──────────────┴───────────┘ + +## arrayReduce(agg\_func, arr1, …) {#arrayreduceagg-func-arr1} + +将聚合函数应用于数组并返回其结果。如果聚合函数具有多个参数,则此函数可应用于相同大小的多个数组。 + +arrayReduce(‘agg\_func’,arr1,…) - 将聚合函数`agg_func`应用于数组`arr1 ...`。如果传递了多个数组,则相应位置上的元素将作为多个参数传递给聚合函数。例如:SELECT arrayReduce(‘max’,\[1,2,3\])= 3 + +## ツ暗ェツ氾环催ツ団ツ法ツ人) {#arrayreversearr} + +返回与源数组大小相同的数组,包含反转源数组的所有元素的结果。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/zh/sql_reference/functions/array_join.md b/docs/zh/sql_reference/functions/array_join.md new file mode 100644 index 00000000000..1788b44f3e5 --- /dev/null +++ b/docs/zh/sql_reference/functions/array_join.md @@ -0,0 +1,29 @@ + +# arrayJoin函数 {#functions_arrayjoin} + +这是一个非常有用的函数。 + +普通函数不会更改结果集的行数,而只是计算每行中的值(map)。 +聚合函数将多行压缩到一行中(fold或reduce)。 +'arrayJoin'函数获取每一行并将他们展开到多行(unfold)。 + +此函数将数组作为参数,并将该行在结果集中复制数组元素个数。 +除了应用此函数的列中的值之外,简单地复制列中的所有值;它被替换为相应的数组值。 + +查询可以使用多个`arrayJoin`函数。在这种情况下,转换被执行多次。 + +请注意SELECT查询中的ARRAY JOIN语法,它提供了更广泛的可能性。 + +示例: + +``` sql +SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src +``` + + ┌─dst─┬─\'Hello\'─┬─src─────┐ + │ 1 │ Hello │ [1,2,3] │ + │ 2 │ Hello │ [1,2,3] │ + │ 3 │ Hello │ [1,2,3] │ + └─────┴───────────┴─────────┘ + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/zh/sql_reference/functions/bit_functions.md b/docs/zh/sql_reference/functions/bit_functions.md new file mode 100644 index 00000000000..1b280c8babd --- /dev/null +++ b/docs/zh/sql_reference/functions/bit_functions.md @@ -0,0 +1,30 @@ + +# 位操作函数 {#wei-cao-zuo-han-shu} + +位操作函数适用于UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64,Float32或Float64中的任何类型。 + +结果类型是一个整数,其位数等于其参数的最大位。如果至少有一个参数为有符数字,则结果为有符数字。如果参数是浮点数,则将其强制转换为Int64。 + +## bitAnd(a,b) {#bitanda-b} + +## bitOr(a,b) {#bitora-b} + +## bitXor(a,b) {#bitxora-b} + +## bitNot(a) {#bitnota} + +## bitShiftLeft(a,b) {#bitshiftlefta-b} + +## bitShiftRight(a,b) {#bitshiftrighta-b} + +## bitRotateLeft(a,b) {#bitrotatelefta-b} + +## bitRotateRight(a,b) {#bitrotaterighta-b} + +## bitTest(a,b) {#bittesta-b} + +## 
bitTestAll(a,b) {#bittestalla-b} + +## bitTestAny(a,b) {#bittestanya-b} + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/zh/sql_reference/functions/bitmap_functions.md b/docs/zh/sql_reference/functions/bitmap_functions.md new file mode 100644 index 00000000000..3415b590644 --- /dev/null +++ b/docs/zh/sql_reference/functions/bitmap_functions.md @@ -0,0 +1,385 @@ + +# 位图函数 {#wei-tu-han-shu} + +位图函数用于对两个位图对象进行计算,对于任何一个位图函数,它都将返回一个位图对象,例如and,or,xor,not等等。 + +位图对象有两种构造方法。一个是由聚合函数groupBitmapState构造的,另一个是由Array Object构造的。同时还可以将位图对象转化为数组对象。 + +我们使用RoaringBitmap实际存储位图对象,当基数小于或等于32时,它使用Set保存。当基数大于32时,它使用RoaringBitmap保存。这也是为什么低基数集的存储更快的原因。 + +有关RoaringBitmap的更多信息,请参阅:[呻吟声](https://github.com/RoaringBitmap/CRoaring)。 + +## bitmapBuild {#bitmapbuild} + +从无符号整数数组构建位图对象。 + + bitmapBuild(array) + +**参数** + +- `array` – 无符号整数数组. + +**示例** + +``` sql +SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res +``` + +## bitmapToArray {#bitmaptoarray} + +将位图转换为整数数组。 + + bitmapToArray(bitmap) + +**参数** + +- `bitmap` – 位图对象. + +**示例** + +``` sql +SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─────────┐ + │ [1,2,3,4,5] │ + └─────────────┘ + +## bitmapSubsetInRange {#bitmapsubsetinrange} + +将位图指定范围(不包含range\_end)转换为另一个位图。 + + bitmapSubsetInRange(bitmap, range_start, range_end) + +**参数** + +- `bitmap` – 位图对象. +- `range_start` – 范围起始点(含). +- `range_end` – 范围结束点(不含). + +**示例** + +``` sql +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + + ┌─res───────────────┐ + │ [30,31,32,33,100] │ + └───────────────────┘ + +## bitmapSubsetLimit {#bitmapsubsetlimit} + +将位图指定范围(起始点和数目上限)转换为另一个位图。 + + bitmapSubsetLimit(bitmap, range_start, limit) + +**参数** + +- `bitmap` – 位图对象. +- `range_start` – 范围起始点(含). +- `limit` – 子位图基数上限. + +**示例** + +``` sql +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + + ┌─res───────────────────────┐ + │ [30,31,32,33,100,200,500] │ + └───────────────────────────┘ + +## bitmapContains {#bitmapcontains} + +检查位图是否包含指定元素。 + + bitmapContains(haystack, needle) + +**参数** + +- `haystack` – 位图对象. +- `needle` – 元素,类型UInt32. 
+ +**示例** + +``` sql +SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## bitmapHasAny {#bitmaphasany} + +与`hasAny(array,array)`类似,如果位图有任何公共元素则返回1,否则返回0。 +对于空位图,返回0。 + + bitmapHasAny(bitmap,bitmap) + +**参数** + +- `bitmap` – bitmap对象。 + +**示例** + +``` sql +SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + + ┌─res─┐ + │ 1 │ + └─────┘ + +## bitmapHasAll {#bitmaphasall} + +与`hasAll(array,array)`类似,如果第一个位图包含第二个位图的所有元素,则返回1,否则返回0。 +如果第二个参数是空位图,则返回1。 + + bitmapHasAll(bitmap,bitmap) + +**参数** + +- `bitmap` – bitmap 对象。 + +**示例** + +``` sql +SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + + ┌─res─┐ + │ 0 │ + └─────┘ + +## 位图和 {#bitmapand} + +为两个位图对象进行与操作,返回一个新的位图对象。 + + bitmapAnd(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + + ┌─res─┐ + │ [3] │ + └─────┘ + +## 位图 {#bitmapor} + +为两个位图对象进行或操作,返回一个新的位图对象。 + + bitmapOr(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + + ┌─res─────────┐ + │ [1,2,3,4,5] │ + └─────────────┘ + +## bitmapXor {#bitmapxor} + +为两个位图对象进行异或操作,返回一个新的位图对象。 + + bitmapXor(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + + ┌─res───────┐ + │ [1,2,4,5] │ + └───────────┘ + +## bitmapAndnot {#bitmapandnot} + +计算两个位图的差异,返回一个新的位图对象。 + + bitmapAndnot(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + + ┌─res───┐ + │ [1,2] │ + └───────┘ + +## bitmapCardinality {#bitmapcardinality} + +返回一个UInt64类型的数值,表示位图对象的基数。 + + bitmapCardinality(bitmap) + +**参数** + +- `bitmap` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 5 │ + └─────┘ + +## bitmapMin {#bitmapmin} + +返回一个UInt64类型的数值,表示位图中的最小值。如果位图为空则返回UINT32\_MAX。 + + bitmapMin(bitmap) + +**参数** + +- `bitmap` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 1 │ + └─────┘ + +## bitmapMax {#bitmapmax} + +返回一个UInt64类型的数值,表示位图中的最大值。如果位图为空则返回0。 + + bitmapMax(bitmap) + +**参数** + +- `bitmap` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 5 │ + └─────┘ + +## 位图和标准性 {#bitmapandcardinality} + +为两个位图对象进行与操作,返回结果位图的基数。 + + bitmapAndCardinality(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + + ┌─res─┐ + │ 1 │ + └─────┘ + +## bitmapOrCardinality {#bitmaporcardinality} + +为两个位图进行或运算,返回结果位图的基数。 + + bitmapOrCardinality(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + + ┌─res─┐ + │ 5 │ + └─────┘ + +## bitmapXorCardinality {#bitmapxorcardinality} + +为两个位图进行异或运算,返回结果位图的基数。 + + bitmapXorCardinality(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` – 位图对象。 + +**示例** + +``` sql +SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + + ┌─res─┐ + │ 4 │ + 
└─────┘ + +## 位图和非标准性 {#bitmapandnotcardinality} + +计算两个位图的差异,返回结果位图的基数。 + + bitmapAndnotCardinality(bitmap1,bitmap2) + +**参数** + +- `bitmap1` – 位图对象。 +- `bitmap2` - 位图对象。 + +**示例** + +``` sql +SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + + ┌─res─┐ + │ 2 │ + └─────┘ + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) diff --git a/docs/zh/sql_reference/functions/comparison_functions.md b/docs/zh/sql_reference/functions/comparison_functions.md new file mode 100644 index 00000000000..a73d983f386 --- /dev/null +++ b/docs/zh/sql_reference/functions/comparison_functions.md @@ -0,0 +1,33 @@ + +# 比较函数 {#bi-jiao-han-shu} + +比较函数始终返回0或1(UInt8)。 + +可以比较以下类型: + +- 数字 +- String 和 FixedString +- 日期 +- 日期时间 + +以上每个组内的类型均可互相比较,但是对于不同组的类型间不能够进行比较。 + +例如,您无法将日期与字符串进行比较。您必须使用函数将字符串转换为日期,反之亦然。 + +字符串按字节进行比较。较短的字符串小于以其开头并且至少包含一个字符的所有字符串。 + +注意。直到1.1.54134版本,有符号和无符号数字的比较方式与C++相同。换句话说,在SELECT 9223372036854775807 &gt; -1 等情况下,您可能会得到错误的结果。 此行为在版本1.1.54134中已更改,现在在数学上是正确的。 + +## 等于,a=b和a==b运算符 {#equals-a-b-and-a-b-operator} + +## notEquals,a! 运算符=b和a `<>` b {#notequals-a-operator-b-and-a-b} + +## 少, `< operator` {#less-operator} + +## 更大, `> operator` {#greater-operator} + +## 出租等级, `<= operator` {#lessorequals-operator} + +## 伟大的等级, `>= operator` {#greaterorequals-operator} + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/zh/sql_reference/functions/conditional_functions.md b/docs/zh/sql_reference/functions/conditional_functions.md new file mode 100644 index 00000000000..d9721fa3f60 --- /dev/null +++ b/docs/zh/sql_reference/functions/conditional_functions.md @@ -0,0 +1,45 @@ + +# 条件函数 {#tiao-jian-han-shu} + +## 如果(cond,那么,否则),cond? 运算符然后:else {#ifcond-then-else-cond-operator-then-else} + +如果`cond != 0`则返回`then`,如果`cond = 0`则返回`else`。 +`cond`必须是`UInt8`类型,`then`和`else`必须存在最低的共同类型。 + +`then`和`else`可以是`NULL` + +## 多 {#multiif} + +允许您在查询中更紧凑地编写[CASE](../operators.md#operator_case)运算符。 + + multiIf(cond_1, then_1, cond_2, then_2...else) + +**参数:** + +- `cond_N` — 函数返回`then_N`的条件。 +- `then_N` — 执行时函数的结果。 +- `else` — 如果没有满足任何条件,则为函数的结果。 + +该函数接受`2N + 1`参数。 + +**返回值** + +该函数返回值«then\_N»或«else»之一,具体取决于条件`cond_N`。 + +**示例** + +存在如下一张表 + + ┌─x─┬────y─┐ + │ 1 │ ᴺᵁᴸᴸ │ + │ 2 │ 3 │ + └───┴──────┘ + +执行查询 `SELECT multiIf(isNull(y) x, y < 3, y, NULL) FROM t_null`。结果: + + ┌─multiIf(isNull(y), x, less(y, 3), y, NULL)─┐ + │ 1 │ + │ ᴺᵁᴸᴸ │ + └────────────────────────────────────────────┘ + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/zh/sql_reference/functions/date_time_functions.md b/docs/zh/sql_reference/functions/date_time_functions.md new file mode 100644 index 00000000000..ca974f563db --- /dev/null +++ b/docs/zh/sql_reference/functions/date_time_functions.md @@ -0,0 +1,293 @@ + +# 时间日期函数 {#shi-jian-ri-qi-han-shu} + +支持时区。 + +所有的时间日期函数都可以在第二个可选参数中接受时区参数。示例:Asia / Yekaterinburg。在这种情况下,它们使用指定的时区而不是本地(默认)时区。 + +``` sql +SELECT + toDateTime('2016-06-15 23:00:00') AS time, + toDate(time) AS date_local, + toDate(time, 'Asia/Yekaterinburg') AS date_yekat, + toString(time, 'US/Samoa') AS time_samoa +``` + + ┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐ + │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │ + └─────────────────────┴────────────┴────────────┴─────────────────────┘ + +仅支持与UTC相差一整小时的时区。 + +## toTimeZone {#totimezone} + +将Date或DateTime转换为指定的时区。 + +## 玩一年 {#toyear} + 
+将Date或DateTime转换为包含年份编号(AD)的UInt16类型的数字。 + +## 到四分钟 {#toquarter} + +将Date或DateTime转换为包含季度编号的UInt8类型的数字。 + +## toMonth {#tomonth} + +将Date或DateTime转换为包含月份编号(1-12)的UInt8类型的数字。 + +## 今天一年 {#todayofyear} + +将Date或DateTime转换为包含一年中的某一天的编号的UInt16(1-366)类型的数字。 + +## 今天月 {#todayofmonth} + +将Date或DateTime转换为包含一月中的某一天的编号的UInt8(1-31)类型的数字。 + +## 今天一周 {#todayofweek} + +将Date或DateTime转换为包含一周中的某一天的编号的UInt8(周一是1, 周日是7)类型的数字。 + +## toHour {#tohour} + +将DateTime转换为包含24小时制(0-23)小时数的UInt8数字。 +这个函数假设如果时钟向前移动,它是一个小时,发生在凌晨2点,如果时钟被移回,它是一个小时,发生在凌晨3点(这并非总是如此 - 即使在莫斯科时钟在不同的时间两次改变)。 + +## toMinute {#tominute} + +将DateTime转换为包含一小时中分钟数(0-59)的UInt8数字。 + +## 秒 {#tosecond} + +将DateTime转换为包含一分钟中秒数(0-59)的UInt8数字。 +闰秒不计算在内。 + +## toUnixTimestamp {#tounixtimestamp} + +将DateTime转换为unix时间戳。 + +## 开始一年 {#tostartofyear} + +将Date或DateTime向前取整到本年的第一天。 +返回Date类型。 + +## 今年开始 {#tostartofisoyear} + +将Date或DateTime向前取整到ISO本年的第一天。 +返回Date类型。 + +## 四分之一开始 {#tostartofquarter} + +将Date或DateTime向前取整到本季度的第一天。 +返回Date类型。 + +## 到月份开始 {#tostartofmonth} + +将Date或DateTime向前取整到本月的第一天。 +返回Date类型。 + +!!! 注意 "注意" +     解析不正确日期的行为是特定于实现的。 ClickHouse可能会返回零日期,抛出异常或执行«natural»溢出。 + +## toMonday {#tomonday} + +将Date或DateTime向前取整到本周的星期一。 +返回Date类型。 + +## 今天开始 {#tostartofday} + +将DateTime向前取整到当日的开始。 + +## 开始一小时 {#tostartofhour} + +将DateTime向前取整到当前小时的开始。 + +## to startofminute {#tostartofminute} + +将DateTime向前取整到当前分钟的开始。 + +## to startoffiveminute {#tostartoffiveminute} + +将DateTime以五分钟为单位向前取整到最接近的时间点。 + +## 开始分钟 {#tostartoftenminutes} + +将DateTime以十分钟为单位向前取整到最接近的时间点。 + +## 开始几分钟 {#tostartoffifteenminutes} + +将DateTime以十五分钟为单位向前取整到最接近的时间点。 + +## toStartOfInterval(time\_or\_data,间隔x单位\[,time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} + +这是名为`toStartOf*`的所有函数的通用函数。例如, +`toStartOfInterval(t,INTERVAL 1 year)`返回与`toStartOfYear(t)`相同的结果, +`toStartOfInterval(t,INTERVAL 1 month)`返回与`toStartOfMonth(t)`相同的结果, +`toStartOfInterval(t,INTERVAL 1 day)`返回与`toStartOfDay(t)`相同的结果, +`toStartOfInterval(t,INTERVAL 15 minute)`返回与`toStartOfFifteenMinutes(t)`相同的结果。 + +## toTime {#totime} + +将DateTime中的日期转换为一个固定的日期,同时保留时间部分。 + +## toRelativeYearNum {#torelativeyearnum} + +将Date或DateTime转换为年份的编号,从过去的某个固定时间点开始。 + +## toRelativeQuarterNum {#torelativequarternum} + +将Date或DateTime转换为季度的数字,从过去的某个固定时间点开始。 + +## toRelativeMonthNum {#torelativemonthnum} + +将Date或DateTime转换为月份的编号,从过去的某个固定时间点开始。 + +## toRelativeWeekNum {#torelativeweeknum} + +将Date或DateTime转换为星期数,从过去的某个固定时间点开始。 + +## toRelativeDayNum {#torelativedaynum} + +将Date或DateTime转换为当天的编号,从过去的某个固定时间点开始。 + +## toRelativeHourNum {#torelativehournum} + +将DateTime转换为小时数,从过去的某个固定时间点开始。 + +## toRelativeMinuteNum {#torelativeminutenum} + +将DateTime转换为分钟数,从过去的某个固定时间点开始。 + +## toRelativeSecondNum {#torelativesecondnum} + +将DateTime转换为秒数,从过去的某个固定时间点开始。 + +## toISOYear {#toisoyear} + +将Date或DateTime转换为包含ISO年份的UInt16类型的编号。 + +## toISOWeek {#toisoweek} + +将Date或DateTime转换为包含ISO周数的UInt8类型的编号。 + +## 现在 {#now} + +不接受任何参数并在请求执行时的某一刻返回当前时间(DateTime)。 +此函数返回一个常量,即时请求需要很长时间能够完成。 + +## 今天 {#today} + +不接受任何参数并在请求执行时的某一刻返回当前日期(Date)。 +其功能与'toDate(now())'相同。 + +## 昨天 {#yesterday} + +不接受任何参数并在请求执行时的某一刻返回昨天的日期(Date)。 +其功能与'today() - 1'相同。 + +## 时隙 {#timeslot} + +将时间向前取整半小时。 +此功能用于Yandex.Metrica,因为如果跟踪标记显示单个用户的连续综合浏览量在时间上严格超过此数量,则半小时是将会话分成两个会话的最短时间。这意味着(tag id,user id,time slot)可用于搜索相应会话中包含的综合浏览量。 + +## toyyymm {#toyyyymm} + +将Date或DateTime转换为包含年份和月份编号的UInt32类型的数字(YYYY \* 100 + MM)。 + +## toyyymmdd {#toyyyymmdd} + +将Date或DateTime转换为包含年份和月份编号的UInt32类型的数字(YYYY \* 10000 + MM \* 100 + DD)。 + +## toYYYYMMDDhhmmss {#toyyyymmddhhmmss} + 
+将Date或DateTime转换为包含年份和月份编号的UInt64类型的数字(YYYY \* 10000000000 + MM \* 100000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss)。 + +## 隆隆隆隆路虏脢,,陇,貌,垄拢卢虏禄quar陇,貌路,隆拢脳枚脢虏,麓脢,脱,,,录,禄庐戮,utes, {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} + +函数将一段时间间隔添加到Date/DateTime,然后返回Date/DateTime。例如: + +``` sql +WITH + toDate('2018-01-01') AS date, + toDateTime('2018-01-01 00:00:00') AS date_time +SELECT + addYears(date, 1) AS add_years_with_date, + addYears(date_time, 1) AS add_years_with_date_time +``` + + ┌─add_years_with_date─┬─add_years_with_date_time─┐ + │ 2019-01-01 │ 2019-01-01 00:00:00 │ + └─────────────────────┴──────────────────────────┘ + +## subtractYears,subtractMonths,subtractWeeks,subtractDays,subtractours,subtractMinutes,subtractSeconds,subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} + +函数将Date/DateTime减去一段时间间隔,然后返回Date/DateTime。例如: + +``` sql +WITH + toDate('2019-01-01') AS date, + toDateTime('2019-01-01 00:00:00') AS date_time +SELECT + subtractYears(date, 1) AS subtract_years_with_date, + subtractYears(date_time, 1) AS subtract_years_with_date_time +``` + + ┌─subtract_years_with_date─┬─subtract_years_with_date_time─┐ + │ 2018-01-01 │ 2018-01-01 00:00:00 │ + └──────────────────────────┴───────────────────────────────┘ + +## dateDiff(‘unit’,t1,t2,\[时区\]) {#datediffunit-t1-t2-timezone} + +返回以'unit'为单位表示的两个时间之间的差异,例如`'hours'`。 ‘t1’和’t2’可以是Date或DateTime,如果指定’timezone’,它将应用于两个参数。如果不是,则使用来自数据类型't1'和't2'的时区。如果时区不相同,则结果将是未定义的。 + +支持的单位值: + +| 单位 | +|------| +| 第二 | +| 分钟 | +| 小时 | +| 日 | +| 周 | +| 月 | +| 季 | +| 年 | + +## 时隙(开始时间,持续时间,\[,大小\]) {#timeslotsstarttime-duration-size} + +它返回一个时间数组,其中包括从从«StartTime»开始到«StartTime + Duration 秒»内的所有符合«size»(以秒为单位)步长的时间点。其中«size»是一个可选参数,默认为1800。 +例如,`timeSlots(toDateTime('2012-01-01 12:20:00'),600) = [toDateTime('2012-01-01 12:00:00'),toDateTime('2012-01-01 12:30:00' )]`。 +这对于搜索在相应会话中综合浏览量是非常有用的。 + +## formatDateTime(时间,格式\[,时区\]) {#formatdatetimetime-format-timezone} + +函数根据给定的格式字符串来格式化时间。请注意:格式字符串必须是常量表达式,例如:单个结果列不能有多种格式字符串。 + +支持的格式修饰符: +(«Example» 列是对`2018-01-02 22:33:44`的格式化结果) + +| 修饰符 | 产品描述 | 示例 | +|--------|-------------------------------------------|------------| +| %C | 年除以100并截断为整数(00-99) | 20 | +| %d | 月中的一天,零填充(01-31) | 02 | +| %D | 短MM/DD/YY日期,相当于%m/%d/%y | 01/02/2018 | +| %e | 月中的一天,空格填充(1-31) | 2 | +| %F | 短YYYY-MM-DD日期,相当于%Y-%m-%d | 2018-01-02 | +| %H | 24小时格式(00-23) | 22 | +| %I | 小时12h格式(01-12) | 10 | +| %j | 一年(001-366) | 002 | +| %m | 月份为十进制数(01-12) | 01 | +| %M | 分钟(00-59) | 33 | +| %n | 换行符(") | | +| %p | AM或PM指定 | PM | +| %R | 24小时HH:MM时间,相当于%H:%M | 22:33 | +| %S | 第二(00-59) | 44 | +| %t | 水平制表符(') | | +| %T | ISO8601时间格式(HH:MM:SS),相当于%H:%M:%S | 22:33:44 | +| %u | ISO8601平日as编号,星期一为1(1-7) | 2 | +| %V | ISO8601周编号(01-53) | 01 | +| %w | 周日为十进制数,周日为0(0-6) | 2 | +| %y | 年份,最后两位数字(00-99) | 18 | +| %Y | 年 | 2018 | +| %% | %符号 | % | + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) diff --git a/docs/zh/sql_reference/functions/encoding_functions.md b/docs/zh/sql_reference/functions/encoding_functions.md new file mode 100644 index 00000000000..42d10c4408f --- /dev/null +++ b/docs/zh/sql_reference/functions/encoding_functions.md @@ -0,0 +1,29 @@ + +# 编码函数 {#bian-ma-han-shu} + +## hex {#hex} + +接受`String`,`unsigned integer`,`Date`或`DateTime`类型的参数。返回包含参数的十六进制表示的字符串。使用大写字母`A-F`。不使用`0x`前缀或`h`后缀。对于字符串,所有字节都简单地编码为两个十六进制数字。数字转换为大端(«易阅读»)格式。对于数字,去除其中较旧的零,但仅限整个字节。例如,`hex(1)='01'`。 
+
+## unhex(str) {#unhexstr}
+
+接受包含任意数量的十六进制数字的字符串,并返回包含相应字节的字符串。支持大写和小写字母A-F。十六进制数字的数量不必是偶数。如果是奇数,则最后一位数被解释为00-0F字节的低位。如果参数字符串包含除十六进制数字以外的任何内容,则返回一些实现定义的结果(不抛出异常)。
+如果要将结果转换为数字,可以使用«reverse»和«reinterpretAsType»函数。
+
+## UUIDStringToNum(str) {#uuidstringtonumstr}
+
+接受包含36个字符的字符串,格式为«123e4567-e89b-12d3-a456-426655440000»,并将其转化为FixedString(16)返回。
+
+## UUIDNumToString(str) {#uuidnumtostringstr}
+
+接受FixedString(16)值。返回包含36个字符的文本格式的字符串。
+
+## bitmaskToList(num) {#bitmasktolistnum}
+
+接受一个整数。返回一个字符串,其中包含相加等于该整数的一组2的幂的列表。列表中的值以逗号分隔,按升序排列。
+
+## bitmaskToArray(num) {#bitmasktoarraynum}
+
+接受一个整数。返回一个UInt64类型的数组,其中包含相加等于该整数的一组2的幂的列表。数组中的数字按升序排列。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/)
diff --git a/docs/zh/sql_reference/functions/ext_dict_functions.md b/docs/zh/sql_reference/functions/ext_dict_functions.md
new file mode 100644
index 00000000000..23077618722
--- /dev/null
+++ b/docs/zh/sql_reference/functions/ext_dict_functions.md
@@ -0,0 +1,47 @@
+
+# 字典函数 {#zi-dian-han-shu}
+
+有关连接和配置外部词典的信息,请参阅[外部词典](../../sql_reference/functions/ext_dict_functions.md)。
+
+## dictGetUInt8,dictGetUInt16,dictGetUInt32,dictGetUInt64 {#dictgetuint8-dictgetuint16-dictgetuint32-dictgetuint64}
+
+## dictGetInt8,dictGetInt16,dictGetInt32,dictGetInt64 {#dictgetint8-dictgetint16-dictgetint32-dictgetint64}
+
+## dictGetFloat32,dictGetFloat64 {#dictgetfloat32-dictgetfloat64}
+
+## dictGetDate,dictGetDateTime {#dictgetdate-dictgetdatetime}
+
+## dictGetUUID {#dictgetuuid}
+
+## dictGetString {#dictgetstring}
+
+`dictGetT('dict_name', 'attr_name', id)`
+
+- 使用'id'键获取dict\_name字典中attr\_name属性的值。`dict_name`和`attr_name`是常量字符串。`id`必须是UInt64。
+  如果字典中没有`id`键,则返回字典描述中指定的默认值。
+
+## dictGetTOrDefault {#ext_dict_functions-dictgettordefault}
+
+`dictGetTOrDefault('dict_name', 'attr_name', id, default)`
+
+与`dictGetT`函数相同,但默认值取自函数的最后一个参数。
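+
+下面是一个假设性的用法示意(其中的字典名`ext_dict_test`和属性名`c1`仅为演示用途,需替换为实际配置的外部字典):
+
+``` sql
+SELECT
+    dictGetUInt64('ext_dict_test', 'c1', toUInt64(3)) AS val,
+    dictGetUInt64OrDefault('ext_dict_test', 'c1', toUInt64(999), toUInt64(0)) AS val_or_default
+```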
+
+## dictIsIn {#dictisin}
+
+`dictIsIn ('dict_name', child_id, ancestor_id)`
+
+- 对于'dict\_name'分层字典,查找'child\_id'键是否位于'ancestor\_id'内(或匹配'ancestor\_id')。返回UInt8。
+
+## dictGetHierarchy {#dictgethierarchy}
+
+`dictGetHierarchy('dict_name', id)`
+
+- 对于'dict\_name'分层字典,返回从'id'开始并沿父元素链继续的字典键数组。返回Array(UInt64)
+
+## dictHas {#dicthas}
+
+`dictHas('dict_name', id)`
+
+- 检查字典是否存在指定的`id`。如果不存在,则返回0;如果存在,则返回1。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/)
diff --git a/docs/zh/sql_reference/functions/functions_for_nulls.md b/docs/zh/sql_reference/functions/functions_for_nulls.md
new file mode 100644
index 00000000000..9252d8bfeb0
--- /dev/null
+++ b/docs/zh/sql_reference/functions/functions_for_nulls.md
@@ -0,0 +1,253 @@
+
+# Nullable处理函数 {#nullablechu-li-han-shu}
+
+## isNull {#isnull}
+
+检查参数是否为[NULL](../syntax.md#null)。
+
+    isNull(x)
+
+**参数**
+
+- `x` — 一个非复合数据类型的值。
+
+**返回值**
+
+- `1` 如果`x`为`NULL`。
+- `0` 如果`x`不为`NULL`。
+
+**示例**
+
+存在以下内容的表
+
+    ┌─x─┬────y─┐
+    │ 1 │ ᴺᵁᴸᴸ │
+    │ 2 │    3 │
+    └───┴──────┘
+
+对其进行查询
+
+    :) SELECT x FROM t_null WHERE isNull(y)
+
+    SELECT x
+    FROM t_null
+    WHERE isNull(y)
+
+    ┌─x─┐
+    │ 1 │
+    └───┘
+
+    1 rows in set. Elapsed: 0.010 sec.
+
+## isNotNull {#isnotnull}
+
+检查参数是否不为[NULL](../syntax.md#null)。
+
+    isNotNull(x)
+
+**参数:**
+
+- `x` — 一个非复合数据类型的值。
+
+**返回值**
+
+- `0` 如果`x`为`NULL`。
+- `1` 如果`x`不为`NULL`。
+
+**示例**
+
+存在以下内容的表
+
+    ┌─x─┬────y─┐
+    │ 1 │ ᴺᵁᴸᴸ │
+    │ 2 │    3 │
+    └───┴──────┘
+
+对其进行查询
+
+    :) SELECT x FROM t_null WHERE isNotNull(y)
+
+    SELECT x
+    FROM t_null
+    WHERE isNotNull(y)
+
+    ┌─x─┐
+    │ 2 │
+    └───┘
+
+    1 rows in set. Elapsed: 0.010 sec.
+
+## coalesce {#coalesce}
+
+从左到右检查参数,并返回第一个非`NULL`的参数。
+
+    coalesce(x,...)
+
+**参数:**
+
+- 任何数量的非复合类型的参数。所有参数必须与数据类型兼容。
+
+**返回值**
+
+- 第一个非`NULL`的参数。
+- 如果所有参数都是`NULL`,则返回`NULL`。
+
+**示例**
+
+考虑可以指定多种联系客户的方式的联系人列表。
+
+    ┌─name─────┬─mail─┬─phone─────┬──icq─┐
+    │ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │  123 │
+    │ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ      │ ᴺᵁᴸᴸ │
+    └──────────┴──────┴───────────┴──────┘
+
+`mail`和`phone`字段是String类型,但`icq`字段是`UInt32`,所以它需要转换为`String`。
+
+从联系人列表中获取客户的第一个可用联系方式:
+
+    :) SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook
+
+    SELECT coalesce(mail, phone, CAST(icq, 'Nullable(String)'))
+    FROM aBook
+
+    ┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐
+    │ client 1 │ 123-45-67                                            │
+    │ client 2 │ ᴺᵁᴸᴸ                                                 │
+    └──────────┴──────────────────────────────────────────────────────┘
+
+    2 rows in set. Elapsed: 0.006 sec.
+
+## ifNull {#ifnull}
+
+如果第一个参数为`NULL`,则返回第二个参数的值。
+
+    ifNull(x,alt)
+
+**参数:**
+
+- `x` — 要检查`NULL`的值。
+- `alt` — 如果`x`为`NULL`时,函数返回的值。
+
+**返回值**
+
+- 如果`x`不为`NULL`,则返回`x`。
+- 如果`x`为`NULL`,则返回`alt`。
+
+**示例**
+
+    SELECT ifNull('a', 'b')
+
+    ┌─ifNull('a', 'b')─┐
+    │ a                │
+    └──────────────────┘
+
+    SELECT ifNull(NULL, 'b')
+
+    ┌─ifNull(NULL, 'b')─┐
+    │ b                 │
+    └───────────────────┘
+
+## nullIf {#nullif}
+
+如果参数相等,则返回`NULL`。
+
+    nullIf(x, y)
+
+**参数:**
+
+`x`, `y` — 用于比较的值。 它们必须是类型兼容的,否则将抛出异常。
+
+**返回值**
+
+- 如果参数相等,则为`NULL`。
+- 如果参数不相等,则为`x`值。
+
+**示例**
+
+    SELECT nullIf(1, 1)
+
+    ┌─nullIf(1, 1)─┐
+    │         ᴺᵁᴸᴸ │
+    └──────────────┘
+
+    SELECT nullIf(1, 2)
+
+    ┌─nullIf(1, 2)─┐
+    │            1 │
+    └──────────────┘
+
+## assumeNotNull {#assumenotnull}
+
+将[Nullable](../../sql_reference/functions/functions_for_nulls.md)类型的值转换为非`Nullable`类型的值。
+
+    assumeNotNull(x)
+
+**参数:**
+
+- `x` — 原始值。
+
+**返回值**
+
+- 如果`x`不为`NULL`,返回非`Nullable`类型的原始值。
+- 如果`x`为`NULL`,返回对应非`Nullable`类型的默认值。
+
+**示例**
+
+存在如下`t_null`表。
+
+    SHOW CREATE TABLE t_null
+
+    ┌─statement─────────────────────────────────────────────────────────────────┐
+    │ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog  │
+    └───────────────────────────────────────────────────────────────────────────┘
+
+    ┌─x─┬────y─┐
+    │ 1 │ ᴺᵁᴸᴸ │
+    │ 2 │    3 │
+    └───┴──────┘
+
+将列`y`作为`assumeNotNull`函数的参数。
+
+    SELECT assumeNotNull(y) FROM t_null
+
+    ┌─assumeNotNull(y)─┐
+    │                0 │
+    │                3 │
+    └──────────────────┘
+
+    SELECT toTypeName(assumeNotNull(y)) FROM t_null
+
+    ┌─toTypeName(assumeNotNull(y))─┐
+    │ Int8                         │
+    │ Int8                         │
+    └──────────────────────────────┘
+
+## toNullable {#tonullable}
+
+将参数的类型转换为`Nullable`。
+
+    toNullable(x)
+
+**参数:**
+
+- `x` — 任何非复合类型的值。
+
+**返回值**
+
+- 输入的值,但其类型为`Nullable`。
+
+**示例**
+
+    SELECT toTypeName(10)
+
+    ┌─toTypeName(10)─┐
+    │ UInt8          │
+    └────────────────┘
+
+    SELECT toTypeName(toNullable(10))
+
+    ┌─toTypeName(toNullable(10))─┐
+    │ Nullable(UInt8)            │
+    └────────────────────────────┘
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/)
diff --git a/docs/zh/sql_reference/functions/geo.md b/docs/zh/sql_reference/functions/geo.md
new file mode 100644
index 00000000000..3f6e6a3bb10
--- /dev/null
+++ b/docs/zh/sql_reference/functions/geo.md
@@ -0,0 +1,223 @@
+
+# GEO函数 {#geohan-shu}
+
+## greatCircleDistance {#greatcircledistance}
+
+使用[great-circle distance公式](https://en.wikipedia.org/wiki/Great-circle_distance)计算地球表面两点之间的距离。
+
+``` sql
+greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg)
+```
+
+**输入参数**
+
+- `lon1Deg` — 第一个点的经度,单位:度,范围: `[-180°, 180°]`。
+- `lat1Deg` — 第一个点的纬度,单位:度,范围: `[-90°, 90°]`。
+- `lon2Deg` — 第二个点的经度,单位:度,范围: `[-180°, 180°]`。
+- `lat2Deg` — 第二个点的纬度,单位:度,范围: `[-90°, 90°]`。
+
+正值对应北纬和东经,负值对应南纬和西经。
+
+**返回值**
+
+地球表面的两点之间的距离,以米为单位。
+
+当输入参数值超出规定的范围时将抛出异常。
+
+**示例**
+
+``` sql
+SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)
+```
+
+``` text
+┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐
+│                                                 14132374.194975413 │
+└────────────────────────────────────────────────────────────────────┘
+```
+
+## pointInEllipses {#pointinellipses}
+
+检查指定的点是否至少包含在指定的一个椭圆中。
+下述中的坐标是几何图形在笛卡尔坐标系中的位置。
+
+``` sql
+pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ)
+```
+
+**输入参数**
+
+- `x, y` — 平面上某个点的坐标。
+- `xᵢ, yᵢ` — 第i个椭圆的中心坐标。
+- `aᵢ, bᵢ` — 以x, y坐标为单位的第i个椭圆的轴。
+
+输入参数的个数必须是`2+4⋅n`,其中`n`是椭圆的数量。
+
+**返回值**
+
+如果该点至少包含在一个椭圆中,则返回`1`;否则,则返回`0`。
+
+**示例**
+
+``` sql
+SELECT pointInEllipses(55.755831, 37.617673, 55.755831, 37.617673, 1.0, 2.0)
+```
+
+``` text
+┌─pointInEllipses(55.755831, 37.617673, 55.755831, 37.617673, 1., 2.)─┐
+│                                                                    1 │
+└──────────────────────────────────────────────────────────────────────┘
+```
+
+## pointInPolygon {#pointinpolygon}
+
+检查指定的点是否包含在指定的多边形中。
+
+``` sql
+pointInPolygon((x, y), [(a, b), (c, d) ...], ...)
+```
+
+**输入参数**
+
+- `(x, y)` — 平面上某个点的坐标。[元组](../../sql_reference/functions/geo.md)类型,包含坐标的两个数字。
+- `[(a, b), (c, d) ...]` — 多边形的顶点。[数组](../../sql_reference/functions/geo.md)类型。每个顶点由一对坐标`(a, b)`表示。顶点可以按顺时针或逆时针指定。顶点的个数应该大于等于3。同时只能是常量的。
+- 该函数还支持镂空的多边形(切除部分)。如果需要,可以使用函数的其他参数定义需要切除部分的多边形。(该函数不支持非简单连通的多边形。)
+
+**返回值**
+
+如果坐标点位于多边形范围内,则返回`1`,否则返回`0`。
+如果坐标位于多边形的边界上,则该函数可能返回`1`,或可能返回`0`。
+
+**示例**
+
+``` sql
+SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res
+```
+
+``` text
+┌─res─┐
+│   1 │
+└─────┘
+```
+
+## geohashEncode {#geohashencode}
+
+将经度和纬度编码为geohash-string,请参阅(http://geohash.org/,https://en.wikipedia.org/wiki/Geohash)。
+
+``` sql
+geohashEncode(longitude, latitude, [precision])
+```
+
+**输入值**
+
+- longitude - 要编码的坐标的经度部分。其值应在`[-180°,180°]`范围内
+- latitude - 要编码的坐标的纬度部分。其值应在`[-90°,90°]`范围内
+- precision - 可选,生成的geohash-string的长度,默认为`12`。取值范围为`[1,12]`。任何小于`1`或大于`12`的值都会默认转换为`12`。
+
+**返回值**
+
+- 坐标编码的字符串(使用base32编码的修改版本)。
+
+**示例**
+
+``` sql
+SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res
+```
+
+``` text
+┌─res──────────┐
+│ ezs42d000000 │
+└──────────────┘
+```
+
+## geohashDecode {#geohashdecode}
+
+将任何geohash编码的字符串解码为经度和纬度。
+
+**输入值**
+
+- encoded string - geohash编码的字符串。
+
+**返回值**
+
+- (longitude, latitude) - 经度和纬度的`Float64`值的2元组。
+
+**示例**
+
+``` sql
+SELECT geohashDecode('ezs42') AS res
+```
+
+``` text
+┌─res─────────────────────────────┐
+│ (-5.60302734375,42.60498046875) │
+└─────────────────────────────────┘
+```
+
+## geoToH3 {#geotoh3}
+
+计算指定分辨率下坐标点`(lon, lat)`的[H3](https://uber.github.io/h3/#/documentation/overview/introduction)索引。
+
+``` sql
+geoToH3(lon, lat, resolution)
+```
+
+**输入值**
+
+- `lon` — 经度。 [Float64](../../sql_reference/functions/geo.md)类型。
+- `lat` — 纬度。 [Float64](../../sql_reference/functions/geo.md)类型。
+- `resolution` — 索引的分辨率。 取值范围为: `[0, 15]`。 [UInt8](../../sql_reference/functions/geo.md)类型。
+
+**返回值**
+
+- H3中六边形的索引值。
+- 发生异常时返回0。
+
+[UInt64](../../sql_reference/functions/geo.md)类型。
+
+**示例**
+
+``` sql
+SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index
+```
+
+``` text
+┌────────────h3Index─┐
+│ 644325524701193974 │
+└────────────────────┘
+```
+
+## geohashesInBox {#geohashesinbox}
+
+返回一个geohash字符串数组,这些字符串以指定精度构成对给定经纬度范围的最小覆盖。
+
+**输入值**
+
+- longitude\_min - 最小经度。其值应在`[-180°,180°]`范围内
+- latitude\_min - 最小纬度。其值应在`[-90°,90°]`范围内
+- longitude\_max
- 最大经度。其值应在`[-180°,180°]`范围内 +- latitude\_max - 最大纬度。其值应在`[-90°,90°]`范围内 +- precision - geohash的精度。其值应在`[1, 12]`内的`UInt8`类型的数字 + +请注意,上述所有的坐标参数必须同为`Float32`或`Float64`中的一种类型。 + +**返回值** + +- 包含指定范围内的指定精度的geohash字符串数组。注意,您不应该依赖返回数组中geohash的顺序。 +- \[\] - 当传入的最小经纬度大于最大经纬度时将返回一个空数组。 + +请注意,如果生成的数组长度超过10000时,则函数将抛出异常。 + +**示例** + +``` sql +SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos +``` + +``` text +┌─thasos──────────────────────────────────────┐ +│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │ +└─────────────────────────────────────────────┘ +``` + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/geo/) diff --git a/docs/zh/sql_reference/functions/hash_functions.md b/docs/zh/sql_reference/functions/hash_functions.md new file mode 100644 index 00000000000..9dc4aa9b794 --- /dev/null +++ b/docs/zh/sql_reference/functions/hash_functions.md @@ -0,0 +1,115 @@ + +# Hash函数 {#hashhan-shu} + +Hash函数可以用于将元素不可逆的伪随机打乱。 + +## halfMD5 {#halfmd5} + +计算字符串的MD5。然后获取结果的前8个字节并将它们作为UInt64(大端)返回。 +此函数相当低效(500万个短字符串/秒/核心)。 +如果您不需要一定使用MD5,请使用'sipHash64'函数。 + +## MD5 {#md5} + +计算字符串的MD5并将结果放入FixedString(16)中返回。 +如果您只是需要一个128位的hash,同时不需要一定使用MD5,请使用'sipHash128'函数。 +如果您要获得与md5sum程序相同的输出结果,请使用lower(hex(MD5(s)))。 + +## sipHash64 {#siphash64} + +计算字符串的SipHash。 +接受String类型的参数,返回UInt64。 +SipHash是一种加密哈希函数。它的处理性能至少比MD5快三倍。 +有关详细信息,请参阅链接:https://131002.net/siphash/ + +## sipHash128 {#hash_functions-siphash128} + +计算字符串的SipHash。 +接受String类型的参数,返回FixedString(16)。 +与sipHash64函数的不同在于它的最终计算结果为128位。 + +## cityHash64 {#cityhash64} + +计算任意数量字符串的CityHash64或使用特定实现的Hash函数计算任意数量其他类型的Hash。 +对于字符串,使用CityHash算法。 这是一个快速的非加密哈希函数,用于字符串。 +对于其他类型的参数,使用特定实现的Hash函数,这是一种快速的非加密的散列函数。 +如果传递了多个参数,则使用CityHash组合这些参数的Hash结果。 +例如,您可以计算整个表的checksum,其结果取决于行的顺序:`SELECT sum(cityHash64(*)) FROM table`。 + +## intHash32 {#inthash32} + +为任何类型的整数计算32位的哈希。 +这是相对高效的非加密Hash函数。 + +## intHash64 {#inthash64} + +从任何类型的整数计算64位哈希码。 +它的工作速度比intHash32函数快。 + +## SHA1 {#sha1} + +## SHA224 {#sha224} + +## SHA256 {#sha256} + +计算字符串的SHA-1,SHA-224或SHA-256,并将结果字节集返回为FixedString(20),FixedString(28)或FixedString(32)。 +该函数相当低效(SHA-1大约500万个短字符串/秒/核心,而SHA-224和SHA-256大约220万个短字符串/秒/核心)。 +我们建议仅在必须使用这些Hash函数且无法更改的情况下使用这些函数。 +即使在这些情况下,我们仍建议将函数采用在写入数据时使用预计算的方式将其计算完毕。而不是在SELECT中计算它们。 + +## URLHash(url\[,N\]) {#urlhashurl-n} + +一种快速的非加密哈希函数,用于规范化的从URL获得的字符串。 +`URLHash(s)` - 从一个字符串计算一个哈希,如果结尾存在尾随符号`/`,`?`或`#`则忽略。 +`URLHash(s,N)` - 计算URL层次结构中字符串到N级别的哈希值,如果末尾存在尾随符号`/`,`?`或`#`则忽略。 +URL的层级与URLHierarchy中的层级相同。 此函数被用于Yandex.Metrica。 + +## farmHash64 {#farmhash64} + +计算字符串的FarmHash64。 +接受一个String类型的参数。返回UInt64。 +有关详细信息,请参阅链接:[FarmHash64](https://github.com/google/farmhash) + +## javaHash {#hash_functions-javahash} + +计算字符串的JavaHash。 +接受一个String类型的参数。返回Int32。 +有关更多信息,请参阅链接:[JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) + +## hiveHash {#hivehash} + +计算字符串的HiveHash。 +接受一个String类型的参数。返回Int32。 +与[JavaHash](#hash_functions-javahash)相同,但不会返回负数。 + +## metroHash64 {#metrohash64} + +计算字符串的MetroHash。 +接受一个String类型的参数。返回UInt64。 +有关详细信息,请参阅链接:[MetroHash64](http://www.jandrewrogers.com/2015/05/27/metrohash/) + +## jumpConsistentHash {#jumpconsistenthash} + +计算UInt64的JumpConsistentHash。 +接受UInt64类型的参数。返回Int32。 +有关更多信息,请参见链接:[JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf) + +## murmurHash2\_32,murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} + +计算字符串的MurmurHash2。 +接受一个String类型的参数。返回UInt64或UInt32。 +有关更多信息,请参阅链接:[MurmurHash2](https://github.com/aappleby/smhasher) + +## 
murmurHash3\_32,murmurHash3\_64,murmurHash3\_128 {#murmurhash3-32-murmurhash3-64-murmurhash3-128}
+
+计算字符串的MurmurHash3。
+接受一个String类型的参数。返回UInt64或UInt32或FixedString(16)。
+有关更多信息,请参阅链接:[MurmurHash3](https://github.com/aappleby/smhasher)
+
+## xxHash32,xxHash64 {#xxhash32-xxhash64}
+
+计算字符串的xxHash。
+接受一个String类型的参数。返回UInt64或UInt32。
+有关更多信息,请参见链接:[xxHash](http://cyan4973.github.io/xxHash/)
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/)
diff --git a/docs/zh/sql_reference/functions/higher_order_functions.md b/docs/zh/sql_reference/functions/higher_order_functions.md
new file mode 100644
index 00000000000..6d090e7330d
--- /dev/null
+++ b/docs/zh/sql_reference/functions/higher_order_functions.md
@@ -0,0 +1,139 @@
+
+# 高阶函数 {#gao-jie-han-shu}
+
+## `->` 运算符, lambda(params, expr) 函数 {#yun-suan-fu-lambdaparams-expr-han-shu}
+
+描述一个用于传递给其他高阶函数的lambda函数。箭头的左侧是形式参数,它可以是一个标识符或多个标识符所组成的元组。箭头的右侧是一个表达式,在这个表达式中可以使用形式参数列表中的任何一个标识符或表的任何一个列名。
+
+示例: `x -> 2 * x, str -> str != Referer.`
+
+高阶函数只能接受lambda函数作为其参数。
+
+高阶函数可以接受多个参数的lambda函数作为其参数,在这种情况下,高阶函数需要同时传递几个长度相等的数组,这些数组将被传递给lambda参数。
+
+除了'arrayMap'和'arrayFilter'以外的所有其他函数,都可以省略第一个参数(lambda函数)。在这种情况下,默认返回数组元素本身。
+
+### arrayMap(func, arr1, …) {#higher_order_functions-array-map}
+
+返回将'func'函数应用于'arr'数组中每个元素所得到的新数组。
+
+### arrayFilter(func, arr1, …) {#arrayfilterfunc-arr1}
+
+返回一个数组,其中仅包含'arr1'中使'func'返回非0值的元素。
+
+示例:
+
+``` sql
+SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res
+```
+
+    ┌─res───────────┐
+    │ ['abc World'] │
+    └───────────────┘
+
+``` sql
+SELECT
+    arrayFilter(
+        (i, x) -> x LIKE '%World%',
+        arrayEnumerate(arr),
+        ['Hello', 'abc World'] AS arr)
+    AS res
+```
+
+    ┌─res─┐
+    │ [2] │
+    └─────┘
+
+### arrayCount(\[func,\] arr1, …) {#arraycountfunc-arr1}
+
+返回数组arr中非零元素的数量,如果指定了'func',则通过'func'的返回值确定元素是否为非零元素。
+
+### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1}
+
+返回数组'arr'中是否存在非零元素,如果指定了'func',则使用'func'的返回值确定元素是否为非零元素。
+
+### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1}
+
+如果'func'对数组'arr'中的所有元素都返回非0值,则返回1,否则返回0。
+
+### arraySum(\[func,\] arr1, …) {#arraysumfunc-arr1}
+
+计算arr数组的总和,如果指定了'func',则通过'func'的返回值计算数组的总和。
+
+### arrayFirst(func, arr1, …) {#arrayfirstfunc-arr1}
+
+返回数组中第一个匹配的元素,函数使用'func'匹配所有元素,直到找到第一个匹配的元素。
+
+### arrayFirstIndex(func, arr1, …) {#arrayfirstindexfunc-arr1}
+
+返回数组中第一个匹配的元素的下标索引,函数使用'func'匹配所有元素,直到找到第一个匹配的元素。
+
+### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1}
+
+返回源数组部分数据的总和,如果指定了`func`函数,则使用`func`的返回值计算总和。
+
+示例:
+
+``` sql
+SELECT arrayCumSum([1, 1, 1, 1]) AS res
+```
+
+    ┌─res──────────┐
+    │ [1, 2, 3, 4] │
+    └──────────────┘
+
+### arrayCumSumNonNegative(arr) {#arraycumsumnonnegativearr}
+
+与arrayCumSum相同,返回源数组部分数据的总和。不同于arrayCumSum,当返回值包含小于零的值时,该值替换为零,后续计算使用零继续计算。例如:
+
+``` sql
+SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res
+```
+
+    ┌─res───────┐
+    │ [1,2,0,1] │
+    └───────────┘
+
+### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1}
+
+返回升序排序`arr1`的结果。如果指定了`func`函数,则排序顺序由`func`的结果决定。
+
+[Schwartzian变换](https://en.wikipedia.org/wiki/Schwartzian_transform)用于提高排序效率。
+
+示例:
+
+``` sql
+SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) AS res;
+```
+
+    ┌─res────────────────┐
+    │ ['world', 'hello'] │
+    └────────────────────┘
+
+请注意,NULL和NaN在最后(NaN在NULL之前)。例如:
+
+``` sql
+SELECT arraySort([1, nan, 2, NULL, 3, nan, 4, NULL])
+```
+
+    ┌─arraySort([1, nan, 2, NULL, 3, nan, 4, NULL])─┐
+    │ [1,2,3,4,nan,nan,NULL,NULL]                   │
+    
└───────────────────────────────────────────────┘ + +### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1} + +返回降序排序`arr1`的结果。如果指定了`func`函数,则排序顺序由`func`的结果决定。 + +请注意,NULL和NaN在最后(NaN在NULL之前)。例如: + +``` sql +SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, 4, NULL]) +``` + + ┌─arrayReverseSort([1, nan, 2, NULL, 3, nan, 4, NULL])─┐ + │ [4,3,2,1,nan,nan,NULL,NULL] │ + └──────────────────────────────────────────────────────┘ + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/) diff --git a/docs/zh/sql_reference/functions/in_functions.md b/docs/zh/sql_reference/functions/in_functions.md new file mode 100644 index 00000000000..f4f358bad9a --- /dev/null +++ b/docs/zh/sql_reference/functions/in_functions.md @@ -0,0 +1,20 @@ + +# IN运算符相关函数 {#inyun-suan-fu-xiang-guan-han-shu} + +## in,notIn,globalIn,globalNotIn {#in-notin-globalin-globalnotin} + +请参阅[IN 运算符](../statements/select.md#select-in-operators)部分。 + +## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y} + +函数用于对多个列进行分组。 +对于具有类型T1,T2,…的列,它返回包含这些列的元组(T1,T2,…)。 执行该函数没有任何成本。 +元组通常用作IN运算符的中间参数值,或用于创建lambda函数的形参列表。 元组不能写入表。 + +## 元组元素(元组,n),运算符x.N {#tupleelementtuple-n-operator-x-n} + +函数用于从元组中获取列。 +'N'是列索引,从1开始。N必须是常量正整数常数,并且不大于元组的大小。 +执行该函数没有任何成本。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/zh/sql_reference/functions/index.md b/docs/zh/sql_reference/functions/index.md new file mode 100644 index 00000000000..1f61a1f2919 --- /dev/null +++ b/docs/zh/sql_reference/functions/index.md @@ -0,0 +1,67 @@ + +# 函数 {#han-shu} + +ClickHouse中至少存在两种类型的函数 - 常规函数(它们称之为«函数»)和聚合函数。 常规函数的工作就像分别为每一行执行一次函数计算一样(对于每一行,函数的结果不依赖于其他行)。 聚合函数则从各行累积一组值(即函数的结果以来整个结果集)。 + +在本节中,我们将讨论常规函数。 有关聚合函数,请参阅«聚合函数»一节。 + + \* - 'arrayJoin'函数与表函数均属于第三种类型的函数。 \* + +## 强类型 {#qiang-lei-xing} + +与标准SQL相比,ClickHouse具有强类型。 换句话说,它不会在类型之间进行隐式转换。 每个函数适用于特定的一组类型。 这意味着有时您需要使用类型转换函数。 + +## 常见的子表达式消除 {#chang-jian-de-zi-biao-da-shi-xiao-chu} + +查询中具有相同AST(相同语句或语法分析结果相同)的所有表达式都被视为具有相同的值。 这样的表达式被连接并执行一次。 通过这种方式也可以消除相同的子查询。 + +## 结果类型 {#jie-guo-lei-xing} + +所有函数都只能够返回一个返回值。 结果类型通常由参数的类型决定。 但tupleElement函数(a.N运算符)和toFixedString函数是例外的。 + +## 常量 {#chang-liang} + +为了简单起见,某些函数的某些参数只能是常量。 例如,LIKE运算符的右参数必须是常量。 +几乎所有函数都为常量参数返回常量。 除了用于生成随机数的函数。 +'now'函数为在不同时间运行的查询返回不同的值,但结果被视为常量,因为常量在单个查询中很重要。 +常量表达式也被视为常量(例如,LIKE运算符的右半部分可以由多个常量构造)。 + +对于常量和非常量参数,可以以不同方式实现函数(执行不同的代码)。 但是,对于包含相同数据的常量和非常量参数它们的结果应该是一致的。 + +## NULL值处理 {#nullzhi-chu-li} + +函数具有以下行为: + +- 如果函数的参数至少一个是«NULL»,则函数结果也是«NULL»。 +- 在每个函数的描述中单独指定的特殊行为。在ClickHouse源代码中,这些函数具有«UseDefaultImplementationForNulls = false»。 + +## 不可变性 {#bu-ke-bian-xing} + +函数不能更改其参数的值 - 任何更改都将作为结果返回。因此,计算单独函数的结果不依赖于在查询中写入函数的顺序。 + +## 错误处理 {#cuo-wu-chu-li} + +如果数据无效,某些函数可能会抛出异常。在这种情况下,将取消查询并将错误信息返回给客户端。对于分布式处理,当其中一个服务器发生异常时,其他服务器也会尝试中止查询。 + +## 表达式参数的计算 {#biao-da-shi-can-shu-de-ji-suan} + +在几乎所有编程语言中,某些函数可能无法预先计算其中一个参数。这通常是运算符`&&`,`||`和`? 
:`。 +但是在ClickHouse中,函数(运算符)的参数总是被预先计算。这是因为一次评估列的整个部分,而不是分别计算每一行。 + +## 执行分布式查询处理的功能 {#zhi-xing-fen-bu-shi-cha-xun-chu-li-de-gong-neng} + +对于分布式查询处理,在远程服务器上执行尽可能多的查询处理阶段,并且在请求者服务器上执行其余阶段(合并中间结果和之后的所有内容)。 + +这意味着可以在不同的服务器上执行功能。 +例如,在查询`SELECT f(sum(g(x)))FROM distributed_table GROUP BY h(y)中,` + +- 如果`distributed_table`至少有两个分片,则在远程服务器上执行函数'g'和'h',并在请求服务器上执行函数'f'。 +- 如果`distributed_table`只有一个分片,则在该分片的服务器上执行所有'f','g'和'h'功能。 + +函数的结果通常不依赖于它在哪个服务器上执行。但是,有时这很重要。 +例如,使用字典的函数时将使用运行它们的服务器上存在的字典。 +另一个例子是`hostName`函数,它返回运行它的服务器的名称,以便在`SELECT`查询中对服务器进行`GROUP BY`。 + +如果查询中的函数在请求服务器上执行,但您需要在远程服务器上执行它,则可以将其包装在«any»聚合函数中,或将其添加到«GROUP BY»中。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/zh/sql_reference/functions/introspection.md b/docs/zh/sql_reference/functions/introspection.md new file mode 100644 index 00000000000..f0c907b3e67 --- /dev/null +++ b/docs/zh/sql_reference/functions/introspection.md @@ -0,0 +1,310 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 65 +toc_title: "\u81EA\u7701" +--- + +# 内省功能 {#introspection-functions} + +您可以使用本章中描述的函数来反省 [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) 和 [DWARF](https://en.wikipedia.org/wiki/DWARF) 用于查询分析。 + +!!! warning "警告" + 这些功能很慢,可能会强加安全考虑。 + +对于内省功能的正确操作: + +- 安装 `clickhouse-common-static-dbg` 包。 + +- 设置 [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) 设置为1。 + + For security reasons introspection functions are disabled by default. + +ClickHouse将探查器报告保存到 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) 系统表. 确保正确配置了表和探查器。 + +## addressToLine {#addresstoline} + +将ClickHouse服务器进程内的虚拟内存地址转换为ClickHouse源代码中的文件名和行号。 + +如果您使用官方的ClickHouse软件包,您需要安装 `clickhouse-common-static-dbg` 包。 + +**语法** + +``` sql +addressToLine(address_of_binary_instruction) +``` + +**参数** + +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. + +**返回值** + +- 源代码文件名和此文件中用冒号分隔的行号。 + + For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number. + +- 二进制文件的名称,如果函数找不到调试信息。 + +- 空字符串,如果地址无效。 + +类型: [字符串](../../sql_reference/data_types/string.md). 
+ +**示例** + +启用内省功能: + +``` sql +SET allow_introspection_functions=1 +``` + +从中选择第一个字符串 `trace_log` 系统表: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-19 +event_time: 2019-11-19 18:57:23 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 421b6855-1858-45a5-8f37-f383409d6d72 +trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439] +``` + +该 `trace` 字段包含采样时的堆栈跟踪。 + +获取单个地址的源代码文件名和行号: + +``` sql +SELECT addressToLine(94784076370703) \G +``` + +``` text +Row 1: +────── +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 +``` + +将函数应用于整个堆栈跟踪: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToLine(x), trace), '\n') AS trace_source_code_lines +FROM system.trace_log +LIMIT 1 +\G +``` + +该 [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 功能允许处理的每个单独的元素 `trace` 阵列由 `addressToLine` 功能。 这种处理的结果,你在看 `trace_source_code_lines` 列的输出。 + +``` text +Row 1: +────── +trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so +/usr/lib/debug/usr/bin/clickhouse +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.h:155 +/usr/include/c++/9/bits/atomic_base.h:551 +/usr/lib/debug/usr/bin/clickhouse +/lib/x86_64-linux-gnu/libpthread-2.27.so +/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97 +``` + +## addressToSymbol {#addresstosymbol} + +将ClickHouse服务器进程内的虚拟内存地址转换为ClickHouse对象文件中的符号。 + +**语法** + +``` sql +addressToSymbol(address_of_binary_instruction) +``` + +**参数** + +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. + +**返回值** + +- 来自ClickHouse对象文件的符号。 +- 空字符串,如果地址无效。 + +类型: [字符串](../../sql_reference/data_types/string.md). 
+ +**示例** + +启用内省功能: + +``` sql +SET allow_introspection_functions=1 +``` + +从中选择第一个字符串 `trace_log` 系统表: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +该 `trace` 字段包含采样时的堆栈跟踪。 + +获取单个地址的符号: + +``` sql +SELECT addressToSymbol(94138803686098) \G +``` + +``` text +Row 1: +────── +addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +``` + +将函数应用于整个堆栈跟踪: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToSymbol(x), trace), '\n') AS trace_symbols +FROM system.trace_log +LIMIT 1 +\G +``` + +该 [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 功能允许处理的每个单独的元素 `trace` 阵列由 `addressToSymbols` 功能。 这种处理的结果,你在看 `trace_symbols` 列的输出。 + +``` text +Row 1: +────── +trace_symbols: _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +_ZNK2DB10Aggregator21executeWithoutKeyImplERPcmPNS0_28AggregateFunctionInstructionEPNS_5ArenaE +_ZN2DB10Aggregator14executeOnBlockESt6vectorIN3COWINS_7IColumnEE13immutable_ptrIS3_EESaIS6_EEmRNS_22AggregatedDataVariantsERS1_IPKS3_SaISC_EERS1_ISE_SaISE_EERb +_ZN2DB10Aggregator14executeOnBlockERKNS_5BlockERNS_22AggregatedDataVariantsERSt6vectorIPKNS_7IColumnESaIS9_EERS6_ISB_SaISB_EERb +_ZN2DB10Aggregator7executeERKSt10shared_ptrINS_17IBlockInputStreamEERNS_22AggregatedDataVariantsE +_ZN2DB27AggregatingBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB28AsynchronousBlockInputStream9calculateEv +_ZNSt17_Function_handlerIFvvEZN2DB28AsynchronousBlockInputStream4nextEvEUlvE_E9_M_invokeERKSt9_Any_data +_ZN14ThreadPoolImplI20ThreadFromGlobalPoolE6workerESt14_List_iteratorIS0_E +_ZZN20ThreadFromGlobalPoolC4IZN14ThreadPoolImplIS_E12scheduleImplIvEET_St8functionIFvvEEiSt8optionalImEEUlvE1_JEEEOS4_DpOT0_ENKUlvE_clEv +_ZN14ThreadPoolImplISt6threadE6workerESt14_List_iteratorIS0_E +execute_native_thread_routine +start_thread +clone +``` + +## demangle {#demangle} + +转换一个符号,您可以使用 [addressToSymbol](#addresstosymbol) 函数到C++函数名。 + +**语法** + +``` sql +demangle(symbol) +``` + +**参数** + +- `symbol` ([字符串](../../sql_reference/data_types/string.md)) — Symbol from an object file. + +**返回值** + +- C++函数的名称。 +- 如果符号无效,则为空字符串。 + +类型: [字符串](../../sql_reference/data_types/string.md). 
+ +**示例** + +启用内省功能: + +``` sql +SET allow_introspection_functions=1 +``` + +从中选择第一个字符串 `trace_log` 系统表: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +该 `trace` 字段包含采样时的堆栈跟踪。 + +获取单个地址的函数名称: + +``` sql +SELECT demangle(addressToSymbol(94138803686098)) \G +``` + +``` text +Row 1: +────── +demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +``` + +将函数应用于整个堆栈跟踪: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_functions +FROM system.trace_log +LIMIT 1 +\G +``` + +该 [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 功能允许处理的每个单独的元素 `trace` 阵列由 `demangle` 功能。 这种处理的结果,你在看 `trace_functions` 列的输出。 + +``` text +Row 1: +────── +trace_functions: DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) const +DB::Aggregator::executeOnBlock(std::vector::immutable_ptr, std::allocator::immutable_ptr > >, unsigned long, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::executeOnBlock(DB::Block const&, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::execute(std::shared_ptr const&, DB::AggregatedDataVariants&) +DB::AggregatingBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::AsynchronousBlockInputStream::calculate() +std::_Function_handler::_M_invoke(std::_Any_data const&) +ThreadPoolImpl::worker(std::_List_iterator) +ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const +ThreadPoolImpl::worker(std::_List_iterator) +execute_native_thread_routine +start_thread +clone +``` diff --git a/docs/zh/sql_reference/functions/ip_address_functions.md b/docs/zh/sql_reference/functions/ip_address_functions.md new file mode 100644 index 00000000000..17f4c4a5991 --- /dev/null +++ b/docs/zh/sql_reference/functions/ip_address_functions.md @@ -0,0 +1,218 @@ + +# IP函数 {#iphan-shu} + +## IPv4NumToString(num) {#ipv4numtostringnum} + +接受一个UInt32(大端)表示的IPv4的地址,返回相应IPv4的字符串表现形式,格式为A.B.C.D(以点分割的十进制数字)。 + +## IPv4StringToNum(s) {#ipv4stringtonums} + +与IPv4NumToString函数相反。如果IPv4地址格式无效,则返回0。 + +## IPv4NumToStringClassC(num) {#ipv4numtostringclasscnum} + +与IPv4NumToString类似,但使用xxx替换最后一个字节。 + +示例: + +``` sql +SELECT + IPv4NumToStringClassC(ClientIP) AS k, + count() AS c +FROM test.hits +GROUP BY k +ORDER BY c DESC +LIMIT 10 +``` + + ┌─k──────────────┬─────c─┐ + │ 83.149.9.xxx │ 26238 │ + │ 217.118.81.xxx │ 26074 │ + │ 213.87.129.xxx │ 25481 │ + │ 
83.149.8.xxx   │ 24984 │
+    │ 217.118.83.xxx │ 22797 │
+    │ 78.25.120.xxx  │ 22354 │
+    │ 213.87.131.xxx │ 21285 │
+    │ 78.25.121.xxx  │ 20887 │
+    │ 188.162.65.xxx │ 19694 │
+    │ 83.149.48.xxx  │ 17406 │
+    └────────────────┴───────┘
+
+由于使用'xxx'是不规范的,因此将来可能会更改。我们建议您不要依赖此格式。
+
+### IPv6NumToString(x) {#ipv6numtostringx}
+
+接受FixedString(16)类型的二进制格式的IPv6地址。以文本格式返回此地址的字符串。
+IPv6映射的IPv4地址以::ffff:111.222.33.44的格式输出。例如:
+
+``` sql
+SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr
+```
+
+    ┌─addr─────────┐
+    │ 2a02:6b8::11 │
+    └──────────────┘
+
+``` sql
+SELECT
+    IPv6NumToString(ClientIP6 AS k),
+    count() AS c
+FROM hits_all
+WHERE EventDate = today() AND substring(ClientIP6, 1, 12) != unhex('00000000000000000000FFFF')
+GROUP BY k
+ORDER BY c DESC
+LIMIT 10
+```
+
+    ┌─IPv6NumToString(ClientIP6)──────────────┬─────c─┐
+    │ 2a02:2168:aaa:bbbb::2                   │ 24695 │
+    │ 2a02:2698:abcd:abcd:abcd:abcd:8888:5555 │ 22408 │
+    │ 2a02:6b8:0:fff::ff                      │ 16389 │
+    │ 2a01:4f8:111:6666::2                    │ 16016 │
+    │ 2a02:2168:888:222::1                    │ 15896 │
+    │ 2a01:7e00::ffff:ffff:ffff:222           │ 14774 │
+    │ 2a02:8109:eee:ee:eeee:eeee:eeee:eeee    │ 14443 │
+    │ 2a02:810b:8888:888:8888:8888:8888:8888  │ 14345 │
+    │ 2a02:6b8:0:444:4444:4444:4444:4444      │ 14279 │
+    │ 2a01:7e00::ffff:ffff:ffff:ffff          │ 13880 │
+    └─────────────────────────────────────────┴───────┘
+
+``` sql
+SELECT
+    IPv6NumToString(ClientIP6 AS k),
+    count() AS c
+FROM hits_all
+WHERE EventDate = today()
+GROUP BY k
+ORDER BY c DESC
+LIMIT 10
+```
+
+    ┌─IPv6NumToString(ClientIP6)─┬──────c─┐
+    │ ::ffff:94.26.111.111       │ 747440 │
+    │ ::ffff:37.143.222.4        │ 529483 │
+    │ ::ffff:5.166.111.99        │ 317707 │
+    │ ::ffff:46.38.11.77         │ 263086 │
+    │ ::ffff:79.105.111.111      │ 186611 │
+    │ ::ffff:93.92.111.88        │ 176773 │
+    │ ::ffff:84.53.111.33        │ 158709 │
+    │ ::ffff:217.118.11.22       │ 154004 │
+    │ ::ffff:217.118.11.33       │ 148449 │
+    │ ::ffff:217.118.11.44       │ 148243 │
+    └────────────────────────────┴────────┘
+
+## IPv6StringToNum(s) {#ipv6stringtonums}
+
+与IPv6NumToString的相反。如果IPv6地址格式无效,则返回空字节字符串。
+十六进制可以是大写的或小写的。
+
+## IPv4ToIPv6(x) {#ipv4toipv6x}
+
+接受一个UInt32类型的IPv4地址,返回FixedString(16)类型的IPv6地址。例如:
+
+``` sql
+SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr
+```
+
+    ┌─addr───────────────┐
+    │ ::ffff:192.168.0.1 │
+    └────────────────────┘
+
+## cutIPv6(x, bitsToCutForIPv6, bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4}
+
+接受一个FixedString(16)类型的IPv6地址,返回一个String,这个String中包含了删除指定位之后的地址的文本格式。例如:
+
+``` sql
+WITH
+    IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D') AS ipv6,
+    IPv4ToIPv6(IPv4StringToNum('192.168.0.1')) AS ipv4
+SELECT
+    cutIPv6(ipv6, 2, 0),
+    cutIPv6(ipv4, 0, 2)
+```
+
+    ┌─cutIPv6(ipv6, 2, 0)─────────────────┬─cutIPv6(ipv4, 0, 2)─┐
+    │ 2001:db8:ac10:fe01:feed:babe:cafe:0 │ ::ffff:192.168.0.0  │
+    └─────────────────────────────────────┴─────────────────────┘
+
+## IPv4CIDRToRange(ipv4, cidr) {#ipv4cidrtorangeipv4-cidr}
+
+接受一个IPv4地址以及一个UInt8类型的CIDR。返回包含子网最低范围以及最高范围的元组。
+
+``` sql
+SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16)
+```
+
+    ┌─IPv4CIDRToRange(toIPv4('192.168.5.2'), 16)─┐
+    │ ('192.168.0.0','192.168.255.255')          │
+    └────────────────────────────────────────────┘
+
+## IPv6CIDRToRange(ipv6, cidr) {#ipv6cidrtorangeipv6-cidr}
+
+接受一个IPv6地址以及一个UInt8类型的CIDR。返回包含子网最低范围以及最高范围的元组。
+
+``` sql
+SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32);
+```
+
+    ┌─IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32)─┐
+    │ ('2001:db8::','2001:db8:ffff:ffff:ffff:ffff:ffff:ffff')                │
+    
└────────────────────────────────────────────────────────────────────────┘ + +## toIPv4(字符串) {#toipv4string} + +`IPv4StringToNum()`的别名,它采用字符串形式的IPv4地址并返回[IPv4](../../sql_reference/functions/ip_address_functions.md)类型的值,该二进制值等于`IPv4StringToNum()`返回的值。 + +``` sql +WITH + '171.225.130.45' as IPv4_string +SELECT + toTypeName(IPv4StringToNum(IPv4_string)), + toTypeName(toIPv4(IPv4_string)) +``` + + ┌─toTypeName(IPv4StringToNum(IPv4_string))─┬─toTypeName(toIPv4(IPv4_string))─┐ + │ UInt32 │ IPv4 │ + └──────────────────────────────────────────┴─────────────────────────────────┘ + +``` sql +WITH + '171.225.130.45' as IPv4_string +SELECT + hex(IPv4StringToNum(IPv4_string)), + hex(toIPv4(IPv4_string)) +``` + + ┌─hex(IPv4StringToNum(IPv4_string))─┬─hex(toIPv4(IPv4_string))─┐ + │ ABE1822D │ ABE1822D │ + └───────────────────────────────────┴──────────────────────────┘ + +## toIPv6(字符串) {#toipv6string} + +`IPv6StringToNum()`的别名,它采用字符串形式的IPv6地址并返回[IPv6](../../sql_reference/functions/ip_address_functions.md)类型的值,该二进制值等于`IPv6StringToNum()`返回的值。 + +``` sql +WITH + '2001:438:ffff::407d:1bc1' as IPv6_string +SELECT + toTypeName(IPv6StringToNum(IPv6_string)), + toTypeName(toIPv6(IPv6_string)) +``` + + ┌─toTypeName(IPv6StringToNum(IPv6_string))─┬─toTypeName(toIPv6(IPv6_string))─┐ + │ FixedString(16) │ IPv6 │ + └──────────────────────────────────────────┴─────────────────────────────────┘ + +``` sql +WITH + '2001:438:ffff::407d:1bc1' as IPv6_string +SELECT + hex(IPv6StringToNum(IPv6_string)), + hex(toIPv6(IPv6_string)) +``` + + ┌─hex(IPv6StringToNum(IPv6_string))─┬─hex(toIPv6(IPv6_string))─────────┐ + │ 20010438FFFF000000000000407D1BC1 │ 20010438FFFF000000000000407D1BC1 │ + └───────────────────────────────────┴──────────────────────────────────┘ + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/zh/sql_reference/functions/json_functions.md b/docs/zh/sql_reference/functions/json_functions.md new file mode 100644 index 00000000000..ca76edde09c --- /dev/null +++ b/docs/zh/sql_reference/functions/json_functions.md @@ -0,0 +1,175 @@ + +# JSON函数 {#jsonhan-shu} + +在Yandex.Metrica中,用户使用JSON作为访问参数。为了处理这些JSON,实现了一些函数。(尽管在大多数情况下,JSON是预先进行额外处理的,并将结果值放在单独的列中。)所有的这些函数都进行了尽可能的假设。以使函数能够尽快的完成工作。 + +我们对JSON格式做了如下假设: + +1. 字段名称(函数的参数)必须使常量。 +2. 字段名称必须使用规范的编码。例如:`visitParamHas('{"abc":"def"}', 'abc') = 1`,但是 `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` +3. 函数可以随意的在多层嵌套结构下查找字段。如果存在多个匹配字段,则返回第一个匹配字段。 +4. 
JSON除字符串文本外不存在空格字符。
+
+## visitParamHas(params, name) {#visitparamhasparams-name}
+
+检查是否存在«name»名称的字段。
+
+## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name}
+
+将名为«name»的字段的值解析成UInt64。如果这是一个字符串字段,函数将尝试从字符串的开头解析一个数字。如果该字段不存在,或无法从它中解析到数字,则返回0。
+
+## visitParamExtractInt(params, name) {#visitparamextractintparams-name}
+
+与visitParamExtractUInt相同,但返回Int64。
+
+## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name}
+
+与visitParamExtractUInt相同,但返回Float64。
+
+## visitParamExtractBool(params, name) {#visitparamextractboolparams-name}
+
+解析true/false值。其结果是UInt8类型的。
+
+## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name}
+
+返回字段的值,包含空格符。
+
+示例:
+
+    visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'
+    visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'
+
+## visitParamExtractString(params, name) {#visitparamextractstringparams-name}
+
+解析使用双引号引起来的字符串。其值会被反转义。如果反转义失败,则返回空字符串。
+
+示例:
+
+    visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'
+    visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'
+    visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''
+    visitParamExtractString('{"abc":"hello}', 'abc') = ''
+
+目前不支持`\uXXXX\uYYYY`这些字符编码,这些编码不在基本多文种平面中(它们被转化为CESU-8而不是UTF-8)。
+
+以下函数基于[simdjson](https://github.com/lemire/simdjson),专为更复杂的JSON解析要求而设计。但上述假设2仍然适用。
+
+## JSONHas(json\[, indices\_or\_keys\]…) {#jsonhasjson-indices-or-keys}
+
+如果JSON中存在该值,则返回`1`。
+
+如果该值不存在,则返回`0`。
+
+示例:
+
+    select JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 1
+    select JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0
+
+`indices_or_keys`可以是零个或多个参数的列表,每个参数可以是字符串或整数。
+
+- String = 按成员名称访问JSON对象成员。
+- 正整数 = 从头开始访问第n个成员/成员名称。
+- 负整数 = 从末尾访问第n个成员/成员名称。
+
+您可以使用整数来访问JSON数组和JSON对象。
+
+例如:
+
+    select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'a'
+    select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2) = 'b'
+    select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1) = 'b'
+    select JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a'
+    select JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello'
+
+## JSONLength(json\[, indices\_or\_keys\]…) {#jsonlengthjson-indices-or-keys}
+
+返回JSON数组或JSON对象的长度。
+
+如果该值不存在或类型错误,将返回`0`。
+
+示例:
+
+    select JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3
+    select JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2
+
+## JSONType(json\[, indices\_or\_keys\]…) {#jsontypejson-indices-or-keys}
+
+返回JSON值的类型。
+
+如果该值不存在,将返回`Null`。
+
+示例:
+
+    select JSONType('{"a": "hello", "b": [-100, 200.0, 300]}') = 'Object'
+    select JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String'
+    select JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array'
+
+## JSONExtractUInt(json\[, indices\_or\_keys\]…) {#jsonextractuintjson-indices-or-keys}
+
+## JSONExtractInt(json\[, indices\_or\_keys\]…) {#jsonextractintjson-indices-or-keys}
+
+## JSONExtractFloat(json\[, indices\_or\_keys\]…) {#jsonextractfloatjson-indices-or-keys}
+
+## JSONExtractBool(json\[, indices\_or\_keys\]…) {#jsonextractbooljson-indices-or-keys}
+
+解析JSON并提取值。这些函数类似于`visitParam*`函数。
+
+如果该值不存在或类型错误,将返回`0`。
+
+示例:
+
+    select JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1) = -100
+    select JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200.0
+    select JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300
+
+## JSONExtractString(json\[, indices\_or\_keys\]…) {#jsonextractstringjson-indices-or-keys}
+
+解析JSON并提取字符串。此函数类似于`visitParamExtractString`函数。
+
+如果该值不存在或类型错误,则返回空字符串。
+
+其值会被反转义。如果反转义失败,则返回空字符串。
+
+示例:
+
+    select JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'hello'
+    select JSONExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'
+    select JSONExtractString('{"abc":"\\u263a"}', 'abc') = '☺'
+    select JSONExtractString('{"abc":"\\u263"}', 'abc') = ''
+    select JSONExtractString('{"abc":"hello}', 'abc') = ''
+
+## JSONExtract(json\[, indices\_or\_keys…\], return\_type) {#jsonextractjson-indices-or-keys-return-type}
+
+解析JSON并提取给定ClickHouse数据类型的值。
+
+这是此前`JSONExtract*`系列函数的泛化版本。这意味着`JSONExtract(..., 'String')`与`JSONExtractString()`的返回结果完全相同,`JSONExtract(..., 'Float64')`与`JSONExtractFloat()`的返回结果完全相同。
+
+示例:
+
+    SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))') = ('hello',[-100,200,300])
+    SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)') = ([-100,200,300],'hello')
+    SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))') = [-100, NULL, NULL]
+    SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)') = NULL
+    SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8') = 1
+    SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Thursday'
+    SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday'
+
+## JSONExtractKeysAndValues(json\[, indices\_or\_keys…\], value\_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type}
+
+从JSON中解析键值对,其中值是给定的ClickHouse数据类型。
+
+示例:
+
+    SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)];
+
+## JSONExtractRaw(json\[, indices\_or\_keys\]…) {#jsonextractrawjson-indices-or-keys}
+
+返回JSON的部分内容。
+
+如果该部分不存在或类型错误,将返回空字符串。
+
+示例:
+
+    select JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]'
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/json_functions/)
diff --git a/docs/zh/sql_reference/functions/logical_functions.md b/docs/zh/sql_reference/functions/logical_functions.md
new file mode 100644
index 00000000000..18a383edbdb
--- /dev/null
+++ b/docs/zh/sql_reference/functions/logical_functions.md
@@ -0,0 +1,16 @@
+
+# 逻辑函数 {#luo-ji-han-shu}
+
+逻辑函数可以接受任何数字类型的参数,并返回UInt8类型的0或1。
+
+当向函数传递零时,函数将判定为«false»,否则,任何其他非零的值都将被判定为«true»。
+
+## and,AND运算符 {#and-and-operator}
+
+## or,OR运算符 {#or-or-operator}
+
+## not,NOT运算符 {#not-not-operator}
+
+## xor {#xor}
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/)
diff --git a/docs/zh/sql_reference/functions/machine_learning_functions.md b/docs/zh/sql_reference/functions/machine_learning_functions.md
new file mode 100644
index 00000000000..0bdea52c59f
--- /dev/null
+++ b/docs/zh/sql_reference/functions/machine_learning_functions.md
@@ -0,0 +1,16 @@
+
+# 机器学习函数 {#ji-qi-xue-xi-han-shu}
+
+## evalMLMethod(prediction) {#machine_learning_methods-evalmlmethod}
+
+要使用拟合好的回归模型进行预测,请使用`evalMLMethod`函数。请参阅`linearRegression`中的链接。
+
+## 随机线性回归 {#stochastic-linear-regression}
+
+`stochasticLinearRegression`聚合函数使用线性模型和MSE损失函数实现随机梯度下降法。使用`evalMLMethod`来预测新数据。
+请参阅示例和注释[此处](../../sql_reference/functions/machine_learning_functions.md#agg_functions-stochasticlinearregression)。
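+
+下面是一个最小的用法示意(假设存在包含`target`, `param1`, `param2`列的`train_data`与`test_data`表;表名与列名仅为演示用途):
+
+``` sql
+-- 训练:将聚合函数的状态保存到一张表中
+CREATE TABLE your_model ENGINE = Memory AS
+SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) AS state
+FROM train_data;
+
+-- 预测:取出模型状态并应用于新数据
+WITH (SELECT state FROM your_model) AS model
+SELECT evalMLMethod(model, param1, param2) FROM test_data;
+```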
+
+## 随机逻辑回归 {#stochastic-logistic-regression}
+
+`stochasticLogisticRegression`聚合函数实现了二元分类问题的随机梯度下降法。使用`evalMLMethod`来预测新数据。
+请参阅示例和注释[此处](../../sql_reference/functions/machine_learning_functions.md#agg_functions-stochasticlogisticregression)。
diff --git a/docs/zh/sql_reference/functions/math_functions.md b/docs/zh/sql_reference/functions/math_functions.md
new file mode 100644
index 00000000000..fef88389b86
--- /dev/null
+++ b/docs/zh/sql_reference/functions/math_functions.md
@@ -0,0 +1,108 @@
+
+# 数学函数 {#shu-xue-han-shu}
+
+以下所有的函数都返回一个Float64类型的数值。返回结果总是以尽可能最大精度返回,但还是可能与机器中可表示最接近该值的数字不同。
+
+## e() {#e}
+
+返回一个接近数学常量e的Float64数字。
+
+## pi() {#pi}
+
+返回一个接近数学常量π的Float64数字。
+
+## exp(x) {#expx}
+
+接受一个数值类型的参数并返回它的指数。
+
+## log(x),ln(x) {#logx-lnx}
+
+接受一个数值类型的参数并返回它的自然对数。
+
+## exp2(x) {#exp2x}
+
+接受一个数值类型的参数并返回它的2的x次幂。
+
+## log2(x) {#log2x}
+
+接受一个数值类型的参数并返回它的底2对数。
+
+## exp10(x) {#exp10x}
+
+接受一个数值类型的参数并返回它的10的x次幂。
+
+## log10(x) {#log10x}
+
+接受一个数值类型的参数并返回它的底10对数。
+
+## sqrt(x) {#sqrtx}
+
+接受一个数值类型的参数并返回它的平方根。
+
+## cbrt(x) {#cbrtx}
+
+接受一个数值类型的参数并返回它的立方根。
+
+## erf(x) {#erfx}
+
+如果'x'是非负数,那么erf(x / σ√2)是标准差为«σ»的正态分布随机变量的取值与其期望值之差大于«x»的概率。
+
+示例 (三西格玛准则):
+
+``` sql
+SELECT erf(3 / sqrt(2))
+```
+
+    ┌─erf(divide(3, sqrt(2)))─┐
+    │      0.9973002039367398 │
+    └─────────────────────────┘
+
+## erfc(x) {#erfcx}
+
+接受一个数值参数并返回一个接近1 - erf(x)的Float64数字,但不会丢失大«x»值的精度。
+
+## lgamma(x) {#lgammax}
+
+返回x的伽玛函数的绝对值的自然对数。
+
+## tgamma(x) {#tgammax}
+
+返回x的伽玛函数。
+
+## sin(x) {#sinx}
+
+返回x的三角正弦值。
+
+## cos(x) {#cosx}
+
+返回x的三角余弦值。
+
+## tan(x) {#tanx}
+
+返回x的三角正切值。
+
+## asin(x) {#asinx}
+
+返回x的反三角正弦值。
+
+## acos(x) {#acosx}
+
+返回x的反三角余弦值。
+
+## atan(x) {#atanx}
+
+返回x的反三角正切值。
+
+## pow(x, y),power(x, y) {#powx-y-powerx-y}
+
+接受x和y两个参数。返回x的y次方。
+
+## intExp2 {#intexp2}
+
+接受一个数值类型的参数并返回它的2的x次幂(UInt64)。
+
+## intExp10 {#intexp10}
+
+接受一个数值类型的参数并返回它的10的x次幂(UInt64)。
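+
+下面的示例对比了上述几个指数函数(结果均为1024,但返回类型不同):
+
+``` sql
+SELECT pow(2, 10), exp2(10), intExp2(10)
+```
+
+    ┌─pow(2, 10)─┬─exp2(10)─┬─intExp2(10)─┐
+    │       1024 │     1024 │        1024 │
+    └────────────┴──────────┴─────────────┘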
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/math_functions/)
diff --git a/docs/zh/sql_reference/functions/other_functions.md b/docs/zh/sql_reference/functions/other_functions.md
new file mode 100644
index 00000000000..e0c7e47be58
--- /dev/null
+++ b/docs/zh/sql_reference/functions/other_functions.md
@@ -0,0 +1,538 @@
+
+# 其他函数 {#qi-ta-han-shu}
+
+## hostName() {#hostname}
+
+返回一个字符串,其中包含执行此函数的主机的名称。 对于分布式处理,如果在远程服务器上执行此函数,则将返回远程服务器主机的名称。
+
+## basename {#basename}
+
+返回最后一个斜杠或反斜杠之后的字符串文本。此函数通常用于从路径中提取文件名。
+
+    basename( expr )
+
+**参数**
+
+- `expr` — 任何一个返回[字符串](../../sql_reference/functions/other_functions.md)结果的表达式。[字符串](../../sql_reference/functions/other_functions.md)
+
+**返回值**
+
+一个String类型的值,其包含:
+
+- 在最后一个斜杠或反斜杠后的字符串文本内容。
+
+    如果输入的字符串以斜杆或反斜杆结尾,例如:`/`或`c:\`,函数将返回一个空字符串。
+
+- 如果输入的字符串中不包含斜杆或反斜杠,函数返回输入字符串本身。
+
+**示例**
+
+``` sql
+SELECT 'some/long/path/to/file' AS a, basename(a)
+```
+
+``` text
+┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐
+│ some\long\path\to\file │ file                                   │
+└────────────────────────┴────────────────────────────────────────┘
+```
+
+``` sql
+SELECT 'some\\long\\path\\to\\file' AS a, basename(a)
+```
+
+``` text
+┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐
+│ some\long\path\to\file │ file                                   │
+└────────────────────────┴────────────────────────────────────────┘
+```
+
+``` sql
+SELECT 'some-file-name' AS a, basename(a)
+```
+
+``` text
+┌─a──────────────┬─basename('some-file-name')─┐
+│ some-file-name │ some-file-name             │
+└────────────────┴────────────────────────────┘
+```
+
+## visibleWidth(x) {#visiblewidthx}
+
+以文本格式(制表符分隔)将值输出到控制台时,计算近似宽度。
+系统使用此函数实现Pretty格式。
+
+`NULL`在`Pretty`格式中输出为对应的`NULL`字符串。
+
+    SELECT visibleWidth(NULL)
+
+    ┌─visibleWidth(NULL)─┐
+    │                  4 │
+    └────────────────────┘
+
+## toTypeName(x) {#totypenamex}
+
+返回包含参数的类型名称的字符串。
+
+如果将`NULL`作为参数传递给函数,那么它返回`Nullable(Nothing)`类型,它对应于ClickHouse中的内部`NULL`。
+
+## blockSize() {#function-blocksize}
+
+获取Block的大小。
+在ClickHouse中,查询始终工作在Block(包含列的部分的集合)上。此函数允许您获取调用其的块的大小。
+
+## materialize(x) {#materializex}
+
+将一个常量列变为一个非常量列。
+在ClickHouse中,非常量列和常量列在内存中的表示方式不同。尽管函数对于常量列和非常量列总是返回相同的结果,但它们的工作方式可能完全不同(执行不同的代码)。此函数用于调试这种行为。
+
+## ignore(…) {#ignore}
+
+接受任何参数,包括`NULL`。始终返回0。
+但是,函数的参数总是被计算的。该函数可以用于基准测试。
+
+## sleep(seconds) {#sleepseconds}
+
+在每个Block上休眠'seconds'秒。可以是整数或浮点数。
+
+## sleepEachRow(seconds) {#sleepeachrowseconds}
+
+在每行上休眠'seconds'秒。可以是整数或浮点数。
+
+## currentDatabase() {#currentdatabase}
+
+返回当前数据库的名称。
+当您需要在CREATE TABLE中的表引擎参数中指定数据库,您可以使用此函数。
+
+## isFinite(x) {#isfinitex}
+
+接受Float32或Float64类型的参数,如果参数不是infinite且不是NaN,则返回1,否则返回0。
+
+## isInfinite(x) {#isinfinitex}
+
+接受Float32或Float64类型的参数,如果参数是infinite,则返回1,否则返回0。注意NaN返回0。
+
+## isNaN(x) {#isnanx}
+
+接受Float32或Float64类型的参数,如果参数是NaN,则返回1,否则返回0。
+
+## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column}
+
+接受常量字符串:数据库名称、表名称和列名称。 如果存在列,则返回等于1的UInt8常量表达式,否则返回0。 如果设置了hostname参数,则测试将在远程服务器上运行。
+如果表不存在,该函数将引发异常。
+对于嵌套数据结构中的元素,该函数检查是否存在列。 对于嵌套数据结构本身,函数返回0。
+
+## bar {#function-bar}
+
+使用unicode构建图表。
+
+`bar(x, min, max, width)` 绘制一个宽度与`(x - min)`成正比、且在`x = max`时等于`width`的字符带。
+
+参数:
+
+- `x` — 要显示的数值。
+- `min, max` — 整数常量,该值必须是`Int64`。
+- `width` — 常量,可以是正整数或小数。
+
+字符带的绘制精度是符号的八分之一。
+
+示例:
+
+``` sql
+SELECT
+    toHour(EventTime) AS h,
+    count() AS c,
+    bar(c, 0, 600000, 20) AS bar
+FROM test.hits
+GROUP BY h
+ORDER BY h ASC
+```
+
+    ┌──h─┬──────c─┬─bar────────────────┐
+    │  0 │ 292907 │ █████████▋         │
+    │  1 │ 180563 │ ██████             │
+    │  2 │ 114861 │ ███▋               │
+    │  3 │  85069 │ ██▋                │
+    │  4 │  68543 │ ██▎                │
+    │  5 │  78116 │ ██▌                │
+    │  6 │ 113474 │ ███▋               │
+    │  7 │ 170678 │ █████▋             │
+    │  8 │ 278380 │ █████████▎         │
+    │  9 │ 391053 │ █████████████      │
+    │ 10 │ 457681 │ ███████████████▎   │
+    │ 11 │ 493667 │ ████████████████▍  │
+    │ 12 │ 509641 │ ████████████████▊  │
+    │ 13 │ 522947 │ █████████████████▍ │
+    │ 14 │ 539954 │ █████████████████▊ │
+    │ 15 │ 528460 │ █████████████████▌ │
+    │ 16 │ 539201 │ █████████████████▊ │
+    │ 17 │ 523539 │ █████████████████▍ │
+    │ 18 │ 506467 │ ████████████████▊  │
+    │ 19 │ 520915 │ █████████████████▎ │
+    │ 20 │ 521665 │ █████████████████▍ │
+    │ 21 │ 542078 │ ██████████████████ │
+    │ 22 │ 493642 │ ████████████████▍  │
+    │ 23 │ 400397 │ █████████████▎     │
+    └────┴────────┴────────────────────┘
+
+## transform {#transform}
+
+按照明确定义的映射,将某些元素转换为其他元素。
+此函数有两种使用方式:
+
+1. `transform(x, array_from, array_to, default)`
+
+`x` – 要转换的值。
+
+`array_from` – 用于转换的常量数组。
+
+`array_to` – 将'from'中的值转换为的常量数组。
+
+`default` – 如果'x'不等于'from'中的任何值,则默认转换的值。
+
+`array_from` 和 `array_to` – 拥有相同大小的数组。
+
+类型约束:
+
+`transform(T, Array(T), Array(U), U) -> U`
+
+`T`和`U`可以是String,Date,DateTime或任意数值类型的。
+对于相同的字母(T或U),如果是数值类型,则它们不必完全匹配,只需要具有共同的类型即可。
+例如,第一个参数是Int64类型,第二个参数是Array(UInt16)类型。
+
+如果'x'值等于'array\_from'数组中的一个元素,它将从'array\_to'数组返回一个对应的元素(下标相同)。否则,它返回'default'。如果'array\_from'匹配到了多个元素,则返回第一个匹配的元素。
+
+示例:
+
+``` sql
+SELECT
+    transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title,
+    count() AS c
+FROM test.hits
+WHERE SearchEngineID != 0
+GROUP BY title
+ORDER BY c DESC
+```
+
+    ┌─title─────┬──────c─┐
+    │ Yandex    │ 498635 │
+    │ Google    │ 229872 │
+    │ Other     │ 104472 │
+    └───────────┴────────┘
+
+1. `transform(x, array_from, array_to)`
+
+与第一种不同在于省略了'default'参数。
+如果'x'值等于'array\_from'数组中的一个元素,它将从'array\_to'数组返回相应的元素(下标相同)。 否则,它返回'x'。
+
+类型约束:
+
+`transform(T, Array(T), Array(T)) -> T`
+
+示例:
+
+``` sql
+SELECT
+    transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s,
+    count() AS c
+FROM test.hits
+GROUP BY domain(Referer)
+ORDER BY count() DESC
+LIMIT 10
+```
+
+    ┌─s──────────────┬───────c─┐
+    │                │ 2906259 │
+    │ www.yandex     │  867767 │
+    │ ███████.ru     │  313599 │
+    │ mail.yandex.ru │  107147 │
+    │ ██████.ru      │  100355 │
+    │ █████████.ru   │   65040 │
+    │ news.yandex.ru │   64515 │
+    │ ██████.net     │   59141 │
+    │ example.com    │   57316 │
+    └────────────────┴─────────┘
+
+## formatReadableSize(x) {#formatreadablesizex}
+
+接受大小(字节数)。返回带有后缀(KiB, MiB等)的字符串。
+
+示例:
+
+``` sql
+SELECT
+    arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes,
+    formatReadableSize(filesize_bytes) AS filesize
+```
+
+    ┌─filesize_bytes─┬─filesize───┐
+    │              1 │ 1.00 B     │
+    │           1024 │ 1.00 KiB   │
+    │        1048576 │ 1.00 MiB   │
+    │      192851925 │ 183.92 MiB │
+    └────────────────┴────────────┘
+
+## least(a, b) {#leasta-b}
+
+返回a和b中的最小值。
+
+## greatest(a, b) {#greatesta-b}
+
+返回a和b中的最大值。
+
+## uptime() {#uptime}
+
+返回服务正常运行的秒数。
+
+## version() {#version}
+
+以字符串形式返回服务器的版本。
+
+## timezone() {#timezone}
+
+返回服务器的时区。
+
+## blockNumber {#blocknumber}
+
+返回行所在的Block的序列号。
+
+## rowNumberInBlock {#function-rownumberinblock}
+
+返回行所在Block中行的序列号。 针对不同的Block始终重新计算。
+
+## rowNumberInAllBlocks() {#rownumberinallblocks}
+
+返回行所在结果集中的序列号。此函数仅考虑受影响的Block。
+
+## runningDifference(x) {#other_functions-runningdifference}
+
+计算数据块中相邻行的值之间的差异。
+对于第一行返回0,并为每个后续行返回与前一行的差异。
+
+函数的结果取决于受影响的Block和Block中的数据顺序。
+如果使用ORDER BY创建子查询并从子查询外部调用该函数,则可以获得预期结果。
+
+示例:
+
+``` sql
+SELECT
+    EventID,
+    EventTime,
+    runningDifference(EventTime) AS delta
+FROM
+(
+    SELECT
+        EventID,
+        EventTime
+    FROM events
+    WHERE EventDate = '2016-11-24'
+    ORDER BY EventTime ASC
+    LIMIT 5
+)
+```
+
+    ┌─EventID─┬───────────EventTime─┬─delta─┐
+    │    1106 │ 2016-11-24 00:00:04 │     0 │
+    │    1107 │ 2016-11-24 00:00:05 │     1 │
+    │    1108 │ 2016-11-24 00:00:05 │     0 │
+    │    1109 │ 2016-11-24 00:00:09 │     4 │
+    │    1110 │ 2016-11-24 00:00:10 │     1 │
+    └─────────┴─────────────────────┴───────┘
+
+## runningDifferenceStartingWithFirstValue {#runningdifferencestartingwithfirstvalue}
+
+与[runningDifference](./other_functions.md#other_functions-runningdifference)相同,区别在于第一行返回第一行本身的值,后续每行返回与前一行的差值。
+
+## MACNumToString(num) {#macnumtostringnum}
+
+接受一个UInt64类型的数字。 将其解释为big endian的MAC地址。 返回包含相应MAC地址的字符串,格式为AA:BB:CC:DD:EE:FF(以冒号分隔的十六进制形式的数字)。
+
+## MACStringToNum(s) {#macstringtonums}
+
+与MACNumToString相反。 如果MAC地址格式无效,则返回0。
+
+## MACStringToOUI(s) {#macstringtoouis}
+
+接受格式为AA:BB:CC:DD:EE:FF(十六进制形式的冒号分隔数字)的MAC地址。
返回前三个八位字节作为UInt64编号。 如果MAC地址格式无效,则返回0。 + +## getSizeOfEnumType {#getsizeofenumtype} + +返回[枚举](../../sql_reference/functions/other_functions.md)中的枚举数量。 + + getSizeOfEnumType(value) + +**参数:** + +- `value` — `Enum`类型的值。 + +**返回值** + +- `Enum`的枚举数量。 +- 如果类型不是`Enum`,则抛出异常。 + +**示例** + + SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x + + ┌─x─┐ + │ 2 │ + └───┘ + +## toColumnTypeName {#tocolumntypename} + +返回在RAM中列的数据类型的名称。 + + toColumnTypeName(value) + +**参数:** + +- `value` — 任何类型的值。 + +**返回值** + +- 一个字符串,其内容是`value`在RAM中的类型名称。 + +**`toTypeName ' 与 ' toColumnTypeName`的区别示例** + + :) select toTypeName(cast('2018-01-01 01:02:03' AS DateTime)) + + SELECT toTypeName(CAST('2018-01-01 01:02:03', 'DateTime')) + + ┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ + │ DateTime │ + └─────────────────────────────────────────────────────┘ + + 1 rows in set. Elapsed: 0.008 sec. + + :) select toColumnTypeName(cast('2018-01-01 01:02:03' AS DateTime)) + + SELECT toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime')) + + ┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ + │ Const(UInt32) │ + └───────────────────────────────────────────────────────────┘ + +该示例显示`DateTime`数据类型作为`Const(UInt32)`存储在内存中。 + +## dumpColumnStructure {#dumpcolumnstructure} + +输出在RAM中的数据结果的详细信息。 + + dumpColumnStructure(value) + +**参数:** + +- `value` — 任何类型的值. + +**返回值** + +- 一个字符串,其内容是`value`在RAM中的数据结构的详细描述。 + +**示例** + + SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) + + ┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ + │ DateTime, Const(size = 1, UInt32(size = 1)) │ + └──────────────────────────────────────────────────────────────┘ + +## defaultValueOfArgumentType {#defaultvalueofargumenttype} + +输出数据类型的默认值。 + +不包括用户设置的自定义列的默认值。 + + defaultValueOfArgumentType(expression) + +**参数:** + +- `expression` — 任意类型的值或导致任意类型值的表达式。 + +**返回值** + +- 数值类型返回`0`。 +- 字符串类型返回空的字符串。 +- [可为空](../../sql_reference/functions/other_functions.md)类型返回`ᴺᵁᴸᴸ`。 + +**示例** + + :) SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) + + SELECT defaultValueOfArgumentType(CAST(1, 'Int8')) + + ┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ + │ 0 │ + └─────────────────────────────────────────────┘ + + 1 rows in set. Elapsed: 0.002 sec. + + :) SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) + + SELECT defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)')) + + ┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ + │ ᴺᵁᴸᴸ │ + └───────────────────────────────────────────────────────┘ + + 1 rows in set. Elapsed: 0.002 sec. 
+
+## replicate {#replicate}
+
+使用单个值填充一个数组。
+
+用于[arrayJoin](array_join.md#functions_arrayjoin)的内部实现。
+
+    replicate(x, arr)
+
+**参数:**
+
+-   `arr` — 原始数组。ClickHouse创建一个与原始数组长度相同的新数组,并用值`x`填充它。
+-   `x` — 生成的数组将被填充的值。
+
+**输出**
+
+-   一个被`x`填充的数组。
+
+**示例**
+
+    SELECT replicate(1, ['a', 'b', 'c'])
+
+    ┌─replicate(1, ['a', 'b', 'c'])─┐
+    │ [1,1,1]                       │
+    └───────────────────────────────┘
+
+## filesystemAvailable {#filesystemavailable}
+
+返回磁盘的剩余空间信息(以字节为单位)。此信息基于配置文件中的path配置进行计算。
+
+## filesystemCapacity {#filesystemcapacity}
+
+返回磁盘的容量信息,以字节为单位。此信息同样基于配置文件中的path配置进行计算。
+
+## finalizeAggregation {#function-finalizeaggregation}
+
+获取聚合函数的状态。返回聚合结果(最终状态)。
+
+## runningAccumulate {#function-runningaccumulate}
+
+获取聚合函数的状态并返回其具体的值。这是从第一行到当前行的所有行累计的结果。
+
+例如,获取聚合函数的状态(示例runningAccumulate(uniqState(UserID))),对于数据块的每一行,返回所有先前行和当前行的状态合并后的聚合函数的结果。
+因此,函数的结果取决于分区中数据块的顺序以及数据块中行的顺序。
+
+## joinGet(‘join\_storage\_table\_name’, ‘get\_column’,join\_key) {#joingetjoin-storage-table-name-get-column-join-key}
+
+使用指定的连接键从Join类型引擎的表中获取数据。
+
+## modelEvaluate(model\_name, …) {#function-modelevaluate}
+
+使用外部模型计算。
+接受模型的名称以及模型的参数。返回Float64类型的值。
+
+## throwIf(x) {#throwifx}
+
+如果参数不为零则抛出异常。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/other_functions/)
diff --git a/docs/zh/sql_reference/functions/random_functions.md b/docs/zh/sql_reference/functions/random_functions.md
new file mode 100644
index 00000000000..1db2f4a8438
--- /dev/null
+++ b/docs/zh/sql_reference/functions/random_functions.md
@@ -0,0 +1,22 @@
+
+# 随机函数 {#sui-ji-han-shu}
+
+随机函数使用非加密方式生成伪随机数字。
+
+所有随机函数都只接受一个参数或不接受任何参数。
+您可以向它传递任何类型的参数,但传递的参数不会用于随机数的生成过程。
+此参数的唯一目的是防止公共子表达式消除,以便在相同的查询中使用相同的随机函数生成不同的随机数。
+
+## rand {#rand}
+
+返回一个UInt32类型的随机数字,所有UInt32类型的数字被生成的概率均相等。此函数使用线性同余生成器生成随机数。
+
+## rand64 {#rand64}
+
+返回一个UInt64类型的随机数字,所有UInt64类型的数字被生成的概率均相等。此函数使用线性同余生成器生成随机数。
+
+## randConstant {#randconstant}
+
+返回一个UInt32类型的随机数字,与其他随机函数的不同之处在于它仅为每个数据块生成一个随机数。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/random_functions/)
diff --git a/docs/zh/sql_reference/functions/rounding_functions.md b/docs/zh/sql_reference/functions/rounding_functions.md
new file mode 100644
index 00000000000..773f969090d
--- /dev/null
+++ b/docs/zh/sql_reference/functions/rounding_functions.md
@@ -0,0 +1,87 @@
+
+# 取整函数 {#qu-zheng-han-shu}
+
+## floor(x\[,N\]) {#floorx-n}
+
+返回小于或等于x的最大舍入数。舍入数是1/10N的整数倍;如果1/10N不精确,则取最接近的适当数据类型的数。
+'N'是一个整数常量,可选参数。默认为0,表示取整到整数。
+'N'可以是负数。
+
+示例: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.`
+
+`x`可以是任何数字类型。结果与其类型相同。
+对于整数参数,使用负'N'值进行舍入是有意义的(对于非负«N»,该函数不执行任何操作)。
+如果取整导致溢出(例如,floor(-128,-1)),则返回特定于实现的结果。
+
+## ceil(x\[,N\]),ceiling(x\[,N\]) {#ceilx-n-ceilingx-n}
+
+返回大于或等于'x'的最小舍入数。其他方面与'floor'函数相同(见上文)。
+
+## round(x\[,N\]) {#rounding_functions-round}
+
+将值取整到指定的小数位数。
+
+该函数返回指定精度下最接近的数字。如果给定数字与两个相邻的舍入数字距离相等,则函数返回其中最接近偶数的数字(银行家舍入)。
+
+    round(expression [, decimal_places])
+
+**参数:**
+
+-   `expression` — 要进行取整的数字。可以是任何返回数字[类型](../../sql_reference/functions/rounding_functions.md#data_types)的[表达式](../syntax.md#syntax-expressions)。
+-   `decimal-places` — 整数类型。
+    -   如果`decimal-places > 0`,则该函数将值舍入到小数点右侧。
+    -   如果`decimal-places < 0`,则该函数将值舍入到小数点左侧。
+    -   如果`decimal-places = 0`,则该函数将该值舍入为整数。在这种情况下,可以省略该参数。
+
+**返回值:**
+
+与输入数字相同类型的取整后的数字。
+
+### 示例 {#shi-li}
+
+**使用示例**
+
+``` sql
+SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
+```
+
+    ┌───x─┬─round(divide(number, 2))─┐
+    │   0 │                        0 │
+    │ 0.5 │                        0 │
+    │   1 │                        1 │
+    └─────┴──────────────────────────┘
+
+**取整的示例**
+
+取整到最近的数字。
+
+    round(3.2, 0) = 3
+    round(4.1267, 2) = 4.13
+    round(22,-1) = 20
+    round(467,-2) = 500
+    round(-467,-2) = -500
+
+银行家舍入的示例。
+
+    round(3.5) = 4
+    round(4.5) = 4
+    round(3.55, 1) = 3.6
+    round(3.65, 1) = 3.6
+
+## roundToExp2(num) {#roundtoexp2num}
+
+接受一个数字。如果数字小于1,则返回0。否则,它将数字向下舍入到最接近的(整个非负)2的x次幂。
+
+## roundDuration(num) {#rounddurationnum}
+
+接受一个数字。如果数字小于1,则返回0。否则,它将数字向下舍入为集合中的数字:1,10,30,60,120,180,240,300,600,1200,1800,3600,7200,18000,36000。此函数用于Yandex.Metrica报表中计算会话的持续时长。
+
+## roundAge(num) {#roundagenum}
+
+接受一个数字。如果数字小于18,则返回0。否则,它将数字向下舍入为集合中的数字:18,25,35,45,55。此函数用于Yandex.Metrica报表中用户年龄的计算。
+
+## roundDown(num,arr) {#rounddownnum-arr}
+
+接受一个数字,将其向下舍入到指定数组中的元素。如果该值小于数组中的最低边界,则返回最低边界。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/)
diff --git a/docs/zh/sql_reference/functions/splitting_merging_functions.md b/docs/zh/sql_reference/functions/splitting_merging_functions.md
new file mode 100644
index 00000000000..d217ea19f0d
--- /dev/null
+++ b/docs/zh/sql_reference/functions/splitting_merging_functions.md
@@ -0,0 +1,30 @@
+
+# 字符串拆分合并函数 {#zi-fu-chuan-chai-fen-he-bing-han-shu}
+
+## splitByChar(separator,s) {#splitbycharseparator-s}
+
+将字符串以'separator'拆分成多个子串。'separator'必须为仅包含一个字符的字符串常量。
+返回拆分后的子串的数组。如果分隔符出现在字符串的开头或结尾,或者如果有多个连续的分隔符,则将在对应位置填充空的子串。
+
+## splitByString(separator,s) {#splitbystringseparator-s}
+
+与上面相同,但它使用多个字符的字符串作为分隔符。该字符串必须为非空。
+
+## arrayStringConcat(arr\[,separator\]) {#arraystringconcatarr-separator}
+
+使用separator将数组中列出的字符串拼接起来。'separator'是一个可选参数:一个常量字符串,默认情况下设置为空字符串。
+返回拼接后的字符串。
+
+## alphaTokens(s) {#alphatokenss}
+
+从范围a-z和A-Z中选择连续字节组成的子字符串。返回子字符串数组。
+
+**示例:**
+
+    SELECT alphaTokens('abca1abc')
+
+    ┌─alphaTokens('abca1abc')─┐
+    │ ['abca','abc']          │
+    └─────────────────────────┘
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/)
diff --git a/docs/zh/sql_reference/functions/string_functions.md b/docs/zh/sql_reference/functions/string_functions.md
new file mode 100644
index 00000000000..c04305b9d67
--- /dev/null
+++ b/docs/zh/sql_reference/functions/string_functions.md
@@ -0,0 +1,170 @@
+
+# 字符串函数 {#zi-fu-chuan-han-shu}
+
+## empty {#string-functions-empty}
+
+对于空字符串返回1,对于非空字符串返回0。
+结果类型是UInt8。
+如果字符串包含至少一个字节,则该字符串被视为非空字符串,即使这个字节是空格或空字符。
+该函数也适用于数组。
+
+## notEmpty {#notempty}
+
+对于空字符串返回0,对于非空字符串返回1。
+结果类型是UInt8。
+该函数也适用于数组。
+
+## length {#length}
+
+返回字符串的字节长度。
+结果类型是UInt64。
+该函数也适用于数组。
+
+## lengthUTF8 {#lengthutf8}
+
+假定字符串是由UTF-8编码的文本组成,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。
+结果类型是UInt64。
+
+## char\_length,CHAR\_LENGTH {#char-length-char-length}
+
+假定字符串是由UTF-8编码的文本组成,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。
+结果类型是UInt64。
+
+## character\_length,CHARACTER\_LENGTH {#character-length-character-length}
+
+假定字符串是由UTF-8编码的文本组成,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。
+结果类型是UInt64。
+
+## lower,lcase {#lower-lcase}
+
+将字符串中的ASCII字符转换为小写。
+
+## upper,ucase {#upper-ucase}
+
+将字符串中的ASCII字符转换为大写。
+
+## lowerUTF8 {#lowerutf8}
+
+将字符串转换为小写,函数假设字符串是以UTF-8编码的文本。
+该函数不会检测语言,因此对土耳其语来说,结果可能不完全正确。
+如果UTF-8字节序列的长度对于代码点的大写和小写不同,则该代码点的结果可能不正确。
+如果字符串包含一组非UTF-8的字节,则其行为是未定义的。
+
+## upperUTF8 {#upperutf8}
+
+将字符串转换为大写,函数假设字符串是以UTF-8编码的文本。
+该函数不会检测语言,因此对土耳其语来说,结果可能不完全正确。
+如果UTF-8字节序列的长度对于代码点的大写和小写不同,则该代码点的结果可能不正确。
+如果字符串包含一组非UTF-8的字节,则其行为是未定义的。
+
+## isValidUTF8 {#isvalidutf8}
+
+检查字符串是否为有效的UTF-8编码,是则返回1,否则返回0。
+
+## toValidUTF8 {#tovalidutf8}
+
+用`�`(U+FFFD)字符替换无效的UTF-8字符。所有连续的无效字符都会被替换为一个替换字符。
+
+    toValidUTF8( input_string )
+
+参数:
+
+-   input\_string — 任何一个[字符串](../../sql_reference/functions/string_functions.md)类型的对象。
+
+返回值: 有效的UTF-8字符串。
+
+### 示例 {#shi-li}
+
+``` sql
+SELECT toValidUTF8('\x61\xF0\x80\x80\x80b')
+```
+
+``` text
+┌─toValidUTF8('a����b')─┐ +│ a�b │ +└───────────────────────┘ +``` + +## 反向 {#reverse} + +反转字符串。 + +## reverseUTF8 {#reverseutf8} + +以Unicode字符为单位反转UTF-8编码的字符串。如果字符串不是UTF-8编码,则可能获取到一个非预期的结果(不会抛出异常)。 + +## format(pattern, s0, s1, …) {#formatpattern-s0-s1} + +使用常量字符串`pattern`格式化其他参数。`pattern`字符串中包含由大括号`{}`包围的«替换字段»。 未被包含在大括号中的任何内容都被视为文本内容,它将原样保留在返回值中。 如果你需要在文本内容中包含一个大括号字符,它可以通过加倍来转义:`{{ '{{' }}`和`{{ '{{' }} '}}' }}`。 字段名称可以是数字(从零开始)或空(然后将它们视为连续数字) + +``` sql +SELECT format('{1} {0} {1}', 'World', 'Hello') + +┌─format('{1} {0} {1}', 'World', 'Hello')─┐ +│ Hello World Hello │ +└─────────────────────────────────────────┘ + +SELECT format('{} {}', 'Hello', 'World') + +┌─format('{} {}', 'Hello', 'World')─┐ +│ Hello World │ +└───────────────────────────────────┘ +``` + +## concat(s1, s2, …) {#concats1-s2} + +将参数中的多个字符串拼接,不带分隔符。 + +## concatAssumeInjective(s1, s2, …) {#concatassumeinjectives1-s2} + +与[concat](./string_functions.md#concat-s1-s2)相同,区别在于,你需要保证concat(s1, s2, s3) -\> s4是单射的,它将用于GROUP BY的优化。 + +## 子串(s,offset,length),mid(s,offset,length),substr(s,offset,length) {#substrings-offset-length-mids-offset-length-substrs-offset-length} + +以字节为单位截取指定位置字符串,返回以'offset'位置为开头,长度为'length'的子串。'offset'从1开始(与标准SQL相同)。'offset'和'length'参数必须是常量。 + +## substringf8(s,offset,length) {#substringutf8s-offset-length} + +与'substring'相同,但其操作单位为Unicode字符,函数假设字符串是以UTF-8进行编码的文本。如果不是则可能返回一个预期外的结果(不会抛出异常)。 + +## appendTrailingCharIfAbsent(s,c) {#appendtrailingcharifabsents-c} + +如果's'字符串非空并且末尾不包含'c'字符,则将'c'字符附加到末尾。 + +## convertCharset(s,from,to) {#convertcharsets-from-to} + +返回从'from'中的编码转换为'to'中的编码的字符串's'。 + +## base64Encode(s) {#base64encodes} + +将字符串's'编码成base64 + +## base64Decode(s) {#base64decodes} + +使用base64将字符串解码成原始字符串。如果失败则抛出异常。 + +## tryBase64Decode(s) {#trybase64decodes} + +使用base64将字符串解码成原始字符串。但如果出现错误,将返回空字符串。 + +## endsWith(s,后缀) {#endswiths-suffix} + +返回是否以指定的后缀结尾。如果字符串以指定的后缀结束,则返回1,否则返回0。 + +## 开始使用(s,前缀) {#startswiths-prefix} + +返回是否以指定的前缀开头。如果字符串以指定的前缀开头,则返回1,否则返回0。 + +## trimLeft(s) {#trimlefts} + +返回一个字符串,用于删除左侧的空白字符。 + +## trimRight(s) {#trimrights} + +返回一个字符串,用于删除右侧的空白字符。 + +## trimBoth(s) {#trimboths} + +返回一个字符串,用于删除任一侧的空白字符。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) diff --git a/docs/zh/sql_reference/functions/string_replace_functions.md b/docs/zh/sql_reference/functions/string_replace_functions.md new file mode 100644 index 00000000000..04b110a2cef --- /dev/null +++ b/docs/zh/sql_reference/functions/string_replace_functions.md @@ -0,0 +1,80 @@ + +# 字符串替换函数 {#zi-fu-chuan-ti-huan-han-shu} + +## replaceOne(大海捞针,模式,更换) {#replaceonehaystack-pattern-replacement} + +用'replacement'子串替换'haystack'中与'pattern'子串第一个匹配的匹配项(如果存在)。 +'pattern'和'replacement'必须是常量。 + +## replaceAll(大海捞针,模式,替换),替换(大海捞针,模式,替换) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement} + +用'replacement'子串替换'haystack'中出现的所有'pattern'子串。 + +## replaceRegexpOne(大海捞针,模式,更换) {#replaceregexponehaystack-pattern-replacement} + +使用'pattern'正则表达式替换。 ‘pattern’可以是任意一个有效的re2正则表达式。 +如果存在与正则表达式匹配的匹配项,仅替换第一个匹配项。 +同时‘replacement’可以指定为正则表达式中的捕获组。可以包含`\0-\9`。 +在这种情况下,函数将使用正则表达式的整个匹配项替换‘\\0’。使用其他与之对应的子模式替换对应的'\\1-\\9'。要在模版中使用''字符,请使用''将其转义。 +另外还请记住,字符串文字需要额外的转义。 + +示例1.将日期转换为美国格式: + +``` sql +SELECT DISTINCT + EventDate, + replaceRegexpOne(toString(EventDate), '(\\d{4})-(\\d{2})-(\\d{2})', '\\2/\\3/\\1') AS res +FROM test.hits +LIMIT 7 +FORMAT TabSeparated +``` + + 2014-03-17 03/17/2014 + 2014-03-18 03/18/2014 + 2014-03-19 03/19/2014 + 2014-03-20 03/20/2014 + 2014-03-21 03/21/2014 + 
2014-03-22 03/22/2014 + 2014-03-23 03/23/2014 + +示例2.复制字符串十次: + +``` sql +SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') AS res +``` + + ┌─res────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! │ + └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ + +## replaceRegexpAll(大海捞针,模式,替换) {#replaceregexpallhaystack-pattern-replacement} + +与replaceRegexpOne相同,但会替换所有出现的匹配项。例如: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res +``` + + ┌─res────────────────────────┐ + │ HHeelllloo,, WWoorrlldd!! │ + └────────────────────────────┘ + +例外的是,如果使用正则表达式捕获空白子串,则仅会进行一次替换。 +示例: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res +``` + + ┌─res─────────────────┐ + │ here: Hello, World! │ + └─────────────────────┘ + +## regexpQuoteMeta(s) {#regexpquotemetas} + +该函数用于在字符串中的某些预定义字符之前添加反斜杠。 +预定义字符:‘0’,‘\\’,‘\|’,‘(’,‘)’,‘^’,‘$’,‘。’,‘\[’,'\]',‘?’,‘\*’,‘+’,‘{’,‘:’,' - '。 +这个实现与re2 :: RE2 :: QuoteMeta略有不同。它以\\0而不是00转义零字节,它只转义所需的字符。 +有关详细信息,请参阅链接:\[RE2\](https://github.com/google/re2/blob/master/re2/re2.cc\#L473) + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/) diff --git a/docs/zh/sql_reference/functions/string_search_functions.md b/docs/zh/sql_reference/functions/string_search_functions.md new file mode 100644 index 00000000000..e0f5e06a357 --- /dev/null +++ b/docs/zh/sql_reference/functions/string_search_functions.md @@ -0,0 +1,123 @@ + +# 字符串搜索函数 {#zi-fu-chuan-sou-suo-han-shu} + +下列所有函数在默认的情况下区分大小写。对于不区分大小写的搜索,存在单独的变体。 + +## 位置(大海捞针),定位(大海捞针) {#positionhaystack-needle-locatehaystack-needle} + +在字符串`haystack`中搜索子串`needle`。 +返回子串的位置(以字节为单位),从1开始,如果未找到子串,则返回0。 + +对于不区分大小写的搜索,请使用函数`positionCaseInsensitive`。 + +## positionUTF8(大海捞针) {#positionutf8haystack-needle} + +与`position`相同,但位置以Unicode字符返回。此函数工作在UTF-8编码的文本字符集中。如非此编码的字符集,则返回一些非预期结果(他不会抛出异常)。 + +对于不区分大小写的搜索,请使用函数`positionCaseInsensitiveUTF8`。 + +## 多搜索分配(干草堆,\[针1,针2, …, needlen\]) {#multisearchallpositionshaystack-needle1-needle2-needlen} + +与`position`相同,但函数返回一个数组,其中包含所有匹配needle的位置。 + +对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchAllPositionsCaseInsensitive,multiSearchAllPositionsUTF8,multiSearchAllPositionsCaseInsensitiveUTF8`。 + +## multiSearchFirstPosition(大海捞针,\[针1,针2, …, needlen\]) {#multisearchfirstpositionhaystack-needle1-needle2-needlen} + +与`position`相同,但返回在`haystack`中与needles字符串匹配的最左偏移。 + +对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchFirstPositionCaseInsensitive,multiSearchFirstPositionUTF8,multiSearchFirstPositionCaseInsensitiveUTF8`。 + +## multiSearchFirstIndex(大海捞针,\[针1,针2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} + +返回在字符串`haystack`中最先查找到的needle的索引`i`(从1开始),没有找到任何匹配项则返回0。 + +对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchFirstIndexCaseInsensitive,multiSearchFirstIndexUTF8,multiSearchFirstIndexCaseInsensitiveUTF8`。 + +## 多搜索(大海捞针,\[针1,针2, …, needlen\]) {#multisearchanyhaystack-needle1-needle2-needlen} + +如果`haystack`中至少存在一个needle匹配则返回1,否则返回0。 + +对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchAnyCaseInsensitive,multiSearchAnyUTF8,multiSearchAnyCaseInsensitiveUTF8`。 + +!!! 
note "注意" + 在所有`multiSearch*`函数中,由于实现规范,needles的数量应小于28。 + +## 匹配(大海捞针,模式) {#matchhaystack-pattern} + +检查字符串是否与`pattern`正则表达式匹配。`pattern`可以是一个任意的`re2`正则表达式。 `re2`正则表达式的[语法](https://github.com/google/re2/wiki/Syntax)比Perl正则表达式的语法存在更多限制。 + +如果不匹配返回0,否则返回1。 + +请注意,反斜杠符号(`\`)用于在正则表达式中转义。由于字符串中采用相同的符号来进行转义。因此,为了在正则表达式中转义符号,必须在字符串文字中写入两个反斜杠(\\)。 + +正则表达式与字符串一起使用,就像它是一组字节一样。正则表达式中不能包含空字节。 +对于在字符串中搜索子字符串的模式,最好使用LIKE或«position»,因为它们更加高效。 + +## multiMatchAny(大海捞针,\[模式1,模式2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} + +与`match`相同,但如果所有正则表达式都不匹配,则返回0;如果任何模式匹配,则返回1。它使用[超扫描](https://github.com/intel/hyperscan)库。对于在字符串中搜索子字符串的模式,最好使用«multisearchany»,因为它更高效。 + +!!! note "注意" + 任何`haystack`字符串的长度必须小于232\字节,否则抛出异常。这种限制是因为hyperscan API而产生的。 + +## multiMatchAnyIndex(大海捞针,\[模式1,模式2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} + +与`multiMatchAny`相同,但返回与haystack匹配的任何内容的索引位置。 + +## multiFuzzyMatchAny(干草堆,距离,\[模式1,模式2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} + +与`multiMatchAny`相同,但如果在haystack能够查找到任何模式匹配能够在指定的[编辑距离](https://en.wikipedia.org/wiki/Edit_distance)内进行匹配,则返回1。此功能也处于实验模式,可能非常慢。有关更多信息,请参阅[hyperscan文档](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching)。 + +## multiFuzzyMatchAnyIndex(大海捞针,距离,\[模式1,模式2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} + +与`multiFuzzyMatchAny`相同,但返回匹配项的匹配能容的索引位置。 + +!!! note "注意" + `multiFuzzyMatch*`函数不支持UTF-8正则表达式,由于hyperscan限制,这些表达式被按字节解析。 + +!!! note "注意" + 如要关闭所有hyperscan函数的使用,请设置`SET allow_hyperscan = 0;`。 + +## 提取(大海捞针,图案) {#extracthaystack-pattern} + +使用正则表达式截取字符串。如果'haystack'与'pattern'不匹配,则返回空字符串。如果正则表达式中不包含子模式,它将获取与整个正则表达式匹配的子串。否则,它将获取与第一个子模式匹配的子串。 + +## extractAll(大海捞针,图案) {#extractallhaystack-pattern} + +使用正则表达式提取字符串的所有片段。如果'haystack'与'pattern'正则表达式不匹配,则返回一个空字符串。否则返回所有与正则表达式匹配的字符串数组。通常,行为与'extract'函数相同(它采用第一个子模式,如果没有子模式,则采用整个表达式)。 + +## 像(干草堆,模式),干草堆像模式运算符 {#likehaystack-pattern-haystack-like-pattern-operator} + +检查字符串是否与简单正则表达式匹配。 +正则表达式可以包含的元符号有`%`和`_`。 + +`%` 表示任何字节数(包括零字符)。 + +`_` 表示任何一个字节。 + +可以使用反斜杠(`\`)来对元符号进行转义。请参阅«match»函数说明中有关转义的说明。 + +对于像`%needle%`这样的正则表达式,改函数与`position`函数一样快。 +对于其他正则表达式,函数与'match'函数相同。 + +## 不喜欢(干草堆,模式),干草堆不喜欢模式运算符 {#notlikehaystack-pattern-haystack-not-like-pattern-operator} + +与'like'函数返回相反的结果。 + +## 大海捞针) {#ngramdistancehaystack-needle} + +基于4-gram计算`haystack`和`needle`之间的距离:计算两个4-gram集合之间的对称差异,并用它们的基数和对其进行归一化。返回0到1之间的任何浮点数 – 越接近0则表示越多的字符串彼此相似。如果常量的`needle`或`haystack`超过32KB,函数将抛出异常。如果非常量的`haystack`或`needle`字符串超过32Kb,则距离始终为1。 + +对于不区分大小写的搜索或/和UTF-8格式,使用函数`ngramDistanceCaseInsensitive,ngramDistanceUTF8,ngramDistanceCaseInsensitiveUTF8`。 + +## ツ暗ェツ氾环催ツ団ツ法ツ人) {#ngramsearchhaystack-needle} + +与`ngramDistance`相同,但计算`needle`和`haystack`之间的非对称差异——`needle`的n-gram减去`needle`归一化n-gram。可用于模糊字符串搜索。 + +对于不区分大小写的搜索或/和UTF-8格式,使用函数`ngramSearchCaseInsensitive,ngramSearchUTF8,ngramSearchCaseInsensitiveUTF8`。 + +!!! 
note "注意" + 对于UTF-8,我们使用3-gram。所有这些都不是完全公平的n-gram距离。我们使用2字节哈希来散列n-gram,然后计算这些哈希表之间的(非)对称差异 - 可能会发生冲突。对于UTF-8不区分大小写的格式,我们不使用公平的`tolower`函数 - 我们将每个Unicode字符字节的第5位(从零开始)和字节的第一位归零 - 这适用于拉丁语,主要用于所有西里尔字母。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/zh/sql_reference/functions/type_conversion_functions.md b/docs/zh/sql_reference/functions/type_conversion_functions.md new file mode 100644 index 00000000000..56375eabc07 --- /dev/null +++ b/docs/zh/sql_reference/functions/type_conversion_functions.md @@ -0,0 +1,170 @@ + +# 类型转换函数 {#lei-xing-zhuan-huan-han-shu} + +## toUInt8,toUInt16,toUInt32,toUInt64 {#touint8-touint16-touint32-touint64} + +## toInt8,toInt16,toInt32,toInt64 {#toint8-toint16-toint32-toint64} + +## toFloat32,toFloat64 {#tofloat32-tofloat64} + +## 今天,今天 {#todate-todatetime} + +## toUInt8OrZero,toUInt16OrZero,toUInt32OrZero,toUInt64OrZero,toInt8OrZero,toInt16OrZero,toInt32OrZero,toInt64OrZero,toFloat32OrZero,toFloat64OrZero,toDateOrZero,toDateTimeOrZero {#touint8orzero-touint16orzero-touint32orzero-touint64orzero-toint8orzero-toint16orzero-toint32orzero-toint64orzero-tofloat32orzero-tofloat64orzero-todateorzero-todatetimeorzero} + +## toUInt8OrNull,toUInt16OrNull,toUInt32OrNull,toUInt64OrNull,toInt8OrNull,toInt16OrNull,toInt32OrNull,toInt64OrNull,toFloat32OrNull,toFloat64OrNull,toDateOrNull,toDateTimeOrNull {#touint8ornull-touint16ornull-touint32ornull-touint64ornull-toint8ornull-toint16ornull-toint32ornull-toint64ornull-tofloat32ornull-tofloat64ornull-todateornull-todatetimeornull} + +## toString {#tostring} + +这些函数用于在数字、字符串(不包含FixedString)、Date以及DateTime之间互相转换。 +所有的函数都接受一个参数。 + +当将其他类型转换到字符串或从字符串转换到其他类型时,使用与TabSeparated格式相同的规则对字符串的值进行格式化或解析。如果无法解析字符串则抛出异常并取消查询。 + +当将Date转换为数字或反之,Date对应Unix时间戳的天数。 +将DataTime转换为数字或反之,DateTime对应Unix时间戳的秒数。 + +toDate/toDateTime函数的日期和日期时间格式定义如下: + + YYYY-MM-DD + YYYY-MM-DD hh:mm:ss + +例外的是,如果将UInt32、Int32、UInt64或Int64类型的数值转换为Date类型,并且其对应的值大于等于65536,则该数值将被解析成unix时间戳(而不是对应的天数)。这意味着允许写入'toDate(unix\_timestamp)'这种常见情况,否则这将是错误的,并且需要便携更加繁琐的'toDate(toDateTime(unix\_timestamp))'。 + +Date与DateTime之间的转换以更为自然的方式进行:通过添加空的time或删除time。 + +数值类型之间的转换与C++中不同数字类型之间的赋值相同的规则。 + +此外,DateTime参数的toString函数可以在第二个参数中包含时区名称。 例如:`Asia/Yekaterinburg`在这种情况下,时间根据指定的时区进行格式化。 + +``` sql +SELECT + now() AS now_local, + toString(now(), 'Asia/Yekaterinburg') AS now_yekat +``` + + ┌───────────now_local─┬─now_yekat───────────┐ + │ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │ + └─────────────────────┴─────────────────────┘ + +另请参阅`toUnixTimestamp`函数。 + +## toDecimal32(value,S),toDecimal64(value,S),toDecimal128(value,S) {#todecimal32value-s-todecimal64value-s-todecimal128value-s} + +将`value`转换为精度为`S`的[十进制](../../sql_reference/functions/type_conversion_functions.md)。`value`可以是数字或字符串。`S`参数为指定的小数位数。 + +## toFixedString(s,N) {#tofixedstrings-n} + +将String类型的参数转换为FixedString(N)类型的值(具有固定长度N的字符串)。N必须是一个常量。 +如果字符串的字节数少于N,则向右填充空字节。如果字符串的字节数多于N,则抛出异常。 + +## toStringCutToZero(s) {#tostringcuttozeros} + +接受String或FixedString参数。返回String,其内容在找到的第一个零字节处被截断。 + +示例: + +``` sql +SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut +``` + + ┌─s─────────────┬─s_cut─┐ + │ foo\0\0\0\0\0 │ foo │ + └───────────────┴───────┘ + +``` sql +SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut +``` + + ┌─s──────────┬─s_cut─┐ + │ foo\0bar\0 │ foo │ + └────────────┴───────┘ + +## reinterpretAsUInt8,reinterpretAsUInt16,reinterpretAsUInt32,reinterpretAsUInt64 
{#reinterpretasuint8-reinterpretasuint16-reinterpretasuint32-reinterpretasuint64}
+
+## reinterpretAsInt8,reinterpretAsInt16,reinterpretAsInt32,reinterpretAsInt64 {#reinterpretasint8-reinterpretasint16-reinterpretasint32-reinterpretasint64}
+
+## reinterpretAsFloat32,reinterpretAsFloat64 {#reinterpretasfloat32-reinterpretasfloat64}
+
+## reinterpretAsDate,reinterpretAsDateTime {#reinterpretasdate-reinterpretasdatetime}
+
+这些函数接受一个字符串,并将字符串开头的字节解释为主机字节序(little endian)的数字。如果字符串长度不足,函数会视为用必要数量的空字节填充了字符串;如果字符串比需要的长,则忽略多余的字节。Date被解释为Unix时间戳的天数,DateTime被解释为Unix时间戳。
+
+## reinterpretAsString {#reinterpretasstring}
+
+此函数接受数字、Date或DateTime,并返回一个字符串,其中包含表示主机字节序(小端)的相应值的字节。末尾的空字节会被删除。例如,UInt32类型值255是一个长度为一个字节的字符串。
+
+## reinterpretAsFixedString {#reinterpretasfixedstring}
+
+此函数接受数字、Date或DateTime,并返回包含表示主机字节序(小端)的相应值的字节的FixedString。末尾的空字节会被删除。例如,UInt32类型值255是一个长度为一个字节的FixedString。
+
+## CAST(x,t) {#type_conversion_function-cast}
+
+将'x'转换为't'数据类型。同时支持`CAST(x AS t)`语法。
+
+示例:
+
+``` sql
+SELECT
+    '2016-06-15 23:00:00' AS timestamp,
+    CAST(timestamp AS DateTime) AS datetime,
+    CAST(timestamp AS Date) AS date,
+    CAST(timestamp, 'String') AS string,
+    CAST(timestamp, 'FixedString(22)') AS fixed_string
+```
+
+    ┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐
+    │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │
+    └─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘
+
+将参数转换为FixedString(N),仅适用于String或FixedString(N)类型的参数。
+
+支持将数据转换为[可为空](../../sql_reference/functions/type_conversion_functions.md)。例如:
+
+    SELECT toTypeName(x) FROM t_null
+
+    ┌─toTypeName(x)─┐
+    │ Int8          │
+    │ Int8          │
+    └───────────────┘
+
+    SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null
+
+    ┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐
+    │ Nullable(UInt16)                        │
+    │ Nullable(UInt16)                        │
+    └─────────────────────────────────────────┘
+
+## toIntervalYear,toIntervalQuarter,toIntervalMonth,toIntervalWeek,toIntervalDay,toIntervalHour,toIntervalMinute,toIntervalSecond {#function-tointerval}
+
+将数字类型参数转换为Interval类型(时间区间)。
+Interval类型实际上是非常有用的,您可以使用此类型的数据直接与Date或DateTime执行算术运算。同时,ClickHouse为Interval类型数据的声明提供了更方便的语法。例如:
+
+``` sql
+WITH
+    toDate('2019-01-01') AS date,
+    INTERVAL 1 WEEK AS interval_week,
+    toIntervalWeek(1) AS interval_to_week
+SELECT
+    date + interval_week,
+    date + interval_to_week
+```
+
+    ┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐
+    │                2019-01-08 │                   2019-01-08 │
+    └───────────────────────────┴──────────────────────────────┘
+
+## parseDateTimeBestEffort {#type_conversion_functions-parsedatetimebesteffort}
+
+将字符串类型参数解析为Date或DateTime类型。
+与toDate和toDateTime不同,parseDateTimeBestEffort可以处理更复杂的日期格式。
+有关详细信息,请参阅链接:[复杂日期格式](https://xkcd.com/1179/)。
+
+## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull}
+
+与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回null。
+
+## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero}
+
+与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回零Date或零DateTime。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/)
diff --git a/docs/zh/sql_reference/functions/url_functions.md b/docs/zh/sql_reference/functions/url_functions.md
new file mode 100644
index 00000000000..53295221e51
--- /dev/null
+++ b/docs/zh/sql_reference/functions/url_functions.md
@@ -0,0 +1,119 @@
+
+# URL函数 {#urlhan-shu}
+
+所有这些函数都不遵循RFC。它们被最大程度简化以提高性能。
+
+## URL截取函数 {#urljie-qu-han-shu}
+
+如果URL中没有要截取的内容则返回空字符串。
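+
+下面是一个概览性的示例(示意性的,非官方文档原有示例;其中用到的各个截取函数将在下文逐一介绍):
+
+``` sql
+SELECT
+    protocol(url)    AS proto,   -- 'https'
+    domain(url)      AS host,    -- 'example.com'
+    path(url)        AS p,       -- '/top/news.html'
+    queryString(url) AS qs       -- 'page=2'(不包含问号与fragment)
+FROM (SELECT 'https://example.com/top/news.html?page=2#comments' AS url)
+```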
+### 协议 {#protocol} + +返回URL的协议。例如: http、ftp、mailto、magnet… + +### 域 {#domain} + +获取域名。 + +### domainwithoutww {#domainwithoutwww} + +返回域名并删除第一个'www.'。 + +### topLevelDomain {#topleveldomain} + +返回顶级域名。例如:.ru。 + +### 第一重要的元素分区域 {#firstsignificantsubdomain} + +返回«第一个有效子域名»。这并不是一个标准概念,仅用于Yandex.Metrica。如果顶级域名为'com',‘net’,‘org’或者‘co’则第一个有效子域名为二级域名。否则则返回三级域名。例如,irstSignificantSubdomain (’https://news.yandex.ru/‘) = ’yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’) = ‘yandex’。一些实现细节在未来可能会进行改变。 + +### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} + +返回包含顶级域名与第一个有效子域名之间的内容(请参阅上面的内容)。 + +例如, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`. + +### 路径 {#path} + +返回URL路径。例如:`/top/news.html`,不包含请求参数。 + +### pathFull {#pathfull} + +与上面相同,但包括请求参数和fragment。例如:/top/news.html?page=2\#comments + +### 查询字符串 {#querystring} + +返回请求参数。例如:page=1&lr=213。请求参数不包含问号已经\# 以及\# 之后所有的内容。 + +### 片段 {#fragment} + +返回URL的fragment标识。fragment不包含\#。 + +### querystring andfragment {#querystringandfragment} + +返回请求参数和fragment标识。例如:page=1\#29390。 + +### extractURLParameter(URL,name) {#extracturlparameterurl-name} + +返回URL请求参数中名称为'name'的参数。如果不存在则返回一个空字符串。如果存在多个匹配项则返回第一个相匹配的。此函数假设参数名称与参数值在url中的编码方式相同。 + +### extractURLParameters(URL) {#extracturlparametersurl} + +返回一个数组,其中以name=value的字符串形式返回url的所有请求参数。不以任何编码解析任何内容。 + +### extractURLParameterNames(URL) {#extracturlparameternamesurl} + +返回一个数组,其中包含url的所有请求参数的名称。不以任何编码解析任何内容。 + +### URLHierarchy(URL) {#urlhierarchyurl} + +返回一个数组,其中包含以/切割的URL的所有内容。?将被包含在URL路径以及请求参数中。连续的分割符号被记为一个。 + +### Urlpathhierarchy(URL) {#urlpathhierarchyurl} + +与上面相同,但结果不包含协议和host部分。 /element(root)不包括在内。该函数用于在Yandex.Metric中实现导出URL的树形结构。 + + URLPathHierarchy('https://example.com/browse/CONV-6788') = + [ + '/browse/', + '/browse/CONV-6788' + ] + +### decodeURLComponent(URL) {#decodeurlcomponenturl} + +返回已经解码的URL。 +例如: + +``` sql +SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS DecodedURL; +``` + + ┌─DecodedURL─────────────────────────────┐ + │ http://127.0.0.1:8123/?query=SELECT 1; │ + └────────────────────────────────────────┘ + +## 删除URL中的部分内容 {#shan-chu-urlzhong-de-bu-fen-nei-rong} + +如果URL中不包含指定的部分,则URL不变。 + +### cutWWW {#cutwww} + +删除开始的第一个'www.'。 + +### cutQueryString {#cutquerystring} + +删除请求参数。问号也将被删除。 + +### cutFragment {#cutfragment} + +删除fragment标识。\#同样也会被删除。 + +### cutquerystring andfragment {#cutquerystringandfragment} + +删除请求参数以及fragment标识。问号以及\#也会被删除。 + +### cutURLParameter(URL,name) {#cuturlparameterurl-name} + +删除URL中名称为'name'的参数。改函数假设参数名称以及参数值经过URL相同的编码。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/zh/sql_reference/functions/uuid_functions.md b/docs/zh/sql_reference/functions/uuid_functions.md new file mode 100644 index 00000000000..306a55f08a0 --- /dev/null +++ b/docs/zh/sql_reference/functions/uuid_functions.md @@ -0,0 +1,108 @@ + +# UUID函数 {#uuidhan-shu} + +下面列出了所有UUID的相关函数 + +## generateuidv4 {#uuid-function-generate} + +生成一个UUID([版本4](https://tools.ietf.org/html/rfc4122#section-4.4))。 + +``` sql +generateUUIDv4() +``` + +**返回值** + +UUID类型的值。 + +**使用示例** + +此示例演示如何在表中创建UUID类型的列,并对其写入数据。 + +``` sql +:) CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog + +:) INSERT INTO t_uuid SELECT generateUUIDv4() + +:) SELECT * FROM t_uuid + +┌────────────────────────────────────x─┐ +│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 │ +└──────────────────────────────────────┘ +``` + +## toUUID(x) {#touuid-x} + +将String类型的值转换为UUID类型的值。 + +``` sql +toUUID(String) +``` + 
+**返回值**
+
+UUID类型的值
+
+**使用示例**
+
+``` sql
+:) SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid
+
+┌─────────────────────────────────uuid─┐
+│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │
+└──────────────────────────────────────┘
+```
+
+## UUIDStringToNum {#uuidstringtonum}
+
+接受一个String类型的值,其中包含36个字符且格式为`xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`,将其转换为UUID的数值并以[固定字符串(16)](../../sql_reference/functions/uuid_functions.md)将其返回。
+
+``` sql
+UUIDStringToNum(String)
+```
+
+**返回值**
+
+固定字符串(16)
+
+**使用示例**
+
+``` sql
+:) SELECT
+    '612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid,
+    UUIDStringToNum(uuid) AS bytes
+
+┌─uuid─────────────────────────────────┬─bytes────────────┐
+│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │
+└──────────────────────────────────────┴──────────────────┘
+```
+
+## UUIDNumToString {#uuidnumtostring}
+
+接受一个[固定字符串(16)](../../sql_reference/functions/uuid_functions.md)类型的值,返回其对应的String表现形式。
+
+``` sql
+UUIDNumToString(FixedString(16))
+```
+
+**返回值**
+
+字符串。
+
+**使用示例**
+
+``` sql
+SELECT
+    'a/<@];!~p{jTj={)' AS bytes,
+    UUIDNumToString(toFixedString(bytes, 16)) AS uuid
+
+┌─bytes────────────┬─uuid─────────────────────────────────┐
+│ a/<@];!~p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │
+└──────────────────┴──────────────────────────────────────┘
+```
+
+## 另请参阅 {#ling-qing-can-yue}
+
+-   [dictGetUUID](ext_dict_functions.md)
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/)
diff --git a/docs/zh/sql_reference/functions/ym_dict_functions.md b/docs/zh/sql_reference/functions/ym_dict_functions.md
new file mode 100644
index 00000000000..87492ec9d12
--- /dev/null
+++ b/docs/zh/sql_reference/functions/ym_dict_functions.md
@@ -0,0 +1,121 @@
+
+# Yandex.Metrica字典函数 {#functions-for-working-with-yandex-metrica-dictionaries}
+
+为了使下面的函数正常工作,服务器配置必须指定获取所有Yandex.Metrica字典的路径和地址。这些字典在任一函数的首次调用时加载。如果无法加载引用列表,则会引发异常。
+
+有关创建引用列表的信息,请参阅«字典»一节。
+
+## 多个地理数据库 {#multiple-geobases}
+
+ClickHouse支持同时使用多个备选地理数据库(区域层次结构),以支持对某些地区归属国家的不同观点。
+
+‘clickhouse-server’配置文件指定了包含区域层次结构的文件:`/opt/geo/regions_hierarchy.txt`
+
+除了这个文件,它还会搜索附近名称中带有\_符号和任意后缀(位于文件扩展名之前)的文件。
+例如,如果存在,它还会找到文件 `/opt/geo/regions_hierarchy_ua.txt`。
+
+`ua` 被称为字典键。 对于没有后缀的字典,键是空字符串。
+
+所有字典都会在运行时重新加载(按builtin\_dictionaries\_reload\_interval配置参数定义的秒数周期性重新加载,默认每小时一次)。但是,可用字典的列表在服务器启动时只定义一次。
+
+所有用于处理区域的函数末尾都有一个可选参数——字典键,称为地理数据库(geobase)。
+示例:
+
+    regionToCountry(RegionID) – 使用默认字典:/opt/geo/regions_hierarchy.txt
+    regionToCountry(RegionID, '') – 使用默认字典:/opt/geo/regions_hierarchy.txt
+    regionToCountry(RegionID, 'ua') – 使用'ua'键对应的字典:/opt/geo/regions_hierarchy_ua.txt
+
+### regionToCity(id\[, geobase\]) {#regiontocityid-geobase}
+
+接受一个UInt32数字——Yandex地理数据库中的区域ID。如果该区域是一个城市或城市的一部分,则返回相应城市的区域ID;否则返回0。
+
+### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase}
+
+将区域转换为地区(地理数据库中的类型5)。其他方面与‘regionToCity’函数相同。
+
+``` sql
+SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua'))
+FROM system.numbers
+LIMIT 15
+```
+
+    ┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐
+    │                                                      │
+    │ Moscow and Moscow region                             │
+    │ St. Petersburg and Leningrad region                  │
+    │ Belgorod region                                      │
+    │ Ivanovsk region                                      │
+    │ Kaluga region                                        │
+    │ Kostroma region                                      │
+    │ Kursk region                                         │
+    │ Lipetsk region                                       │
+    │ Orlov region                                         │
+    │ Ryazan region                                        │
+    │ Smolensk region                                      │
+    │ Tambov region                                        │
+    │ Tver region                                          │
+    │ Tula region                                          │
+    └──────────────────────────────────────────────────────┘
+
+### regionToDistrict(id\[, geobase\]) {#regiontodistrictid-geobase}
+
+将区域转换为联邦区(地理数据库中的类型4)。其他方面与‘regionToCity’函数相同。
+
+``` sql
+SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua'))
+FROM system.numbers
+LIMIT 15
+```
+
+    ┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐
+    │                                                          │
+    │ Central federal district                                 │
+    │ Northwest federal district                               │
+    │ South federal district                                   │
+    │ North Caucases federal district                          │
+    │ Privolga federal district                                │
+    │ Ural federal district                                    │
+    │ Siberian federal district                                │
+    │ Far East federal district                                │
+    │ Scotland                                                 │
+    │ Faroe Islands                                            │
+    │ Flemish region                                           │
+    │ Brussels capital region                                  │
+    │ Wallonia                                                 │
+    │ Federation of Bosnia and Herzegovina                     │
+    └──────────────────────────────────────────────────────────┘
+
+### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase}
+
+将区域转换为国家。其他方面与‘regionToCity’函数相同。
+示例: `regionToCountry(toUInt32(213)) = 225`,将莫斯科(213)转换为俄罗斯(225)。
+
+### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase}
+
+将区域转换为大陆。其他方面与‘regionToCity’函数相同。
+示例: `regionToContinent(toUInt32(213)) = 10001`,将莫斯科(213)转换为欧亚大陆(10001)。
+
+### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase}
+
+获取区域的人口。
+人口可以记录在地理数据库的文件中,请参阅«外部字典»一节。
+如果没有为该区域记录人口,则返回0。
+在Yandex地理数据库中,可能会为子区域记录人口,但不会为父区域记录人口。
+
+### regionIn(lhs,rhs\[, geobase\]) {#regioninlhs-rhs-geobase}
+
+检查‘lhs’区域是否属于‘rhs’区域。如果属于,则返回等于1的UInt8数字;如果不属于,则返回0。
+该关系是自反的——任何区域也属于它自己。
+
+### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase}
+
+接受一个UInt32数字——Yandex地理数据库中的区域ID。返回一个区域ID数组,由传入的区域及其链上的所有父级区域组成。
+示例: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`.
+
+### regionToName(id\[, lang\]) {#regiontonameid-lang}
+
+接受一个UInt32数字——Yandex地理数据库中的区域ID。可以将语言名称字符串作为第二个参数传入,支持的语言有:ru, en, ua, uk, by, kz, tr。如果省略第二个参数,则使用语言‘ru’。如果不支持该语言,则抛出异常。返回一个字符串——对应语言下的区域名称。如果指定ID的区域不存在,则返回空字符串。
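+
+一个示意性的用法示例(非官方文档原有示例;实际返回值取决于服务器加载的地理数据库):
+
+    SELECT regionToName(toUInt32(213), 'en')  -- 若加载了默认geobase,预期返回 'Moscow'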
+ +`ua` 和 `uk` 都意味着乌克兰。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) diff --git a/docs/zh/sql_reference/index.md b/docs/zh/sql_reference/index.md new file mode 100644 index 00000000000..aed704442ab --- /dev/null +++ b/docs/zh/sql_reference/index.md @@ -0,0 +1,18 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "SQL\u53C2\u8003" +toc_hidden: true +toc_priority: 28 +toc_title: "\u9690\u85CF" +--- + +# SQL参考 {#sql-reference} + +- [SELECT](statements/select.md) +- [INSERT INTO](statements/insert_into.md) +- [CREATE](statements/create.md) +- [ALTER](statements/alter.md#query_language_queries_alter) +- [其他类型的查询](statements/misc.md) + +[原始文章](https://clickhouse.tech/docs/en/query_language/) diff --git a/docs/zh/sql_reference/operators.md b/docs/zh/sql_reference/operators.md new file mode 100644 index 00000000000..fb5c7d0ee38 --- /dev/null +++ b/docs/zh/sql_reference/operators.md @@ -0,0 +1,264 @@ + +# 操作符 {#cao-zuo-fu} + +所有的操作符(运算符)都会在查询时依据他们的优先级及其结合顺序在被解析时转换为对应的函数。下面按优先级从高到低列出各组运算符及其对应的函数: + +## 下标运算符 {#xia-biao-yun-suan-fu} + +`a[N]` – 数组中的第N个元素; 对应函数 `arrayElement(a, N)` + +`a.N` – 元组中第N个元素; 对应函数 `tupleElement(a, N)` + +## 负号 {#fu-hao} + +`-a` – 对应函数 `negate(a)` + +## 乘号、除号和取余 {#cheng-hao-chu-hao-he-qu-yu} + +`a * b` – 对应函数 `multiply(a, b)` + +`a / b` – 对应函数 `divide(a, b)` + +`a % b` – 对应函数 `modulo(a, b)` + +## 加号和减号 {#jia-hao-he-jian-hao} + +`a + b` – 对应函数 `plus(a, b)` + +`a - b` – 对应函数 `minus(a, b)` + +## 关系运算符 {#guan-xi-yun-suan-fu} + +`a = b` – 对应函数 `equals(a, b)` + +`a == b` – 对应函数 `equals(a, b)` + +`a != b` – 对应函数 `notEquals(a, b)` + +`a <> b` – 对应函数 `notEquals(a, b)` + +`a <= b` – 对应函数 `lessOrEquals(a, b)` + +`a >= b` – 对应函数 `greaterOrEquals(a, b)` + +`a < b` – 对应函数 `less(a, b)` + +`a > b` – 对应函数 `greater(a, b)` + +`a LIKE s` – 对应函数 `like(a, b)` + +`a NOT LIKE s` – 对应函数 `notLike(a, b)` + +`a BETWEEN b AND c` – 等价于 `a >= b AND a <= c` + +## 集合关系运算符 {#ji-he-guan-xi-yun-suan-fu} + +*详见此节 [IN 相关操作符](statements/select.md#select-in-operators) 。* + +`a IN ...` – 对应函数 `in(a, b)` + +`a NOT IN ...` – 对应函数 `notIn(a, b)` + +`a GLOBAL IN ...` – 对应函数 `globalIn(a, b)` + +`a GLOBAL NOT IN ...` – 对应函数 `globalNotIn(a, b)` + +## 逻辑非 {#luo-ji-fei} + +`NOT a` – 对应函数 `not(a)` + +## 逻辑与 {#luo-ji-yu} + +`a AND b` – 对应函数`and(a, b)` + +## 逻辑或 {#luo-ji-huo} + +`a OR b` – 对应函数 `or(a, b)` + +## 条件运算符 {#tiao-jian-yun-suan-fu} + +`a ? b : c` – 对应函数 `if(a, b, c)` + +注意: + +条件运算符会先计算表达式b和表达式c的值,再根据表达式a的真假,返回相应的值。如果表达式b和表达式c是 [arrayJoin()](../sql_reference/functions/array_join.md#functions_arrayjoin) 函数,则不管表达式a是真是假,每行都会被复制展开。 + +## 使用日期和时间的操作员 {#operators-datetime} + +### EXTRACT {#operator-extract} + +``` sql +EXTRACT(part FROM date); +``` + +从给定日期中提取部件。 例如,您可以从给定日期检索一个月,或从时间检索一秒钟。 + +该 `part` 参数指定要检索的日期部分。 以下值可用: + +- `DAY` — The day of the month. Possible values: 1–31. +- `MONTH` — The number of a month. Possible values: 1–12. +- `YEAR` — The year. +- `SECOND` — The second. Possible values: 0–59. +- `MINUTE` — The minute. Possible values: 0–59. +- `HOUR` — The hour. Possible values: 0–23. 
+ +该 `part` 参数不区分大小写。 + +该 `date` 参数指定要处理的日期或时间。 无论是 [日期](../sql_reference/data_types/date.md) 或 [日期时间](../sql_reference/data_types/datetime.md) 支持类型。 + +例: + +``` sql +SELECT EXTRACT(DAY FROM toDate('2017-06-15')); +SELECT EXTRACT(MONTH FROM toDate('2017-06-15')); +SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); +``` + +在下面的例子中,我们创建一个表,并在其中插入一个值 `DateTime` 类型。 + +``` sql +CREATE TABLE test.Orders +( + OrderId UInt64, + OrderName String, + OrderDate DateTime +) +ENGINE = Log; +``` + +``` sql +INSERT INTO test.Orders VALUES (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44')); +``` + +``` sql +SELECT + toYear(OrderDate) AS OrderYear, + toMonth(OrderDate) AS OrderMonth, + toDayOfMonth(OrderDate) AS OrderDay, + toHour(OrderDate) AS OrderHour, + toMinute(OrderDate) AS OrderMinute, + toSecond(OrderDate) AS OrderSecond +FROM test.Orders; +``` + +``` text +┌─OrderYear─┬─OrderMonth─┬─OrderDay─┬─OrderHour─┬─OrderMinute─┬─OrderSecond─┐ +│ 2008 │ 10 │ 11 │ 13 │ 23 │ 44 │ +└───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ +``` + +你可以看到更多的例子 [测试](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). + +### INTERVAL {#operator-interval} + +创建一个 [间隔](../sql_reference/operators.md)-应在算术运算中使用的类型值 [日期](../sql_reference/data_types/date.md) 和 [日期时间](../sql_reference/data_types/datetime.md)-类型值。 + +示例: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +**另请参阅** + +- [间隔](../sql_reference/operators.md) 数据类型 +- [toInterval](../sql_reference/operators.md#function-tointerval) 类型转换函数 + +## CASE条件表达式 {#operator_case} + +``` sql +CASE [x] + WHEN a THEN b + [WHEN ... THEN ...] + [ELSE c] +END +``` + +如果指定了 `x` ,该表达式会转换为 `transform(x, [a, ...], [b, ...], c)` 函数。否则转换为 `multiIf(a, b, ..., c)` + +如果该表达式中没有 `ELSE c` 子句,则默认值就是 `NULL` + +但 `transform` 函数不支持 `NULL` + +## 连接运算符 {#lian-jie-yun-suan-fu} + +`s1 || s2` – 对应函数 `concat(s1, s2)` + +## 创建 Lambda 函数 {#chuang-jian-lambda-han-shu} + +`x -> expr` – 对应函数 `lambda(x, expr)` + +接下来的这些操作符因为其本身是括号没有优先级: + +## 创建数组 {#chuang-jian-shu-zu} + +`[x1, ...]` – 对应函数 `array(x1, ...)` + +## 创建元组 {#chuang-jian-yuan-zu} + +`(x1, x2, ...)` – 对应函数 `tuple(x2, x2, ...)` + +## 结合方式 {#jie-he-fang-shi} + +所有的同级操作符从左到右结合。例如, `1 + 2 + 3` 会转换成 `plus(plus(1, 2), 3)`。 +所以,有时他们会跟我们预期的不太一样。例如, `SELECT 4 > 2 > 3` 的结果是0。 + +为了高效, `and` 和 `or` 函数支持任意多参数,一连串的 `AND` 和 `OR` 运算符会转换成其对应的单个函数。 + +## 判断是否为 `NULL` {#pan-duan-shi-fou-wei-null} + +ClickHouse 支持 `IS NULL` 和 `IS NOT NULL` 。 + +### IS NULL {#operator-is-null} + +- 对于 [可为空](../sql_reference/operators.md) 类型的值, `IS NULL` 会返回: + - `1` 值为 `NULL` + - `0` 否则 +- 对于其他类型的值, `IS NULL` 总会返回 `0` + + + +``` bash +:) SELECT x+100 FROM t_null WHERE y IS NULL + +SELECT x + 100 +FROM t_null +WHERE isNull(y) + +┌─plus(x, 100)─┐ +│ 101 │ +└──────────────┘ + +1 rows in set. Elapsed: 0.002 sec. +``` + +### IS NOT NULL {#is-not-null} + +- 对于 [可为空](../sql_reference/operators.md) 类型的值, `IS NOT NULL` 会返回: + - `0` 值为 `NULL` + - `1` 否则 +- 对于其他类型的值,`IS NOT NULL` 总会返回 `1` + + + +``` bash +:) SELECT * FROM t_null WHERE y IS NOT NULL + +SELECT * +FROM t_null +WHERE isNotNull(y) + +┌─x─┬─y─┐ +│ 2 │ 3 │ +└───┴───┘ + +1 rows in set. Elapsed: 0.002 sec. 
+``` + +[来源文章](https://clickhouse.tech/docs/en/query_language/operators/) diff --git a/docs/zh/sql_reference/statements/alter.md b/docs/zh/sql_reference/statements/alter.md new file mode 100644 index 00000000000..ee8911edea2 --- /dev/null +++ b/docs/zh/sql_reference/statements/alter.md @@ -0,0 +1,505 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 36 +toc_title: ALTER +--- + +## ALTER {#query_language_queries_alter} + +该 `ALTER` 查询仅支持 `*MergeTree` 表,以及 `Merge`和`Distributed`. 查询有几个变体。 + +### 列操作 {#column-manipulations} + +更改表结构。 + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ... +``` + +在查询中,指定一个或多个逗号分隔操作的列表。 +每个操作都是对列的操作。 + +支持以下操作: + +- [ADD COLUMN](#alter_add-column) — Adds a new column to the table. +- [DROP COLUMN](#alter_drop-column) — Deletes the column. +- [CLEAR COLUMN](#alter_clear-column) — Resets column values. +- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column. +- [MODIFY COLUMN](#alter_modify-column) — Changes column's type, default expression and TTL. + +下面详细描述这些动作。 + +#### ADD COLUMN {#alter_add-column} + +``` sql +ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after] +``` + +将一个新列添加到表中,并指定 `name`, `type`, [`codec`](create.md#codecs) 和 `default_expr` (请参阅部分 [默认表达式](create.md#create-default-values)). + +如果 `IF NOT EXISTS` 如果列已经存在,则查询不会返回错误。 如果您指定 `AFTER name_after` (另一列的名称),该列被添加在表列表中指定的一列之后。 否则,该列将添加到表的末尾。 请注意,没有办法将列添加到表的开头。 为了一系列的行动, `name_after` 可将该名称一栏,加入一个以前的行动。 + +添加列只是更改表结构,而不对数据执行任何操作。 数据不会出现在磁盘上后 `ALTER`. 如果从表中读取某一列的数据缺失,则将使用默认值填充该列(如果存在默认表达式,则执行默认表达式,或使用零或空字符串)。 合并数据部分后,该列将出现在磁盘上(请参阅 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)). + +这种方法使我们能够完成 `ALTER` 即时查询,不增加旧数据量。 + +示例: + +``` sql +ALTER TABLE visits ADD COLUMN browser String AFTER user_id +``` + +#### DROP COLUMN {#alter_drop-column} + +``` sql +DROP COLUMN [IF EXISTS] name +``` + +删除具有名称的列 `name`. 如果 `IF EXISTS` 如果指定了子句,如果该列不存在,则查询不会返回错误。 + +从文件系统中删除数据。 由于这将删除整个文件,查询几乎立即完成。 + +示例: + +``` sql +ALTER TABLE visits DROP COLUMN browser +``` + +#### CLEAR COLUMN {#alter_clear-column} + +``` sql +CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name +``` + +重置指定分区的列中的所有数据。 了解有关设置分区名称的详细信息 [如何指定分区表达式](#alter-how-to-specify-part-expr). + +如果 `IF EXISTS` 如果指定了子句,如果该列不存在,则查询不会返回错误。 + +示例: + +``` sql +ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() +``` + +#### COMMENT COLUMN {#alter_comment-column} + +``` sql +COMMENT COLUMN [IF EXISTS] name 'comment' +``` + +向列添加注释。 如果 `IF EXISTS` 如果指定了子句,如果该列不存在,则查询不会返回错误。 + +每列可以有一个注释。 如果列的注释已存在,则新注释将复盖以前的注释。 + +注释存储在 `comment_expression` 由返回的列 [DESCRIBE TABLE](misc.md#misc-describe-table) 查询。 + +示例: + +``` sql +ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' +``` + +#### MODIFY COLUMN {#alter_modify-column} + +``` sql +MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] +``` + +此查询更改 `name` 列属性: + +- 类型 + +- 默认表达式 + +- TTL + + For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl). + +如果 `IF EXISTS` 如果指定了子句,如果该列不存在,则查询不会返回错误。 + +更改类型时,值将被转换为 [toType](../../sql_reference/functions/type_conversion_functions.md) 函数被应用到它们。 如果仅更改默认表达式,则查询不会执行任何复杂的操作,并且几乎立即完成。 + +示例: + +``` sql +ALTER TABLE visits MODIFY COLUMN browser Array(String) +``` + +Changing the column type is the only complex action – it changes the contents of files with data. 
For large tables, this may take a long time. + +有几个处理阶段: + +- 准备具有修改数据的临时(新)文件。 +- 重命名旧文件。 +- 将临时(新)文件重命名为旧名称。 +- 删除旧文件。 + +只有第一阶段需要时间。 如果在此阶段出现故障,则不会更改数据。 +如果在其中一个连续阶段中出现故障,可以手动恢复数据。 例外情况是,如果旧文件从文件系统中删除,但新文件的数据没有写入磁盘并丢失。 + +该 `ALTER` 复制更改列的查询。 这些指令保存在ZooKeeper中,然后每个副本应用它们。 全部 `ALTER` 查询以相同的顺序运行。 查询等待对其他副本完成适当的操作。 但是,更改复制表中的列的查询可能会中断,并且所有操作都将异步执行。 + +#### 更改查询限制 {#alter-query-limitations} + +该 `ALTER` query允许您在嵌套数据结构中创建和删除单独的元素(列),但不能创建整个嵌套数据结构。 要添加嵌套数据结构,可以添加名称如下的列 `name.nested_name` 和类型 `Array(T)`. 嵌套数据结构等效于名称在点之前具有相同前缀的多个数组列。 + +不支持删除主键或采样键中的列(在主键中使用的列 `ENGINE` 表达式)。 只有在此更改不会导致数据被修改时,才可以更改主键中包含的列的类型(例如,允许您向枚举添加值或更改类型 `DateTime` 到 `UInt32`). + +如果 `ALTER` 查询不足以使您需要的表更改,您可以创建一个新的表,使用 [INSERT SELECT](insert_into.md#insert_query_insert-select) 查询,然后使用切换表 [RENAME](misc.md#misc_operations-rename) 查询并删除旧表。 您可以使用 [ツ环板-ョツ嘉ッツ偲](../../operations/utilities/clickhouse-copier.md) 作为替代 `INSERT SELECT` 查询。 + +该 `ALTER` 查询阻止对表的所有读取和写入。 换句话说,如果长 `SELECT` 正在运行的时间 `ALTER` 查询,该 `ALTER` 查询将等待它完成。 同时,对同一个表的所有新查询将等待 `ALTER` 正在运行。 + +对于本身不存储数据的表(例如 `Merge` 和 `Distributed`), `ALTER` 只是改变了表结构,并且不改变从属表的结构。 例如,当运行ALTER时 `Distributed` 表,你还需要运行 `ALTER` 对于所有远程服务器上的表。 + +### 使用键表达式进行操作 {#manipulations-with-key-expressions} + +支持以下命令: + +``` sql +MODIFY ORDER BY new_expression +``` + +它只适用于在表 [`MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) 家庭(包括 +[复制](../../engines/table_engines/mergetree_family/replication.md) 表)。 该命令更改 +[排序键](../../engines/table_engines/mergetree_family/mergetree.md) 表 +到 `new_expression` (表达式或表达式元组)。 主键保持不变。 + +该命令是轻量级的,因为它只更改元数据。 要保持该数据部分的属性 +行按排序键表达式排序您不能添加包含现有列的表达式 +到排序键(仅由列添加 `ADD COLUMN` 命令在同一个 `ALTER` 查询)。 + +### 使用数据跳过索引进行操作 {#manipulations-with-data-skipping-indices} + +它只适用于在表 [`*MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) 家庭(包括 +[复制](../../engines/table_engines/mergetree_family/replication.md) 表)。 以下操作 +可用: + +- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` -将索引描述添加到表元数据。 + +- `ALTER TABLE [db].name DROP INDEX name` -从表元数据中删除索引描述并从磁盘中删除索引文件。 + +这些命令是轻量级的,因为它们只更改元数据或删除文件。 +此外,它们被复制(通过ZooKeeper同步索引元数据)。 + +### 使用约束进行操作 {#manipulations-with-constraints} + +查看更多 [制约因素](create.md#constraints) + +可以使用以下语法添加或删除约束: + +``` sql +ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression; +ALTER TABLE [db].name DROP CONSTRAINT constraint_name; +``` + +查询将从表中添加或删除有关约束的元数据,以便立即处理它们。 + +约束检查 *不会被执行* 在现有数据上,如果它被添加。 + +复制表上的所有更改都广播到ZooKeeper,因此将应用于其他副本。 + +### 操作与分区和零件 {#alter_manipulations-with-partitions} + +下面的操作与 [分区](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) 可用: + +- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` 目录和忘记它。 +- [DROP PARTITION](#alter_drop-partition) – Deletes a partition. +- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` 目录到表。 +- [REPLACE PARTITION](#alter_replace-partition) -将数据分区从一个表复制到另一个表。 +- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds. +- [REPLACE PARTITION](#alter_replace-partition) -将数据分区从一个表复制到另一个表并替换。 +- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) (\#alter\_move\_to\_table-partition)-将数据分区从一个表移动到另一个表。 +- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) -重置分区中指定列的值。 +- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) -重置分区中指定的二级索引。 +- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition. 
+- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server. +- [MOVE PARTITION\|PART](#alter_move-partition) – Move partition/data part to another disk or volume. + + + +#### 分离分区{\#alter\_detach-partition} {#detach-partition-alter-detach-partition} + +``` sql +ALTER TABLE table_name DETACH PARTITION partition_expr +``` + +将指定分区的所有数据移动到 `detached` 目录。 服务器会忘记分离的数据分区,就好像它不存在一样。 服务器不会知道这个数据,直到你做 [ATTACH](#alter_attach-partition) 查询。 + +示例: + +``` sql +ALTER TABLE visits DETACH PARTITION 201901 +``` + +阅读有关在一节中设置分区表达式的信息 [如何指定分区表达式](#alter-how-to-specify-part-expr). + +执行查询后,您可以对查询中的数据进行任何操作 `detached` directory — delete it from the file system, or just leave it. + +This query is replicated – it moves the data to the `detached` 所有副本上的目录。 请注意,您只能对领导副本执行此查询。 要确定副本是否为领导者,请执行 `SELECT` 查询到 [系统。副本](../../operations/system_tables.md#system_tables-replicas) 桌子 或者,它更容易使 `DETACH` 对所有副本进行查询-除了领导副本之外,所有副本都会引发异常。 + +#### DROP PARTITION {#alter_drop-partition} + +``` sql +ALTER TABLE table_name DROP PARTITION partition_expr +``` + +从表中删除指定的分区。 此查询将分区标记为非活动分区,并在大约10分钟内完全删除数据。 + +阅读有关在一节中设置分区表达式的信息 [如何指定分区表达式](#alter-how-to-specify-part-expr). + +The query is replicated – it deletes data on all replicas. + +#### DROP DETACHED PARTITION\|PART {#alter_drop-detached} + +``` sql +ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr +``` + +从中删除指定分区的指定部分或所有部分 `detached`. +了解有关在一节中设置分区表达式的详细信息 [如何指定分区表达式](#alter-how-to-specify-part-expr). + +#### ATTACH PARTITION\|PART {#alter_attach-partition} + +``` sql +ALTER TABLE table_name ATTACH PARTITION|PART partition_expr +``` + +将数据从 `detached` 目录。 可以为整个分区或单独的部分添加数据。 例: + +``` sql +ALTER TABLE visits ATTACH PARTITION 201901; +ALTER TABLE visits ATTACH PART 201901_2_2_0; +``` + +了解有关在一节中设置分区表达式的详细信息 [如何指定分区表达式](#alter-how-to-specify-part-expr). + +此查询被复制。 副本发起程序检查是否有数据在 `detached` 目录。 如果数据存在,则查询将检查其完整性。 如果一切正确,则查询将数据添加到表中。 所有其他副本都从副本发起程序下载数据。 + +所以你可以把数据到 `detached` 在一个副本上的目录,并使用 `ALTER ... ATTACH` 查询以将其添加到所有副本上的表中。 + +#### ATTACH PARTITION FROM {#alter_attach-partition-from} + +``` sql +ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 +``` + +此查询将数据分区从 `table1` 到 `table2` 将数据添加到存在 `table2`. 请注意,数据不会从中删除 `table1`. + +要使查询成功运行,必须满足以下条件: + +- 两个表必须具有相同的结构。 +- 两个表必须具有相同的分区键。 + +#### REPLACE PARTITION {#alter_replace-partition} + +``` sql +ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 +``` + +此查询将数据分区从 `table1` 到 `table2` 并替换在现有的分区 `table2`. 请注意,数据不会从中删除 `table1`. + +要使查询成功运行,必须满足以下条件: + +- 两个表必须具有相同的结构。 +- 两个表必须具有相同的分区键。 + +#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition} + +``` sql +ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest +``` + +此查询将数据分区从 `table_source` 到 `table_dest` 删除数据 `table_source`. + +要使查询成功运行,必须满足以下条件: + +- 两个表必须具有相同的结构。 +- 两个表必须具有相同的分区键。 +- 两个表必须是相同的引擎系列。 (已复制或未复制) +- 两个表必须具有相同的存储策略。 + +#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} + +``` sql +ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr +``` + +重置分区中指定列中的所有值。 如果 `DEFAULT` 创建表时确定了子句,此查询将列值设置为指定的默认值。 + +示例: + +``` sql +ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902 +``` + +#### FREEZE PARTITION {#alter_freeze-partition} + +``` sql +ALTER TABLE table_name FREEZE [PARTITION partition_expr] +``` + +此查询创建指定分区的本地备份。 如果 `PARTITION` 子句被省略,查询一次创建所有分区的备份。 + +!!! note "注" + 在不停止服务器的情况下执行整个备份过程。 + +请注意,对于旧式表,您可以指定分区名称的前缀(例如, ‘2019’)-然后查询为所有相应的分区创建备份。 阅读有关在一节中设置分区表达式的信息 [如何指定分区表达式](#alter-how-to-specify-part-expr). 
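+
+一个最小的使用示例(示意性的,假设存在一个按月分区的`visits`表,`visits`仅为示例表名):
+
+``` sql
+ALTER TABLE visits FREEZE PARTITION 201902; -- 备份单个分区
+ALTER TABLE visits FREEZE;                  -- 省略PARTITION子句则一次备份所有分区
+```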
+ +在执行时,对于数据快照,查询将创建指向表数据的硬链接。 硬链接被放置在目录中 `/var/lib/clickhouse/shadow/N/...`,哪里: + +- `/var/lib/clickhouse/` 是配置中指定的工作ClickHouse目录。 +- `N` 是备份的增量编号。 + +!!! note "注" + 如果您使用 [用于在表中存储数据的一组磁盘](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes),该 `shadow/N` 目录出现在每个磁盘上,存储由匹配的数据部分 `PARTITION` 表达。 + +在备份内部创建的目录结构与在备份内部创建的目录结构相同 `/var/lib/clickhouse/`. 查询执行 ‘chmod’ 对于所有文件,禁止写入它们。 + +创建备份后,您可以从以下位置复制数据 `/var/lib/clickhouse/shadow/` 然后将其从本地服务器中删除。 请注意, `ALTER t FREEZE PARTITION` 不复制查询。 它仅在本地服务器上创建本地备份。 + +查询几乎立即创建备份(但首先它会等待对相应表的当前查询完成运行)。 + +`ALTER TABLE t FREEZE PARTITION` 仅复制数据,而不复制表元数据。 若要备份表元数据,请复制该文件 `/var/lib/clickhouse/metadata/database/table.sql` + +要从备份还原数据,请执行以下操作: + +1. 如果表不存在,则创建该表。 要查看查询,请使用。sql文件(替换 `ATTACH` 在它与 `CREATE`). +2. 从复制数据 `data/database/table/` 目录内的备份到 `/var/lib/clickhouse/data/database/table/detached/` 目录。 +3. 快跑 `ALTER TABLE t ATTACH PARTITION` 将数据添加到表的查询。 + +从备份还原不需要停止服务器。 + +有关备份和还原数据的详细信息,请参阅 [数据备份](../../operations/backup.md) 科。 + +#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} + +``` sql +ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr +``` + +查询的工作原理类似于 `CLEAR COLUMN`,但它重置索引而不是列数据。 + +#### FETCH PARTITION {#alter_fetch-partition} + +``` sql +ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' +``` + +从另一台服务器下载分区。 此查询仅适用于复制的表。 + +查询执行以下操作: + +1. 从指定的分片下载分区。 在 ‘path-in-zookeeper’ 您必须在ZooKeeper中指定分片的路径。 +2. 然后查询将下载的数据放到 `detached` 的目录 `table_name` 桌子 使用 [ATTACH PARTITION\|PART](#alter_attach-partition) 查询将数据添加到表中。 + +例如: + +``` sql +ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits'; +ALTER TABLE users ATTACH PARTITION 201902; +``` + +请注意: + +- 该 `ALTER ... FETCH PARTITION` 查询不被复制。 它将分区放置在 `detached` 仅在本地服务器上的目录。 +- 该 `ALTER TABLE ... ATTACH` 复制查询。 它将数据添加到所有副本。 数据被添加到从副本之一 `detached` 目录,以及其他-从相邻的副本。 + +在下载之前,系统会检查分区是否存在并且表结构匹配。 从正常副本中自动选择最合适的副本。 + +虽然查询被调用 `ALTER TABLE`,它不会更改表结构,并且不会立即更改表中可用的数据。 + +#### MOVE PARTITION\|PART {#alter_move-partition} + +将分区或数据部分移动到另一个卷或磁盘 `MergeTree`-发动机表。 看 [使用多个块设备进行数据存储](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes). + +``` sql +ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' +``` + +该 `ALTER TABLE t MOVE` 查询: + +- 不复制,因为不同的副本可能具有不同的存储策略。 +- 如果未配置指定的磁盘或卷,则返回错误。 如果无法应用存储策略中指定的数据移动条件,Query还会返回错误。 +- 可以在返回错误的情况下,当要移动的数据已经被后台进程移动时,并发 `ALTER TABLE t MOVE` 查询或作为后台数据合并的结果。 在这种情况下,用户不应该执行任何其他操作。 + +示例: + +``` sql +ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow' +ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' +``` + +#### 如何设置分区表达式 {#alter-how-to-specify-part-expr} + +您可以在以下内容中指定分区表达式 `ALTER ... PARTITION` 以不同方式查询: + +- 作为从值 `partition` 列 `system.parts` 桌子 例如, `ALTER TABLE visits DETACH PARTITION 201901`. +- 作为来自表列的表达式。 支持常量和常量表达式。 例如, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. +- 使用分区ID。 分区ID是用作文件系统和ZooKeeper中分区名称的分区的字符串标识符(如果可能的话,人类可读)。 分区ID必须在指定 `PARTITION ID` 子句,用单引号。 例如, `ALTER TABLE visits DETACH PARTITION ID '201901'`. +- 在 [ALTER ATTACH PART](#alter_attach-partition) 和 [DROP DETACHED PART](#alter_drop-detached) 查询时,要指定部件的名称,请将字符串文字与来自 `name` 列 [系统。detached\_parts](../../operations/system_tables.md#system_tables-detached_parts) 桌子 例如, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. + +指定分区时引号的使用取决于分区表达式的类型。 例如,对于 `String` 类型,你必须在引号中指定其名称 (`'`). 为 `Date` 和 `Int*` 类型不需要引号。 + +对于旧式表,您可以将分区指定为数字 `201901` 或者一个字符串 `'201901'`. 
对于类型,新样式表的语法更严格(类似于值输入格式的解析器)。 + +上述所有规则也适用于 [OPTIMIZE](misc.md#misc_operations-optimize) 查询。 如果在优化非分区表时需要指定唯一的分区,请设置表达式 `PARTITION tuple()`. 例如: + +``` sql +OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; +``` + +的例子 `ALTER ... PARTITION` 查询在测试中演示 [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) 和 [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). + +### 使用表TTL进行操作 {#manipulations-with-table-ttl} + +你可以改变 [表TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-table-ttl) 请填写以下表格: + +``` sql +ALTER TABLE table-name MODIFY TTL ttl-expression +``` + +### ALTER查询的同步性 {#synchronicity-of-alter-queries} + +对于不可复制的表,所有 `ALTER` 查询是同步执行的。 对于可复制的表,查询仅添加相应操作的说明 `ZooKeeper`,并尽快执行操作本身。 但是,查询可以等待在所有副本上完成这些操作。 + +为 `ALTER ... ATTACH|DETACH|DROP` 查询,您可以使用 `replication_alter_partitions_sync` 设置设置等待。 +可能的值: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. + +### 突变 {#alter-mutations} + +突变是允许更改或删除表中的行的ALTER查询变体。 与标准相比 `UPDATE` 和 `DELETE` 用于点数据更改的查询,mutations适用于更改表中大量行的繁重操作。 支持的 `MergeTree` 表引擎系列,包括具有复制支持的引擎。 + +现有表可以按原样进行突变(无需转换),但是在将第一次突变应用于表之后,其元数据格式将与以前的服务器版本不兼容,并且无法回退到以前的版本。 + +当前可用的命令: + +``` sql +ALTER TABLE [db.]table DELETE WHERE filter_expr +``` + +该 `filter_expr` 必须是类型 `UInt8`. 查询删除表中此表达式采用非零值的行。 + +``` sql +ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr +``` + +该 `filter_expr` 必须是类型 `UInt8`. 此查询将指定列的值更新为行中相应表达式的值。 `filter_expr` 取非零值。 使用以下命令将值转换为列类型 `CAST` 接线员 不支持更新用于计算主键或分区键的列。 + +``` sql +ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name +``` + +查询将重新生成二级索引 `name` 在分区中 `partition_name`. + +一个查询可以包含多个用逗号分隔的命令。 + +For\*MergeTree表的突变通过重写整个数据部分来执行。 没有原子性-部分被取代为突变的部分,只要他们准备好和 `SELECT` 在突变期间开始执行的查询将看到来自已经突变的部件的数据以及来自尚未突变的部件的数据。 + +突变完全按其创建顺序排序,并以该顺序应用于每个部分。 突变也使用插入进行部分排序-在提交突变之前插入到表中的数据将被突变,之后插入的数据将不会被突变。 请注意,突变不会以任何方式阻止插入。 + +Mutation查询在添加mutation条目后立即返回(如果将复制的表复制到ZooKeeper,则将非复制的表复制到文件系统)。 突变本身使用系统配置文件设置异步执行。 要跟踪突变的进度,您可以使用 [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) 桌子 即使重新启动ClickHouse服务器,成功提交的突变仍将继续执行。 一旦提交,没有办法回滚突变,但如果突变由于某种原因被卡住,可以使用 [`KILL MUTATION`](misc.md#kill-mutation) 查询。 + +已完成突变的条目不会立即删除(保留条目的数量由 `finished_mutations_to_keep` 存储引擎参数)。 旧的突变条目将被删除。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/alter/) diff --git a/docs/zh/sql_reference/statements/create.md b/docs/zh/sql_reference/statements/create.md new file mode 100644 index 00000000000..1697df692b5 --- /dev/null +++ b/docs/zh/sql_reference/statements/create.md @@ -0,0 +1,264 @@ + +## CREATE DATABASE {#create-database} + +该查询用于根据指定名称创建数据库。 + +``` sql +CREATE DATABASE [IF NOT EXISTS] db_name +``` + +数据库其实只是用于存放表的一个目录。 +如果查询中存在`IF NOT EXISTS`,则当数据库已经存在时,该查询不会返回任何错误。 + +## CREATE TABLE {#create-table-query} + +对于`CREATE TABLE`,存在以下几种方式。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... 
+
+[原始文章](https://clickhouse.tech/docs/en/query_language/alter/)
diff --git a/docs/zh/sql_reference/statements/create.md b/docs/zh/sql_reference/statements/create.md
new file mode 100644
index 00000000000..1697df692b5
--- /dev/null
+++ b/docs/zh/sql_reference/statements/create.md
@@ -0,0 +1,264 @@
+
+## CREATE DATABASE {#create-database}
+
+该查询用于根据指定名称创建数据库。
+
+``` sql
+CREATE DATABASE [IF NOT EXISTS] db_name
+```
+
+数据库其实只是用于存放表的一个目录。
+如果查询中存在`IF NOT EXISTS`,则当数据库已经存在时,该查询不会返回任何错误。
+
+## CREATE TABLE {#create-table-query}
+
+对于`CREATE TABLE`,存在以下几种方式。
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = engine
+```
+
+在指定的'db'数据库中创建一个名为'name'的表,如果查询中没有包含'db',则默认使用当前选择的数据库作为'db'。后面跟着的是包含在括号中的表结构以及表引擎的声明。
+其中表结构声明是一个包含一组列描述声明的组合。如果表引擎支持索引,那么可以在表引擎的参数中对其进行说明。
+
+在最简单的情况下,列描述是指`名称 类型`这样的子句。例如:`RegionID UInt32`。
+但是也可以为列另外定义默认值表达式(见后文)。
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine]
+```
+
+创建一个与`db2.name2`具有相同结构的表,同时你可以对其指定不同的表引擎声明。如果没有表引擎声明,则创建的表将与`db2.name2`使用相同的表引擎。
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
+```
+
+使用指定的引擎创建一个与`SELECT`子句的结果具有相同结构的表,并使用`SELECT`子句的结果填充它。
+
+以上所有情况,如果指定了`IF NOT EXISTS`,那么在该表已经存在的情况下,查询不会返回任何错误。在这种情况下,查询几乎不会做任何事情。
+
+在`ENGINE`子句后还可能存在一些其他的子句,更详细的信息可以参考 [表引擎](../../sql_reference/statements/create.md) 中关于建表的描述。
+
+### 默认值 {#create-default-values}
+
+在列描述中你可以通过以下方式之一为列指定默认表达式:`DEFAULT expr`,`MATERIALIZED expr`,`ALIAS expr`。
+示例:`URLDomain String DEFAULT domain(URL)`。
+
+如果在列描述中未定义任何默认表达式,那么系统将会根据类型设置对应的默认值,如:数值类型为零、字符串类型为空字符串、数组类型为空数组、日期类型为'0000-00-00'以及时间类型为'0000-00-00 00:00:00'。不支持使用NULL作为普通类型的默认值。
+
+如果定义了默认表达式,则可以不定义列的类型。如果没有明确定义列的类型,则使用默认表达式的类型。例如:`EventDate DEFAULT toDate(EventTime)` - 最终'EventDate'将使用'Date'作为类型。
+
+如果同时指定了默认表达式与列的类型,则将使用类型转换函数将默认表达式转换为指定的类型。例如:`Hits UInt32 DEFAULT 0`与`Hits UInt32 DEFAULT toUInt32(0)`意思相同。
+
+默认表达式可以包含常量或表的任意其他列。当创建或更改表结构时,系统将会运行检查,确保不会包含循环依赖。对于INSERT,它仅检查表达式是否是可以解析的 - 即可以从中计算出所有需要的列的默认值。
+
+`DEFAULT expr`
+
+普通的默认值,如果INSERT中不包含指定的列,那么将通过表达式计算它的默认值并填充它。
+
+`MATERIALIZED expr`
+
+物化表达式,被该表达式指定的列不能包含在INSERT的列表中,因为它总是被计算出来的。
+对于INSERT而言,不需要考虑这些列。
+另外,在SELECT查询中如果包含星号,此列不会被用来替换星号,这是因为考虑到数据转储,在使用`SELECT *`查询出的结果总能够被'INSERT'回表。
+
+`ALIAS expr`
+
+别名。这样的列不会存储在表中。
+它的值不能够通过INSERT写入,同时使用SELECT查询星号时,这些列也不会被用来替换星号。
+但是它们可以显式地用于SELECT中,在这种情况下,在查询分析中别名将被替换。
+
+当使用ALTER查询添加新列时,不会为旧数据重写这个列的值;对旧数据查询新列时,只会在查询时动态计算这个新列的值。但是如果新列的默认表达式依赖其他列的值,那么同样会加载这些依赖列的数据。
+
+如果你向表中添加一个新列,并在之后的一段时间后修改它的默认表达式,则旧数据中的值将会被改变。请注意,在运行后台合并时,缺少的列的值将被计算后写入到合并后的数据部分中。
+
+不能够为nested类型的列设置默认值。
+
+### 约束 {#constraints}
+
+可以在列描述之后为表定义约束:
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
+    ...
+    CONSTRAINT constraint_name_1 CHECK boolean_expr_1,
+    ...
+) ENGINE = engine
+```
+
+`boolean_expr_1` 可以是任意布尔表达式。如果为表定义了约束,则 `INSERT` 查询会对插入的每一行逐一检查这些约束;如果任何约束不满足,服务器将抛出包含约束名称和检查表达式的异常。
+
+添加大量的约束会对大型 `INSERT` 查询的性能产生负面影响。
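+
+下面是一个约束的示例草图(表名与列名均为假设):
+
+``` sql
+CREATE TABLE users
+(
+    name String,
+    age UInt8,
+    CONSTRAINT age_is_reasonable CHECK age < 150
+) ENGINE = MergeTree() ORDER BY name;
+
+-- 违反约束的插入将抛出包含约束名称的异常:
+INSERT INTO users VALUES ('Alice', 200);
+```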
+
+### TTL表达式 {#ttl-expression}
+
+定义值的存储时间。只能为MergeTree系列的表指定。有关详细说明,请参阅 [列和表的TTL](../../sql_reference/statements/create.md#table_engine-mergetree-ttl)。
+
+### 列压缩编解码器 {#codecs}
+
+默认情况下,ClickHouse会对列应用 [服务器设置](../../sql_reference/statements/create.md#server-settings-compression) 中定义的压缩方法。您也可以在 `CREATE TABLE` 查询中为每个单独的列定义压缩方法。
+
+``` sql
+CREATE TABLE codec_example
+(
+    dt Date CODEC(ZSTD),
+    ts DateTime CODEC(LZ4HC),
+    float_value Float32 CODEC(NONE),
+    double_value Float64 CODEC(LZ4HC(9)),
+    value Float32 CODEC(Delta, ZSTD)
+)
+ENGINE =
+...
+```
+
+如果指定了编解码器,则默认编解码器不再适用。编解码器可以组合成一条流水线,例如 `CODEC(Delta, ZSTD)`。要为您的项目选择最佳的编解码器组合,请参照Altinity的文章 [新编码提高ClickHouse效率](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) 中描述的方式进行基准测试。
+
+!!! warning "警告"
+    您无法使用 `lz4` 之类的外部实用程序解压缩ClickHouse数据库文件,请改用专门的 [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) 实用程序。
+
+下列表引擎支持压缩:
+
+- [MergeTree](../../sql_reference/statements/create.md) 系列
+- [Log](../../sql_reference/statements/create.md) 系列
+- [Set](../../sql_reference/statements/create.md)
+- [Join](../../sql_reference/statements/create.md)
+
+ClickHouse支持通用编解码器和专用编解码器。
+
+#### 专用编解码器 {#create-query-specialized-codecs}
+
+这类编解码器利用数据的特定特征,使压缩更加有效。其中一些编解码器本身并不压缩数据,而是对数据进行预处理,使得后续使用通用编解码器压缩时,能获得比不做预处理更高的压缩率。
+
+专用编解码器:
+
+- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. 最多使用 `delta_bytes` 个字节存储增量值,因此 `delta_bytes` 是原始值的最大大小。`delta_bytes` 的可能取值为1、2、4、8。如果 `sizeof(type)` 等于1、2、4或8,则 `delta_bytes` 的默认值为 `sizeof(type)`;在所有其他情况下为1。
+- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla:一个快速、可扩展的内存时间序列数据库](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
+- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla:一个快速、可扩展的内存时间序列数据库](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
+- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` 和 `DateTime`). 在算法的每个步骤中,编解码器取64个值组成的块,将它们放入64x64位矩阵,对其进行转置,裁剪掉未使用的值位,并将其余部分作为序列返回。未使用的位,是指在被压缩的整个数据部分中,最大值与最小值之间都不发生变化的位。
+
+`DoubleDelta` 和 `Gorilla` 编解码器在Gorilla TSDB中被用作其压缩算法的组成部分。Gorilla的方法适用于数值随时间戳缓慢变化的时间序列场景:时间戳由 `DoubleDelta` 编解码器有效压缩,数值由 `Gorilla` 编解码器有效压缩。例如,要获得高效存储的表,可以按以下配置创建它:
+
+``` sql
+CREATE TABLE codec_example
+(
+    timestamp DateTime CODEC(DoubleDelta),
+    slow_values Float32 CODEC(Gorilla)
+)
+ENGINE = MergeTree()
+```
+
+#### 通用编解码器 {#create-query-common-purpose-codecs}
+
+编解码器:
+
+- `NONE` — 不压缩。
+- `LZ4` — 默认使用的无损 [数据压缩算法](https://github.com/lz4/lz4),应用LZ4快速压缩。
+- `LZ4HC[(level)]` — LZ4 HC(高压缩率)算法,级别可配置,默认级别为9。设置 `level <= 0` 时应用默认级别。可能的级别:\[1,12\];推荐级别范围:\[4,9\]。
+- `ZSTD[(level)]` — [ZSTD压缩算法](https://en.wikipedia.org/wiki/Zstandard),`level` 可配置。可能的级别:\[1,22\];默认值:1。
+
+高压缩级别适用于非对称场景,例如一次压缩、反复解压。更高的级别意味着更好的压缩率和更高的CPU使用率。
+
+## 临时表 {#lin-shi-biao}
+
+ClickHouse支持临时表,其具有以下特征:
+
+- 当会话结束时(包括连接中断),临时表将随会话一起消失。
+- 临时表仅能够使用Memory表引擎。
+- 无法为临时表指定数据库。它是在数据库之外创建的。
+- 如果临时表与另一个表名称相同,那么当在查询时没有显式指定db的情况下,将优先使用临时表。
+- 对于分布式查询处理,查询中使用的临时表将被传递到远程服务器。
+
+可以使用下面的语法创建一个临时表:
+
+``` sql
+CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+)
+```
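+
+一个使用草图:临时表常被用来为 `IN` 运算符准备小数据集(`big_table` 为假设的表名):
+
+``` sql
+CREATE TEMPORARY TABLE tmp_ids (id UInt64);
+INSERT INTO tmp_ids VALUES (1), (2), (3);
+SELECT * FROM big_table WHERE id IN (SELECT id FROM tmp_ids);
+```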
+
+大多数情况下,临时表不是手动创建的,只有在分布式查询处理中使用`(GLOBAL) IN`时为外部数据创建。更多信息,可以参考相关章节。
+
+## 分布式DDL查询 (ON CLUSTER 子句) {#fen-bu-shi-ddlcha-xun-on-cluster-zi-ju}
+
+对于 `CREATE`, `DROP`, `ALTER`,以及`RENAME`查询,系统支持其运行在整个集群上。
+例如,以下查询将在`cluster`集群的所有节点上创建名为`all_hits`的`Distributed`表:
+
+``` sql
+CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits)
+```
+
+为了能够正确地运行这种查询,每台主机必须具有相同的cluster声明(为了简化配置的同步,你可以使用zookeeper的方式进行配置)。同时这些主机还必须连接到zookeeper服务器。
+这个查询将最终在集群的每台主机上运行,即使一些主机当前处于不可用状态。同时它还保证了所有的查询在单台主机中的执行顺序。
+
+## CREATE VIEW {#create-view}
+
+``` sql
+CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
+```
+
+创建一个视图。它存在两种可选择的类型:普通视图与物化视图。
+
+普通视图不存储任何数据,只是执行从另一个表中的读取。换句话说,普通视图只是保存了视图的查询,当从视图中查询时,此查询被作为子查询用于替换FROM子句。
+
+举个例子,假设你已经创建了一个视图:
+
+``` sql
+CREATE VIEW view AS SELECT ...
+```
+
+还有一个查询:
+
+``` sql
+SELECT a, b, c FROM view
+```
+
+这个查询完全等价于:
+
+``` sql
+SELECT a, b, c FROM (SELECT ...)
+```
+
+物化视图存储的数据是由相应的SELECT查询转换得来的。
+
+在创建物化视图时,你还必须指定表的引擎 - 将会使用这个表引擎存储数据。
+
+目前物化视图的工作原理:当将数据写入到物化视图中SELECT子句所指定的表时,插入的数据会通过SELECT子句查询进行转换并将最终结果插入到视图中。
+
+如果创建物化视图时指定了POPULATE子句,则在创建时将该表的数据插入到物化视图中,就像使用`CREATE TABLE ... AS SELECT ...`一样。否则,物化视图只会包含在物化视图创建后新写入的数据。我们不推荐使用POPULATE,因为在视图创建期间写入的数据将不会写入其中。
+
+当一个`SELECT`子句包含`DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`时,请注意,这些仅会在插入数据时在每个单独的数据块上执行。例如,如果你在其中包含了`GROUP BY`,则只会在写入数据时进行聚合,且聚合范围仅限于单个批次的写入数据,数据不会被进一步聚合。但是当你使用一些其他数据聚合引擎时这是例外的,如:`SummingMergeTree`。
+
+目前对物化视图执行`ALTER`是不支持的,因此这可能是不方便的。如果物化视图是使用`TO [db.]name`的方式构建的,你可以使用`DETACH`语句先将视图剥离,然后在目标表上运行`ALTER`,最后使用`ATTACH`将之前剥离的视图重新加载进来。
+
+视图看起来和普通的表相同。例如,你可以通过`SHOW TABLES`查看到它们。
+
+没有单独的删除视图的语法。如果要删除视图,请使用`DROP TABLE`。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/create/)
+
+## CREATE DICTIONARY {#create-dictionary-query}
+
+``` sql
+CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
+(
+    key1 type1  [DEFAULT|EXPRESSION expr1] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
+    key2 type2 [DEFAULT|EXPRESSION expr2] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
+    attr1 type2 [DEFAULT|EXPRESSION expr3],
+    attr2 type2 [DEFAULT|EXPRESSION expr4]
+)
+PRIMARY KEY key1, key2
+SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
+LAYOUT(LAYOUT_NAME([param_name param_value]))
+LIFETIME([MIN val1] MAX val2)
+```
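+
+下面是一个字典定义的示例草图(来源表 `default.countries`、主机与字段名均为假设,仅作示意):
+
+``` sql
+CREATE DICTIONARY country_dict
+(
+    code UInt64,
+    name String DEFAULT '?'
+)
+PRIMARY KEY code
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'countries' DB 'default'))
+LAYOUT(FLAT())
+LIFETIME(MIN 300 MAX 600)
+```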
diff --git a/docs/zh/sql_reference/statements/index.md b/docs/zh/sql_reference/statements/index.md
new file mode 100644
index 00000000000..bb04551dea1
--- /dev/null
+++ b/docs/zh/sql_reference/statements/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u8BED\u53E5"
+toc_priority: 31
+---
+
+
diff --git a/docs/zh/sql_reference/statements/insert_into.md b/docs/zh/sql_reference/statements/insert_into.md
new file mode 100644
index 00000000000..a59730f5750
--- /dev/null
+++ b/docs/zh/sql_reference/statements/insert_into.md
@@ -0,0 +1,68 @@
+
+## INSERT {#insert}
+
+INSERT查询主要用于向系统中添加数据。
+
+查询的基本格式:
+
+``` sql
+INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
+```
+
+您可以在查询中指定插入的列的列表,如:`[(c1, c2, c3)]`。对于存在于表结构中但不存在于插入列表中的列,它们将会按照如下方式填充数据:
+
+- 如果存在`DEFAULT`表达式,根据`DEFAULT`表达式计算被填充的值。
+- 如果没有定义`DEFAULT`表达式,则填充零或空字符串。
+
+如果 [strict\_insert\_defaults=1](../../operations/settings/settings.md),你必须在查询中列出所有没有定义`DEFAULT`表达式的列。
+
+数据可以以ClickHouse支持的任何 [输入输出格式](../../interfaces/formats.md#formats) 传递给INSERT。格式的名称必须显式地指定在查询中:
+
+``` sql
+INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set
+```
+
+例如,下面的查询所使用的输入格式就与上面INSERT … VALUES中使用的输入格式相同:
+
+``` sql
+INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ...
+```
+
+ClickHouse会清除数据前所有的空白字符与一行摘要信息(如果需要的话)。所以在进行查询时,我们建议您将数据放入到输入输出格式名称后的新的一行中去(如果数据是以空白字符开始的,这将非常重要)。
+
+示例:
+
+``` sql
+INSERT INTO t FORMAT TabSeparated
+11 Hello, world!
+22 Qwerty
+```
+
+在使用命令行客户端或HTTP客户端时,你可以将具体的查询语句与数据分开发送。更多具体信息,请参考«[客户端](../../interfaces/index.md#interfaces)»部分。
+
+### 使用`SELECT`的结果写入 {#insert_query_insert-select}
+
+``` sql
+INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
+```
+
+写入与SELECT的列的对应关系是使用位置来进行对应的,尽管它们在SELECT表达式与INSERT中的名称可能是不同的。如果需要,会对它们执行对应的类型转换。
+
+除了VALUES格式之外,其他格式中的数据都不允许出现诸如`now()`,`1 + 2`等表达式。VALUES格式允许您有限度地使用这些表达式,但是不建议您这么做,因为执行这些表达式总是低效的。
+
+系统不支持的其他用于修改数据的查询:`UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`。
+但是,您可以使用 `ALTER TABLE ... DROP PARTITION`查询来删除一些旧的数据。
+
+### 性能的注意事项 {#xing-neng-de-zhu-yi-shi-xiang}
+
+在进行`INSERT`时将会对写入的数据进行一些处理,如按照主键排序、按照月份对数据进行分区等。所以如果您的写入数据中包含多个月份的混合数据时,将会显著地降低`INSERT`的性能。为了避免这种情况:
+
+- 数据总是以尽量大的batch进行写入,如每次写入100,000行。
+- 数据在写入ClickHouse前预先进行分组。
+
+在以下的情况下,性能不会下降:
+
+- 数据总是被实时的写入。
+- 写入的数据已经按照时间排序。
+
+[来源文章](https://clickhouse.tech/docs/en/query_language/insert_into/)
diff --git a/docs/zh/sql_reference/statements/misc.md b/docs/zh/sql_reference/statements/misc.md
new file mode 100644
index 00000000000..e50f08464b7
--- /dev/null
+++ b/docs/zh/sql_reference/statements/misc.md
@@ -0,0 +1,252 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 39
+toc_title: "\u5176\u4ED6"
+---
+
+# 杂项查询 {#miscellaneous-queries}
+
+## ATTACH {#attach}
+
+这个查询与 `CREATE` 完全相同,只是:
+
+- 使用关键词 `ATTACH` 而不是 `CREATE`。
+- 查询不会在磁盘上创建数据,而是假定数据已经位于适当的位置,只是将有关表的信息添加到服务器。
+  执行 ATTACH 查询后,服务器将知道该表的存在。
+
+如果表之前已被分离(`DETACH`),这意味着其结构是已知的,可以使用简写形式而不必指定结构。
+
+``` sql
+ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+启动服务器时会使用此查询。服务器将表的元数据作为包含 `ATTACH` 查询的文件存储,并在启动时简单地运行这些查询(在服务器上显式创建的系统表除外)。
+
+## CHECK TABLE {#check-table}
+
+检查表中的数据是否已损坏。
+
+``` sql
+CHECK TABLE [db.]name
+```
+
+`CHECK TABLE` 查询会将实际的文件大小与存储在服务器上的预期值进行比较。如果文件大小与存储的值不匹配,则表示数据已损坏。例如,这可能是由查询执行期间的系统崩溃引起的。
+
+查询响应包含一个具有单行的 `result` 列。该行的值为
+[布尔值](../../sql_reference/data_types/boolean.md) 类型:
+
+- 0 - 表中的数据已损坏。
+- 1 - 数据保持完整性。
+
+`CHECK TABLE` 查询支持下列表引擎:
+
+- [Log](../../engines/table_engines/log_family/log.md)
+- [TinyLog](../../engines/table_engines/log_family/tinylog.md)
+- [StripeLog](../../engines/table_engines/log_family/stripelog.md)
+- [MergeTree系列](../../engines/table_engines/mergetree_family/mergetree.md)
+
+对使用其他表引擎的表执行该查询会导致异常。
+
+`*Log` 系列的引擎不提供故障后的自动数据恢复。请使用 `CHECK TABLE` 查询来及时发现数据丢失。
+
+对于 `MergeTree` 系列引擎,`CHECK TABLE` 查询会显示本地服务器上表的每个单独数据部分的检查状态。
+
+**如果数据已损坏**
+
+如果表已损坏,则可以将未损坏的数据复制到另一个表。要做到这一点:
+
+1. 创建具有与损坏的表相同结构的新表。要执行此操作,请执行查询 `CREATE TABLE <new_table_name> AS <damaged_table_name>`。
+2. 将 [max\_threads](../../operations/settings/settings.md#settings-max_threads) 的值设置为1,以便在单个线程中处理下一个查询。要执行此操作,请运行查询 `SET max_threads = 1`。
+3. 执行查询 `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`。此请求将未损坏的数据从损坏的表复制到另一个表。只有损坏部分之前的数据才会被复制。
+4. 重新启动 `clickhouse-client` 以重置 `max_threads` 的值。
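+
+将上述步骤合起来,一个最小的恢复草图如下(`visits` 为假设的损坏表):
+
+``` sql
+CREATE TABLE visits_repaired AS visits;
+SET max_threads = 1;
+INSERT INTO visits_repaired SELECT * FROM visits;
+```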
+
+## DESCRIBE TABLE {#misc-describe-table}
+
+``` sql
+DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
+```
+
+返回以下 `String` 类型的列:
+
+- `name` — 列名。
+- `type` — 列类型。
+- `default_type` — [默认表达式](create.md#create-default-values) 中使用的子句(`DEFAULT`, `MATERIALIZED` 或 `ALIAS`)。如果未指定默认表达式,则该列为空字符串。
+- `default_expression` — 在 `DEFAULT` 子句中指定的值。
+- `comment_expression` — 注释文本。
+
+嵌套的数据结构以 "expanded" 格式输出。每列单独显示,名称中带一个点。
+
+## DETACH {#detach}
+
+从服务器中删除关于 'name' 表的信息。服务器将不再知道该表的存在。
+
+``` sql
+DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+这不会删除表的数据或元数据。在下一次服务器启动时,服务器将读取元数据并再次获知该表的信息。
+同样,一个已 "分离" 的表可以使用 `ATTACH` 查询重新连接(系统表除外,它们没有存储元数据)。
+
+没有 `DETACH DATABASE` 查询。
+
+## DROP {#drop}
+
+此查询有两种类型:`DROP DATABASE` 和 `DROP TABLE`。
+
+``` sql
+DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
+```
+
+删除 'db' 数据库中的所有表,然后删除 'db' 数据库本身。
+如果指定了 `IF EXISTS`,则当数据库不存在时不会返回错误。
+
+``` sql
+DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+删除表。
+如果指定了 `IF EXISTS`,则当表不存在或数据库不存在时不会返回错误。
+
+    DROP DICTIONARY [IF EXISTS] [db.]name
+
+删除字典。
+如果指定了 `IF EXISTS`,则当字典不存在或数据库不存在时不会返回错误。
+
+## EXISTS {#exists}
+
+``` sql
+EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format]
+```
+
+返回一个 `UInt8` 类型的单列,其中只包含一个值:如果表或数据库不存在则为 `0`,如果该表存在于指定的数据库中则为 `1`。
+
+## KILL QUERY {#kill-query}
+
+``` sql
+KILL QUERY [ON CLUSTER cluster]
+  WHERE <where expression to SELECT FROM system.processes query>
+  [SYNC|ASYNC|TEST]
+  [FORMAT format]
+```
+
+尝试强制终止当前正在运行的查询。
+要终止的查询,是根据 `KILL` 查询的 `WHERE` 子句中定义的条件,从 system.processes 表中选择的。
+
+例:
+
+``` sql
+-- Forcibly terminates all queries with the specified query_id:
+KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
+
+-- Synchronously terminates all queries run by 'username':
+KILL QUERY WHERE user='username' SYNC
+```
+
+只读用户只能停止自己的查询。
+
+默认情况下,使用查询的异步版本(`ASYNC`),即不等待确认查询已停止。
+
+同步版本(`SYNC`)会等待所有查询停止,并在停止时显示有关每个进程的信息。
+响应中包含 `kill_status` 列,它可以取以下值:
+
+1. 'finished' – 查询已成功终止。
+2. 'waiting' – 已向查询发送终止信号,正在等待其结束。
+3. 其他值解释了查询无法停止的原因。
+
+测试查询(`TEST`)仅检查用户的权限,并显示要停止的查询列表。
+
+## KILL MUTATION {#kill-mutation}
+
+``` sql
+KILL MUTATION [ON CLUSTER cluster]
+  WHERE <where expression to SELECT FROM system.mutations query>
+  [TEST]
+  [FORMAT format]
+```
+
+尝试取消和删除当前正在执行的 [突变](alter.md#alter-mutations)。要取消的突变,是使用 `KILL` 查询的 `WHERE` 子句中指定的过滤器,从 [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) 表中选择的。
+
+测试查询(`TEST`)仅检查用户的权限,并显示要停止的查询列表。
+
+例:
+
+``` sql
+-- Cancel and remove all mutations of the single table:
+KILL MUTATION WHERE database = 'default' AND table = 'table'
+
+-- Cancel the specific mutation:
+KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
+```
+
+当突变卡住无法完成时(例如,突变查询中的某个函数在应用于表中的数据时抛出异常),此查询非常有用。
+
+突变已经做出的更改不会被回滚。
+
+## OPTIMIZE {#misc_operations-optimize}
+
+``` sql
+OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE]
+```
+
+此查询尝试为 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 系列表引擎的表初始化一次数据部分的非计划合并。
+
+`OPTIMIZE` 查询也支持 [MaterializedView](../../engines/table_engines/special/materializedview.md) 和 [Buffer](../../engines/table_engines/special/buffer.md) 引擎,不支持其他表引擎。
+
+当 `OPTIMIZE` 与 [ReplicatedMergeTree](../../engines/table_engines/mergetree_family/replication.md) 系列表引擎一起使用时,ClickHouse会创建合并任务,并(在启用了 `replication_alter_partitions_sync` 设置的情况下)等待其在所有节点上执行。
+
+- 如果 `OPTIMIZE` 出于任何原因没有执行合并,它不会通知客户端。要启用通知,请使用 [optimize\_throw\_if\_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) 设置。
+- 如果您指定 `PARTITION`,则仅优化指定的分区。参见 [如何设置分区表达式](alter.md#alter-how-to-specify-part-expr)。
+- 如果您指定 `FINAL`,即使所有数据已经在一个数据部分中,也会执行优化。
+- 如果您指定 `DEDUPLICATE`,则完全相同的行会被去重(对所有列进行比较),这仅对MergeTree引擎有意义。
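+
+例如,下面的草图对单个分区执行计划外合并并去重(表名与分区值均为假设):
+
+``` sql
+OPTIMIZE TABLE visits PARTITION 201902 FINAL DEDUPLICATE;
+```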
+
+!!! warning "警告"
+    `OPTIMIZE` 无法修复 "Too many parts" 错误。
+
+## RENAME {#misc_operations-rename}
+
+重命名一个或多个表。
+
+``` sql
+RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster]
+```
+
+所有表都在全局锁定下重命名。重命名表是一个轻量级操作。如果您在TO之后指定了另一个数据库,则表将被移动到此数据库。但是,包含数据库的目录必须位于同一文件系统中(否则将返回错误)。
+
+## SET {#query-set}
+
+``` sql
+SET param = value
+```
+
+为当前会话把 `value` 赋给 [设置](../../operations/settings/index.md) `param`。您不能通过这种方式更改 [服务器设置](../../operations/server_configuration_parameters/index.md)。
+
+您还可以在单个查询中应用指定设置配置文件中的所有值。
+
+``` sql
+SET profile = 'profile-name-from-the-settings-file'
+```
+
+有关详细信息,请参阅 [设置](../../operations/settings/settings.md)。
+
+## TRUNCATE {#truncate}
+
+``` sql
+TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+删除表中的所有数据。如果省略了 `IF EXISTS` 子句,则当表不存在时查询会返回错误。
+
+`TRUNCATE` 查询不支持 [View](../../engines/table_engines/special/view.md)、[File](../../engines/table_engines/special/file.md)、[URL](../../engines/table_engines/special/url.md) 和 [Null](../../engines/table_engines/special/null.md) 表引擎。
+
+## USE {#use}
+
+``` sql
+USE db
+```
+
+用于设置会话的当前数据库。
+如果查询中的表名之前没有用点显式指定数据库,则使用当前数据库来查找表。
+使用HTTP协议时无法执行此查询,因为HTTP协议没有会话的概念。
+
+[原始文章](https://clickhouse.tech/docs/en/query_language/misc/)
diff --git a/docs/zh/sql_reference/statements/select.md b/docs/zh/sql_reference/statements/select.md
new file mode 100644
index 00000000000..9f4e71c3343
--- /dev/null
+++ b/docs/zh/sql_reference/statements/select.md
@@ -0,0 +1,937 @@
+
+# SELECT 查询语法 {#select-cha-xun-yu-fa}
+
+`SELECT` 语句用于执行数据的检索。
+
+``` sql
+SELECT [DISTINCT] expr_list
+    [FROM [db.]table | (subquery) | table_function] [FINAL]
+    [SAMPLE sample_coeff]
+    [ARRAY JOIN ...]
+    [GLOBAL] ANY|ALL INNER|LEFT JOIN (subquery)|table USING columns_list
+    [PREWHERE expr]
+    [WHERE expr]
+    [GROUP BY expr_list] [WITH TOTALS]
+    [HAVING expr]
+    [ORDER BY expr_list]
+    [LIMIT n BY columns]
+    [LIMIT [n, ]m]
+    [UNION ALL ...]
+ [INTO OUTFILE filename] + [FORMAT format] +``` + +所有的子句都是可选的,除了SELECT之后的表达式列表(expr\_list)。 +下面将按照查询运行的顺序逐一对各个子句进行说明。 + +如果查询中不包含`DISTINCT`,`GROUP BY`,`ORDER BY`子句以及`IN`和`JOIN`子查询,那么它将仅使用O(1)数量的内存来完全流式的处理查询 +否则,这个查询将消耗大量的内存,除非你指定了这些系统配置:`max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`。它们规定了可以使用外部排序(将临时表存储到磁盘中)以及外部聚合,`目前系统不存在关于Join的配置`,更多关于它们的信息,可以参见«配置»部分。 + +### FROM 子句 {#select-from} + +如果查询中不包含FROM子句,那么将读取`system.one`。 +`system.one`中仅包含一行数据(此表实现了与其他数据库管理系统中的DUAL相同的功能)。 + +FROM子句规定了将从哪个表、或子查询、或表函数中读取数据;同时ARRAY JOIN子句和JOIN子句也可以出现在这里(见后文)。 + +可以使用包含在括号里的子查询来替代表。 +在这种情况下,子查询的处理将会构建在外部的查询内部。 +不同于SQL标准,子查询后无需指定别名。为了兼容,你可以在子查询后添加'AS 别名',但是指定的名字不能被使用在任何地方。 + +也可以使用表函数来代替表,有关信息,参见«表函数»。 + +执行查询时,在查询中列出的所有列都将从对应的表中提取数据;如果你使用的是子查询的方式,则任何在外部查询中没有使用的列,子查询将从查询中忽略它们; +如果你的查询没有列出任何的列(例如,SELECT count() FROM t),则将额外的从表中提取一些列(最好的情况下是最小的列),以便计算行数。 + +最后的FINAL修饰符仅能够被使用在SELECT from CollapsingMergeTree场景中。当你为FROM指定了FINAL修饰符时,你的查询结果将会在查询过程中被聚合。需要注意的是,在这种情况下,查询将在单个流中读取所有相关的主键列,同时对需要的数据进行合并。这意味着,当使用FINAL修饰符时,查询将会处理的更慢。在大多数情况下,你应该避免使用FINAL修饰符。更多信息,请参阅«CollapsingMergeTree引擎»部分。 + +### SAMPLE 子句 {#select-sample-clause} + +通过SAMPLE子句用户可以进行近似查询处理,近似查询处理仅能工作在MergeTree\*类型的表中,并且在创建表时需要您指定采样表达式(参见«MergeTree 引擎»部分)。 + +`SAMPLE`子句可以使用`SAMPLE k`来表示,其中k可以是0到1的小数值,或者是一个足够大的正整数值。 + +当k为0到1的小数时,查询将使用'k'作为百分比选取数据。例如,`SAMPLE 0.1`查询只会检索数据总量的10%。 +当k为一个足够大的正整数时,查询将使用'k'作为最大样本数。例如, `SAMPLE 10000000`查询只会检索最多10,000,000行数据。 + +示例: + +``` sql +SELECT + Title, + count() * 10 AS PageViews +FROM hits_distributed +SAMPLE 0.1 +WHERE + CounterID = 34 + AND toDate(EventDate) >= toDate('2013-01-29') + AND toDate(EventDate) <= toDate('2013-02-04') + AND NOT DontCountHits + AND NOT Refresh + AND Title != '' +GROUP BY Title +ORDER BY PageViews DESC LIMIT 1000 +``` + +在这个例子中,查询将检索数据总量的0.1 (10%)的数据。值得注意的是,查询不会自动校正聚合函数最终的结果,所以为了得到更加精确的结果,需要将`count()`的结果手动乘以10。 + +当使用像`SAMPLE 10000000`这样的方式进行近似查询时,由于没有了任何关于将会处理了哪些数据或聚合函数应该被乘以几的信息,所以这种方式不适合在这种场景下使用。 + +使用相同的采样率得到的结果总是一致的:如果我们能够看到所有可能存在在表中的数据,那么相同的采样率总是能够得到相同的结果(在建表时使用相同的采样表达式),换句话说,系统在不同的时间,不同的服务器,不同表上总以相同的方式对数据进行采样。 + +例如,我们可以使用采样的方式获取到与不进行采样相同的用户ID的列表。这将表明,你可以在IN子查询中使用采样,或者使用采样的结果与其他查询进行关联。 + +### ARRAY JOIN 子句 {#select-array-join-clause} + +ARRAY JOIN子句可以帮助查询进行与数组和nested数据类型的连接。它有点类似arrayJoin函数,但它的功能更广泛。 + +`ARRAY JOIN` 本质上等同于`INNERT JOIN`数组。 例如: + + :) CREATE TABLE arrays_test (s String, arr Array(UInt8)) ENGINE = Memory + + CREATE TABLE arrays_test + ( + s String, + arr Array(UInt8) + ) ENGINE = Memory + + Ok. + + 0 rows in set. Elapsed: 0.001 sec. + + :) INSERT INTO arrays_test VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []) + + INSERT INTO arrays_test VALUES + + Ok. + + 3 rows in set. Elapsed: 0.001 sec. + + :) SELECT * FROM arrays_test + + SELECT * + FROM arrays_test + + ┌─s───────┬─arr─────┐ + │ Hello │ [1,2] │ + │ World │ [3,4,5] │ + │ Goodbye │ [] │ + └─────────┴─────────┘ + + 3 rows in set. Elapsed: 0.001 sec. + + :) SELECT s, arr FROM arrays_test ARRAY JOIN arr + + SELECT s, arr + FROM arrays_test + ARRAY JOIN arr + + ┌─s─────┬─arr─┐ + │ Hello │ 1 │ + │ Hello │ 2 │ + │ World │ 3 │ + │ World │ 4 │ + │ World │ 5 │ + └───────┴─────┘ + + 5 rows in set. Elapsed: 0.001 sec. 
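+
+从上面的结果可以看到,`arr` 为空数组的行(`'Goodbye'`)没有出现在输出中:`ARRAY JOIN` 的行为与对数组做 `INNER JOIN` 一致。作为参考,使用前文提到的 `arrayJoin` 函数可以得到类似的展开效果:
+
+``` sql
+SELECT s, arrayJoin(arr) AS a FROM arrays_test
+```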
+ +你还可以为ARRAY JOIN子句指定一个别名,这时你可以通过这个别名来访问数组中的数据,但是数据本身仍然可以通过原来的名称进行访问。例如: + + :) SELECT s, arr, a FROM arrays_test ARRAY JOIN arr AS a + + SELECT s, arr, a + FROM arrays_test + ARRAY JOIN arr AS a + + ┌─s─────┬─arr─────┬─a─┐ + │ Hello │ [1,2] │ 1 │ + │ Hello │ [1,2] │ 2 │ + │ World │ [3,4,5] │ 3 │ + │ World │ [3,4,5] │ 4 │ + │ World │ [3,4,5] │ 5 │ + └───────┴─────────┴───┘ + + 5 rows in set. Elapsed: 0.001 sec. + +当多个具有相同大小的数组使用逗号分割出现在ARRAY JOIN子句中时,ARRAY JOIN会将它们同时执行(直接合并,而不是它们的笛卡尔积)。例如: + + :) SELECT s, arr, a, num, mapped FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped + + SELECT s, arr, a, num, mapped + FROM arrays_test + ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(lambda(tuple(x), plus(x, 1)), arr) AS mapped + + ┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ + │ Hello │ [1,2] │ 1 │ 1 │ 2 │ + │ Hello │ [1,2] │ 2 │ 2 │ 3 │ + │ World │ [3,4,5] │ 3 │ 1 │ 4 │ + │ World │ [3,4,5] │ 4 │ 2 │ 5 │ + │ World │ [3,4,5] │ 5 │ 3 │ 6 │ + └───────┴─────────┴───┴─────┴────────┘ + + 5 rows in set. Elapsed: 0.002 sec. + + :) SELECT s, arr, a, num, arrayEnumerate(arr) FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num + + SELECT s, arr, a, num, arrayEnumerate(arr) + FROM arrays_test + ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num + + ┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ + │ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ + │ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ + │ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ + │ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ + │ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ + └───────┴─────────┴───┴─────┴─────────────────────┘ + + 5 rows in set. Elapsed: 0.002 sec. + +另外ARRAY JOIN也可以工作在nested数据结构上。例如: + + :) CREATE TABLE nested_test (s String, nest Nested(x UInt8, y UInt32)) ENGINE = Memory + + CREATE TABLE nested_test + ( + s String, + nest Nested( + x UInt8, + y UInt32) + ) ENGINE = Memory + + Ok. + + 0 rows in set. Elapsed: 0.006 sec. + + :) INSERT INTO nested_test VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []) + + INSERT INTO nested_test VALUES + + Ok. + + 3 rows in set. Elapsed: 0.001 sec. + + :) SELECT * FROM nested_test + + SELECT * + FROM nested_test + + ┌─s───────┬─nest.x──┬─nest.y─────┐ + │ Hello │ [1,2] │ [10,20] │ + │ World │ [3,4,5] │ [30,40,50] │ + │ Goodbye │ [] │ [] │ + └─────────┴─────────┴────────────┘ + + 3 rows in set. Elapsed: 0.001 sec. + + :) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest + + SELECT s, `nest.x`, `nest.y` + FROM nested_test + ARRAY JOIN nest + + ┌─s─────┬─nest.x─┬─nest.y─┐ + │ Hello │ 1 │ 10 │ + │ Hello │ 2 │ 20 │ + │ World │ 3 │ 30 │ + │ World │ 4 │ 40 │ + │ World │ 5 │ 50 │ + └───────┴────────┴────────┘ + + 5 rows in set. Elapsed: 0.001 sec. + +当你在ARRAY JOIN指定nested数据类型的名称时,其作用与与包含所有数组元素的ARRAY JOIN相同,例如: + + :) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x, nest.y + + SELECT s, `nest.x`, `nest.y` + FROM nested_test + ARRAY JOIN `nest.x`, `nest.y` + + ┌─s─────┬─nest.x─┬─nest.y─┐ + │ Hello │ 1 │ 10 │ + │ Hello │ 2 │ 20 │ + │ World │ 3 │ 30 │ + │ World │ 4 │ 40 │ + │ World │ 5 │ 50 │ + └───────┴────────┴────────┘ + + 5 rows in set. Elapsed: 0.001 sec. + +这种方式也是可以运行的: + + :) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x + + SELECT s, `nest.x`, `nest.y` + FROM nested_test + ARRAY JOIN `nest.x` + + ┌─s─────┬─nest.x─┬─nest.y─────┐ + │ Hello │ 1 │ [10,20] │ + │ Hello │ 2 │ [10,20] │ + │ World │ 3 │ [30,40,50] │ + │ World │ 4 │ [30,40,50] │ + │ World │ 5 │ [30,40,50] │ + └───────┴────────┴────────────┘ + + 5 rows in set. 
Elapsed: 0.001 sec. + +为了方便使用原来的nested类型的数组,你可以为nested类型定义一个别名。例如: + + :) SELECT s, n.x, n.y, nest.x, nest.y FROM nested_test ARRAY JOIN nest AS n + + SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` + FROM nested_test + ARRAY JOIN nest AS n + + ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ + │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ + │ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ + │ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ + │ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ + │ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ + └───────┴─────┴─────┴─────────┴────────────┘ + + 5 rows in set. Elapsed: 0.001 sec. + +使用arrayEnumerate函数的示例: + + :) SELECT s, n.x, n.y, nest.x, nest.y, num FROM nested_test ARRAY JOIN nest AS n, arrayEnumerate(nest.x) AS num + + SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num + FROM nested_test + ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num + + ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ + │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ + │ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ + │ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ + │ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ + │ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ + └───────┴─────┴─────┴─────────┴────────────┴─────┘ + + 5 rows in set. Elapsed: 0.002 sec. + +在一个查询中只能出现一个ARRAY JOIN子句。 + +如果在WHERE/PREWHERE子句中使用了ARRAY JOIN子句的结果,它将优先于WHERE/PREWHERE子句执行,否则它将在WHERE/PRWHERE子句之后执行,以便减少计算。 + +### JOIN 子句 {#select-join} + +JOIN子句用于连接数据,作用与[SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL))的定义相同。 + +!!! info "注意" + 与 [ARRAY JOIN](#select-array-join-clause) 没有关系. + +``` sql +SELECT +FROM +[GLOBAL] [ANY|ALL] INNER|LEFT|RIGHT|FULL|CROSS [OUTER] JOIN +(ON )|(USING ) ... +``` + +可以使用具体的表名来代替``与``。但这与使用`SELECT * FROM table`子查询的方式相同。除非你的表是\[Join\](../operations/table\_engines/join.md +**支持的`JOIN`类型** + +- `INNER JOIN` +- `LEFT OUTER JOIN` +- `RIGHT OUTER JOIN` +- `FULL OUTER JOIN` +- `CROSS JOIN` + +你可以跳过默认的`OUTER`关键字。 + +**`ANY` 与 `ALL`** + +在使用`ALL`修饰符对JOIN进行修饰时,如果右表中存在多个与左表关联的数据,那么系统则将右表中所有可以与左表关联的数据全部返回在结果中。这与SQL标准的JOIN行为相同。 +在使用`ANY`修饰符对JOIN进行修饰时,如果右表中存在多个与左表关联的数据,那么系统仅返回第一个与左表匹配的结果。如果左表与右表一一对应,不存在多余的行时,`ANY`与`ALL`的结果相同。 + +你可以在会话中通过设置 [join\_default\_strictness](../../operations/settings/settings.md) 来指定默认的JOIN修饰符。 + +**`GLOBAL` 分布** + +当使用普通的`JOIN`时,查询将被发送给远程的服务器。并在这些远程服务器上生成右表并与它们关联。换句话说,右表来自于各个服务器本身。 + +当使用`GLOBAL ... 
JOIN`,首先会在请求服务器上计算右表并以临时表的方式将其发送到所有服务器。这时每台服务器将直接使用它进行计算。 + +使用`GLOBAL`时需要小心。更多信息,参阅 [分布式子查询](#select-distributed-subqueries) 部分。 + +**使用建议** + +从子查询中删除所有`JOIN`不需要的列。 + +当执行`JOIN`查询时,因为与其他阶段相比没有进行执行顺序的优化:JOIN优先于WHERE与聚合执行。因此,为了显示的指定执行顺序,我们推荐你使用子查询的方式执行`JOIN`。 + +示例: + +``` sql +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY LEFT JOIN +( + SELECT + CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID +) USING CounterID +ORDER BY hits DESC +LIMIT 10 +``` + + ┌─CounterID─┬───hits─┬─visits─┐ + │ 1143050 │ 523264 │ 13665 │ + │ 731962 │ 475698 │ 102716 │ + │ 722545 │ 337212 │ 108187 │ + │ 722889 │ 252197 │ 10547 │ + │ 2237260 │ 196036 │ 9522 │ + │ 23057320 │ 147211 │ 7689 │ + │ 722818 │ 90109 │ 17847 │ + │ 48221 │ 85379 │ 4652 │ + │ 19762435 │ 77807 │ 7026 │ + │ 722884 │ 77492 │ 11056 │ + └───────────┴────────┴────────┘ + +子查询不允许您设置别名或在其他地方引用它们。 +`USING`中指定的列必须在两个子查询中具有相同的名称,而其他列必须具有不同的名称。您可以通过使用别名的方式来更改子查询中的列名(示例中就分别使用了'hits'与'visits'别名)。 + +`USING`子句用于指定要进行链接的一个或多个列,系统会将这些列在两张表中相等的值连接起来。如果列是一个列表,不需要使用括号包裹。同时JOIN不支持其他更复杂的Join方式。 + +右表(子查询的结果)将会保存在内存中。如果没有足够的内存,则无法运行`JOIN`。 + +只能在查询中指定一个`JOIN`。若要运行多个`JOIN`,你可以将它们放入子查询中。 + +每次运行相同的`JOIN`查询,总是会再次计算 - 没有缓存结果。 为了避免这种情况,可以使用'Join'引擎,它是一个预处理的Join数据结构,总是保存在内存中。更多信息,参见«Join引擎»部分。 + +在一些场景下,使用`IN`代替`JOIN`将会得到更高的效率。在各种类型的JOIN中,最高效的是`ANY LEFT JOIN`,然后是`ANY INNER JOIN`,效率最差的是`ALL LEFT JOIN`以及`ALL INNER JOIN`。 + +如果你需要使用`JOIN`来关联一些纬度表(包含纬度属性的一些相对比较小的表,例如广告活动的名称),那么`JOIN`可能不是好的选择,因为语法负责,并且每次查询都将重新访问这些表。对于这种情况,您应该使用«外部字典»的功能来替换`JOIN`。更多信息,参见 [外部字典](../../sql_reference/statements/select.md) 部分。 + +#### Null的处理 {#nullde-chu-li} + +JOIN的行为受 [join\_use\_nulls](../../operations/settings/settings.md) 的影响。当`join_use_nulls=1`时,`JOIN`的工作与SQL标准相同。 + +如果JOIN的key是 [可为空](../../sql_reference/statements/select.md) 类型的字段,则其中至少一个存在 [NULL](../syntax.md) 值的key不会被关联。 + +### WHERE 子句 {#select-where} + +如果存在WHERE子句, 则在该子句中必须包含一个UInt8类型的表达式。 这个表达是通常是一个带有比较和逻辑的表达式。 +这个表达式将会在所有数据转换前用来过滤数据。 + +如果在支持索引的数据库表引擎中,这个表达式将被评估是否使用索引。 + +### PREWHERE 子句 {#prewhere-zi-ju} + +这个子句与WHERE子句的意思相同。主要的不同之处在于表数据的读取。 +当使用PREWHERE时,首先只读取PREWHERE表达式中需要的列。然后在根据PREWHERE执行的结果读取其他需要的列。 + +如果在过滤条件中有少量不适合索引过滤的列,但是它们又可以提供很强的过滤能力。这时使用PREWHERE是有意义的,因为它将帮助减少数据的读取。 + +例如,在一个需要提取大量列的查询中为少部分列编写PREWHERE是很有作用的。 + +PREWHERE 仅支持`*MergeTree`系列引擎。 + +在一个查询中可以同时指定PREWHERE和WHERE,在这种情况下,PREWHERE优先于WHERE执行。 + +值得注意的是,PREWHERE不适合用于已经存在于索引中的列,因为当列已经存在于索引中的情况下,只有满足索引的数据块才会被读取。 + +如果将'optimize\_move\_to\_prewhere'设置为1,并且在查询中不包含PREWHERE,则系统将自动的把适合PREWHERE表达式的部分从WHERE中抽离到PREWHERE中。 + +### GROUP BY 子句 {#select-group-by-clause} + +这是列式数据库管理系统中最重要的一部分。 + +如果存在GROUP BY子句,则在该子句中必须包含一个表达式列表。其中每个表达式将会被称之为«key»。 +SELECT,HAVING,ORDER BY子句中的表达式列表必须来自于这些«key»或聚合函数。简而言之,被选择的列中不能包含非聚合函数或key之外的其他列。 + +如果查询表达式列表中仅包含聚合函数,则可以省略GROUP BY子句,这时会假定将所有数据聚合成一组空«key»。 + +示例: + +``` sql +SELECT + count(), + median(FetchTiming > 60 ? 60 : FetchTiming), + count() - sum(Refresh) +FROM hits +``` + +与SQL标准不同的是,如果表中不存在任何数据(可能表本身中就不存在任何数据,或者由于被WHERE条件过滤掉了),将返回一个空结果,而不是一个包含聚合函数初始值的结果。 + +与MySQL不同的是(实际上这是符合SQL标准的),你不能够获得一个不在key中的非聚合函数列(除了常量表达式)。但是你可以使用'any'(返回遇到的第一个值)、max、min等聚合函数使它工作。 + +示例: + +``` sql +SELECT + domainWithoutWWW(URL) AS domain, + count(), + any(Title) AS title -- getting the first occurred page header for each domain. 
+FROM hits +GROUP BY domain +``` + +GROUP BY子句会为遇到的每一个不同的key计算一组聚合函数的值。 + +在GROUP BY子句中不支持使用Array类型的列。 + +常量不能作为聚合函数的参数传入聚合函数中。例如: sum(1)。这种情况下你可以省略常量。例如:`count()`。 + +#### NULL 处理 {#null-chu-li} + +对于GROUP BY子句,ClickHouse将 [NULL](../syntax.md) 解释为一个值,并且支持`NULL=NULL`。 + +下面这个例子将说明这将意味着什么。 + +假设你有这样一张表: + + ┌─x─┬────y─┐ + │ 1 │ 2 │ + │ 2 │ ᴺᵁᴸᴸ │ + │ 3 │ 2 │ + │ 3 │ 3 │ + │ 3 │ ᴺᵁᴸᴸ │ + └───┴──────┘ + +运行`SELECT sum(x), y FROM t_null_big GROUP BY y`你将得到如下结果: + + ┌─sum(x)─┬────y─┐ + │ 4 │ 2 │ + │ 3 │ 3 │ + │ 5 │ ᴺᵁᴸᴸ │ + └────────┴──────┘ + +你可以看到GROUP BY为`y=NULL`的聚合了x。 + +如果你在向`GROUP BY`中放入几个key,结果将列出所有的组合可能。就像`NULL`是一个特定的值一样。 + +#### WITH TOTALS 修饰符 {#with-totals-xiu-shi-fu} + +如果你指定了WITH TOTALS修饰符,你将会在结果中得到一个被额外计算出的行。在这一行中将包含所有key的默认值(零或者空值),以及所有聚合函数对所有被选择数据行的聚合结果。 + +该行仅在JSON\*, TabSeparated\*, Pretty\*输出格式中与其他行分开输出。 + +在JSON\*输出格式中,这行将出现在Json的'totals'字段中。在TabSeparated\*输出格式中,这行将位于其他结果之后,同时与其他结果使用空白行分隔。在Pretty\*输出格式中,这行将作为单独的表在所有结果之后输出。 + +当`WITH TOTALS`与HAVING子句同时存在时,它的行为受'totals\_mode'配置的影响。 +默认情况下,`totals_mode = 'before_having'`,这时`WITH TOTALS`将会在HAVING前计算最多不超过`max_rows_to_group_by`行的数据。 + +在`group_by_overflow_mode = 'any'`并指定了`max_rows_to_group_by`的情况下,`WITH TOTALS`的行为受`totals_mode`的影响。 + +`after_having_exclusive` - 在HAVING后进行计算,计算不超过`max_rows_to_group_by`行的数据。 + +`after_having_inclusive` - 在HAVING后进行计算,计算不少于`max_rows_to_group_by`行的数据。 + +`after_having_auto` - 在HAVING后进行计算,采用统计通过HAVING的行数,在超过不超过'max\_rows\_to\_group\_by'指定值(默认为50%)的情况下,包含所有行的结果。否则排除这些结果。 + +`totals_auto_threshold` - 默认 0.5,是`after_having_auto`的参数。 + +如果`group_by_overflow_mode != 'any'`并没有指定`max_rows_to_group_by`情况下,所有的模式都与`after_having`相同。 + +你可以在子查询,包含子查询的JOIN子句中使用WITH TOTALS(在这种情况下,它们各自的总值会被组合在一起)。 + +#### GROUP BY 使用外部存储设备 {#select-group-by-in-external-memory} + +你可以在GROUP BY中允许将临时数据转存到磁盘上,以限制对内存的使用。 +`max_bytes_before_external_group_by`这个配置确定了在GROUP BY中启动将临时数据转存到磁盘上的内存阈值。如果你将它设置为0(这是默认值),这项功能将被禁用。 + +当使用`max_bytes_before_external_group_by`时,我们建议将max\_memory\_usage设置为它的两倍。这是因为一个聚合需要两个阶段来完成:(1)读取数据并形成中间数据 (2)合并中间数据。临时数据的转存只会发生在第一个阶段。如果没有发生临时文件的转存,那么阶段二将最多消耗与1阶段相同的内存大小。 + +例如:如果将`max_memory_usage`设置为10000000000并且你想要开启外部聚合,那么你需要将`max_bytes_before_external_group_by`设置为10000000000的同时将`max_memory_usage`设置为20000000000。当外部聚合被触发时(如果刚好只形成了一份临时数据),它的内存使用量将会稍高与`max_bytes_before_external_group_by`。 + +在分布式查询处理中,外部聚合将会在远程的服务器中执行。为了使请求服务器只使用较少的内存,可以设置`distributed_aggregation_memory_efficient`为1。 + +当合并被刷到磁盘的临时数据以及合并远程的服务器返回的结果时,如果在启动`distributed_aggregation_memory_efficient`的情况下,将会消耗1/256 \* 线程数的总内存大小。 + +当启动外部聚合时,如果数据的大小小于`max_bytes_before_external_group_by`设置的值(数据没有被刷到磁盘中),那么数据的聚合速度将会和没有启动外部聚合时一样快。如果有临时数据被刷到了磁盘中,那么这个查询的运行时间将会被延长几倍(大约是3倍)。 + +如果你在GROUP BY后面存在ORDER BY子句,并且ORDER BY后面存在一个极小限制的LIMIT,那么ORDER BY子句将不会使用太多内存。 +否则请不要忘记启动外部排序(`max_bytes_before_external_sort`)。 + +### LIMIT N BY 子句 {#limit-n-by-zi-ju} + +LIMIT N BY子句和LIMIT没有关系, LIMIT N BY COLUMNS 子句可以用来在每一个COLUMNS分组中求得最大的N行数据。我们可以将它们同时用在一个查询中。LIMIT N BY子句中可以包含任意多个分组字段表达式列表。 + +示例: + +``` sql +SELECT + domainWithoutWWW(URL) AS domain, + domainWithoutWWW(REFERRER_URL) AS referrer, + device_type, + count() cnt +FROM hits +GROUP BY domain, referrer, device_type +ORDER BY cnt DESC +LIMIT 5 BY domain, device_type +LIMIT 100 +``` + +查询将会为每个`domain, device_type`的组合选出前5个访问最多的数据,但是结果最多将不超过100行(`LIMIT n BY + LIMIT`)。 + +### HAVING 子句 {#having-zi-ju} + +HAVING子句可以用来过滤GROUP BY之后的数据,类似于WHERE子句。 +WHERE于HAVING不同之处在于WHERE在聚合前(GROUP BY)执行,HAVING在聚合后执行。 +如果不存在聚合,则不能使用HAVING。 + +### ORDER BY 子句 {#select-order-by} + +如果存在ORDER BY 
子句,则该子句中必须存在一个表达式列表,表达式列表中每一个表达式都可以分配一个DESC或ASC(排序的方向)。如果没有指明排序的方向,将假定以ASC的方式进行排序。其中ASC表示按照升序排序,DESC按照降序排序。示例:`ORDER BY Visits DESC, SearchPhrase` + +对于字符串的排序来讲,你可以为其指定一个排序规则,在指定排序规则时,排序总是不会区分大小写。并且如果与ASC或DESC同时出现时,排序规则必须在它们的后面指定。例如:`ORDER BY SearchPhrase COLLATE 'tr'` - 使用土耳其字母表对它进行升序排序,同时排序时不会区分大小写,并按照UTF-8字符集进行编码。 + +我们推荐只在少量的数据集中使用COLLATE,因为COLLATE的效率远低于正常的字节排序。 + +针对排序表达式中相同值的行将以任意的顺序进行输出,这是不确定的(每次都可能不同)。 +如果省略ORDER BY子句,则结果的顺序也是不固定的。 + +`NaN` 和 `NULL` 的排序规则: + +- 当使用`NULLS FIRST`修饰符时,将会先输出`NULL`,然后是`NaN`,最后才是其他值。 +- 当使用`NULLS LAST`修饰符时,将会先输出其他值,然后是`NaN`,最后才是`NULL`。 +- 默认情况下与使用`NULLS LAST`修饰符相同。 + +示例: + +假设存在如下一张表 + + ┌─x─┬────y─┐ + │ 1 │ ᴺᵁᴸᴸ │ + │ 2 │ 2 │ + │ 1 │ nan │ + │ 2 │ 2 │ + │ 3 │ 4 │ + │ 5 │ 6 │ + │ 6 │ nan │ + │ 7 │ ᴺᵁᴸᴸ │ + │ 6 │ 7 │ + │ 8 │ 9 │ + └───┴──────┘ + +运行查询 `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST` 将获得如下结果: + + ┌─x─┬────y─┐ + │ 1 │ ᴺᵁᴸᴸ │ + │ 7 │ ᴺᵁᴸᴸ │ + │ 1 │ nan │ + │ 6 │ nan │ + │ 2 │ 2 │ + │ 2 │ 2 │ + │ 3 │ 4 │ + │ 5 │ 6 │ + │ 6 │ 7 │ + │ 8 │ 9 │ + └───┴──────┘ + +当使用浮点类型的数值进行排序时,不管排序的顺序如何,NaNs总是出现在所有值的后面。换句话说,当你使用升序排列一个浮点数值列时,NaNs好像比所有值都要大。反之,当你使用降序排列一个浮点数值列时,NaNs好像比所有值都小。 + +如果你在ORDER BY子句后面存在LIMIT并给定了较小的数值,则将会使用较少的内存。否则,内存的使用量将与需要排序的数据成正比。对于分布式查询,如果省略了GROUP BY,则在远程服务器上执行部分排序,最后在请求服务器上合并排序结果。这意味这对于分布式查询而言,要排序的数据量可以大于单台服务器的内存。 + +如果没有足够的内存,可以使用外部排序(在磁盘中创建一些临时文件)。可以使用`max_bytes_before_external_sort`来设置外部排序,如果你讲它设置为0(默认),则表示禁用外部排序功能。如果启用该功能。当要排序的数据量达到所指定的字节数时,当前排序的结果会被转存到一个临时文件中去。当全部数据读取完毕后,所有的临时文件将会合并成最终输出结果。这些临时文件将会写到config文件配置的/var/lib/clickhouse/tmp/目录中(默认值,你可以通过修改'tmp\_path'配置调整该目录的位置)。 + +查询运行使用的内存要高于'max\_bytes\_before\_external\_sort',为此,这个配置必须要远远小于'max\_memory\_usage'配置的值。例如,如果你的服务器有128GB的内存去运行一个查询,那么推荐你将'max\_memory\_usage'设置为100GB,'max\_bytes\_before\_external\_sort'设置为80GB。 + +外部排序效率要远低于在内存中排序。 + +### SELECT 子句 {#select-zi-ju} + +在完成上述列出的所有子句后,将对SELECT子句中的表达式进行分析。 +具体来讲,如果在存在聚合函数的情况下,将对聚合函数之前的表达式进行分析。 +聚合函数与聚合函数之前的表达式都将在聚合期间完成计算(GROUP BY)。 +就像他们本身就已经存在结果上一样。 + +### DISTINCT 子句 {#select-distinct} + +如果存在DISTINCT子句,则会对结果中的完全相同的行进行去重。 +在GROUP BY不包含聚合函数,并对全部SELECT部分都包含在GROUP BY中时的作用一样。但该子句还是与GROUP BY子句存在以下几点不同: + +- 可以与GROUP BY配合使用。 +- 当不存在ORDER BY子句并存在LIMIT子句时,查询将在同时满足DISTINCT与LIMIT的情况下立即停止查询。 +- 在处理数据的同时输出结果,并不是等待整个查询全部完成。 + +在SELECT表达式中存在Array类型的列时,不能使用DISTINCT。 + +`DISTINCT`可以与 [NULL](../syntax.md)一起工作,就好像`NULL`仅是一个特殊的值一样,并且`NULL=NULL`。换而言之,在`DISTINCT`的结果中,与`NULL`不同的组合仅能出现一次。 + +### LIMIT 子句 {#limit-zi-ju} + +LIMIT m 用于在查询结果中选择前m行数据。 +LIMIT n, m 用于在查询结果中选择从n行开始的m行数据。 + +'n'与'm'必须是正整数。 + +如果没有指定ORDER BY子句,则结果可能是任意的顺序,并且是不确定的。 + +### UNION ALL 子句 {#union-all-zi-ju} + +UNION ALL子句可以组合任意数量的查询,例如: + +``` sql +SELECT CounterID, 1 AS table, toInt64(count()) AS c + FROM test.hits + GROUP BY CounterID + +UNION ALL + +SELECT CounterID, 2 AS table, sum(Sign) AS c + FROM test.visits + GROUP BY CounterID + HAVING c > 0 +``` + +仅支持UNION ALL,不支持其他UNION规则(UNION DISTINCT)。如果你需要UNION DISTINCT,你可以使用UNION ALL中包含SELECT DISTINCT的子查询的方式。 + +UNION ALL中的查询可以同时运行,它们的结果将被混合到一起。 + +这些查询的结果结果必须相同(列的数量和类型)。列名可以是不同的。在这种情况下,最终结果的列名将从第一个查询中获取。UNION会为查询之间进行类型转换。例如,如果组合的两个查询中包含相同的字段,并且是类型兼容的`Nullable`和non-`Nullable`,则结果将会将该字段转换为`Nullable`类型的字段。 + +作为UNION ALL查询的部分不能包含在括号内。ORDER BY与LIMIT子句应该被应用在每个查询中,而不是最终的查询中。如果你需要做最终结果转换,你可以将UNION ALL作为一个子查询包含在FROM子句中。 + +### INTO OUTFILE 子句 {#into-outfile-zi-ju} + +`INTO OUTFILE filename` 子句用于将查询结果重定向输出到指定文件中(filename是一个字符串类型的值)。 +与MySQL不同,执行的结果文件将在客户端建立,如果文件已存在,查询将会失败。 +此命令可以工作在命令行客户端与clickhouse-local中(通过HTTP借口发送将会失败)。 + +默认的输出格式是TabSeparated(与命令行客户端的批处理模式相同)。 + +### FORMAT 子句 {#format-zi-ju} + +‘FORMAT format’ 子句用于指定返回数据的格式。 
+你可以使用它方便的转换或创建数据的转储。 +更多信息,参见«输入输出格式»部分。 +如果不存在FORMAT子句,则使用默认的格式,这将取决与DB的配置以及所使用的客户端。对于批量模式的HTTP客户端和命令行客户端而言,默认的格式是TabSeparated。对于交互模式下的命令行客户端,默认的格式是PrettyCompact(它有更加美观的格式)。 + +当使用命令行客户端时,数据以内部高效的格式在服务器和客户端之间进行传递。客户端将单独的解析FORMAT子句,以帮助数据格式的转换(这将减轻网络和服务器的负载)。 + +### IN 运算符 {#select-in-operators} + +对于`IN`、`NOT IN`、`GLOBAL IN`、`GLOBAL NOT IN`操作符被分别实现,因为它们的功能非常丰富。 + +运算符的左侧是单列或列的元组。 + +示例: + +``` sql +SELECT UserID IN (123, 456) FROM ... +SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ... +``` + +如果左侧是单个列并且是一个索引,并且右侧是一组常量时,系统将使用索引来处理查询。 + +不要在列表中列出太多的值(百万)。如果数据集很大,将它们放入临时表中(可以参考«»), 然后使用子查询。 +Don't list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section «External data for query processing»), then use a subquery. + +右侧可以是一个由常量表达式组成的元组列表(像上面的例子一样),或者是一个数据库中的表的名称,或是一个包含在括号中的子查询。 + +如果右侧是一个表的名字(例如,`UserID IN users`),这相当于`UserID IN (SELECT * FROM users)`。在查询与外部数据表组合使用时可以使用该方法。例如,查询与包含user IDS的'users'临时表一起被发送的同时需要对结果进行过滤时。 + +如果操作符的右侧是一个Set引擎的表时(数据总是在内存中准备好),则不会每次都为查询创建新的数据集。 + +子查询可以指定一个列以上的元组来进行过滤。 +示例: + +``` sql +SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ... +``` + +IN操作符的左右两侧应具有相同的类型。 + +IN操作符的子查询中可以出现任意子句,包含聚合函数与lambda函数。 +示例: + +``` sql +SELECT + EventDate, + avg(UserID IN + ( + SELECT UserID + FROM test.hits + WHERE EventDate = toDate('2014-03-17') + )) AS ratio +FROM test.hits +GROUP BY EventDate +ORDER BY EventDate ASC +``` + + ┌──EventDate─┬────ratio─┐ + │ 2014-03-17 │ 1 │ + │ 2014-03-18 │ 0.807696 │ + │ 2014-03-19 │ 0.755406 │ + │ 2014-03-20 │ 0.723218 │ + │ 2014-03-21 │ 0.697021 │ + │ 2014-03-22 │ 0.647851 │ + │ 2014-03-23 │ 0.648416 │ + └────────────┴──────────┘ + +为3月17日之后的每一天计算与3月17日访问该网站的用户浏览网页的百分比。 +IN子句中的子查询仅在单个服务器上运行一次。不能够是相关子查询。 + +#### NULL 处理 {#null-chu-li-1} + +在处理中,IN操作符总是假定 [NULL](../syntax.md) 值的操作结果总是等于`0`,而不管`NULL`位于左侧还是右侧。`NULL`值不应该包含在任何数据集中,它们彼此不能够对应,并且不能够比较。 + +下面的示例中有一个`t_null`表: + + ┌─x─┬────y─┐ + │ 1 │ ᴺᵁᴸᴸ │ + │ 2 │ 3 │ + └───┴──────┘ + +运行查询`SELECT x FROM t_null WHERE y IN (NULL,3)`将得到如下结果: + + ┌─x─┐ + │ 2 │ + └───┘ + +你可以看到在查询结果中不存在`y = NULL`的结果。这是因为ClickHouse无法确定`NULL`是否包含在`(NULL,3)`数据集中,对于这次比较操作返回了`0`,并且在`SELECT`的最终输出中排除了这行。 + + SELECT y IN (NULL, 3) + FROM t_null + + ┌─in(y, tuple(NULL, 3))─┐ + │ 0 │ + │ 1 │ + └───────────────────────┘ + +#### 分布式子查询 {#select-distributed-subqueries} + +对于带有子查询的(类似与JOINs)IN中,有两种选择:普通的`IN`/`JOIN`与`GLOBAL IN` / `GLOBAL JOIN`。它们对于分布式查询的处理运行方式是不同的。 + +!!! 
注意 "注意" + 请记住,下面描述的算法可能因为根据 [设置](../../operations/settings/settings.md) 配置的不同而不同。 + +当使用普通的IN时,查询总是被发送到远程的服务器,并且在每个服务器中运行«IN»或«JOIN»子句中的子查询。 + +当使用`GLOBAL IN` / `GLOBAL JOIN`时,首先会为`GLOBAL IN` / `GLOBAL JOIN`运行所有子查询,并将结果收集到临时表中,并将临时表发送到每个远程服务器,并使用该临时表运行查询。 + +对于非分布式查询,请使用普通的`IN` / `JOIN`。 + +在分布式查询中使用`IN` / `JOIN`子句中使用子查询需要小心。 + +让我们来看一些例子。假设集群中的每个服务器都存在一个正常表**local\_table**。与一个分布式表**distributed\_table**。 + +对于所有查询**distributed\_table**的查询,查询会被发送到所有的远程服务器并使用**local\_table**表运行查询。 + +例如,查询 + +``` sql +SELECT uniq(UserID) FROM distributed_table +``` + +将发送如下查询到所有远程服务器 + +``` sql +SELECT uniq(UserID) FROM local_table +``` + +这时将并行的执行它们,直到达到可以组合数据的中间结果状态。然后中间结果将返回到请求服务器并在请求服务器上进行合并,最终将结果发送给客户端。 + +现在让我运行一个带有IN的查询: + +``` sql +SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) +``` + +- 计算两个站点的用户交集。 + +此查询将被发送给所有的远程服务器 + +``` sql +SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) +``` + +换句话说,IN子句中的数据集将被在每台服务器上被独立的收集,仅与每台服务器上的本地存储上的数据计算交集。 + +如果您已经将数据分散到了集群的每台服务器上,并且单个UserID的数据完全分布在单个服务器上,那么这将是正确且最佳的查询方式。在这种情况下,所有需要的数据都可以在每台服务器的本地进行获取。否则,结果将是不准确的。我们将这种查询称为«local IN»。 + +为了修正这种在数据随机分布的集群中的工作,你可以在子查询中使用**distributed\_table**。查询将更改为这样: + +``` sql +SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) +``` + +此查询将被发送给所有的远程服务器 + +``` sql +SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) +``` + +子查询将在每个远程服务器上执行。因为子查询使用分布式表,所有每个远程服务器上的子查询将查询再次发送给所有的远程服务器 + +``` sql +SELECT UserID FROM local_table WHERE CounterID = 34 +``` + +例如,如果你拥有100台服务器的集群,执行整个查询将需要10,000次请求,这通常被认为是不可接受的。 + +在这种情况下,你应该使用GLOBAL IN来替代IN。让我们看一下它是如何工作的。 + +``` sql +SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) +``` + +在请求服务器上运行子查询 + +``` sql +SELECT UserID FROM distributed_table WHERE CounterID = 34 +``` + +将结果放入内存中的临时表中。然后将请求发送到每一台远程服务器 + +``` sql +SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL IN _data1 +``` + +临时表`_data1`也会随着查询一起发送到每一台远程服务器(临时表的名称由具体实现定义)。 + +这比使用普通的IN更加理想,但是,请注意以下几点: + +1. 创建临时表时,数据不是唯一的,为了减少通过网络传输的数据量,请在子查询中使用DISTINCT(你不需要在普通的IN中这么做) +2. 临时表将发送到所有远程服务器。其中传输不考虑网络的拓扑结构。例如,如果你有10个远程服务器存在与请求服务器非常远的数据中心中,则数据将通过通道发送数据到远程数据中心10次。使用GLOBAL IN时应避免大数据集。 +3. 当向远程服务器发送数据时,网络带宽的限制是不可配置的,这可能会网络的负载造成压力。 +4. 尝试将数据跨服务器分布,这样你将不需要使用GLOBAL IN。 +5. 如果你需要经常使用GLOBAL IN,请规划你的ClickHouse集群位置,以便副本之间不存在跨数据中心,并且它们之间具有快速的网络交换能力,以便查询可以完全在一个数据中心内完成。 + +另外,在`GLOBAL IN`子句中使用本地表也是有用的,比如,本地表仅在请求服务器上可用,并且您希望在远程服务器上使用来自本地表的数据。 + +### 极端值 {#extreme-values} + +除了结果外,你还可以获得结果列的最大值与最小值,可以将**极端**配置设置成1来做到这一点。最大值最小值的计算是针对于数字类型,日期类型进行计算的,对于其他列,将会输出默认值。 + +额外计算的两行结果 - 最大值与最小值,这两行额外的结果仅在JSON\*, TabSeparated\*, and Pretty\* 格式与其他行分开的输出方式输出,不支持其他输出格式。 + +在JSON\*格式中,Extreme值在单独的'extremes'字段中。在TabSeparated\*格式中,在其他结果与'totals'之后输出,并使用空行与其分隔。在Pretty\* 格式中,将在其他结果与'totals'后以单独的表格输出。 + +如果在计算Extreme值的同时包含LIMIT。extremes的计算结果将包含offset跳过的行。在流式的请求中,它可能还包含多余LIMIT的少量行的值。 + +### 注意事项 {#zhu-yi-shi-xiang} + +不同于MySQL, `GROUP BY`与`ORDER BY`子句不支持使用列的位置信息作为参数,但这实际上是符合SQL标准的。 +例如,`GROUP BY 1, 2`将被解释为按照常量进行分组(即,所有的行将会被聚合成一行)。 + +可以在查询的任何部分使用AS。 + +可以在查询的任何部分添加星号,而不仅仅是表达式。在分析查询时,星号被替换为所有的列(不包含`MATERIALIZED`与`ALIAS`的列)。 +只有少数情况下使用星号是合理的: + +- 创建表转储时。 +- 对于仅包含几个列的表,如系统表. 
+- 获取表中的列信息。在这种情况下应该使用`LIMIT 1`。但是,更好的办法是使用`DESC TABLE`。
+- 当使用`PREWHERE`在少数的几个列上做强过滤时。
+- 在子查询中(因为外部查询不需要的列被排除在子查询之外)。
+
+在所有的其他情况下,我们不建议使用星号,因为它是列式数据库的缺点而不是优点。
+
+[来源文章](https://clickhouse.tech/docs/zh/query_language/select/)
diff --git a/docs/zh/sql_reference/statements/show.md b/docs/zh/sql_reference/statements/show.md
new file mode 100644
index 00000000000..f60452f97a3
--- /dev/null
+++ b/docs/zh/sql_reference/statements/show.md
@@ -0,0 +1,105 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 38
+toc_title: SHOW
+---
+
+# 显示查询 {#show-queries}
+
+## SHOW CREATE TABLE {#show-create-table}
+
+``` sql
+SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format]
+```
+
+返回一个 `String` 类型的 'statement' 列,其中只包含一个值:用于创建指定对象的 `CREATE` 查询。
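+
+例如(`system.numbers` 是系统自带的表;`TSVRaw` 格式可避免输出被转义,便于阅读):
+
+``` sql
+SHOW CREATE TABLE system.numbers FORMAT TSVRaw
+```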
+
+## SHOW DATABASES {#show-databases}
+
+``` sql
+SHOW DATABASES [INTO OUTFILE filename] [FORMAT format]
+```
+
+打印所有数据库的列表。
+这个查询等价于 `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`。
+
+## SHOW PROCESSLIST {#show-processlist}
+
+``` sql
+SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format]
+```
+
+输出 [系统。流程](../../operations/system_tables.md#system_tables-processes) 表的内容,其中包含当前正在处理的查询列表,但 `SHOW PROCESSLIST` 查询本身除外。
+
+`SELECT * FROM system.processes` 查询会返回有关所有当前查询的数据。
+
+提示(在控制台中执行):
+
+``` bash
+$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'"
+```
+
+## SHOW TABLES {#show-tables}
+
+显示表的列表。
+
+``` sql
+SHOW [TEMPORARY] TABLES [{FROM | IN} <db>] [LIKE '<pattern>' | WHERE expr] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+如果未指定 `FROM` 子句,则查询返回当前数据库中的表列表。
+
+通过以下方式查询,可以得到与 `SHOW TABLES` 相同的结果:
+
+``` sql
+SELECT name FROM system.tables WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+**示例**
+
+下面的查询从 `system` 数据库的表列表中选择名称包含 `co` 的前两行。
+
+``` sql
+SHOW TABLES FROM system LIKE '%co%' LIMIT 2
+```
+
+``` text
+┌─name───────────────────────────┐
+│ aggregate_function_combinators │
+│ collations                     │
+└────────────────────────────────┘
+```
+
+## SHOW DICTIONARIES {#show-dictionaries}
+
+显示 [外部字典](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) 的列表。
+
+``` sql
+SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+如果未指定 `FROM` 子句,则查询返回当前数据库中的字典列表。
+
+通过以下方式查询,可以得到与 `SHOW DICTIONARIES` 相同的结果:
+
+``` sql
+SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+**示例**
+
+下面的查询从 `db` 数据库的字典列表中选择名称包含 `reg` 的前两行。
+
+``` sql
+SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
+```
+
+``` text
+┌─name─────────┐
+│ regions      │
+│ region_names │
+└──────────────┘
+```
+
+[原始文章](https://clickhouse.tech/docs/en/query_language/show/)
diff --git a/docs/zh/sql_reference/statements/system.md b/docs/zh/sql_reference/statements/system.md
new file mode 100644
index 00000000000..06d4f6dc1cb
--- /dev/null
+++ b/docs/zh/sql_reference/statements/system.md
@@ -0,0 +1,113 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 37
+toc_title: SYSTEM
+---
+
+# 系统查询 {#query-language-system}
+
+- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
+- [RELOAD DICTIONARY](#query_language-system-reload-dictionary)
+- [DROP DNS CACHE](#query_language-system-drop-dns-cache)
+- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
+- [FLUSH LOGS](#query_language-system-flush_logs)
+- [RELOAD CONFIG](#query_language-system-reload-config)
+- [SHUTDOWN](#query_language-system-shutdown)
+- [KILL](#query_language-system-kill)
+- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends)
+- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed)
+- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends)
+- [STOP MERGES](#query_language-system-stop-merges)
+- [START MERGES](#query_language-system-start-merges)
+
+## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries}
+
+重新加载之前已成功加载的所有字典。
+默认情况下,字典是延迟加载的(请参阅 [dictionaries\_lazy\_load](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)):它们不是在启动时自动加载,而是在第一次通过dictGet函数访问,或从ENGINE=Dictionary的表中SELECT时初始化。`SYSTEM RELOAD DICTIONARIES` 查询会重新加载这类(已加载的)字典。
+无论字典更新的结果如何,该查询总是返回 `Ok.`。
+
+## RELOAD DICTIONARY Dictionary\_name {#query_language-system-reload-dictionary}
+
+完全重新加载字典 `dictionary_name`,与字典的状态无关(LOADED/NOT\_LOADED/FAILED)。
+无论更新字典的结果如何,该查询总是返回 `Ok.`。
+字典的状态可以通过查询 `system.dictionaries` 表来检查。
+
+``` sql
+SELECT name, status FROM system.dictionaries;
+```
+
+## DROP DNS CACHE {#query_language-system-drop-dns-cache}
+
+重置ClickHouse的内部DNS缓存。有时(对于旧的ClickHouse版本)在更改基础架构(更改另一台ClickHouse服务器或字典所使用服务器的IP地址)时需要使用此命令。
+
+有关更方便的(自动)缓存管理,请参阅 disable\_internal\_dns\_cache、dns\_cache\_update\_period 参数。
+
+## DROP MARK CACHE {#query_language-system-drop-mark-cache}
+
+重置标记缓存。用于ClickHouse开发和性能测试。
+
+## FLUSH LOGS {#query_language-system-flush_logs}
+
+将日志消息的缓冲区刷写到系统表(例如 system.query\_log)中,这样调试时就不必等待7.5秒。
+
+## RELOAD CONFIG {#query_language-system-reload-config}
+
+重新加载ClickHouse配置。在配置存储于ZooKeeper中时使用。
+
+## SHUTDOWN {#query_language-system-shutdown}
+
+正常关闭ClickHouse(类似 `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`)
+
+## KILL {#query_language-system-kill}
+
+中止ClickHouse进程(类似 `kill -9 {$ pid_clickhouse-server}`)
+
+## 管理分布式表 {#query-language-system-distributed}
+
+ClickHouse可以管理 [分布式](../../engines/table_engines/special/distributed.md) 表。当用户将数据插入到这些表中时,ClickHouse首先创建应发送到集群节点的数据队列,然后异步发送。您可以使用 [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends)、[FLUSH DISTRIBUTED](#query_language-system-flush-distributed) 和 [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) 查询来管理队列的处理。您也可以通过 `insert_distributed_sync` 设置以同步方式插入分布式数据。
+
+### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
+
+将数据插入分布式表时禁用后台数据分发。
+
+``` sql
+SYSTEM STOP DISTRIBUTED SENDS [db.]<distributed_table_name>
+```
+
+### FLUSH DISTRIBUTED {#query_language-system-flush-distributed}
+
+强制ClickHouse将数据同步发送到集群节点。如果任何节点不可用,ClickHouse将抛出异常并停止查询执行。您可以重试该查询直到成功,这将在所有节点恢复在线时发生。
+
+``` sql
+SYSTEM FLUSH DISTRIBUTED [db.]<distributed_table_name>
+```
+
+### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends}
+
+将数据插入分布式表时启用后台数据分发。
+
+``` sql
+SYSTEM START DISTRIBUTED SENDS [db.]<distributed_table_name>
+```
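+
+将这三个查询结合起来的一个使用草图(`dist_hits` 与 `staging_hits` 均为假设的表名):
+
+``` sql
+SYSTEM STOP DISTRIBUTED SENDS default.dist_hits;      -- 暂停后台分发
+INSERT INTO default.dist_hits SELECT * FROM staging_hits;
+SYSTEM FLUSH DISTRIBUTED default.dist_hits;           -- 同步发送累积的数据
+SYSTEM START DISTRIBUTED SENDS default.dist_hits;     -- 恢复后台分发
+```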
+
+### STOP MERGES {#query_language-system-stop-merges}
+
+可以停止MergeTree系列表的后台合并:
+
+``` sql
+SYSTEM STOP MERGES [[db.]merge_tree_family_table_name]
+```
+
+!!! note "注"
+    `DETACH / ATTACH` 表会重新启动该表的后台合并,即使之前已停止所有MergeTree表的合并。
+
+### START MERGES {#query_language-system-start-merges}
+
+可以启动MergeTree系列表的后台合并:
+
+``` sql
+SYSTEM START MERGES [[db.]merge_tree_family_table_name]
+```
+
+[原始文章](https://clickhouse.tech/docs/en/query_language/system/)
diff --git a/docs/zh/sql_reference/syntax.md b/docs/zh/sql_reference/syntax.md
new file mode 100644
index 00000000000..ab9009def47
--- /dev/null
+++ b/docs/zh/sql_reference/syntax.md
@@ -0,0 +1,187 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 31
+toc_title: "\u8BED\u6CD5"
+---
+
+# 语法 {#syntax}
+
+系统中有两种类型的解析器:完整SQL解析器(递归下降解析器)和数据格式解析器(快速流式解析器)。
+除了 `INSERT` 查询之外,所有情况都只使用完整的SQL解析器。
+`INSERT` 查询会同时使用两种解析器:
+
+``` sql
+INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
+```
+
+其中 `INSERT INTO t VALUES` 片段由完整的解析器解析,而数据 `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` 由快速流式解析器解析。您也可以通过 [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) 设置为数据启用完整解析器。当 `input_format_values_interpret_expressions = 1` 时,ClickHouse首先尝试使用快速流式解析器解析值;如果失败,则尝试对数据使用完整解析器,将其视为SQL [表达式](#syntax-expressions)。
+
+数据可以是任何格式。当收到查询时,服务器在RAM中最多计算请求的 [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) 字节(默认为1MB),其余部分以流式方式解析。
+这意味着系统不会像MySQL那样在大型 `INSERT` 查询上出现问题。
+
+在 `INSERT` 查询中使用 `Values` 格式时,看起来数据与 `SELECT` 查询中的表达式采用相同的解析方式,但事实并非如此:`Values` 格式要受限得多。
+
+接下来我们将介绍完整的解析器。有关格式解析器的详细信息,请参阅 [格式](../interfaces/formats.md) 一节。
+
+## 空格 {#spaces}
+
+语法结构之间(包括查询的开始和结束)可以有任意数量的空白符号。空白符号包括空格、制表符、换行符、CR和换页符。
+
+## 注释 {#comments}
+
+支持SQL风格和C风格的注释。
+SQL风格的注释:从 `--` 到行尾。`--` 后的空格可以省略。
+C风格的注释:从 `/*` 到 `*/`,可以是多行。这种注释也不需要空格。
+
+## 关键字 {#syntax-keywords}
+
+在下列情况下,关键字不区分大小写:
+
+- SQL标准。例如,`SELECT`、`select` 和 `SeLeCt` 都是有效的。
+- 在一些流行的DBMS(MySQL或Postgres)中的实现。例如,`DateTime` 与 `datetime` 相同。
+
+数据类型名称是否区分大小写可以在 `system.data_type_families` 表中查看。
+
+与标准SQL相比,所有其他关键字(包括函数名称)都是 **区分大小写** 的。
+
+关键字不是保留字(它们只是在相应的上下文中被解析为关键字)。如果您使用与关键字相同的 [标识符](#syntax-identifiers),请将其括在引号中。例如,如果表 `table_name` 具有名为 `"FROM"` 的列,则查询 `SELECT "FROM" FROM table_name` 是有效的。
+
+## 标识符 {#syntax-identifiers}
+
+标识符是:
+
+- 集群、数据库、表、分区和列的名称。
+- 函数。
+- 数据类型。
+- [表达式别名](#syntax-expression_aliases)。
+
+标识符可以带引号,也可以不带引号,建议使用不带引号的标识符。
+
+不带引号的标识符必须与正则表达式 `^[a-zA-Z_][0-9a-zA-Z_]*$` 匹配,并且不能与 [关键字](#syntax-keywords) 相同。例:`x, _1, X_y__Z123_.`
+
+如果要使用与关键字相同的标识符,或者要在标识符中使用其他符号,请使用双引号或反引号将其引起来,例如 `"id"`, `` `id` ``。
+
+## 字面量 {#literals}
+
+字面量分为:数字、字符串、复合字面量和 `NULL` 字面量。
+
+### 数字 {#numeric}
+
+数字字面量按如下方式尝试解析:
+
+- 首先作为64位有符号数,使用 [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) 函数。
+- 如果不成功,则作为64位无符号数,使用 [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) 函数。
+- 如果不成功,则作为浮点数,使用 [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) 函数。
+- 否则,将返回错误。
+
+相应的值将具有该值适合的最小类型。
+例如,1被解析为 `UInt8`,而256被解析为 `UInt16`。有关详细信息,请参阅 [数据类型](../sql_reference/data_types/index.md)。
+
+例: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`.
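+
+可以用 `toTypeName` 函数直观地查看字面量被推断出的类型:
+
+``` sql
+SELECT toTypeName(1), toTypeName(256), toTypeName(0.1)
+```
+
+结果分别为 `UInt8`、`UInt16` 和 `Float64`。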
+
+### 字符串 {#syntax-string-literal}
+
+仅支持单引号中的字符串字面量。被包裹的字符可以用反斜杠转义。以下转义序列具有相应的特殊值:`\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`。在所有其他情况下,形如 `\c`(其中 `c` 是任意字符)的转义序列会被转换为 `c`。这意味着您可以使用序列 `\'` 和 `\\`。该值具有 [字符串](../sql_reference/data_types/string.md) 类型。
+
+在字符串字面量中,最少需要转义的字符集为:`'` 和 `\`。单引号也可以用单引号转义,字面量 `'It\'s'` 和 `'It''s'` 是等价的。
+
+### 复合字面量 {#compound}
+
+支持数组构造:`[1, 2, 3]`,以及元组构造:`(1, 'Hello, world!', 2)`。
+实际上,这些并不是字面量,而是分别带有数组创建运算符和元组创建运算符的表达式。
+数组必须至少包含一个元素,元组必须至少包含两个元素。
+元组在 `SELECT` 查询的 `IN` 子句中有特殊的用途。元组可以作为查询的结果获得,但不能保存到数据库中([Memory](../engines/table_engines/special/memory.md) 表除外)。
+
+### NULL {#null-literal}
+
+表示该值缺失。
+
+为了在表字段中存储 `NULL`,该字段必须是 [可为空](../sql_reference/data_types/nullable.md) 类型。
+
+根据数据格式(输入或输出),`NULL` 可能有不同的表示。有关详细信息,请参阅 [数据格式](../interfaces/formats.md#formats) 文档。
+
+处理 `NULL` 有许多细微之处。例如,如果比较操作的参数中至少有一个是 `NULL`,则该操作的结果也是 `NULL`;乘法、加法和其他操作也是如此。有关详细信息,请阅读每个操作的文档。
+
+在查询中,您可以使用 [IS NULL](operators.md#operator-is-null) 和 [IS NOT NULL](operators.md) 运算符以及相关函数 `isNull` 和 `isNotNull` 来检查 `NULL`。
+
+## 函数 {#functions}
+
+函数的写法与标识符相同,后面带一个括号中的参数列表(可能为空)。与标准SQL不同,即使参数列表为空,括号也是必需的。示例:`now()`。
+函数分为常规函数和聚合函数(请参阅 "Aggregate functions" 一节)。某些聚合函数可以带两个括号中的参数列表。示例:`quantile (0.9) (x)`。这类聚合函数被称为 "参数化" 函数,第一个列表中的参数被称为 "参数"。不带参数的聚合函数的语法与常规函数相同。
+
+## 运算符 {#operators}
+
+在查询解析过程中,运算符会被转换为相应的函数,同时考虑它们的优先级和结合性。
+例如,表达式 `1 + 2 * 3 + 4` 会被转换为 `plus(plus(1, multiply(2, 3)), 4)`。
+
+## 数据类型和数据库表引擎 {#data_types-and-database-table-engines}
+
+`CREATE` 查询中的数据类型和表引擎的写法与标识符或函数相同。换句话说,它们可能包含也可能不包含括号中的参数列表。有关详细信息,请参阅 "Data types"、"Table engines" 和 "CREATE" 等章节。
+
+## 表达式别名 {#syntax-expression_aliases}
+
+别名是查询中表达式的用户定义名称。
+
+``` sql
+expr AS alias
+```
+
+- `AS` — 用于定义别名的关键字。在 `SELECT` 子句中,可以不使用 `AS` 关键字为表名或列名定义别名。
+
+    For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`.
+
+    In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function.
+
+- `expr` — ClickHouse支持的任意表达式。
+
+    For example, `SELECT column_name * 2 AS double FROM some_table`.
+
+- `alias` — `expr` 的名称。别名应符合 [标识符](#syntax-identifiers) 的语法。
+
+    For example, `SELECT "table t".column_name FROM table_name AS "table t"`.
+
+### 使用注意事项 {#notes-on-usage}
+
+别名对于查询或子查询是全局的,您可以在查询的任何部分为任何表达式定义别名。例如,`SELECT (1 AS n) + 2, n`。
+
+别名在子查询之间是不可见的。例如,在执行查询 `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` 时,ClickHouse会生成异常 `Unknown identifier: num`。
+
+如果为子查询 `SELECT` 子句的结果列定义了别名,这些列在外部查询中是可见的。例如,`SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`。
+
+小心使用与列名或表名相同的别名。让我们看下面的例子:
+
+``` sql
+CREATE TABLE t
+(
+    a Int,
+    b Int
+)
+ENGINE = TinyLog()
+```
+
+``` sql
+SELECT
+    argMax(a, b),
+    sum(b) AS b
+FROM t
+```
+
+``` text
+Received exception from server (version 18.14.17):
+Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query.
+```
+
+在这个例子中,我们声明了带有列 `b` 的表 `t`,然后在查询数据时定义了别名 `sum(b) AS b`。由于别名是全局的,ClickHouse将表达式 `argMax(a, b)` 中的字面量 `b` 替换成了表达式 `sum(b)`,这种替换导致了异常。
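+
+为别名换一个不与列名冲突的名字即可避免该异常,例如:
+
+``` sql
+SELECT
+    argMax(a, b),
+    sum(b) AS total_b
+FROM t
+```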
+
+## 表达式 {#syntax-expressions}
+
+表达式可以是函数、标识符、字面量、运算符的应用、括号中的表达式、子查询或星号,还可以包含别名。
+表达式列表是一个或多个用逗号分隔的表达式。
+函数和运算符又可以以表达式作为参数。
+
+[原始文章](https://clickhouse.tech/docs/en/query_language/syntax/)
diff --git a/docs/zh/sql_reference/table_functions/file.md b/docs/zh/sql_reference/table_functions/file.md
new file mode 100644
index 00000000000..b3c93f7f1fd
--- /dev/null
+++ b/docs/zh/sql_reference/table_functions/file.md
@@ -0,0 +1,121 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 37
+toc_title: "\u6587\u4EF6"
+---
+
+# 文件 {#file}
+
+从文件创建表。此表函数与 [url](url.md) 和 [hdfs](hdfs.md) 类似。
+
+``` sql
+file(path, format, structure)
+```
+
+**输入参数**
+
+- `path` — 相对于 [user\_files\_path](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-user_files_path) 的文件路径。只读模式下,路径支持以下通配符:`*`、`?`、`{abc,def}` 和 `{N..M}`,其中 `N`、`M` 是数字,`'abc'`、`'def'` 是字符串。
+- `format` — 文件的[格式](../../interfaces/formats.md#formats)。
+- `structure` — 表结构,格式为 `'column1_name column1_type, column2_name column2_type, ...'`。
+
+**返回值**
+
+具有指定结构的表,用于读取或写入指定文件中的数据。
+
+**示例**
+
+设置 `user_files_path` 以及文件 `test.csv` 的内容:
+
+``` bash
+$ grep user_files_path /etc/clickhouse-server/config.xml
+    /var/lib/clickhouse/user_files/
+
+$ cat /var/lib/clickhouse/user_files/test.csv
+    1,2,3
+    3,2,1
+    78,43,45
+```
+
+从 `test.csv` 创建表并读取其前两行:
+
+``` sql
+SELECT *
+FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
+LIMIT 2
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+``` sql
+-- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file
+SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10
+```
+
+**路径中的通配符**
+
+多个路径组成部分都可以使用通配符。被处理的文件必须存在,并且匹配整个路径模式(而不仅是后缀或前缀)。
+
+- `*` — 匹配除 `/` 之外任意数量的任意字符,包括空字符串。
+- `?` — 匹配任意单个字符。
+- `{some_string,another_string,yet_another_one}` — 匹配字符串 `'some_string'`、`'another_string'`、`'yet_another_one'` 之一。
+- `{N..M}` — 匹配从N到M(含两端)范围内的任意数字。
+
+带 `{}` 的构造与[远程表函数](../../sql_reference/table_functions/remote.md)类似。
+
+**示例**
+
+1. 假设我们有几个具有以下相对路径的文件:
+
+- ‘some\_dir/some\_file\_1’
+- ‘some\_dir/some\_file\_2’
+- ‘some\_dir/some\_file\_3’
+- ‘another\_dir/some\_file\_1’
+- ‘another\_dir/some\_file\_2’
+- ‘another\_dir/some\_file\_3’
+
+2. 查询这些文件中的行数:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+```
+
+3. 查询这两个目录下所有文件中的行数:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+```
+
+!!! warning "警告"
+    如果文件列表中包含带前导零的数字范围,请对每个数字分别使用大括号构造,或者使用 `?`。
+
+**示例**
+
+查询名为 `file000`、`file001`、…、`file999` 的文件中的总行数:
+
+``` sql
+SELECT count(*)
+FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+```
+
+## 虚拟列 {#virtual-columns}
+
+- `_path` — 文件路径。
+- `_file` — 文件名。
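+
+下面是一个使用虚拟列的示意查询(假设沿用上文通配符示例中的TSV文件):
+
+``` sql
+-- 按文件统计行数,_file 为文件名虚拟列
+SELECT _file, count() AS rows
+FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+GROUP BY _file
+```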
+ +**另请参阅** + +- [虚拟列](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/file/) diff --git a/docs/zh/sql_reference/table_functions/generate.md b/docs/zh/sql_reference/table_functions/generate.md new file mode 100644 index 00000000000..84c711711d5 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/generate.md @@ -0,0 +1,45 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 47 +toc_title: generateRandom +--- + +# generateRandom {#generaterandom} + +使用给定的模式生成随机数据。 +允许用数据填充测试表。 +支持可以存储在表中的所有数据类型,除了 `LowCardinality` 和 `AggregateFunction`. + +``` sql +generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]); +``` + +**参数** + +- `name` — Name of corresponding column. +- `TypeName` — Type of corresponding column. +- `limit` — Number of rows to generate. +- `max_array_length` — Maximum array length for all generated arrays. Defaults to `10`. +- `max_string_length` — Maximum string length for all generated strings. Defaults to `10`. +- `random_seed` — Specify random seed manually to produce stable results. If NULL — seed is randomly generated. + +**返回值** + +具有请求架构的表对象。 + +## 用法示例 {#usage-example} + +``` sql +SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2); +``` + +``` text +┌─a────────┬────────────d─┬─c──────────────────────────────────────────────────────────────────┐ +│ [77] │ -124167.6723 │ ('2061-04-17 21:59:44.573','3f72f405-ec3e-13c8-44ca-66ef335f7835') │ +│ [32,110] │ -141397.7312 │ ('1979-02-09 03:43:48.526','982486d1-5a5d-a308-e525-7bd8b80ffa73') │ +│ [68] │ -67417.0770 │ ('2080-03-12 14:17:31.269','110425e5-413f-10a6-05ba-fa6b3e929f15') │ +└──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘ +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) diff --git a/docs/zh/sql_reference/table_functions/hdfs.md b/docs/zh/sql_reference/table_functions/hdfs.md new file mode 100644 index 00000000000..2cf79c31c83 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/hdfs.md @@ -0,0 +1,104 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 45 +toc_title: hdfs +--- + +# hdfs {#hdfs} + +从HDFS中的文件创建表。 此表函数类似于 [url](url.md) 和 [文件](file.md) 一些的。 + +``` sql +hdfs(URI, format, structure) +``` + +**输入参数** + +- `URI` — The relative URI to the file in HDFS. Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` 和 `{N..M}` 哪里 `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [格式](../../interfaces/formats.md#formats) 的文件。 +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. + +**返回值** + +具有指定结构的表,用于读取或写入指定文件中的数据。 + +**示例** + +表从 `hdfs://hdfs1:9000/test` 并从中选择前两行: + +``` sql +SELECT * +FROM hdfs('hdfs://hdfs1:9000/test', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') +LIMIT 2 +``` + +``` text +┌─column1─┬─column2─┬─column3─┐ +│ 1 │ 2 │ 3 │ +│ 3 │ 2 │ 1 │ +└─────────┴─────────┴─────────┘ +``` + +**路径中的水珠** + +多个路径组件可以具有globs。 对于正在处理的文件应该存在并匹配到整个路径模式(不仅后缀或前缀)。 + +- `*` — Substitutes any number of any characters except `/` 包括空字符串。 +- `?` — Substitutes any single character. 
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Substitutes any number in range from N to M including both borders. + +建筑与 `{}` 类似于 [远程表功能](../../sql_reference/table_functions/remote.md)). + +**示例** + +1. 假设我们在HDFS上有几个具有以下Uri的文件: + +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’ +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’ +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’ + +1. 查询这些文件中的行数: + + + +``` sql +SELECT count(*) +FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') +``` + +1. 查询这两个目录的所有文件中的行数: + + + +``` sql +SELECT count(*) +FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32') +``` + +!!! warning "警告" + 如果您的文件列表包含带前导零的数字范围,请单独使用带大括号的构造或使用 `?`. + +**示例** + +从名为 `file000`, `file001`, … , `file999`: + +``` sql +SELECT count(*) +FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') +``` + +## 虚拟列 {#virtual-columns} + +- `_path` — Path to the file. +- `_file` — Name of the file. + +**另请参阅** + +- [虚拟列](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) diff --git a/docs/zh/sql_reference/table_functions/index.md b/docs/zh/sql_reference/table_functions/index.md new file mode 100644 index 00000000000..38ef9bf1f4b --- /dev/null +++ b/docs/zh/sql_reference/table_functions/index.md @@ -0,0 +1,38 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u8868\u51FD\u6570" +toc_priority: 34 +toc_title: "\u5BFC\u8A00" +--- + +# 表函数 {#table-functions} + +表函数是构造表的方法。 + +您可以使用表函数: + +- [FROM](../statements/select.md#select-from) 《公约》条款 `SELECT` 查询。 + + The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. + +- [创建表为\](../statements/create.md#create-table-query) 查询。 + + It's one of the methods of creating a table. + +!!! 
warning "警告" + 你不能使用表函数,如果 [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) 设置被禁用。 + +| 功能 | 产品描述 | +|--------------------|--------------------------------------------------------------------------------------------------------| +| [文件](file.md) | 创建一个 [文件](../../engines/table_engines/special/file.md)-发动机表。 | +| [合并](merge.md) | 创建一个 [合并](../../engines/table_engines/special/merge.md)-发动机表。 | +| [数字](numbers.md) | 创建一个包含整数填充的单列的表。 | +| [远程](remote.md) | 允许您访问远程服务器,而无需创建 [分布](../../engines/table_engines/special/distributed.md)-发动机表。 | +| [url](url.md) | 创建一个 [Url](../../engines/table_engines/special/url.md)-发动机表。 | +| [mysql](mysql.md) | 创建一个 [MySQL](../../engines/table_engines/integrations/mysql.md)-发动机表。 | +| [jdbc](jdbc.md) | 创建一个 [JDBC](../../engines/table_engines/integrations/jdbc.md)-发动机表。 | +| [odbc](odbc.md) | 创建一个 [ODBC](../../engines/table_engines/integrations/odbc.md)-发动机表。 | +| [hdfs](hdfs.md) | 创建一个 [HDFS](../../engines/table_engines/integrations/hdfs.md)-发动机表。 | + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/zh/sql_reference/table_functions/input.md b/docs/zh/sql_reference/table_functions/input.md new file mode 100644 index 00000000000..72f71576729 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/input.md @@ -0,0 +1,47 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 46 +toc_title: "\u8F93\u5165" +--- + +# 输入 {#input} + +`input(structure)` -表功能,允许有效地转换和插入数据发送到 +服务器与给定结构的表与另一种结构。 + +`structure` -以下格式发送到服务器的数据结构 `'column1_name column1_type, column2_name column2_type, ...'`. +例如, `'id UInt32, name String'`. + +此功能只能用于 `INSERT SELECT` 查询,只有一次,但其他行为像普通表函数 +(例如,它可以用于子查询等。). + +数据可以以任何方式像普通发送 `INSERT` 查询并传递任何可用 [格式](../../interfaces/formats.md#formats) +必须在查询结束时指定(不像普通 `INSERT SELECT`). + +这个功能的主要特点是,当服务器从客户端接收数据时,它同时将其转换 +根据表达式中的列表 `SELECT` 子句并插入到目标表中。 临时表 +不创建所有传输的数据。 + +**例** + +- 让 `test` 表具有以下结构 `(a String, b String)` + 和数据 `data.csv` 具有不同的结构 `(col1 String, col2 Date, col3 Int32)`. 
查询插入 + 从数据 `data.csv` 进 `test` 同时转换的表如下所示: + + + +``` bash +$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV"; +``` + +- 如果 `data.csv` 包含相同结构的数据 `test_structure` 作为表 `test` 那么这两个查询是相等的: + + + +``` bash +$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV" +$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV" +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/input/) diff --git a/docs/zh/sql_reference/table_functions/jdbc.md b/docs/zh/sql_reference/table_functions/jdbc.md new file mode 100644 index 00000000000..e2268b42e28 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/jdbc.md @@ -0,0 +1,29 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 43 +toc_title: jdbc +--- + +# jdbc {#table-function-jdbc} + +`jdbc(jdbc_connection_uri, schema, table)` -返回通过JDBC驱动程序连接的表。 + +此表函数需要单独的 `clickhouse-jdbc-bridge` 程序正在运行。 +它支持可空类型(基于查询的远程表的DDL)。 + +**例** + +``` sql +SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/zh/sql_reference/table_functions/merge.md b/docs/zh/sql_reference/table_functions/merge.md new file mode 100644 index 00000000000..7304c447b1f --- /dev/null +++ b/docs/zh/sql_reference/table_functions/merge.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 38 +toc_title: "\u5408\u5E76" +--- + +# 合并 {#merge} + +`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”. + +表结构取自与正则表达式匹配的第一个表。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) diff --git a/docs/zh/sql_reference/table_functions/mysql.md b/docs/zh/sql_reference/table_functions/mysql.md new file mode 100644 index 00000000000..3cdf3047aac --- /dev/null +++ b/docs/zh/sql_reference/table_functions/mysql.md @@ -0,0 +1,86 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 42 +toc_title: mysql +--- + +# mysql {#mysql} + +允许 `SELECT` 要对存储在远程MySQL服务器上的数据执行的查询。 + +``` sql +mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); +``` + +**参数** + +- `host:port` — MySQL server address. + +- `database` — Remote database name. + +- `table` — Remote table name. + +- `user` — MySQL user. + +- `password` — User password. + +- `replace_query` — Flag that converts `INSERT INTO` 查询到 `REPLACE INTO`. 如果 `replace_query=1`,查询被替换。 + +- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` 表达式被添加到 `INSERT` 查询。 + + Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. + + To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. 
If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception. + +简单 `WHERE` 条款如 `=, !=, >, >=, <, <=` 当前在MySQL服务器上执行。 + +其余的条件和 `LIMIT` 只有在对MySQL的查询完成后,才会在ClickHouse中执行采样约束。 + +**返回值** + +与原始MySQL表具有相同列的table对象。 + +## 用法示例 {#usage-example} + +MySQL中的表: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +从ClickHouse中选择数据: + +``` sql +SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [该 ‘MySQL’ 表引擎](../../engines/table_engines/integrations/mysql.md) +- [使用MySQL作为外部字典的来源](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/) diff --git a/docs/zh/sql_reference/table_functions/numbers.md b/docs/zh/sql_reference/table_functions/numbers.md new file mode 100644 index 00000000000..aaee632d5dc --- /dev/null +++ b/docs/zh/sql_reference/table_functions/numbers.md @@ -0,0 +1,30 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 39 +toc_title: "\u6570\u5B57" +--- + +# 数字 {#numbers} + +`numbers(N)` – Returns a table with the single ‘number’ 包含从0到N-1的整数的列(UInt64)。 +`numbers(N, M)` -返回一个表与单 ‘number’ 包含从N到(N+M-1)的整数的列(UInt64)。 + +类似于 `system.numbers` 表,它可以用于测试和生成连续的值, `numbers(N, M)` 比 `system.numbers`. + +以下查询是等效的: + +``` sql +SELECT * FROM numbers(10); +SELECT * FROM numbers(0, 10); +SELECT * FROM system.numbers LIMIT 10; +``` + +例: + +``` sql +-- Generate a sequence of dates from 2010-01-01 to 2010-12-31 +select toDate('2010-01-01') + number as d FROM numbers(365); +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/) diff --git a/docs/zh/sql_reference/table_functions/odbc.md b/docs/zh/sql_reference/table_functions/odbc.md new file mode 100644 index 00000000000..ad7503fd551 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/odbc.md @@ -0,0 +1,108 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 44 +toc_title: odbc +--- + +# odbc {#table-functions-odbc} + +返回通过连接的表 [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). + +``` sql +odbc(connection_settings, external_database, external_table) +``` + +参数: + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` 文件 +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. + +为了安全地实现ODBC连接,ClickHouse使用单独的程序 `clickhouse-odbc-bridge`. 如果直接从ODBC驱动程序加载 `clickhouse-server`,驱动程序问题可能会导致ClickHouse服务器崩溃。 ClickHouse自动启动 `clickhouse-odbc-bridge` 当它是必需的。 ODBC桥程序是从相同的软件包作为安装 `clickhouse-server`. 
+ +与字段 `NULL` 外部表中的值将转换为基数据类型的默认值。 例如,如果远程MySQL表字段具有 `INT NULL` 键入它将转换为0(ClickHouse的默认值 `Int32` 数据类型)。 + +## 用法示例 {#usage-example} + +**通过ODBC从本地MySQL安装获取数据** + +此示例检查Ubuntu Linux18.04和MySQL服务器5.7。 + +确保安装了unixODBC和MySQL连接器。 + +默认情况下(如果从软件包安装),ClickHouse以用户身份启动 `clickhouse`. 因此,您需要在MySQL服务器中创建和配置此用户。 + +``` bash +$ sudo mysql +``` + +``` sql +mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; +mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; +``` + +然后配置连接 `/etc/odbc.ini`. + +``` bash +$ cat /etc/odbc.ini +[mysqlconn] +DRIVER = /usr/local/lib/libmyodbc5w.so +SERVER = 127.0.0.1 +PORT = 3306 +DATABASE = test +USERNAME = clickhouse +PASSWORD = clickhouse +``` + +您可以使用 `isql` unixodbc安装中的实用程序。 + +``` bash +$ isql -v mysqlconn ++-------------------------+ +| Connected! | +| | +... +``` + +MySQL中的表: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +从ClickHouse中的MySQL表中检索数据: + +``` sql +SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ 0 │ 2 │ 0 │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [ODBC外部字典](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC表引擎](../../engines/table_engines/integrations/odbc.md). + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/zh/sql_reference/table_functions/remote.md b/docs/zh/sql_reference/table_functions/remote.md new file mode 100644 index 00000000000..be6e9138fb4 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/remote.md @@ -0,0 +1,83 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 40 +toc_title: "\u8FDC\u7A0B" +--- + +# 远程,远程安全 {#remote-remotesecure} + +允许您访问远程服务器,而无需创建 `Distributed` 桌子 + +签名: + +``` sql +remote('addresses_expr', db, table[, 'user'[, 'password']]) +remote('addresses_expr', db.table[, 'user'[, 'password']]) +``` + +`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`,或者只是 `host`. 主机可以指定为服务器名称,也可以指定为IPv4或IPv6地址。 IPv6地址在方括号中指定。 端口是远程服务器上的TCP端口。 如果省略端口,它使用 `tcp_port` 从服务器的配置文件(默认情况下,9000)。 + +!!! 
important "重要事项" + IPv6地址需要该端口。 + +例: + +``` text +example01-01-1 +example01-01-1:9000 +localhost +127.0.0.1 +[::]:9000 +[2a02:6b8:0:1111::11]:9000 +``` + +多个地址可以用逗号分隔。 在这种情况下,ClickHouse将使用分布式处理,因此它将将查询发送到所有指定的地址(如具有不同数据的分片)。 + +示例: + +``` text +example01-01-1,example01-02-1 +``` + +表达式的一部分可以用大括号指定。 前面的示例可以写成如下: + +``` text +example01-0{1,2}-1 +``` + +大括号可以包含由两个点(非负整数)分隔的数字范围。 在这种情况下,范围将扩展为生成分片地址的一组值。 如果第一个数字以零开头,则使用相同的零对齐形成值。 前面的示例可以写成如下: + +``` text +example01-{01..02}-1 +``` + +如果您有多对大括号,它会生成相应集合的直接乘积。 + +大括号中的地址和部分地址可以用管道符号(\|)分隔。 在这种情况下,相应的地址集被解释为副本,并且查询将被发送到第一个正常副本。 但是,副本将按照当前设置的顺序进行迭代 [load\_balancing](../../operations/settings/settings.md) 设置。 + +示例: + +``` text +example01-{01..02}-{1|2} +``` + +此示例指定两个分片,每个分片都有两个副本。 + +生成的地址数由常量限制。 现在这是1000个地址。 + +使用 `remote` 表函数比创建一个不太优化 `Distributed` 表,因为在这种情况下,服务器连接被重新建立为每个请求。 此外,如果设置了主机名,则会解析这些名称,并且在使用各种副本时不会计算错误。 在处理大量查询时,始终创建 `Distributed` 表的时间提前,不要使用 `remote` 表功能。 + +该 `remote` 表函数可以在以下情况下是有用的: + +- 访问特定服务器进行数据比较、调试和测试。 +- 查询之间的各种ClickHouse群集用于研究目的。 +- 手动发出的罕见分布式请求。 +- 每次重新定义服务器集的分布式请求。 + +如果未指定用户, `default` 被使用。 +如果未指定密码,则使用空密码。 + +`remoteSecure` -相同 `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) 从配置或9440. + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/zh/sql_reference/table_functions/url.md b/docs/zh/sql_reference/table_functions/url.md new file mode 100644 index 00000000000..d220bb05c2c --- /dev/null +++ b/docs/zh/sql_reference/table_functions/url.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 41 +toc_title: url +--- + +# url {#url} + +`url(URL, format, structure)` -返回从创建的表 `URL` 与给定 +`format` 和 `structure`. + +URL-HTTP或HTTPS服务器地址,它可以接受 `GET` 和/或 `POST` 请求。 + +格式 - [格式](../../interfaces/formats.md#formats) 的数据。 + +结构-表结构 `'UserID UInt64, Name String'` 格式。 确定列名称和类型。 + +**示例** + +``` sql +-- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format. +SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/url/) diff --git a/docs/zh/whats_new/changelog/2017.md b/docs/zh/whats_new/changelog/2017.md new file mode 100644 index 00000000000..ed77ead9023 --- /dev/null +++ b/docs/zh/whats_new/changelog/2017.md @@ -0,0 +1,268 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 79 +toc_title: '2017' +--- + +### ClickHouse释放1.1.54327,2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} + +此版本包含以前版本1.1.54318的错误修复: + +- 修复了可能导致数据丢失的复制中可能存在的争用条件的错误。 此问题影响版本1.1.54310和1.1.54318。 如果将其中一个版本用于复制的表,则强烈建议进行更新。 此问题显示在日志中的警告消息,如 `Part ... 
from own log doesn't exist.` 即使您在日志中没有看到这些消息,问题也是相关的。 + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-1-1-54318-2017-11-30} + +此版本包含以前版本1.1.54310的错误修复: + +- 修复了SummingMergeTree引擎中合并过程中错误的行删除 +- 修复了未复制的MergeTree引擎中的内存泄漏 +- 修复了MergeTree引擎中频繁插入的性能下降 +- 修复了导致复制队列停止运行的问题 +- 固定服务器日志的轮换和归档 + +### ClickHouse释放1.1.54310,2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01} + +#### 新功能: {#new-features} + +- MergeTree表引擎系列的自定义分区键。 +- [卡夫卡](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) 表引擎。 +- 增加了对加载的支持 [CatBoost](https://catboost.yandex/) 模型并将其应用到ClickHouse中存储的数据。 +- 增加了对UTC非整数偏移的时区的支持。 +- 增加了对具有时间间隔的算术运算的支持。 +- 日期和日期时间类型的值范围扩展到2105年。 +- 添加了 `CREATE MATERIALIZED VIEW x TO y` 查询(指定用于存储实例化视图数据的现有表)。 +- 添加了 `ATTACH TABLE` 不带参数的查询。 +- 将SummingMergeTree表中名称以-Map结尾的嵌套列的处理逻辑提取到sumMap聚合函数中。 现在,您可以显式指定此类列。 +- IP trie字典的最大大小增加到128M条目。 +- 添加了getSizeOfEnumType函数。 +- 添加了sumWithOverflow聚合函数。 +- 增加了对Cap'n Proto输入格式的支持。 +- 使用zstd算法时,您现在可以自定义压缩级别。 + +#### 向后不兼容的更改: {#backward-incompatible-changes} + +- 不允许使用内存以外的引擎创建临时表。 +- 不允许使用View或MaterializedView引擎显式创建表。 +- 在创建表期间,新检查将验证采样键表达式是否包含在主键中。 + +#### 错误修复: {#bug-fixes} + +- 修复了同步插入到分布式表中时的挂断问题。 +- 修复了复制表中部分的非原子添加和删除。 +- 插入到实例化视图中的数据不会遭受不必要的重复数据删除。 +- 对本地副本滞后且远程副本不可用的分布式表执行查询不会再导致错误。 +- 用户不需要访问权限 `default` 数据库创建临时表了。 +- 修复了在指定数组类型时不带参数的崩溃。 +- 修复了包含服务器日志的磁盘卷已满时的挂机问题。 +- 修复了unix时代的第一周toRelativeWeekNum函数的溢出。 + +#### 构建改进: {#build-improvements} + +- 几个第三方库(特别是Poco)被更新并转换为git子模块。 + +### ClickHouse释放1.1.54304,2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19} + +#### 新功能: {#new-features-1} + +- 本机协议中的TLS支持(要启用,请设置 `tcp_ssl_port` 在 `config.xml` ). + +#### 错误修复: {#bug-fixes-1} + +- `ALTER` 对于复制的表现在尝试尽快开始运行。 +- 使用设置读取数据时修复崩溃 `preferred_block_size_bytes=0.` +- 固定的崩溃 `clickhouse-client` 按下时 `Page Down` +- 正确解释某些复杂的查询 `GLOBAL IN` 和 `UNION ALL` +- `FREEZE PARTITION` 现在总是以原子方式工作。 +- 空POST请求现在返回代码411的响应。 +- 修正了像表达式的解释错误 `CAST(1 AS Nullable(UInt8)).` +- 修正了读取时的错误 `Array(Nullable(String))` 从列 `MergeTree` 桌子 +- 修正了解析查询时崩溃,如 `SELECT dummy AS dummy, dummy AS b` +- 用户正确更新无效 `users.xml` +- 可执行字典返回非零响应代码时的正确处理。 + +### ClickHouse释放1.1.54292,2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20} + +#### 新功能: {#new-features-2} + +- 添加了 `pointInPolygon` 用于处理坐标平面上的坐标的函数。 +- 添加了 `sumMap` 用于计算数组总和的聚合函数,类似于 `SummingMergeTree`. +- 添加了 `trunc` 功能。 改进舍入函数的性能 (`round`, `floor`, `ceil`, `roundToExp2`)并corrected正了他们如何工作的逻辑。 改变的逻辑 `roundToExp2` 分数和负数的功能。 +- ClickHouse可执行文件现在对libc版本的依赖性较低。 同样的ClickHouse可执行文件可以在各种各样的Linux系统上运行。 使用编译的查询(使用设置)时仍然存在依赖关系 `compile = 1` ,默认情况下不使用)。 +- 减少了动态编译查询所需的时间。 + +#### 错误修复: {#bug-fixes-2} + +- 修正了有时产生的错误 `part ... intersects previous part` 消息和副本的一致性减弱。 +- 修正了一个错误,导致服务器锁定,如果ZooKeeper在关闭过程中不可用。 +- 恢复副本时删除了过多的日志记录。 +- 修复了UNION ALL实现中的错误。 +- 修复了在块中的第一列具有数组类型时发生的concat函数中的错误。 +- 进度现在在系统中正确显示。合并表。 + +### ClickHouse释放1.1.54289,2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13} + +#### 新功能: {#new-features-3} + +- `SYSTEM` 服务器管理查询: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. +- 添加了用于处理数组的函数: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. +- 已添加 `root` 和 `identity` ZooKeeper配置的参数。 这允许您隔离同一个ZooKeeper集群上的单个用户。 +- 添加聚合函数 `groupBitAnd`, `groupBitOr`,和 `groupBitXor` (为了兼容,它们也可以在名称下使用 `BIT_AND`, `BIT_OR`,和 `BIT_XOR`). 
+- 通过在文件系统中指定套接字,可以从MySQL加载外部字典。 +- 外部字典可以通过SSL从MySQL加载 (`ssl_cert`, `ssl_key`, `ssl_ca` 参数)。 +- 添加了 `max_network_bandwidth_for_user` 设置为限制每个用户查询的总带宽使用。 +- 支持 `DROP TABLE` 对于临时表。 +- 支持阅读 `DateTime` 从Unix时间戳格式的值 `CSV` 和 `JSONEachRow` 格式。 +- 分布式查询中的滞后副本现在默认排除(默认阈值为5分钟)。 +- 在ALTER期间使用FIFO锁定:对于连续运行的查询,ALTER查询不会无限期地阻止。 +- 选项设置 `umask` 在配置文件中。 +- 改进了查询的性能 `DISTINCT` . + +#### 错误修复: {#bug-fixes-3} + +- 改进了在ZooKeeper中删除旧节点的过程。 以前,如果插入非常频繁,旧节点有时不会被删除,这导致服务器关闭速度缓慢等等。 +- 修正了选择主机连接到ZooKeeper时的随机化。 +- 修复了在分布式查询中排除滞后副本,如果副本是localhost。 +- 修正了一个错误,其中在一个数据部分 `ReplicatedMergeTree` 运行后表可能会被打破 `ALTER MODIFY` 在一个元素 `Nested` 结构。 +- 修复了可能导致SELECT查询执行以下操作的错误 “hang”. +- 对分布式DDL查询的改进。 +- 修正了查询 `CREATE TABLE ... AS `. +- 解决了在僵局 `ALTER ... CLEAR COLUMN IN PARTITION` 查询为 `Buffer` 桌子 +- 修正了无效的默认值 `Enum` s(0,而不是最小)使用时 `JSONEachRow` 和 `TSKV` 格式。 +- 解决了使用字典时僵尸进程的外观 `executable` 资料来源。 +- 修正了HEAD查询的段错误。 + +#### 改进开发和组装ClickHouse的工作流程: {#improved-workflow-for-developing-and-assembling-clickhouse} + +- 您可以使用 `pbuilder` 建造克里克豪斯 +- 您可以使用 `libc++` 而不是 `libstdc++` 对于构建在Linux上。 +- 添加了使用静态代码分析工具的说明: `Coverage`, `clang-tidy`, `cppcheck`. + +#### 升级时请注意: {#please-note-when-upgrading} + +- MergeTree设置现在有一个更高的默认值 `max_bytes_to_merge_at_max_space_in_pool` (要合并的数据部分的最大总大小,以字节为单位):它已从100GiB增加到150GiB。 这可能会导致服务器升级后运行大型合并,这可能会导致磁盘子系统的负载增加。 如果服务器上的可用空间小于正在运行的合并总量的两倍,这将导致所有其他合并停止运行,包括小数据部分的合并。 因此,插入查询将失败,并显示消息 “Merges are processing significantly slower than inserts.” 使用 `SELECT * FROM system.merges` 查询监控情况。 您还可以检查 `DiskSpaceReservedForMerge` 度量在 `system.metrics` 表,或石墨。 你不需要做任何事情来解决这个问题,因为一旦大合并完成,问题就会自行解决。 如果您发现这是不可接受的,则可以恢复以前的值 `max_bytes_to_merge_at_max_space_in_pool` 设置。 要做到这一点,请转到 在配置部分。xml,设置 ``` ``107374182400 ``` 并重新启动服务器。 + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-1-1-54284-2017-08-29} + +- 这是一个错误修正版本,以前的1.1.54282版本。 它修复了ZooKeeper中部件目录中的泄漏。 + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-1-1-54282-2017-08-23} + +此版本包含以前版本1.1.54276的错误修复: + +- 固定 `DB::Exception: Assertion violation: !_path.empty()` 当插入到分布式表中。 +- 如果输入数据以";"开头,则以RowBinary格式插入时修复了解析。 +- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`). 
+ +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-1-1-54276-2017-08-16} + +#### 新功能: {#new-features-4} + +- 为选择查询添加了一个可选的WITH部分。 查询示例: `WITH 1+1 AS a SELECT a, a*a` +- INSERT可以在分布式表中同步执行:仅在所有分片上保存所有数据后才返回OK。 这是由设置insert\_distributed\_sync=1激活的。 +- 添加了用于处理16字节标识符的UUID数据类型。 +- 添加了CHAR,FLOAT和其他类型的别名,以便与Tableau兼容。 +- 添加了toyyyymm,toYYYYMMDD和toyyyyymmddhhmmss将时间转换为数字的功能。 +- 您可以使用IP地址(与主机名一起使用)来标识群集DDL查询的服务器。 +- 增加了对函数中非常量参数和负偏移的支持 `substring(str, pos, len).` +- 添加了max\_size参数 `groupArray(max_size)(column)` 聚合函数,并优化了其性能。 + +#### 主要变化: {#main-changes} + +- 安全性改进:所有服务器文件都使用0640权限创建(可以通过更改 配置参数)。 +- 改进了语法无效的查询的错误消息。 +- 在合并mergetree大部分数据时,显着降低了内存消耗并提高了性能。 +- 显着提高了ReplacingMergeTree引擎的数据合并性能。 +- 通过组合多个源插入来改进来自分布式表的异步插入的性能。 要启用此功能,请使用设置distributed\_directory\_monitor\_batch\_inserts=1。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-1} + +- 改变聚合状态的二进制格式 `groupArray(array_column)` 数组的函数。 + +#### 更改的完整列表: {#complete-list-of-changes} + +- 添加了 `output_format_json_quote_denormals` 设置,允许以JSON格式输出nan和inf值。 +- 从分布式表读取时优化流分配。 +- 如果值没有更改,可以在只读模式下配置设置。 +- 添加了检索MergeTree引擎的非整数颗粒的功能,以满足preferred\_block\_size\_bytes设置中指定的块大小的限制。 其目的是在处理来自具有大列的表的查询时减少RAM消耗并增加缓存局部性。 +- 高效使用包含如下表达式的索引 `toStartOfHour(x)` 对于像条件 `toStartOfHour(x) op сonstexpr.` +- 添加了MergeTree引擎的新设置(配置中的merge\_tree部分。xml): + - replicated\_deduplication\_window\_seconds设置复制表中重复数据删除插入所允许的秒数。 + - cleanup\_delay\_period设置启动清理以删除过时数据的频率。 + - replicated\_can\_become\_leader可以防止副本成为领导者(并分配合并)。 +- 加速清理,从ZooKeeper中删除过时的数据。 +- 针对群集DDL查询的多个改进和修复。 特别令人感兴趣的是新设置distributed\_ddl\_task\_timeout,它限制了等待群集中服务器响应的时间。 如果未在所有主机上执行ddl请求,则响应将包含超时错误,并且请求将以异步模式执行。 +- 改进了服务器日志中堆栈跟踪的显示。 +- 添加了 “none” 压缩方法的值。 +- 您可以在config中使用多个dictionaries\_config部分。xml +- 可以通过文件系统中的套接字连接到MySQL。 +- 系统。部件表有一个新的列,其中包含有关标记大小的信息,以字节为单位。 + +#### 错误修复: {#bug-fixes-4} + +- 使用合并表的分布式表现在可以正确地用于具有条件的SELECT查询 `_table` 场。 +- 修复了检查数据部分时ReplicatedMergeTree中罕见的争用条件。 +- 固定可能冻结 “leader election” 启动服务器时。 +- 使用数据源的本地副本时,将忽略max\_replica\_delay\_for\_distributed\_queries设置。 这已被修复。 +- 修正了不正确的行为 `ALTER TABLE CLEAR COLUMN IN PARTITION` 尝试清除不存在的列时。 +- 修复了multif函数中使用空数组或字符串时的异常。 +- 修正了反序列化本机格式时过多的内存分配。 +- 修正了Trie字典的不正确的自动更新。 +- 修复了使用SAMPLE从合并表中使用GROUP BY子句运行查询时的异常。 +- 修复了使用distributed\_aggregation\_memory\_efficient=1时组的崩溃。 +- 现在,您可以指定数据库。表在右侧的IN和JOIN。 +- 用于并行聚合的线程太多。 这已被修复。 +- 固定如何 “if” 函数与FixedString参数一起使用。 +- 为权重为0的分片从分布式表中选择工作不正确。 这已被修复。 +- 运行 `CREATE VIEW IF EXISTS no longer causes crashes.` +- 修正了input\_format\_skip\_unknown\_fields=1设置并且有负数时的不正确行为。 +- 修正了一个无限循环 `dictGetHierarchy()` 如果字典中有一些无效的数据,则函数。 +- 固定 `Syntax error: unexpected (...)` 在IN或JOIN子句和合并表中使用子查询运行分布式查询时出错。 +- 修复了从字典表中选择查询的不正确解释。 +- 修正了 “Cannot mremap” 在IN和JOIN子句中使用包含超过20亿个元素的数组时出错。 +- 修复了以MySQL为源的字典的故障转移。 + +#### 改进开发和组装ClickHouse的工作流程: {#improved-workflow-for-developing-and-assembling-clickhouse-1} + +- 构建可以在阿卡迪亚组装。 +- 您可以使用gcc7来编译ClickHouse。 +- 现在使用ccache+distcc的并行构建速度更快。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-1-1-54245-2017-07-04} + +#### 新功能: {#new-features-5} + +- 分布式的DDL(例如, `CREATE TABLE ON CLUSTER`) +- 复制的查询 `ALTER TABLE CLEAR COLUMN IN PARTITION.` +- 字典表的引擎(以表格形式访问字典数据)。 +- 字典数据库引擎(这种类型的数据库会自动为所有连接的外部字典提供字典表)。 +- 您可以通过向源发送请求来检查字典的更新。 +- 限定列名称 +- 使用双引号引用标识符。 +- Http接口中的会话。 +- 复制表的优化查询不仅可以在leader上运行。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-2} + +- 删除设置全局。 + +#### 小的变化: {#minor-changes} + +- 现在,在触发警报之后,日志将打印完整的堆栈跟踪。 +- 在启动时放宽对损坏/额外数据部件数量的验证(有太多误报)。 + +#### 错误修复: {#bug-fixes-5} + +- 修复了连接错误 “sticking” 当插入到分布式表中。 +- GLOBAL IN现在适用于查看分布式表的合并表中的查询。 +- 在Google Compute Engine虚拟机上检测到不正确的内核数。 这已被修复。 +- 缓存外部字典的可执行源如何工作的更改。 
+- 修复了包含空字符的字符串的比较。 +- 修正了Float32主键字段与常量的比较。 +- 以前,对字段大小的不正确估计可能导致分配过大。 +- 修复了使用ALTER查询添加到表中的可空列时的崩溃。 +- 修复了按可空列排序时的崩溃,如果行数小于限制。 +- 修复了仅由常量值组成的子查询的顺序。 +- 以前,复制的表在丢弃表失败后可能仍处于无效状态。 +- 具有空结果的标量子查询的别名不再丢失。 +- 现在如果.so文件被损坏,使用编译的查询不会失败并出现错误。 diff --git a/docs/zh/whats_new/changelog/2018.md b/docs/zh/whats_new/changelog/2018.md new file mode 100644 index 00000000000..b62d8372d1a --- /dev/null +++ b/docs/zh/whats_new/changelog/2018.md @@ -0,0 +1,1063 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 78 +toc_title: '2018' +--- + +## ClickHouse释放18.16 {#clickhouse-release-18-16} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-16-1-2018-12-21} + +#### 错误修复: {#bug-fixes} + +- 修复了导致使用ODBC源更新字典时出现问题的错误。 [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- 聚集函数的JIT编译现在适用于低心率列。 [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) + +#### 改进: {#improvements} + +- 添加了 `low_cardinality_allow_in_native_format` 设置(默认情况下启用)。 如果禁用,则选择查询的LowCardinality列将转换为普通列,插入查询将需要普通列。 [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) + +#### 构建改进: {#build-improvements} + +- 修复了基于macOS和ARM的构建。 + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-16-0-2018-12-14} + +#### 新功能: {#new-features} + +- `DEFAULT` 在以半结构化输入格式加载数据时,会计算表达式是否缺少字段 (`JSONEachRow`, `TSKV`). 该功能与启用 `insert_sample_with_metadata` 设置。 [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) +- 该 `ALTER TABLE` 查询现在有 `MODIFY ORDER BY` 用于在添加或删除表列时更改排序键的操作。 这是在表有用 `MergeTree` 基于此排序键合并时执行其他任务的系列,例如 `SummingMergeTree`, `AggregatingMergeTree`,等等。 [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) +- 对于在表 `MergeTree` 家庭,现在你可以指定一个不同的排序键 (`ORDER BY`)和索引 (`PRIMARY KEY`). 排序键可以长于索引。 [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) +- 添加了 `hdfs` 表功能和 `HDFS` 用于将数据导入和导出到HDFS的表引擎。 [晨兴-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) +- 增加了使用base64的功能: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350) +- 现在,您可以使用一个参数来配置的精度 `uniqCombined` 聚合函数(选择HyperLogLog单元格的数量)。 [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) +- 添加了 `system.contributors` 包含在ClickHouse中进行提交的所有人的名称的表。 [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) +- 增加了省略分区的能力 `ALTER TABLE ... FREEZE` 查询以便一次备份所有分区。 [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) +- 已添加 `dictGet` 和 `dictGetOrDefault` 不需要指定返回值类型的函数。 该类型是从字典描述自动确定的。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3564) +- 现在,您可以在表描述中为列指定注释,并使用以下方式对其进行更改 `ALTER`. 
[\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377) +- 阅读支持 `Join` 使用简单键键入表格。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3728) +- 现在,您可以指定选项 `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`,和 `join_overflow_mode` 当创建一个 `Join` 键入表。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3728) +- 添加了 `joinGet` 功能,允许您使用 `Join` 像字典一样键入表格。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3728) +- 添加了 `partition_key`, `sorting_key`, `primary_key`,和 `sampling_key` 列到 `system.tables` 表以便提供关于表键的信息。 [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- 添加了 `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`,和 `is_in_sampling_key` 列到 `system.columns` 桌子 [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- 添加了 `min_time` 和 `max_time` 列到 `system.parts` 桌子 当分区键是由以下表达式组成的表达式时,将填充这些列 `DateTime` 列。 [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800) + +#### 错误修复: {#bug-fixes-1} + +- 修复和性能改进 `LowCardinality` 数据类型。 `GROUP BY` 使用 `LowCardinality(Nullable(...))`. 获取的值 `extremes`. 处理高阶函数。 `LEFT ARRAY JOIN`. 分布 `GROUP BY`. 返回的函数 `Array`. 执行 `ORDER BY`. 写入 `Distributed` 表(nicelulu)。 向后兼容 `INSERT` 从实现旧客户端的查询 `Native` 协议 支持 `LowCardinality` 为 `JOIN`. 在单个流中工作时提高性能。 [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518) +- 固定如何 `select_sequential_consistency` 选项工作。 以前,启用此设置时,在开始写入新分区后,有时会返回不完整的结果。 [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863) +- 执行DDL时正确指定数据库 `ON CLUSTER` 查询和 `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) +- 为视图中的子查询正确指定了数据库。 [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521) +- 修正了一个错误 `PREWHERE` 与 `FINAL` 为 `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1) +- 现在你可以使用 `KILL QUERY` 取消尚未启动的查询,因为它们正在等待锁定表。 [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) +- 更正日期和时间计算,如果时钟被移回午夜(这发生在伊朗,并发生在莫斯科1981年至1983年)。 以前,这导致时间比必要的时间早一天重置,并且还导致文本格式的日期和时间格式不正确。 [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) +- 修正了某些情况下的错误 `VIEW` 和省略数据库的子查询。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3521) +- 修正了一个争用条件时,同时从读取 `MATERIALIZED VIEW` 和删除 `MATERIALIZED VIEW` 由于不锁定内部 `MATERIALIZED VIEW`. 
[\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) +- 修正了错误 `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) +- 固定查询处理时 `compile_expressions` 选项已启用(默认情况下启用)。 非确定性常量表达式,如 `now` 功能不再展开。 [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) +- 修复了在指定非常量比例参数时发生的崩溃 `toDecimal32/64/128` 功能。 +- 修复了尝试插入数组时的错误 `NULL` 中的元素 `Values` 格式化为类型的列 `Array` 没有 `Nullable` (如果 `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503) +- 固定连续错误登录 `DDLWorker` 如果动物园管理员不可用。 [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2) +- 修正了返回类型 `quantile*` 从功能 `Date` 和 `DateTime` 参数的类型。 [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580) +- 修正了 `WITH` 子句,如果它指定了一个没有表达式的简单别名。 [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570) +- 固定处理具有命名子查询和限定列名的查询时 `enable_optimize_predicate_expression` 被启用。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3588) +- 修正了错误 `Attempt to attach to nullptr thread group` 使用实例化视图时。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623) +- 修正了传递某些不正确的参数时崩溃 `arrayReverse` 功能。 [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) +- 修正了缓冲区溢出 `extractURLParameter` 功能。 改进的性能。 添加了包含零字节的字符串的正确处理。 [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5) +- 在固定缓冲区溢出 `lowerUTF8` 和 `upperUTF8` 功能。 删除了执行这些功能的能力 `FixedString` 类型参数。 [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662) +- 修复了删除时罕见的竞争条件 `MergeTree` 桌子 [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680) +- 修正了从读取时的争用条件 `Buffer` 表和同时执行 `ALTER` 或 `DROP` 在目标桌上。 [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719) +- 修正了一个段错误,如果 `max_temporary_non_const_columns` 超过限制。 [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) + +#### 改进: {#improvements-1} + +- 服务器不会将处理后的配置文件写入 `/etc/clickhouse-server/` 目录。 相反,它将它们保存在 `preprocessed_configs` 里面的目录 `path`. 
这意味着 `/etc/clickhouse-server/` 目录没有写访问权限 `clickhouse` 用户,从而提高了安全性。 [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) +- 该 `min_merge_bytes_to_use_direct_io` 默认情况下,选项设置为10GiB。 将在MergeTree系列中执行形成大部分表的合并 `O_DIRECT` 模式,这可以防止过多的页高速缓存逐出。 [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) +- 当表数量非常多时,加速服务器启动。 [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) +- 添加了连接池和HTTP `Keep-Alive` 用于副本之间的连接。 [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) +- 如果查询语法无效,则 `400 Bad Request` 代码在返回 `HTTP` 接口(500以前返回)。 [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab) +- 该 `join_default_strictness` 选项设置为 `ALL` 默认情况下为兼容性。 [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) +- 删除日志记录 `stderr` 从 `re2` 无效或复杂正则表达式的库。 [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723) +- 添加的 `Kafka` 表引擎:在开始从Kafka读取之前检查订阅;表的kafka\_max\_block\_size设置。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396) +- 该 `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`,和 `murmurHash3_64` 函数现在适用于任意数量的参数和元组形式的参数。 [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) +- 该 `arrayReverse` 函数现在适用于任何类型的数组。 [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) +- 增加了一个可选参数:插槽大小的 `timeSlots` 功能。 [基里尔\*什瓦科夫](https://github.com/ClickHouse/ClickHouse/pull/3724) +- 为 `FULL` 和 `RIGHT JOIN`,该 `max_block_size` 设置用于右表中未连接的数据流。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3699) +- 添加了 `--secure` 命令行参数 `clickhouse-benchmark` 和 `clickhouse-performance-test` 启用TLS。 [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690) +- 类型转换时的结构 `Buffer` 表的类型与目标表的结构不匹配。 [维塔利\*巴拉诺夫](https://github.com/ClickHouse/ClickHouse/pull/3603) +- 添加了 `tcp_keep_alive_timeout` 在指定的时间间隔内不活动后启用保持活动数据包的选项。 [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441) +- 删除不必要的引用值的分区键中 `system.parts` 表,如果它由单列组成。 [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652) +- 模函数适用于 `Date` 和 `DateTime` 数据类型。 [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385) +- 添加同义词的 `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`,和 `MID` 功能。 [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) 为了与SQL标准兼容,某些函数名称不区分大小写。 添加语法糖 `SUBSTRING(expr FROM start FOR length)` 对于与SQL的兼容性。 [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804) +- 增加了以下能力 `mlock` 对应于存储器页 `clickhouse-server` 可执行代码,以防止它被强制出内存。 默认情况下禁用此功能。 [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553) +- 从读取时改进的性能 `O_DIRECT` (与 `min_bytes_to_use_direct_io` 选项启用)。 [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405) +- 的改进的性能 `dictGet...OrDefault` 常量键参数和非常量默认参数的函数。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3563) +- 该 `firstSignificantSubdomain` 功能现在处理域 `gov`, `mil`,和 `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) 改进的性能。 [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628) +- 能够指定用于启动的自定义环境变量 `clickhouse-server` 使用 `SYS-V init.d` 通过定义脚本 `CLICKHOUSE_PROGRAM_ENV` 在 `/etc/default/clickhouse`. 
+ [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612) +- Clickhouse-server init脚本的正确返回代码。 [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516) +- 该 `system.metrics` 表现在有 `VersionInteger` 公制和 `system.build_options` 有添加的行 `VERSION_INTEGER`,其中包含ClickHouse版本的数字形式,例如 `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644) +- 删除比较的能力 `Date` 输入一个数字,以避免潜在的错误,如 `date = 2018-12-17`,其中日期周围的引号被错误省略。 [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) +- 修正了有状态函数的行为,如 `rowNumberInAllBlocks`. 他们之前输出的结果是由于在查询分析期间启动而大一个数字。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3729) +- 如果 `force_restore_data` 文件无法删除,将显示错误消息。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3794) + +#### 构建改进: {#build-improvements-1} + +- 更新了 `jemalloc` 库,它修复了潜在的内存泄漏。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3557) +- 分析与 `jemalloc` 默认情况下为了调试生成启用。 [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15) +- 增加了运行集成测试的能力,当只 `Docker` 安装在系统上。 [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650) +- 在SELECT查询中添加了模糊表达式测试。 [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442) +- 为提交添加了一个压力测试,它以并行和随机顺序执行功能测试,以检测更多的竞争条件。 [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438) +- 改进了在Docker映像中启动clickhouse-server的方法。 [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663) +- 对于Docker映像,增加了对使用数据库中的文件初始化数据库的支持 `/docker-entrypoint-initdb.d` 目录。 [康斯坦丁\*列别杰夫](https://github.com/ClickHouse/ClickHouse/pull/3695) +- 修复了基于ARM的构建。 [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709) + +#### 向后不兼容的更改: {#backward-incompatible-changes} + +- 删除比较的能力 `Date` 用数字键入。 而不是 `toDate('2018-12-18') = 17883`,必须使用显式类型转换 `= toDate(17883)` [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) + +## ClickHouse释放18.14 {#clickhouse-release-18-14} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-14-19-2018-12-19} + +#### 错误修复: {#bug-fixes-2} + +- 修复了导致使用ODBC源更新字典时出现问题的错误。 [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- 执行DDL时正确指定数据库 `ON CLUSTER` 查询。 [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) +- 修正了一个段错误,如果 `max_temporary_non_const_columns` 超过限制。 [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) + +#### 构建改进: {#build-improvements-2} + +- 修复了基于ARM的构建。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-18-2018-12-04} + +#### 错误修复: {#bug-fixes-3} + +- 修正错误 `dictGet...` 类型字典的函数 `range`,如果其中一个参数是恒定的,而另一个则不是。 [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751) +- 修复了导致消息的错误 `netlink: '...': attribute type 1 has an invalid length` 要打印在Linux内核日志中,这只发生在足够新鲜的Linux内核版本上。 [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749) +- 在功能固定段错误 `empty` 对于争论 `FixedString` 类型。 [丹尼尔,道广明](https://github.com/ClickHouse/ClickHouse/pull/3703) +- 修正了使用大值时过多的内存分配 `max_query_size` 设置(内存块 `max_query_size` 字节被预先分配一次)。 [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720) + +#### 构建更改: {#build-changes} + +- 使用操作系统包中的版本7的LLVM/Clang库修复构建(这些库用于运行时查询编译)。 [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-17-2018-11-30} + +#### 错误修复: {#bug-fixes-4} + +- 修复了ODBC桥进程未与主服务器进程终止的情况。 [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642) +- 固定同步插入 `Distributed` 具有不同于远程表的列列表的列列表的表。 [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673) +- 修复了丢弃MergeTree表时可能导致崩溃的罕见竞争条件。 
[\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) +- 修复了查询线程创建失败时的查询死锁 `Resource temporarily unavailable` 错误 [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) +- 修正了解析 `ENGINE` 条款时 `CREATE AS table` 语法被使用和 `ENGINE` 子句之前指定 `AS table` (错误导致忽略指定的引擎)。 [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-15-2018-11-21} + +#### 错误修复: {#bug-fixes-5} + +- 反序列化类型的列时,高估了内存块的大小 `Array(String)` 这导致 “Memory limit exceeded” 错误。 该问题出现在版本18.12.13中。 [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-14-2018-11-20} + +#### 错误修复: {#bug-fixes-6} + +- 固定 `ON CLUSTER` 当群集配置为安全时进行查询(标志 ``). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599) + +#### 构建更改: {#build-changes-1} + +- 固定的问题(llvm-7从系统,macos) [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-13-2018-11-08} + +#### 错误修复: {#bug-fixes-7} + +- 修正了 `Block structure mismatch in MergingSorted stream` 错误 [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162) +- 固定 `ON CLUSTER` 查询的情况下,当安全连接被打开的群集配置( `` 标志)。 [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465) +- 修复了查询中使用的错误 `SAMPLE`, `PREWHERE` 和别名列。 [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543) +- 修正了一个罕见的 `unknown compression method` 错误时 `min_bytes_to_use_direct_io` 设置已启用。 [3544](https://github.com/ClickHouse/ClickHouse/pull/3544) + +#### 性能改进: {#performance-improvements} + +- 查询的固定性能回归 `GROUP BY` 在AMD EPYC处理器上执行时,uint16或Date类型的列。 [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512) +- 修正了处理长字符串的查询的性能回归。 [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530) + +#### 构建改进: {#build-improvements-3} + +- 简化阿卡迪亚构建的改进。 [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-12-2018-11-02} + +#### 错误修复: {#bug-fixes-8} + +- 修复了加入两个未命名的子查询时的崩溃。 [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505) +- 修正了生成不正确的查询(用空 `WHERE` 子句)查询外部数据库时。 [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477) +- 修正了在ODBC字典中使用不正确的超时值。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-11-2018-10-29} + +#### 错误修复: {#bug-fixes-9} + +- 修正了错误 `Block structure mismatch in UNION stream: different number of columns` 在限制查询。 [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156) +- 修复了在嵌套结构中包含数组的表中合并数据时出现的错误。 [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397) +- 修正了不正确的查询结果,如果 `merge_tree_uniform_read_distribution` 设置被禁用(默认情况下启用)。 [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429) +- 修复了在本机格式的分布式表中插入错误。 [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-10-2018-10-23} + +- 该 `compile_expressions` 默认情况下禁用设置(表达式的JIT编译)。 [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410) +- 该 `enable_optimize_predicate_expression` 默认情况下禁用设置。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-9-2018-10-16} + +#### 新功能: {#new-features-1} + +- 该 `WITH CUBE` 修饰符 `GROUP BY` (替代语法 `GROUP BY CUBE(...)` 也可用)。 [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172) +- 添加了 `formatDateTime` 功能。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770) +- 添加了 
`JDBC` 表引擎和 `jdbc` 表功能(需要安装clickhouse-jdbc桥)。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) +- 增加了使用ISO周编号的功能: `toISOWeek`, `toISOYear`, `toStartOfISOYear`,和 `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146) +- 现在你可以使用 `Nullable` 列 `MySQL` 和 `ODBC` 桌子 [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) +- 嵌套的数据结构可以被读取为嵌套的对象 `JSONEachRow` 格式。 添加了 `input_format_import_nested_json` 设置。 [维罗曼\*云坎](https://github.com/ClickHouse/ClickHouse/pull/3144) +- 并行处理可用于许多 `MATERIALIZED VIEW`s插入数据时。 见 `parallel_view_processing` 设置。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208) +- 添加了 `SYSTEM FLUSH LOGS` 查询(强制日志刷新到系统表,如 `query_log`) [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321) +- 现在,您可以使用预定义 `database` 和 `table` 声明时的宏 `Replicated` 桌子 [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251) +- 增加了阅读的能力 `Decimal` 工程表示法中的类型值(表示十的幂)。 [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153) + +#### 实验特点: {#experimental-features} + +- 对GROUP BY子句进行优化 `LowCardinality data types.` [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138) +- 表达式的优化计算 `LowCardinality data types.` [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200) + +#### 改进: {#improvements-2} + +- 显着减少查询的内存消耗 `ORDER BY` 和 `LIMIT`. 见 `max_bytes_before_remerge_sort` 设置。 [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) +- 在没有 `JOIN` (`LEFT`, `INNER`, …), `INNER JOIN` 是假定的。 [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147) +- 限定星号在以下查询中正常工作 `JOIN`. [张冬](https://github.com/ClickHouse/ClickHouse/pull/3202) +- 该 `ODBC` 表引擎正确地选择用于引用远程数据库的SQL方言中的标识符的方法。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) +- 该 `compile_expressions` 默认情况下启用设置(表达式的JIT编译)。 +- 修复了同时删除数据库/表(如果存在)和创建数据库/表(如果不存在)的行为。 前情提要 `CREATE DATABASE ... IF NOT EXISTS` 查询可能会返回错误消息 “File … already exists” 和 `CREATE TABLE ... IF NOT EXISTS` 和 `DROP TABLE IF EXISTS` 查询可能会返回 `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101) +- 当从MySQL或ODBC表中查询时,LIKE和IN表达式具有常量右半部分被传递到远程服务器。 [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) +- 当从MySQL和ODBC表查询时,与WHERE子句中常量表达式的比较会传递给远程服务器。 以前,只通过与常量的比较。 [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) +- 正确计算终端中的行宽 `Pretty` 格式,包括带有象形文字的字符串。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3257). +- `ON CLUSTER` 可以指定 `ALTER UPDATE` 查询。 +- 提高了读取数据的性能 `JSONEachRow` 格式。 [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332) +- 添加同义词的 `LENGTH` 和 `CHARACTER_LENGTH` 功能的兼容性。 该 `CONCAT` 函数不再区分大小写。 [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306) +- 添加了 `TIMESTAMP` 的同义词 `DateTime` 类型。 [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390) +- 服务器日志中始终为query\_id保留空间,即使日志行与查询无关。 这使得使用第三方工具更容易分析服务器文本日志。 +- 当查询超过整数千兆字节的下一级别时,会记录查询的内存消耗。 [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) +- 为使用本机协议的客户端库错误发送的列少于服务器预期的插入查询时的情况添加了兼容模式。 使用clickhouse-cpp库时,这种情况是可能的。 以前,此方案会导致服务器崩溃。 [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171) +- 在用户定义的WHERE表达式中 `clickhouse-copier`,您现在可以使用 `partition_key` 别名(用于按源表分区进行其他过滤)。 如果分区方案在复制过程中发生更改,但仅稍有更改,这很有用。 [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166) +- 的工作流程 `Kafka` 引擎已被移动到后台线程池中,以便在高负载下自动降低数据读取速度。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). +- 支持阅读 `Tuple` 和 `Nested` 结构的值,如 `struct` 在 `Cap'n'Proto format`. 
[Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216) +- 顶级域名列表 `firstSignificantSubdomain` 功能现在包括域 `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219) +- 在外部字典的配置, `null_value` 被解释为默认数据类型的值。 [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330) +- 支持 `intDiv` 和 `intDivOrZero` 功能 `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264) +- 支持 `Date`, `DateTime`, `UUID`,和 `Decimal` 类型作为键 `sumMap` 聚合函数。 [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281) +- 支持 `Decimal` 外部字典中的数据类型。 [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324) +- 支持 `Decimal` 数据类型in `SummingMergeTree` 桌子 [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348) +- 增加了专业化 `UUID` 在 `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366) +- 减少的数量 `open` 和 `close` 从读取时系统调用 `MergeTree table`. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283) +- A `TRUNCATE TABLE` 查询可以在任何副本上执行(将查询传递给领导副本)。 [基里尔\*什瓦科夫](https://github.com/ClickHouse/ClickHouse/pull/3375) + +#### 错误修复: {#bug-fixes-10} + +- 修正了一个问题 `Dictionary` 表 `range_hashed` 字典 此错误发生在版本18.12.17中。 [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702) +- 修正了加载时的错误 `range_hashed` 字典(消息 `Unsupported type Nullable (...)`). 此错误发生在版本18.12.17中。 [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) +- 在固定的错误 `pointInPolygon` 函数由于不准确的计算的多边形与大量的顶点位于彼此靠近的积累。 [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341) +- 如果在合并数据部分之后,结果部分的校验和与另一个副本中相同合并的结果不同,则删除合并的结果并从另一个副本下载数据部分(这是正确的行为)。 但是在下载数据部分之后,由于该部分已经存在的错误(因为合并后数据部分被删除了一些延迟),因此无法将其添加到工作集中。 这导致周期性尝试下载相同的数据。 [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194) +- 修正了查询总内存消耗的不正确计算(由于计算不正确, `max_memory_usage_for_all_queries` 设置工作不正确, `MemoryTracking` 度量值不正确)。 此错误发生在版本18.12.13中。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344) +- 修正的功能 `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` 此错误发生在版本18.12.13中。 [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247) +- 修正了数据结构的不必要的准备 `JOIN`如果发起查询的服务器上 `JOIN` 仅在远程服务器上执行。 [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340) +- 在固定的错误 `Kafka` 引擎:开始读取数据时异常后的死锁,并在完成时锁定 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). +- 为 `Kafka` 表,可选 `schema` 参数未被传递(的架构 `Cap'n'Proto` 格式)。 [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150) +- 如果ZooKeeper服务器的整体服务器接受连接,但随后立即关闭它,而不是响应握手,ClickHouse选择连接另一台服务器。 以前,这会产生错误 `Cannot read all data. Bytes read: 0. Bytes expected: 4.` 服务器无法启动。 [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9) +- 如果ZooKeeper服务器的整体包含DNS查询返回错误的服务器,则忽略这些服务器。 [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29) +- 固定类型之间的转换 `Date` 和 `DateTime` 当在插入数据 `VALUES` 格式(如果 `input_format_values_interpret_expressions = 1`). 以前,转换是在Unix Epoch时间中的天数和Unix时间戳的数值之间进行的,这会导致意外的结果。 [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229) +- 修正类型之间的转换 `Decimal` 和整数。 [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211) +- 在固定的错误 `enable_optimize_predicate_expression` 设置。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3231) +- 如果使用非默认的CSV分隔符,则修复了CSV格式的浮点数解析错误,例如 `;` [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155) +- 修正了 `arrayCumSumNonNegative` 函数(它不累加负值,如果累加器小于零)。 [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163) +- 固定如何 `Merge` 表工作的顶部 `Distributed` 使用时的表 `PREWHERE`. 
[\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165) +- 在错误修复 `ALTER UPDATE` 查询。 +- 在固定的错误 `odbc` 表功能,出现在版本18.12。 [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197) +- 修正了聚合函数的操作 `StateArray` 组合子 [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188) +- 修正了划分时崩溃 `Decimal` 值为零。 [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179) +- 使用固定输出类型的操作 `Decimal` 和整数参数。 [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224) +- 修正了在段错误 `GROUP BY` 上 `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a) +- 该 `log_query_threads` 设置(关于查询执行的每个线程的日志记录信息)现在生效,只有当 `log_queries` 选项(有关查询的日志记录信息)设置为1。 由于 `log_query_threads` 默认情况下,即使禁用了查询日志记录,也会先前记录有关线程的信息。 [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241) +- 修正了分位数聚合函数的分布式操作中的错误(错误消息 `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664) +- 修复了同时在18.12.17版服务器和旧服务器的集群上工作时的兼容性问题。 对于具有固定和非固定长度的GROUP BY键的分布式查询,如果要聚合大量数据,则返回的数据并不总是完全聚合(两个不同的行包含相同的聚合键)。 [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254) +- 固定处理替换 `clickhouse-performance-test`,如果查询只包含测试中声明的替换的一部分。 [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263) +- 修复了使用时的错误 `FINAL` 与 `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) +- 修复了使用时的错误 `PREWHERE` 在过程中添加的列 `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) +- 增加了一个检查没有 `arrayJoin` 为 `DEFAULT` 和 `MATERIALIZED` 表达式。 前情提要, `arrayJoin` 插入数据时导致错误。 [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337) +- 增加了一个检查没有 `arrayJoin` 在一个 `PREWHERE` 条款 以前,这导致了类似的消息 `Size ... doesn't match` 或 `Unknown compression method` 执行查询时。 [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) +- 修复了优化后可能发生的极少数情况下的段错误,并将相等性评估与相应的IN表达式链接起来。 [刘一民-字节舞](https://github.com/ClickHouse/ClickHouse/pull/3339) +- 小幅更正 `clickhouse-benchmark`:以前,客户端信息没有发送到服务器;现在关闭时更准确地计算执行的查询数量,并限制迭代次数。 [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352) + +#### 向后不兼容的更改: {#backward-incompatible-changes-1} + +- 删除了 `allow_experimental_decimal_type` 选项。 该 `Decimal` 数据类型可供默认使用。 [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329) + +## ClickHouse释放18.12 {#clickhouse-release-18-12} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-12-17-2018-09-16} + +#### 新功能: {#new-features-2} + +- `invalidate_query` (指定查询来检查是否需要更新外部字典的能力)实现了 `clickhouse` 资料来源。 [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126) +- 增加了使用的能力 `UInt*`, `Int*`,和 `DateTime` 数据类型(与 `Date` 类型)作为 `range_hashed` 定义范围边界的外部字典键。 现在 `NULL` 可用于指定开放范围。 [瓦西里\*内姆科夫](https://github.com/ClickHouse/ClickHouse/pull/3123) +- 该 `Decimal` 类型现在支持 `var*` 和 `stddev*` 聚合函数。 [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) +- 该 `Decimal` 类型现在支持数学函数 (`exp`, `sin` 等等。) [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) +- 该 `system.part_log` 表现在有 `partition_id` 列。 [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) + +#### 错误修复: {#bug-fixes-11} + +- `Merge` 现在正常工作 `Distributed` 桌子 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3159) +- 修复了不兼容(不必要的依赖 `glibc` 版本),这使得它不可能运行ClickHouse的 `Ubuntu Precise` 和旧版本。 在版本18.12.13中出现了不兼容。 [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130) +- 在固定的错误 `enable_optimize_predicate_expression` 设置。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3107) +- 
修复了一个小的向后兼容性问题:当副本集群运行早于18.12.13的版本,同时又在运行较新版本的服务器上为同一张表创建新副本时会出现(表现为消息 `Can not clone replica, because the ... updated to new ClickHouse version`;这条消息是合理的,但本不应该出现)。 [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122) + +#### 向后不兼容的更改: {#backward-incompatible-changes-2} + +- `enable_optimize_predicate_expression` 选项默认启用(这相当乐观)。 如果出现与查找列名相关的查询分析错误,请将 `enable_optimize_predicate_expression` 设置为0。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3107) + +### ClickHouse释放18.12.14,2018-09-13 {#clickhouse-release-18-12-14-2018-09-13} + +#### 新功能: {#new-features-3} + +- 增加了对 `ALTER UPDATE` 查询的支持。 [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035) +- 添加了 `allow_ddl` 选项,用于限制用户执行DDL查询。 [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104) +- 为 `MergeTree` 引擎添加了 `min_merge_bytes_to_use_direct_io` 选项,允许为合并的总大小设置阈值(超过阈值时,将使用O\_DIRECT处理数据部分文件)。 [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117) +- `system.merges` 系统表现在包含 `partition_id` 列。 [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099) + +#### 改进 {#improvements-3} + +- 如果数据部分在变异(mutation)期间保持不变,副本不会重新下载该数据部分。 [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103) +- 在 `clickhouse-client` 中,自动补全现在可用于设置名称。 [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106) + +#### 错误修复: {#bug-fixes-12} + +- 插入时增加了对 `Nested` 类型字段中数组元素大小的检查。 [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118) +- 修正了更新带有 `ODBC` 来源和 `hashed` 存储的外部字典时的一个错误。 此错误发生在版本18.12.13中。 +- 修复了从带有 `IN` 条件的查询创建临时表时出现的崩溃。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3098) +- 修复了聚合函数处理可能包含 `NULL` 元素的数组时的错误。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3097) + +### ClickHouse释放18.12.13,2018-09-10 {#clickhouse-release-18-12-13-2018-09-10} + +#### 新功能: {#new-features-4} + +- 添加了 `DECIMAL(digits, scale)` 数据类型 (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`)。 要启用它,请使用设置 `allow_experimental_decimal_type`。 [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047) +- 为 `GROUP BY` 新增了 `WITH ROLLUP` 修饰符(替代语法: `GROUP BY ROLLUP(...)`)。 [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948) +- 在带有JOIN的查询中,星号会按照SQL标准展开为所有表的列的列表。 您可以在用户配置级别将 `asterisk_left_columns_only` 设置为1来恢复旧行为。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2787) +- 增加了对与表函数进行JOIN的支持。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2907) +- 在clickhouse-client中按Tab键进行自动补全。 [谢尔盖\*谢尔宾](https://github.com/ClickHouse/ClickHouse/pull/2447) +- 在clickhouse-client中按Ctrl+C可清除已输入的查询。 [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877) +- 添加了 `join_default_strictness` 设置(取值: `''`, `'any'`, `'all'`)。 这样就可以在 `JOIN` 中省略 `ANY` 或 `ALL`。
[\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982) +- 与查询处理相关的服务器日志的每一行都显示了查询ID。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 现在,您可以在clickhouse-client中获取查询执行日志(使用 `send_logs_level` 设置)。 通过分布式查询处理,日志从所有服务器级联。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `system.query_log` 和 `system.processes` (`SHOW PROCESSLIST`)表现在有关所有更改的设置信息,当你运行一个查询(的嵌套结构 `Settings` 数据)。 添加了 `log_query_settings` 设置。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `system.query_log` 和 `system.processes` 表现在显示有关参与查询执行的线程数的信息(请参阅 `thread_numbers` 列)。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 已添加 `ProfileEvents` 用于度量通过网络读取和写入磁盘以及读取和写入磁盘所花费的时间、网络错误的数量以及在网络带宽受限时所花费的等待时间。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 已添加 `ProfileEvents`包含来自rusage的系统指标的计数器(您可以使用它们获取有关用户空间和内核、页面错误和上下文切换的CPU使用率的信息),以及taskstats指标(使用它们获取有关I/O等待时间、CPU等待时间以及读取和记录的数据量的信息,无论是否包含页面缓存)。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `ProfileEvents` 计数器应用于全局和每个查询,以及每个查询执行线程,它允许您按查询详细分析资源消耗情况。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 添加了 `system.query_thread_log` 表,其中包含有关每个查询执行线程的信息。 添加了 `log_query_threads` 设置。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `system.metrics` 和 `system.events` 表现在有内置文档。 [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016) +- 添加了 `arrayEnumerateDense` 功能。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2975) +- 添加了 `arrayCumSumNonNegative` 和 `arrayDifference` 功能。 [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942) +- 添加了 `retention` 聚合函数。 [李尚迪](https://github.com/ClickHouse/ClickHouse/pull/2887) +- 现在,您可以使用plus运算符添加(合并)聚合函数的状态,并将聚合函数的状态乘以非负常数。 [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034) +- MergeTree系列中的表现在具有虚拟列 `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) + +#### 实验特点: {#experimental-features-1} + +- 添加了 `LowCardinality(T)` 数据类型。 此数据类型自动创建值的本地字典,并允许数据处理而无需解压字典。 [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830) +- 添加了JIT编译函数的缓存和编译前使用次数的计数器。 要JIT编译表达式,请启用 `compile_expressions` 设置。 [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077) + +#### 改进: {#improvements-4} + +- 修复了放弃副本时复制日志无限积累的问题。 为延迟较长的副本添加了有效的恢复模式。 +- 改进的性能 `GROUP BY` 当其中一个是string,其他是固定长度时,具有多个聚合字段。 +- 使用时提高性能 `PREWHERE` 并与表达式的隐式转移 `PREWHERE`. +- 改进文本格式的解析性能 (`CSV`, `TSV`). 
[阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980) +- 改进了读取二进制格式字符串和数组的性能。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2955) +- 提高性能和减少内存消耗的查询 `system.tables` 和 `system.columns` 当单个服务器上有非常大量的表时。 [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953) +- 修复了大量查询导致错误的情况下的性能问题( `_dl_addr` 功能是可见的 `perf top`,但服务器没有使用太多的CPU)。 [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938) +- 条件被转换到视图中(当 `enable_optimize_predicate_expression` 被启用)。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2907) +- 改进的功能 `UUID` 数据类型。 [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985) +- 该 `UUID` -Alchemist字典支持数据类型。 [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822) +- 该 `visitParamExtractRaw` 函数与嵌套结构正常工作。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2974) +- 当 `input_format_skip_unknown_fields` 启用设置,在对象字段 `JSONEachRow` 格式被正确跳过。 [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958) +- 对于一个 `CASE` 表达式与条件,你现在可以省略 `ELSE`,这相当于 `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920) +- 现在可以在使用ZooKeeper时配置操作超时。 [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971) +- 您可以指定偏移量 `LIMIT n, m` 作为 `LIMIT n OFFSET m`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) +- 您可以使用 `SELECT TOP n` 语法作为替代 `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) +- 增加了队列的大小写入系统表,因此 `SystemLog parameter queue is full` 错误不经常发生。 +- 该 `windowFunnel` aggregate函数现在支持满足多个条件的事件。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2801) +- 重复的列可以用于 `USING` 条款 `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006) +- `Pretty` 格式现在对列对齐宽度有限制。 使用 `output_format_pretty_max_column_pad_width` 设置。 如果一个值较宽,它仍将完整显示,但表中的其他单元格不会太宽。 [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003) +- 该 `odbc` 表函数现在允许您指定数据库/模式名称。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2885) +- 增加了使用在指定的用户名的能力 `clickhouse-client` 配置文件。 [弗拉基米尔\*科兹宾](https://github.com/ClickHouse/ClickHouse/pull/2909) +- 该 `ZooKeeperExceptions` 计数器已被分成三个计数器: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`,和 `ZooKeeperOtherExceptions`. +- `ALTER DELETE` 查询适用于实例化视图。 +- 在定期运行清理线程时添加了随机化 `ReplicatedMergeTree` 表,以避免周期性负载尖峰时有一个非常大的数量 `ReplicatedMergeTree` 桌子 +- 支持 `ATTACH TABLE ... ON CLUSTER` 查询。 [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025) + +#### 错误修复: {#bug-fixes-13} + +- 修正了一个问题 `Dictionary` 表(抛出 `Size of offsets doesn't match size of column` 或 `Unknown compression method` 例外)。 此错误出现在版本18.10.3中。 [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913) +- 修复了合并时的错误 `CollapsingMergeTree` 如果其中一个数据部分为空(这些部分在合并或合并期间形成 `ALTER DELETE` 如果所有数据被删除),和 `vertical` 算法被用于合并。 [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049) +- 在固定的竞争条件 `DROP` 或 `TRUNCATE` 为 `Memory` 表与同时 `SELECT`,这可能导致服务器崩溃。 此错误出现在版本1.1.54388中。 [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038) +- 修正了插入时数据丢失的可能性 `Replicated` 表如果 `Session is expired` 错误返回(数据丢失可以通过检测 `ReplicatedDataLoss` 公制)。 此错误发生在版本1.1.54378。 [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964) +- 在修复段错误 `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000) +- 修正了错误搜索列名时 `WHERE` 表达式完全由限定列名组成,例如 `WHERE table.column`. 
[\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994) +- 修正了 “Not found column” 如果从远程服务器请求由IN表达式和子查询组成的单个列,则在执行分布式查询时发生错误。 [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087) +- 修正了 `Block structure mismatch in UNION stream: different number of columns` 如果其中一个分片是本地的,而另一个分片不是,则发生分布式查询的错误,并优化移动到 `PREWHERE` 被触发。 [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093) +- 修正了 `pointInPolygon` 非凸多边形的某些情况下的函数。 [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910) +- 修正了比较时不正确的结果 `nan` 与整数。 [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024) +- 修正了一个错误 `zlib-ng` 在极少数情况下可能导致segfault的库。 [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854) +- 修复了插入到表中时的内存泄漏 `AggregateFunction` 列,如果聚合函数的状态不简单(分别分配内存),并且如果单个插入请求导致多个小块。 [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084) +- 修复了创建和删除相同的竞争条件 `Buffer` 或 `MergeTree` 同时表。 +- 修复了比较由某些非平凡类型(如元组)组成的元组时出现段错误的可能性。 [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989) +- 修正了运行某些时段错误的可能性 `ON CLUSTER` 查询。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2960) +- 修正了一个错误 `arrayDistinct` 功能 `Nullable` 数组元素。 [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937) +- 该 `enable_optimize_predicate_expression` 选项现在正确支持的情况下 `SELECT *`. [张冬](https://github.com/ClickHouse/ClickHouse/pull/2929) +- 修复了重新初始化ZooKeeper会话时的段错误。 [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917) +- 与ZooKeeper工作时固定的潜在阻塞。 +- 修正了不正确的代码添加嵌套的数据结构中 `SummingMergeTree`. +- 在为聚合函数的状态分配内存时,会正确考虑对齐,这使得在实现聚合函数的状态时可以使用需要对齐的操作。 [晨兴-xc](https://github.com/ClickHouse/ClickHouse/pull/2808) + +#### 安全修复: {#security-fix} + +- 安全使用ODBC数据源。 与ODBC驱动程序的交互使用单独的 `clickhouse-odbc-bridge` 过程。 第三方ODBC驱动程序中的错误不再导致服务器稳定性问题或漏洞。 [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921) +- 修正了在文件路径的不正确的验证 `catBoostPool` 表功能。 [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894) +- 系统表的内容 (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`,和 `replication_queue`)根据用户对数据库的配置访问权限进行过滤 (`allow_databases`). [张冬](https://github.com/ClickHouse/ClickHouse/pull/2856) + +#### 向后不兼容的更改: {#backward-incompatible-changes-3} + +- 在具有JOIN的查询中,星形字符将扩展为符合SQL标准的所有表中的列列表。 您可以通过设置恢复旧行为 `asterisk_left_columns_only` 在用户配置级别上为1。 + +#### 构建更改: {#build-changes-2} + +- 大多数集成测试现在可以通过commit运行。 +- 代码样式检查也可以通过提交运行。 +- 该 `memcpy` 在CentOS7/Fedora上构建时,正确选择实现。 [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912) +- 当使用clang来构建时,来自一些警告 `-Weverything` 已添加,除了常规 `-Wall-Wextra -Werror`. 
[\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957) +- 调试生成使用 `jemalloc` 调试选项。 +- 用于与ZooKeeper交互的库接口被声明为抽象。 [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950) + +## ClickHouse释放18.10 {#clickhouse-release-18-10} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-10-3-2018-08-13} + +#### 新功能: {#new-features-5} + +- HTTPS可用于复制。 [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) +- 新增功能 `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`,和 `murmurHash3_128` 除了现有的 `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791) +- 支持ClickHouse ODBC驱动程序中的可空类型 (`ODBCDriver2` 输出格式)。 [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834) +- 支持 `UUID` 在关键列。 + +#### 改进: {#improvements-5} + +- 当群集从配置文件中删除时,可以在不重新启动服务器的情况下删除群集。 [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777) +- 从配置文件中删除外部字典时,可以在不重新启动服务器的情况下删除它们。 [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779) +- 已添加 `SETTINGS` 支持 `Kafka` 表引擎。 [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781) +- 改进的 `UUID` 数据类型(尚未完成)。 [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618) +- 支持合并后的空部件 `SummingMergeTree`, `CollapsingMergeTree` 和 `VersionedCollapsingMergeTree` 引擎 [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815) +- 已完成突变的旧记录将被删除 (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784) +- 添加了 `system.merge_tree_settings` 桌子 [基里尔\*什瓦科夫](https://github.com/ClickHouse/ClickHouse/pull/2841) +- 该 `system.tables` 表现在具有依赖列: `dependencies_database` 和 `dependencies_table`. [张冬](https://github.com/ClickHouse/ClickHouse/pull/2851) +- 添加了 `max_partition_size_to_drop` 配置选项。 [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782) +- 添加了 `output_format_json_escape_forward_slashes` 选项。 [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812) +- 添加了 `max_fetch_partition_retries_count` 设置。 [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831) +- 添加了 `prefer_localhost_replica` 用于禁用本地副本的首选项以及在不进程间交互的情况下转到本地副本的设置。 [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832) +- 该 `quantileExact` 聚合函数返回 `nan` 在聚合在一个空的情况下 `Float32` 或 `Float64` 预备 [李尚迪](https://github.com/ClickHouse/ClickHouse/pull/2855) + +#### 错误修复: {#bug-fixes-14} + +- 删除了ODBC的连接字符串参数的不必要的转义,这使得无法建立连接。 此错误发生在版本18.6.0中。 +- 修正了处理逻辑 `REPLACE PARTITION` 复制队列中的命令。 如果有两个 `REPLACE` 对于同一个分区的命令,不正确的逻辑可能会导致其中一个保留在复制队列中而无法执行。 [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814) +- 修正了一个合并错误,当所有的数据部分都是空的(从合并或从形成的部分 `ALTER DELETE` 如果所有数据都被删除)。 此错误出现在18.1.0版本。 [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930) +- 修复了并发错误 `Set` 或 `Join`. [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2823) +- 修正了 `Block structure mismatch in UNION stream: different number of columns` 发生的错误 `UNION ALL` 子查询内的查询,如果一个 `SELECT` 查询包含重复的列名。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2094) +- 修复了连接到MySQL服务器时发生异常时的内存泄漏。 +- 在查询错误的情况下修复了不正确的clickhouse客户端响应代码。 +- 修正了包含DISTINCT的实例化视图的不正确行为。 [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795) + +#### 向后不兼容的更改 {#backward-incompatible-changes-4} + +- 删除了对分布式表的检查表查询的支持。 + +#### 构建更改: {#build-changes-3} + +- 分配器已被替换: `jemalloc` 现在用来代替 `tcmalloc`. 
在某些情况下,这使速度提高了最多20%,但也有一些查询变慢了20%。 在某些情况下,内存消耗减少了大约10%,稳定性也有所提高。 在高并发负载下,用户态和内核态的CPU使用率略有增加。 [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773) +- 从子模块构建libressl。 [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807) +- 从子模块构建unixodbc。 [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789) +- 从子模块构建mariadb-connector-c。 [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785) +- 将功能测试文件添加到存储库中,这些测试依赖于测试数据的可用性(暂时不包含测试数据本身)。 + +## ClickHouse释放18.6 {#clickhouse-release-18-6} + +### ClickHouse释放18.6.0,2018-08-02 {#clickhouse-release-18-6-0-2018-08-02} + +#### 新功能: {#new-features-6} + +- 增加了JOIN ON语法对表达式的支持: `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]` 表达式必须是由AND运算符连接的等式链。 等式的每一侧都可以是其中一个表的列上的任意表达式。 右表支持使用完全限定的列名 (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`)。 [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742) +- 可以为复制启用HTTPS。 [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) + +#### 改进: {#improvements-6} + +- 服务器会将其版本的补丁号传递给客户端。 补丁版本号的数据位于 `system.processes` 和 `query_log` 中。 [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646) + +## ClickHouse释放18.5 {#clickhouse-release-18-5} + +### ClickHouse释放18.5.1,2018-07-31 {#clickhouse-release-18-5-1-2018-07-31} + +#### 新功能: {#new-features-7} + +- 添加了哈希函数 `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756)。 + +#### 改进: {#improvements-7} + +- 现在可以使用 `from_env` 属性 [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) 从环境变量设置配置文件中的值。 +- 增加了 `coalesce`, `ifNull` 和 `nullIf` 函数的不区分大小写版本 [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752)。 + +#### 错误修复: {#bug-fixes-15} + +- 修复了启动副本时可能出现的错误 [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759)。 + +## ClickHouse释放18.4 {#clickhouse-release-18-4} + +### ClickHouse释放18.4.0,2018-07-28 {#clickhouse-release-18-4-0-2018-07-28} + +#### 新功能: {#new-features-8} + +- 添加系统表: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721)。 +- 增加了在 `remote` 或 `cluster` 表函数中使用表函数代替表作为参数的能力 [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708)。 +- 复制协议中支持 `HTTP Basic` 身份验证 [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727)。 +- `has` 函数现在允许在 `Enum` 值的数组中搜索数值 [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699)。 +- 支持在从 `Kafka` 读取时添加任意消息分隔符 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2701)。 + +#### 改进: {#improvements-8} + +- `ALTER TABLE t DELETE WHERE` 查询不会重写未受WHERE条件影响的数据部分 [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694)。 +- `ReplicatedMergeTree` 表的 `use_minimalistic_checksums_in_zookeeper` 选项默认启用。 此设置是在版本1.1.54378(2018-04-16)中添加的。 早于1.1.54378的版本无法再安装。 +- 支持运行指定了 `ON CLUSTER` 的 `KILL` 和 `OPTIMIZE` 查询 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2689)。 + +#### 错误修复: {#bug-fixes-16} + +- 修正了带有IN表达式的聚合出现 `Column ... is not under an aggregate function and not in GROUP BY` 错误的问题。 此错误出现在18.1.0版本。 ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) +- 修正了 `windowFunnel` 聚合函数中的一个错误(用法见下面的示例) [张冬](https://github.com/ClickHouse/ClickHouse/pull/2735)。
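上面修复的 `windowFunnel` 聚合函数用于按时间窗口统计事件链。下面是一个示意性用法(表结构 `events(user_id, ts, event)` 为假设,仅用于说明参数形式,并非文档中的固定示例):

```sql
-- 示意:找出在 3600 秒窗口内依次完成
-- view -> cart -> purchase 的用户(表结构为假设)
SELECT
    user_id,
    windowFunnel(3600)(ts, event = 'view', event = 'cart', event = 'purchase') AS level
FROM events
GROUP BY user_id
HAVING level = 3;
```

`level` 表示链中连续满足的条件个数;等于 3 说明完整走完了漏斗。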
+- 修正了 `anyHeavy` 聚合函数中的一个错误 ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)) +- 修复了使用 `countArray()` 聚合函数时的服务器崩溃。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-5} + +- `Kafka` 引擎的参数从 `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` 更改为 `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`。 如果你的表使用了 `kafka_schema` 或 `kafka_num_consumers` 参数,你必须手动编辑元数据文件 `path/metadata/database/table.sql`,并添加值为 `''` 的 `kafka_row_delimiter` 参数。 + +## ClickHouse释放18.1 {#clickhouse-release-18-1} + +### ClickHouse释放18.1.0,2018-07-23 {#clickhouse-release-18-1-0-2018-07-23} + +#### 新功能: {#new-features-9} + +- 支持对非复制MergeTree表执行 `ALTER TABLE t DELETE WHERE` 查询(用法见本列表后的示例) ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634))。 +- `uniq*` 聚合函数族支持任意类型 ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010))。 +- 比较运算符支持任意类型 ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026))。 +- `users.xml` 文件允许以 `10.0.0.1/255.255.255.0` 的格式设置子网掩码。 这对于在中间含有零的IPv6网络上使用掩码是必要的 ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637))。 +- 添加了 `arrayDistinct` 函数 ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670))。 +- SummingMergeTree引擎现在可以使用AggregateFunction类型的列 ([康斯坦丁\*潘](https://github.com/ClickHouse/ClickHouse/pull/2566))。
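下面用一个示意性例子演示上面新功能中的 `ALTER TABLE t DELETE WHERE`(表名 `hits` 与列 `EventDate` 均为假设):

```sql
-- 示意:按条件删除非复制 MergeTree 表中的数据
ALTER TABLE hits DELETE WHERE EventDate < '2017-01-01';

-- 删除以"突变"(mutation)的形式在后台异步执行,
-- 其进度可以在 system.mutations 表中跟踪(该表在 1.1.54388 中引入)
SELECT mutation_id, is_done FROM system.mutations WHERE table = 'hits';
```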
+- 该 `system.tables` 在分布式查询中使用表现在可以正常工作。 该 `metadata_modification_time` 和 `engine_full` 列现在是非虚拟的。 修复了仅从表中查询这些列时发生的错误。 +- 固定如何空 `TinyLog` 表插入一个空数据块后工作 ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)). +- 该 `system.zookeeper` 如果ZooKeeper中节点的值为NULL,表就可以工作。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-1-1-54390-2018-07-06} + +#### 新功能: {#new-features-11} + +- 查询可以在发送 `multipart/form-data` 格式(在 `query` 字段),如果外部数据也被发送用于查询处理,这是有用的 ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)). +- 增加了在读取CSV格式数据时启用或禁用处理单引号或双引号的功能。 您可以在 `format_csv_allow_single_quotes` 和 `format_csv_allow_double_quotes` 设置 ([阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2574)). +- 现在 `OPTIMIZE TABLE ... FINAL` 可以在不指定非复制变体的分区的情况下使用 `MergeTree` ([阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2599)). + +#### 改进: {#improvements-10} + +- 在可以使用表索引时使用IN运算符提高性能、减少内存消耗并正确跟踪内存消耗 ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)). +- 删除添加数据部分时校验和的冗余检查。 当存在大量副本时,这一点很重要,因为在这些情况下,检查的总数等于N^2。 +- 增加了对 `Array(Tuple(...))` 对于参数 `arrayEnumerateUniq` 功能 ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)). +- 已添加 `Nullable` 支持 `runningDifference` 功能 ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)). +- 当存在大量表达式时,改进了查询分析性能 ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)). +- 更快地选择用于合并的数据部分 `ReplicatedMergeTree` 桌子 更快地恢复ZooKeeper会话 ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)). +- 该 `format_version.txt` 文件 `MergeTree` 如果表丢失,则重新创建表,如果在没有文件的情况下复制目录结构后启动ClickHouse,这是有意义的 ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)). + +#### 错误修复: {#bug-fixes-19} + +- 修复了与ZooKeeper一起工作时的错误,这可能会导致无法在重新启动服务器之前恢复表的会话和只读状态。 +- 修复了与ZooKeeper一起工作时的错误,如果会话中断,可能会导致旧节点不被删除。 +- 修正了一个错误 `quantileTDigest` Float参数的函数(此错误在版本1.1.54388中引入) ([米哈伊尔\*苏林](https://github.com/ClickHouse/ClickHouse/pull/2553)). +- 修复了MergeTree表索引中的一个错误,如果主键列位于函数内部,用于在相同大小的有符号和无符号整数之间转换类型 ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)). +- 如果修复段错误 `macros` 使用,但它们不在配置文件中 ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)). +- 修复了重新连接客户端时切换到默认数据库的问题 ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)). +- 修正了当发生的错误 `use_index_for_in_with_subqueries` 设置被禁用。 + +#### 安全修复: {#security-fix-1} + +- 当连接到MySQL时,发送文件不再可能 (`LOAD DATA LOCAL INFILE`). + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-1-1-54388-2018-06-28} + +#### 新功能: {#new-features-12} + +- 支持 `ALTER TABLE t DELETE WHERE` 查询复制的表。 添加了 `system.mutations` 表来跟踪这种类型的查询的进度。 +- 支持 `ALTER TABLE t [REPLACE|ATTACH] PARTITION` 查询\*MergeTree表。 +- 支持 `TRUNCATE TABLE` 查询 ([张冬](https://github.com/ClickHouse/ClickHouse/pull/2260)) +- 几个新的 `SYSTEM` 复制表的查询 (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`). +- 增加了使用MySQL引擎和相应的表函数写入表的能力 ([三弟](https://github.com/ClickHouse/ClickHouse/pull/2294)). +- 添加了 `url()` 表功能和 `URL` 表引擎 ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)). +- 添加了 `windowFunnel` 聚合函数 ([三弟](https://github.com/ClickHouse/ClickHouse/pull/2352)). +- 新 `startsWith` 和 `endsWith` 字符串的函数 ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)). +- 该 `numbers()` 表函数现在允许您指定偏移量 ([张冬](https://github.com/ClickHouse/ClickHouse/pull/2535)). +- 密码 `clickhouse-client` 可以交互输入。 +- 服务器日志现在可以发送到系统日志 ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)). +- 支持使用共享库源登录字典 ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)). 
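作为本节若干新功能的一个简短示意(`numbers()` 表函数的偏移量形式以及 `startsWith`/`endsWith` 字符串函数):

```sql
-- 示意:numbers(offset, count) 从 10 开始生成 5 个连续数字
SELECT number FROM numbers(10, 5);

-- 示意:startsWith / endsWith 返回 UInt8(1 或 0)
SELECT
    startsWith('clickhouse', 'click') AS s,  -- 1
    endsWith('clickhouse', 'house')   AS e;  -- 1
```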
+- 支持自定义CSV分隔符 ([伊万\*朱可夫](https://github.com/ClickHouse/ClickHouse/pull/2263)) +- 添加了 `date_time_input_format` 设置。 如果将此设置切换到 `'best_effort'`,日期时间值将以各种格式读取。 +- 添加了 `clickhouse-obfuscator` 用于数据混ob的实用程序。 用法示例:发布性能测试中使用的数据。 + +#### 实验特点: {#experimental-features-2} + +- 增加了计算能力 `and` 只有在需要的地方才能参数 ([阿纳斯塔西娅Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272)) +- Jit编译为本机代码现在可用于某些表达式 ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)). + +#### 错误修复: {#bug-fixes-20} + +- 对于具有以下内容的查询,不再显示重复项 `DISTINCT` 和 `ORDER BY`. +- 查询与 `ARRAY JOIN` 和 `arrayFilter` 不再返回不正确的结果。 +- 修复了从嵌套结构读取数组列时的错误 ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)). +- 修复了使用HAVING子句分析查询时出现的错误,如 `HAVING tuple IN (...)`. +- 修复了使用递归别名分析查询时出现的错误。 +- 修复了从REPLACINGMERGETREE读取过滤所有行的PREWHERE中的条件时出现的错误 ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)). +- 在HTTP界面中使用会话时,未应用用户配置文件设置。 +- 修复了如何从clickhouse-local中的命令行参数应用设置。 +- ZooKeeper客户端库现在使用从服务器接收的会话超时。 +- 修正了ZooKeeper客户端库中的一个错误,当客户端等待服务器响应时间超过超时时间。 +- 修剪部分的查询与分区键列的条件 ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)). +- 合并后,现在可以 `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)). +- ODBC表函数中的类型映射已修复 ([三弟](https://github.com/ClickHouse/ClickHouse/pull/2268)). +- 类型比较已修复 `DateTime` 有和没有时区 ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)). +- 修正了语法解析和格式化的 `CAST` 接线员 +- 固定插入到分布式表引擎的实例化视图中 ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)). +- 修正了从写入数据时的争用条件 `Kafka` 引擎到实例化视图 ([刘杨宽](https://github.com/ClickHouse/ClickHouse/pull/2448)). +- 固定ssrf中的remote()表函数。 +- 固定退出行为 `clickhouse-client` 在多行模式下 ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)). + +#### 改进: {#improvements-11} + +- 复制表中的后台任务现在在线程池中执行,而不是在单独的线程中执行 ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)). +- 改进的LZ4压缩性能。 +- 更快地分析具有大量联接和子查询的查询。 +- 当有太多的网络错误时,DNS缓存现在会自动更新。 +- 如果由于其中一个实例化视图包含太多部件而无法插入表格插入,则不再发生表格插入。 +- 纠正了事件计数器中的差异 `Query`, `SelectQuery`,和 `InsertQuery`. +- 像这样的表达式 `tuple IN (SELECT tuple)` 如果元组类型匹配,则允许。 +- 即使您没有配置ZooKeeper,具有复制表的服务器也可以启动。 +- 在计算可用CPU内核数时,现在考虑了cgroups的限制 ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)). +- 在systemd配置文件中添加了配置目录的chown ([米哈伊尔Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)). + +#### 构建更改: {#build-changes-4} + +- Gcc8编译器可用于构建。 +- 增加了从子模块构建llvm的能力。 +- Librdkafka库的版本已更新为v0.11.4。 +- 增加了使用系统libcpuid库的能力。 库版本已更新为0.4.0。 +- 使用vectorclass库修复了构建 ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)). +- Cmake现在默认情况下为ninja生成文件(如使用 `-G Ninja`). +- 添加了使用libtinfo库而不是libtermcap的功能 ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)). +- 修复了Fedora Rawhide中的头文件冲突 ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)). + +#### 向后不兼容的更改: {#backward-incompatible-changes-7} + +- 删除逃逸 `Vertical` 和 `Pretty*` 格式和删除 `VerticalRaw` 格式。 +- 如果在分布式查询中同时使用版本1.1.54388(或更高版本)的服务器和版本较旧的服务器,并且查询具有 `cast(x, 'Type')` 表达式没有 `AS` 关键字并没有这个词 `cast` 以大写形式,将引发一个异常,并显示如下消息 `Not found column cast(0, 'UInt8') in block`. 
解决方法:更新集群上的所有服务器。 + +### ClickHouse释放1.1.54385,2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01} + +#### 错误修复: {#bug-fixes-21} + +- 修复了在某些情况下导致ZooKeeper操作阻塞的错误。 + +### ClickHouse释放1.1.54383,2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22} + +#### 错误修复: {#bug-fixes-22} + +- 修正了当一个表有许多副本时复制队列变慢的问题。 + +### ClickHouse释放1.1.54381,2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14} + +#### 错误修复: {#bug-fixes-23} + +- 修复了ClickHouse与ZooKeeper服务器断开连接时ZooKeeper中的节点泄漏问题。 + +### ClickHouse释放1.1.54380,2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21} + +#### 新功能: {#new-features-13} + +- 增加了表函数 `file(path, format, structure)`。 从 `/dev/urandom` 读取字节的示例: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`,然后 `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`。 + +#### 改进: {#improvements-12} + +- 子查询可以用 `()` 括号包装,以增强查询的可读性。 例如: `(SELECT 1) UNION ALL (SELECT 1)`。 +- 对 `system.processes` 表的简单 `SELECT` 查询不计入 `max_concurrent_queries` 限制。 + +#### 错误修复: {#bug-fixes-24} + +- 修正了从 `MATERIALIZED VIEW` 中选择时 `IN` 运算符的不正确行为。 +- 修正了按形如 `partition_key_column IN (...)` 的表达式进行分区索引过滤时的错误。 +- 修复了在对表执行过 `RENAME` 之后无法在非领导(non-leader)副本上执行 `OPTIMIZE` 查询的问题。 +- 修复了在非领导副本上执行 `OPTIMIZE` 或 `ALTER` 查询时的授权错误。 +- 修复了 `KILL QUERY` 的冻结问题。 +- 修复了ZooKeeper客户端库中的一个错误:当ZooKeeper配置中使用非空的 `chroot` 前缀时,会导致watch丢失、分布式DDL队列冻结以及复制队列变慢。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-8} + +- 删除了对形如 `(a, b) IN (SELECT (a, b))` 的表达式的支持(可以使用等效表达式 `(a, b) IN (SELECT a, b)`)。 在以前的版本中,这些表达式会导致不确定的 `WHERE` 过滤或产生错误。 + +### ClickHouse释放1.1.54378,2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16} + +#### 新功能: {#new-features-14} + +- 可以在不重新启动服务器的情况下更改日志记录级别。 +- 添加了 `SHOW CREATE DATABASE` 查询。 +- `query_id` 可以传递给 `clickhouse-client`(elBroom)。 +- 新设置: `max_network_bandwidth_for_all_users`。 +- 增加了对 `MATERIALIZED VIEW` 执行 `ALTER TABLE ... PARTITION ...` 的支持。 +- 在系统表中以未压缩形式添加了有关数据部分大小的信息。 +- 对分布式表的服务器间加密支持(在 `<remote_servers>` 的副本配置中设置 `<secure>1</secure>`)。 +- `ReplicatedMergeTree` 家族的表级配置,以最大限度地减少存储在ZooKeeper中的数据量: `use_minimalistic_checksums_in_zookeeper = 1`。 +- `clickhouse-client` 提示符可配置。 默认情况下,提示符中现在会输出服务器名称。 服务器的显示名称可以更改,它还会通过 `X-ClickHouse-Display-Name` HTTP头发送(基里尔Shvakov)。 +- 可以为 `Kafka` 引擎指定多个逗号分隔的 `topics`(托比亚斯\*亚当森) +- 当查询被 `KILL QUERY` 或 `replace_running_query` 停止时,客户端会收到 `Query was canceled` 异常,而不是不完整的结果。 + +#### 改进: {#improvements-13} + +- `ALTER TABLE ... DROP/DETACH PARTITION` 查询在复制队列的前面运行。 +- `SELECT ... FINAL` 和 `OPTIMIZE ... FINAL` 即使表只有单个数据部分也可以使用。 +- 如果 `query_log` 表被手动删除,它会被即时重新创建(基里尔Shvakov)。 +- `lengthUTF8` 函数运行得更快(zhang2014)。 +- 当分片数量非常大时,改进了向 `Distributed` 表同步插入(`insert_distributed_sync = 1`)的性能。 +- 服务器接受客户端发来的 `send_timeout` 和 `receive_timeout` 设置,并在与该客户端的连接上应用它们(两者互换应用:服务器套接字的 `send_timeout` 被设为从客户端收到的 `receive_timeout` 值,反之亦然)。 +- 更稳健的崩溃恢复:向 `Distributed` 表的异步插入。 +- `countEqual` 函数的返回类型从 `UInt32` 更改为 `UInt64`(谢磊)。 + +#### 错误修复: {#bug-fixes-25} + +- 修正了当表达式左侧为 `Nullable` 时 `IN` 运算符的错误。 +- 当元组与 `IN` 一起使用且部分元组分量存在于表索引中时,现在会返回正确的结果。 +- `max_execution_time` 限制现在对分布式查询正常生效。 +- 修正了 `system.columns` 表中计算复合列大小时的错误。 +- 修复了 `CREATE TEMPORARY TABLE IF NOT EXISTS` 创建临时表时的错误。 +- 修正了 `StorageKafka` 中的错误 (\#\#2075) +- 修复了某些聚合函数的无效参数导致的服务器崩溃。 +- 修正了妨碍 `DETACH DATABASE` 查询停止 `ReplicatedMergeTree` 表后台任务的错误。 +- 插入到聚合物化视图时,`Too many parts` 状态更不容易发生(\#\#2084)。 +- 如果一个替换必须跟随同一级别的另一个替换,则更正了配置中替换的递归处理。 +- 更正了使用 `UNION ALL` 查询的 `VIEW` 在创建元数据文件时生成的语法(示例见下)。
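上一条修复所涉及的视图形如下面的最小示例(仅为示意):

```sql
-- 示意:包含 UNION ALL 的视图;修复之前,这类视图写入元数据文件的
-- 语法可能不正确,导致之后重新加载该视图失败
CREATE VIEW v AS
SELECT 1 AS x
UNION ALL
SELECT 2;
```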
+- `SummingMergeTree` 现在可以正常使用复合键对嵌套数据结构进行求和。 +- 修复了在选择领导者时出现竞争条件的可能性 `ReplicatedMergeTree` 桌子 + +#### 构建更改: {#build-changes-5} + +- 构建支持 `ninja` 而不是 `make` 和用途 `ninja` 默认情况下,构建版本。 +- 重命名的软件包: `clickhouse-server-base` 在 `clickhouse-common-static`; `clickhouse-server-common` 在 `clickhouse-server`; `clickhouse-common-dbg` 在 `clickhouse-common-static-dbg`. 要安装,请使用 `clickhouse-server clickhouse-client`. 具有旧名称的软件包仍将加载到存储库中,以便向后兼容。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-9} + +- 如果在左侧指定了数组,则删除了IN表达式的特殊解释。 以前,表达式 `arr IN (set)` 被解释为 “at least one `arr` element belongs to the `set`”. 要在新版本中获得相同的行为,请编写 `arrayExists(x -> x IN (set), arr)`. +- 禁用套接字选项的不正确使用 `SO_REUSEPORT`,默认情况下,Poco库中未正确启用。 请注意,在Linux上,不再有任何理由同时指定地址 `::` 和 `0.0.0.0` for listen – use just `::`,它允许监听通过IPv4和IPv6的连接(使用默认的内核配置设置)。 您还可以通过指定以下命令恢复到以前版本中的行为 `1` 在配置。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-1-1-54370-2018-03-16} + +#### 新功能: {#new-features-15} + +- 添加了 `system.macros` 更改配置文件时,宏的表和自动更新。 +- 添加了 `SYSTEM RELOAD CONFIG` 查询。 +- 添加了 `maxIntersections(left_col, right_col)` 聚合函数,它返回同时相交间隔的最大数目 `[left; right]`. 该 `maxIntersectionsPosition(left, right)` 函数返回的开始 “maximum” 间隔。 ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)). + +#### 改进: {#improvements-14} + +- 当在一个插入数据 `Replicated` 表,较少的请求是由 `ZooKeeper` (和大多数用户级错误已经从消失 `ZooKeeper` 日志)。 +- 添加了为数据集创建别名的功能。 示例: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`. + +#### 错误修复: {#bug-fixes-26} + +- 修正了 `Illegal PREWHERE` 从合并表读取时出错 `Distributed`桌子 +- 添加了修复,允许您在仅支持IPv4的Docker容器中启动clickhouse-server。 +- 修正了从系统读取时的争用条件 `system.parts_columns tables.` +- 同步插入到一个过程中删除双缓冲 `Distributed` 表,这可能导致连接超时。 +- 修正了一个错误,导致过长的等待不可用的副本开始之前 `SELECT` 查询。 +- 在固定不正确的日期 `system.parts` 桌子 +- 修正了一个错误,使得它无法在插入数据 `Replicated` 表if `chroot` 是非空的配置 `ZooKeeper` 集群。 +- 修正了一个空的垂直合并算法 `ORDER BY` 桌子 +- 恢复了在对远程表的查询中使用字典的能力,即使这些字典不存在于请求者服务器上。 此功能在版本1.1.54362中丢失。 +- 恢复查询的行为,如 `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` 当右侧的 `IN` 应该使用远程 `default.table` 而不是当地的 此行为在版本1.1.54358中被破坏。 +- 删除了无关的错误级别日志记录 `Not found column ... in block`. + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-1-1-54362-2018-03-11} + +#### 新功能: {#new-features-16} + +- 聚合不 `GROUP BY` 对于一个空集(如 `SELECT count(*) FROM table WHERE 0`)现在返回一个结果,其中一行为聚合函数带有null值,符合SQL标准。 要恢复旧行为(返回一个空结果),请设置 `empty_result_for_aggregation_by_empty_set` 到1。 +- 增加了类型转换 `UNION ALL`. 不同的别名被允许 `SELECT` 在职位 `UNION ALL`,符合SQL标准。 +- 任意表达式支持 `LIMIT BY` 条款 以前,只能使用以下内容产生的列 `SELECT`. +- 的索引 `MergeTree` 表用于以下情况 `IN` 应用于来自主键列的表达式元组。 示例: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova) +- 添加了 `clickhouse-copier` 用于在群集之间复制和重新分布数据的工具(测试版)。 +- 添加了一致的哈希函数: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. 它们可以用作分片密钥,以减少后续重新分片期间的网络流量。 +- 新增功能: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`. +- 添加了 `arrayCumSum` 功能(哈维桑塔纳)。 +- 添加了 `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`,和 `parseDateTimeBestEffortOrNull` 用于从包含各种可能格式的文本的字符串中读取DateTime的函数。 +- 数据可以在更新期间从外部字典部分重新加载(加载只是记录,其中指定字段的值大于先前的下载)(Arsen Hakobyan)。 +- 添加了 `cluster` 表功能。 示例: `cluster(cluster_name, db, table)`. 
该 `remote` 表函数可以接受集群名称作为第一个参数,如果它被指定为标识符。 +- 该 `remote` 和 `cluster` 表函数可用于 `INSERT` 查询。 +- 添加了 `create_table_query` 和 `engine_full` 虚拟列到 `system.tables`桌子 该 `metadata_modification_time` 列是虚拟的。 +- 添加了 `data_path` 和 `metadata_path` 列 `system.tables`和`system.databases` 表,并添加了 `path` 列到 `system.parts` 和 `system.parts_columns` 桌子 +- 添加了关于合并的其他信息 `system.part_log` 桌子 +- 一个任意的分区键可以用于 `system.query_log` 表(基里尔Shvakov)。 +- 该 `SHOW TABLES` 查询现在还显示临时表。 添加临时表和 `is_temporary` 列到 `system.tables` (张2014)。 +- 已添加 `DROP TEMPORARY TABLE` 和 `EXISTS TEMPORARY TABLE` 查询(zhang2014)。 +- 支持 `SHOW CREATE TABLE` 对于临时表(zhang2014)。 +- 添加了 `system_profile` 内部进程使用的设置的配置参数。 +- 支持加载 `object_id` 作为一个属性 `MongoDB` 字典(帕维尔\*利特维年科)。 +- 阅读 `null` 作为加载数据的外部字典与时的默认值 `MongoDB` 资料来源(帕维尔\*利特维年科)。 +- 阅读 `DateTime` 在值 `Values` 从不带单引号的Unix时间戳格式化。 +- 故障转移支持 `remote` 当某些副本缺少请求的表时,表函数。 +- 运行时可以在命令行中复盖配置设置 `clickhouse-server`. 示例: `clickhouse-server -- --logger.level=information`. +- 实施了 `empty` 从功能 `FixedString` 参数:如果字符串完全由空字节组成,则函数返回1(zhang2014)。 +- 添加了 `listen_try`如果某些地址无法侦听,则在不退出的情况下侦听至少一个侦听地址的配置参数(对于禁用IPv4或IPv6支持的系统非常有用)。 +- 添加了 `VersionedCollapsingMergeTree` 表引擎。 +- 对于行和任意数字类型的支持 `library` 字典源. +- `MergeTree` 表可以在没有主键的情况下使用(您需要指定 `ORDER BY tuple()`). +- A `Nullable` 类型可以是 `CAST` 到非-`Nullable` 如果参数不是,则键入 `NULL`. +- `RENAME TABLE` 可以进行 `VIEW`. +- 添加了 `throwIf` 功能。 +- 添加了 `odbc_default_field_size` 选项,它允许您扩展从ODBC源加载的值的最大大小(默认情况下为1024)。 +- 该 `system.processes` 表和 `SHOW PROCESSLIST` 现在有 `is_cancelled` 和 `peak_memory_usage` 列。 + +#### 改进: {#improvements-15} + +- 结果的限制和配额不再应用于以下内容的中间数据 `INSERT SELECT` 查询或 `SELECT` 子查询。 +- 更少的虚假触发 `force_restore_data` 当检查的状态 `Replicated` 服务器启动时的表。 +- 添加了 `allow_distributed_ddl` 选项。 +- 表达式中不允许使用非确定性函数 `MergeTree` 表键。 +- 从替换文件 `config.d` 目录按字母顺序加载。 +- 的改进的性能 `arrayElement` 函数在常量多维数组的情况下,以空数组作为元素之一。 示例: `[[1], []][x]`. +- 当使用具有非常大的替换(例如,非常大的IP网络列表)的配置文件时,服务器现在启动速度更快。 +- 运行查询时,表值函数运行一次。 前情提要, `remote` 和 `mysql` 表值函数执行两次相同的查询以从远程服务器检索表结构。 +- 该 `MkDocs` 使用文档生成器。 +- 当您尝试删除表列时 `DEFAULT`/`MATERIALIZED` 取决于其他列的表达式,会抛出异常(zhang2014)。 +- 增加了解析文本格式的空行作为数字0的能力 `Float` 数据类型。 此功能以前可用,但在版本1.1.54342中丢失。 +- `Enum` 值可以用于 `min`, `max`, `sum` 和其他一些功能。 在这些情况下,它使用相应的数值。 此功能以前可用,但在版本1.1.54337中丢失。 +- 已添加 `max_expanded_ast_elements` 递归扩展别名后限制AST的大小。 + +#### 错误修复: {#bug-fixes-27} + +- 修复了错误地从子查询中删除不必要的列或未从包含以下内容的子查询中删除不必要列的情况 `UNION ALL`. +- 修正了合并的错误 `ReplacingMergeTree` 桌子 +- 在固定的同步插入 `Distributed` 表 (`insert_distributed_sync = 1`). +- 固定段错误的某些用途 `FULL` 和 `RIGHT JOIN` 在子查询中使用重复的列。 +- 固定段错误的某些用途 `replace_running_query` 和 `KILL QUERY`. +- 固定的顺序 `source` 和 `last_exception` 在列 `system.dictionaries` 桌子 +- 修正了一个错误,当 `DROP DATABASE` 查询没有删除带有元数据的文件。 +- 修正了 `DROP DATABASE` 查询为 `Dictionary` 数据库。 +- 固定的低精度 `uniqHLL12` 和 `uniqCombined` 功能基数大于100万个项目(Alex克斯Bocharov)。 +- 修复了在必要时计算隐式默认值,以便同时计算默认显式表达式 `INSERT` 查询(zhang2014)。 +- 修正了一个罕见的情况下,当一个查询 `MergeTree` 表不能完成(陈星-xc)。 +- 修正了运行时发生的崩溃 `CHECK` 查询为 `Distributed` 如果所有分片都是本地的(chenxing.xc)。 +- 修复了使用正则表达式的函数的轻微性能回归。 +- 修复了从复杂表达式创建多维数组时的性能回归。 +- 修正了一个错误,可能会导致一个额外的 `FORMAT` 部分出现在一个 `.sql` 具有元数据的文件。 +- 修复了导致 `max_table_size_to_drop` 尝试删除时应用的限制 `MATERIALIZED VIEW` 查看显式指定的表。 +- 修复了与旧客户端的不兼容性(旧客户端有时会发送数据 `DateTime('timezone')` 类型,他们不明白)。 +- 修复了阅读时的错误 `Nested` 使用以下方式添加的结构的列元素 `ALTER` 但是,这是空的旧分区,当这些列的条件移动到 `PREWHERE`. +- 修正了通过虚拟过滤表时的错误 `_table` 查询中的列 `Merge` 桌子 +- 修复了使用时的错误 `ALIAS` 列 `Distributed` 桌子 +- 修正了一个错误,使得动态编译不可能从聚合函数的查询 `quantile` 家人 +- 修复了查询执行管道中极少数情况下使用时发生的争用条件 `Merge` 具有大量表的表,并且当使用 `GLOBAL` 子查询。 +- 修复了将不同大小的数组传递给 `arrayReduce` 使用来自多个参数的聚合函数时的函数。 +- 禁止使用与查询 `UNION ALL` 在一个 `MATERIALIZED VIEW`. 
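上一条限制的含义可以用一个示意性例子说明:如下的物化视图定义现在会被服务器拒绝(此前可能导致未定义的行为):

```sql
-- 示意:物化视图的 SELECT 查询中不再允许 UNION ALL
CREATE MATERIALIZED VIEW mv
ENGINE = MergeTree ORDER BY tuple() AS
SELECT 1 AS x
UNION ALL
SELECT 2;
```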
+- 修正了服务器启动时初始化 `part_log` 系统表过程中的错误(默认情况下 `part_log` 被禁用)。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-10} + +- 删除了 `distributed_ddl_allow_replicated_alter` 选项。 默认情况下启用此行为。 +- 删除了 `strict_insert_defaults` 设置。 如果您使用此功能,请发邮件至 `clickhouse-feedback@yandex-team.com`。 +- 删除了 `UnsortedMergeTree` 引擎。 + +### ClickHouse释放1.1.54343,2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05} + +- 在分布式DDL查询和分布式表的构造函数中添加了用于定义集群名称的宏支持: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`. +- 现在,诸如 `SELECT ... FROM table WHERE expr IN (subquery)` 之类的查询会使用 `table` 的索引进行处理。 +- 在插入到复制表时改进了重复项的处理,因此它们不再减慢复制队列的执行速度。 + +### ClickHouse释放1.1.54342,2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22} + +此版本包含对以前版本1.1.54337的错误修复: + +- 修正了1.1.54337中的回归:如果默认用户具有只读访问权限,服务器会拒绝启动并显示消息 `Cannot create database in readonly mode`。 +- 修正了1.1.54337中的回归:在具有systemd的系统上,无论配置如何,日志总是写入syslog;看门狗脚本仍然使用init.d。 +- 修正了1.1.54337中的回归:Docker映像中错误的默认配置。 +- 修正了GraphiteMergeTree的非确定性行为(可以在日志消息 `Data after merge is not byte-identical to the data on another replicas` 中看到)。 +- 修复了对复制表执行优化查询后可能导致合并不一致的错误(可能在日志消息 `Part ... intersects the previous part` 中看到)。 +- 当目标表中存在物化(MATERIALIZED)列时,Buffer表现在可以正常工作(by zhang2014)。 +- 修复了NULL实现中的一个错误。 + +### ClickHouse释放1.1.54337,2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18} + +#### 新功能: {#new-features-17} + +- 增加了在表中存储多维数组和元组(`Tuple` 数据类型)的支持。 +- 支持对表函数执行 `DESCRIBE` 和 `INSERT` 查询。 `DESCRIBE` 增加了对子查询的支持。 例: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. 除 `INSERT INTO` 外还支持 `INSERT INTO TABLE`。 +- 改进了对时区的支持。 `DateTime` 数据类型可以用时区注释,该时区用于文本格式的解析和格式化。 示例: `DateTime('Europe/Moscow')`。 当在函数中为 `DateTime` 参数指定时区时,返回类型会跟踪该时区,并且值会按预期显示。 +- 新增函数 `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`。 `toRelativeHour`/`Minute`/`Second` 函数可以接受 `Date` 类型的值作为参数。 `now` 函数名称区分大小写。 +- 添加了 `toStartOfFifteenMinutes` 函数(基里尔Shvakov)。 +- 添加了用于格式化查询的 `clickhouse format` 工具。 +- 添加了 `format_schema_path` 配置参数(Marek Vavruša),用于指定 `Cap'n Proto` 格式的schema。 schema文件只能位于指定的目录中。 +- 在外部字典和模型的配置中增加了对配置替换(`incl` 和 `conf.d`)的支持(帕维尔\*亚库宁)。 +- 为 `system.settings` 表添加了一列文档说明(基里尔Shvakov)。 +- 添加了 `system.parts_columns` 表,包含 `MergeTree` 表每个数据部分中列大小的信息。 +- 添加了 `system.models` 表,包含已加载的 `CatBoost` 机器学习模型的信息。 +- 添加了 `mysql` 和 `odbc` 表函数以及对应的 `MySQL` 和 `ODBC` 表引擎,用于访问远程数据库。 此功能处于测试阶段。 +- 可以向 `groupArray` 聚合函数传递 `AggregateFunction` 类型的参数(从而可以创建某个聚合函数状态的数组)。 +- 删除了对聚合函数组合器的各种组合的限制。 例如,您可以使用 `avgForEachIf` 以及 `avgIfForEach` 聚合函数,它们具有不同的行为。 +- `-ForEach` 聚合函数组合器扩展到了多参数聚合函数的情况。 +- 增加了聚合函数对 `Nullable` 参数的支持,包括函数返回非 `Nullable` 结果的情况(由Silviu Caragea贡献)。 示例: `groupArray`, `groupUniqArray`, `topK`。 +- 为 `clickhouse-client` 添加了 `max_client_network_bandwidth`(基里尔\*什瓦科夫)。 +- 具有 `readonly = 2` 设置的用户可以使用临时表(CREATE、DROP、INSERT…)(Kirill Shvakov)。 +- `Kafka` 引擎增加了对多个消费者的支持,并扩展了 `Kafka` 的配置选项 (Marek Vavruša)。 +- 添加了 `intExp3` 和 `intExp4` 函数。 +- 添加了 `sumKahan` 聚合函数。 +- 添加了to\*Number\*OrNull函数,其中\*Number\*是数字类型。 +- `INSERT SELECT` 查询增加了对 `WITH` 子句的支持(作者:zhang2014)。 +- 添加设置: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`.
特别是,这些设置用于下载用于复制的数据部分。 如果网络过载,更改这些设置可以更快地进行故障转移。 +- 增加了对 `ALTER` 对于类型的表 `Null` (Anastasiya Tsarkova) +- 该 `reinterpretAsString` 函数扩展为连续存储在内存中的所有数据类型。 +- 添加了 `--silent` 选项的 `clickhouse-local` 工具 它禁止在stderr中打印查询执行信息。 +- 增加了对读取类型值的支持 `Date` 从使用单个数字而不是两个数字(Amos Bird)指定月份和/或月份日的格式的文本。 + +#### 性能优化: {#performance-optimizations} + +- 改进聚合函数的性能 `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` 从字符串参数。 +- 改进功能的性能 `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`. +- 改进了解析和格式化的性能 `Date` 和 `DateTime` 以文本格式键入值。 +- 改进了解析浮点数的性能和精度。 +- 降低内存使用量 `JOIN` 在左部分和右部分具有不包含在相同名称的列的情况下 `USING` . +- 改进聚合函数的性能 `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` 通过降低计算稳定性。 旧函数的名称下可用 `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`. + +#### 错误修复: {#bug-fixes-28} + +- 固定数据重复数据删除运行后 `DROP` 或 `DETACH PARTITION` 查询。 在以前的版本中,删除分区并再次插入相同的数据不起作用,因为插入的块被认为是重复的。 +- 修复了可能导致错误解释的错误 `WHERE` 条款 `CREATE MATERIALIZED VIEW` 查询与 `POPULATE` . +- 修正了在使用 `root_path` 在参数 `zookeeper_servers` 配置。 +- 通过固定意外的结果 `Date` 论据 `toStartOfDay` . +- 修正了 `addMonths` 和 `subtractMonths` 函数和算术 `INTERVAL n MONTH` 在情况下,当结果有前一年。 +- 增加了缺少的支持 `UUID` 数据类型 `DISTINCT` , `JOIN` ,和 `uniq` 聚合函数和外部字典(叶夫根尼伊万诺夫)。 支持 `UUID` 仍然是不完整的。 +- 固定 `SummingMergeTree` 行为的情况下,当行相加为零。 +- 各种修复 `Kafka` engine (Marek Vavruša). +- 修正了不正确的行为 `Join` 表引擎(阿莫斯鸟)。 +- 修复了FreeBSD和OS X下不正确的分配器行为。 +- 该 `extractAll` 函数现在支持空匹配。 +- 修复了阻止使用的错误 `libressl` 而不是 `openssl` . +- 修正了 `CREATE TABLE AS SELECT` 从临时表查询。 +- 修复了更新复制队列的非原子性。 这可能导致副本在服务器重新启动之前不同步。 +- 修正了可能的溢出 `gcd` , `lcm` 和 `modulo` (`%` 运营商)(Maks Skorokhod)。 +- `-preprocessed` 现在更改后创建文件 `umask` (`umask` 可以在配置中更改)。 +- 修正了部分的背景检查中的错误 (`MergeTreePartChecker` )使用自定义分区密钥时。 +- 元组的固定解析(的值 `Tuple` 数据类型)的文本格式。 +- 改进了有关传递到的不兼容类型的错误消息 `multiIf` , `array` 和其他一些功能。 +- 重新设计的支持 `Nullable` 类型。 修复了可能导致服务器崩溃的错误。 修正了与几乎所有其他错误 `NULL` 支持:insert SELECT中的类型转换不正确,HAVING和PREWHERE中对Nullable的支持不足, `join_use_nulls` 模式,可以为Null的类型作为参数 `OR` 操作员等。 +- 修正了与数据类型的内部语义相关的各种错误。 例子:不必要的总结 `Enum` 输入字段 `SummingMergeTree` ;对齐 `Enum` 类型 `Pretty` 格式等。 +- 对复合列的允许组合进行更严格的检查。 +- 修复了指定一个非常大的参数时的溢出 `FixedString` 数据类型。 +- 修正了一个错误 `topK` 一般情况下的聚合函数。 +- 在聚合函数的n元变体的参数中添加了对数组大小相等性的缺失检查。 `-Array` combinator +- 修正了一个错误 `--pager` 为 `clickhouse-client` (作者:ks1322)。 +- 固定的精度 `exp10` 功能。 +- 固定的行为 `visitParamExtract` 功能更好地符合文档。 +- 修复了指定不正确的数据类型时的崩溃。 +- 固定的行为 `DISTINCT` 在所有列都是常量的情况下。 +- 在使用的情况下固定的查询格式 `tupleElement` 使用复数常量表达式作为元组元素索引的函数。 +- 修正了一个错误 `Dictionary` 表 `range_hashed` 字典 +- 修正了导致结果中的过多行的错误 `FULL` 和 `RIGHT JOIN` (阿莫斯鸟)。 +- 修复了在创建和删除临时文件时的服务器崩溃 `config.d` 配置重新加载期间的目录。 +- 修正了 `SYSTEM DROP DNS CACHE` 查询:缓存已刷新,但群集节点的地址未更新。 +- 固定的行为 `MATERIALIZED VIEW` 执行后 `DETACH TABLE` for the table under the view (Marek Vavruša). + +#### 构建改进: {#build-improvements-4} + +- 该 `pbuilder` 工具用于构建。 构建过程几乎完全独立于构建主机环境。 +- 单个构建用于不同的操作系统版本。 软件包和二进制文件已经与各种Linux系统兼容。 +- 添加了 `clickhouse-test` 包。 它可用于运行功能测试。 +- 现在可以将源代码包发布到存储库。 它可以用来在不使用GitHub的情况下重现构建。 +- 增加了有限的集成与特拉维斯CI。 由于Travis中的构建时间限制,仅测试调试构建并运行有限的测试子集。 +- 增加了对 `Cap'n'Proto` 在默认构建中。 +- 更改文档来源的格式 `Restricted Text` 到 `Markdown`. +- 增加了对 `systemd` (弗拉基米尔\*斯米尔诺夫)。 默认情况下,由于与某些操作系统映像不兼容,它被禁用,并且可以手动启用。 +- 用于动态代码生成, `clang` 和 `lld` 嵌入到 `clickhouse` 二进制 它们也可以被调用为 `clickhouse clang` 和 `clickhouse lld` . +- 从代码中删除GNU扩展的使用。 启用 `-Wextra` 选项。 当与建设 `clang` 默认值为 `libc++` 而不是 `libstdc++`. 
+- 提取 `clickhouse_parsers` 和 `clickhouse_common_io` 库,以加快各种工具的构建。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-11} + +- 标记的格式 `Log` 键入包含以下内容的表 `Nullable` 列以向后不兼容的方式进行了更改。 如果你有这些表,你应该将它们转换为 `TinyLog` 在启动新服务器版本之前键入。 要做到这一点,替换 `ENGINE = Log` 与 `ENGINE = TinyLog` 在相应的 `.sql` 文件中的 `metadata` 目录。 如果你的桌子没有 `Nullable` 列或表的类型不是 `Log`,那么你什么都不需要做。 +- 删除了 `experimental_allow_extended_storage_definition_syntax` 设置。 现在,此功能默认启用。 +- 该 `runningIncome` 函数重命名为 `runningDifferenceStartingWithFirstvalue` 为了避免混confusion。 +- 删除了 `FROM ARRAY JOIN arr` 在FROM with no table(Amos Bird)之后直接指定数组连接时的语法。 +- 删除了 `BlockTabSeparated` 仅用于演示目的的格式。 +- 更改聚合函数的状态格式 `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. 如果您已将这些聚合函数的状态存储在表中(使用 `AggregateFunction` 数据类型或具体化视图与相应的状态),请写信给clickhouse-feedback@yandex-team.com. +- 在以前的服务器版本中,有一个未记录的功能:如果聚合函数依赖于参数,则仍然可以在AggregateFunction数据类型中指定它而不带参数。 示例: `AggregateFunction(quantiles, UInt64)` 而不是 `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. 此功能已丢失。 虽然它没有记录,但我们计划在未来的版本中再次支持它。 +- 枚举数据类型不能用于最小/最大聚合函数。 这种能力将在下一个版本中返回。 + +#### 升级时请注意: {#please-note-when-upgrading} + +- 当在群集上执行滚动更新时,当某些副本运行旧版本的ClickHouse,而某些副本运行新版本时,复制会暂时停止,并且消息 `unknown parameter 'shard'` 出现在日志中。 更新集群的所有副本后,复制将继续。 +- 如果群集服务器上运行不同版本的ClickHouse,则使用以下函数的分布式查询可能会产生不正确的结果: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. 您应该更新所有群集节点。 + +## [更新日志2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017} diff --git a/docs/zh/whats_new/changelog/2019.md b/docs/zh/whats_new/changelog/2019.md new file mode 100644 index 00000000000..f776141b14a --- /dev/null +++ b/docs/zh/whats_new/changelog/2019.md @@ -0,0 +1,2074 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 77 +toc_title: '2019' +--- + +## 碌莽禄.拢.0755-88888888 {#clickhouse-release-v19-17} + +### ClickHouse释放v19.17.6.36,2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27} + +#### 错误修复 {#bug-fix} + +- 在解压缩固定潜在的缓冲区溢出。 恶意用户可以传递制造的压缩数据,可能导致缓冲区后读取。 这个问题是由Yandex信息安全团队的Eldar Zaitov发现的。 [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修正了可能的服务器崩溃 (`std::terminate`)当服务器不能发送或写入JSON或XML格式的数据与字符串数据类型的值(需要UTF-8验证),或者当压缩结果数据与Brotli算法或在其他一些罕见的情况下。 [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从clickhouse源固定字典 `VIEW`,现在阅读这样的字典不会导致错误 `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复了用户中指定的host\_regexp是否允许客户端主机的检查。xml [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- `RENAME TABLE` 对于分布式表,现在在发送到分片之前重命名包含插入数据的文件夹。 这解决了连续重命名的问题 `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) +- `range_hashed` DDL查询创建的外部字典现在允许任意数字类型的范围。 [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([阿利沙平](https://github.com/alesapin)) +- 固定 `INSERT INTO table SELECT ... 
FROM mysql(...)` 表功能。 [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) +- 修复段错误 `INSERT INTO TABLE FUNCTION file()` 同时插入到一个不存在的文件。 现在在这种情况下,文件将被创建,然后插入将被处理。 [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) +- 修正了聚合位图和标量位图相交时的位图和错误。 [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([黄月](https://github.com/moon03432)) +- 修复段错误时 `EXISTS` 查询没有使用 `TABLE` 或 `DICTIONARY` 预选赛,就像 `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 函数的固定返回类型 `rand` 和 `randConstant` 在可为空的参数的情况下。 现在函数总是返回 `UInt32` 而且从来没有 `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 固定 `DROP DICTIONARY IF EXISTS db.dict`,现在它不会抛出异常,如果 `db` 根本不存在 [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 如果由于服务器崩溃而未完全删除表,服务器将尝试恢复并加载它 [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) +- 修正了一个简单的计数查询分布式表,如果有两个以上的分片本地表。 [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) +- 修正了导致DB::BlockStreamProfileInfo::calculateRowsBeforeLimit数据竞赛的错误() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 固定 `ALTER table MOVE part` 在合并指定部件后立即执行,这可能导致移动指定部件合并到的部件。 现在它正确地移动指定的部分。 [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 字典的表达式现在可以指定为字符串。 这对于从非ClickHouse源中提取数据时计算属性非常有用,因为它允许对这些表达式使用非ClickHouse语法。 [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([阿利沙平](https://github.com/alesapin)) +- 修正了一个非常罕见的比赛 `clickhouse-copier` 由于ZXid的溢出。 [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([丁香飞](https://github.com/dingxiangfei2009)) +- 修复了查询失败后的错误(由于 “Too many simultaneous queries” 例如)它不会读取外部表信息,并且 + 下一个请求会将此信息解释为下一个查询的开始,导致如下错误 `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) +- 避免空取消引用后 “Unknown packet X from server” [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) +- 恢复对所有ICU区域设置的支持,添加对常量表达式应用排序规则的能力,并将语言名称添加到系统。排序规则表。 [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([阿利沙平](https://github.com/alesapin)) +- 用于读取的流数 `StorageFile` 和 `StorageHDFS` 现在是有限的,以避免超过内存限制。 [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([阿利沙平](https://github.com/alesapin)) +- 固定 `CHECK TABLE` 查询为 `*MergeTree` 表没有关键. 
[\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([阿利沙平](https://github.com/alesapin)) +- 如果没有突变,则从部件名称中删除突变编号。 这种删除提高了与旧版本的兼容性。 [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([阿利沙平](https://github.com/alesapin)) +- 修复了某些附加部分因data\_version大于表突变版本而跳过突变的问题。 [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([余志昌](https://github.com/yuzhichang)) +- 允许在将部件移动到其他设备后使用冗余副本启动服务器。 [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修正了错误 “Sizes of columns doesn’t match” 使用聚合函数列时可能会出现。 [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) +- 现在在使用WITH TIES和LIMIT BY的情况下,将抛出一个异常。 现在可以使用TOP with LIMIT BY。 [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 修复字典重新加载,如果它有 `invalidate_query`,停止更新,并在以前的更新尝试一些异常。 [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([阿利沙平](https://github.com/alesapin)) + +### ClickHouse释放v19.17.4.11时,2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22} + +#### 向后不兼容的更改 {#backward-incompatible-change} + +- 使用列而不是AST来存储标量子查询结果以获得更好的性能。 设置 `enable_scalar_subquery_optimization` 在19.17中添加,默认情况下启用。 它会导致以下错误 [这](https://github.com/ClickHouse/ClickHouse/issues/7851) 在从以前的版本升级到19.17.2或19.17.3期间。 默认情况下,19.17.4中禁用此设置,以便可以从19.16及更早版本升级而不会出现错误。 [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([阿莫斯鸟](https://github.com/amosbird)) + +#### 新功能 {#new-feature} + +- 添加使用DDL查询创建字典的功能。 [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([阿利沙平](https://github.com/alesapin)) +- 赂眉露\>\> `bloom_filter` 支持的索引类型 `LowCardinality` 和 `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 添加功能 `isValidJSON` 要检查传递的字符串是否是有效的json。 [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) +- 执行 `arrayCompact` 功能 [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([备忘录](https://github.com/Joeywzr)) +- 创建函数 `hex` 对于十进制数。 它的工作原理如下 `hex(reinterpretAsString())`,但不会删除最后的零字节。 [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 添加 `arrayFill` 和 `arrayReverseFill` 函数,用数组中其他元素替换它们前面/后面的元素。 [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) +- 添加 `CRC32IEEE()`/`CRC64()` 碌莽禄support: [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) +- 执行 `char` 功能类似于一个 [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) +- 添加 `bitmapTransform` 功能。 它将位图中的值数组转换为另一个值数组,结果是一个新的位图 [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([余志昌](https://github.com/yuzhichang)) +- 已实施 `javaHashUTF16LE()` 功能 [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab)) +- 添加 `_shard_num` 分布式引擎的虚拟列 [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat)) + +#### 实验特点 {#experimental-feature} + +- 支持处理器(新的查询执行管道) `MergeTree`. 
+
+#### Experimental Feature {#experimental-feature}
+
+- Support for processors (the new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-1}
+
+- Fix incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fix a rare deadlock which can happen when `trace_log` is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Prevent message duplication when producing a Kafka table that has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
+- Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of the `SQL_FLOAT` data source type in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Fix aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Fix `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Make `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a bug with tables containing only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
+- Do not account memory for the Buffer engine in the `max_memory_usage` limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
+- Fix final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug in mutations that have a predicate with actions that require context (for example, functions for json), which could lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
+- Fix a mismatch of database and table name escaping in the `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fix a crash in this case (see the sketch after this list). [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix `Not found column in block` when joining on an expression with RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
+- One more attempt to fix the infinite loop in the `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix a bug in the `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
+- Fixed an exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the scope of InterpreterSelectQuery for views with a query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
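+
+A minimal sketch of the duplicated-key case from the JOIN fix above, with hypothetical tables `t(x)` and `u(x, y)`:
+
+```sql
+-- t.x participates in two key equalities; this combination used to crash in RIGHT/FULL JOIN
+SELECT t.x, u.y
+FROM t
+FULL JOIN u ON t.x = u.x AND t.x = u.y;
+```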
+
+#### Improvement {#improvement}
+
+- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC-bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Write the current batch for distributed send atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
+- Throw an exception if we cannot detect the table for a column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
+- Queries with `HAVING` and without `GROUP BY` assume grouping by constant. So, `SELECT 1 HAVING 1` now returns a result (see the sketch after this list). [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
+- Support parsing `(X,)` as a tuple, similar to python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
+- Make the `range` function behave almost like the pythonic one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li))
+- Add `constraints` columns to the table `system.settings` [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
+- Better Null format for the TCP handler, so that it's possible to use `select ignore() from table format Null` for perf measurement via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
+- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
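+
+Two of the parser and semantics changes above in a minimal sketch:
+
+```sql
+SELECT 1 HAVING 1;  -- HAVING without GROUP BY now assumes grouping by constant, so one row is returned
+SELECT (1,);        -- parsed as a one-element tuple, python-style
+```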
+
+#### Performance Improvement {#performance-improvement}
+
+- Improved the performance of aggregation over short string keys. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
+- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
+- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
+- Vectorize processing of `arrayReduce` similar to the Aggregator's `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- Minor improvements in the performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Add support for cross-compiling to the AARCH64 CPU architecture. Refactor the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
+- Unpack the `darwin-x86_64` and `linux-aarch64` toolchains into the mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
+- Update the Docker image for the binary packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
+- Fixed compile errors on MacOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
+- Some refactoring in the query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
+- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
+- Remove hardcoded paths in the `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
+- Allow using the mysql format without ssl [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
+
+#### Other {#other}
+
+- Added ANTLR4 grammar for the ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse Release 19.16 {#clickhouse-release-v19-16}
+
+#### ClickHouse Release v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
+
+- Fixed a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.
+
+#### ClickHouse Release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}
+
+- Fix distributed subqueries incompatibility with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) [(tabplubix)](https://github.com/tavplubix)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix the check for local addresses in `ClickHouseDictionarySource`. [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix)
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fix a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+
+#### New Feature {#new-feature-1}
+
+- Add the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
+
+### ClickHouse Release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Add missing arity validation for count/countIf. [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
+- Remove the legacy `asterisk_left_columns_only` setting (it was disabled by default). [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
+- Format strings for the Template data format are now specified in files. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
+
+#### New Feature {#new-feature-2}
+
+- Introduce uniqCombined64() to calculate cardinality greater than `UINT_MAX`. [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
+- Support Bloom filter indexes on Array columns. [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) ([achimbab](https://github.com/achimbab))
+- Add the function `getMacro(name)` that returns a String with the value of the corresponding macro substitution from the server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Set two configuration options for a dictionary based on an HTTP source: `credentials` and `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add a new ProfileEvent `Merge` that counts the number of launched background merges. [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
+- Add the fullHostName function that returns a fully qualified domain name. [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li))
+- Add the functions `arraySplit` and `arrayReverseSplit`, which split an array by “cut off” conditions. They are useful in time sequence handling. [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
+- Add new functions that return the array of all matched indices in the multiMatch family of functions. [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
+- Add a new database engine `Lazy` that is optimized for storing a large number of small log tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita Vasilev](https://github.com/nikvas0))
+- Add the aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the aggregate function combinators -OrNull and -OrDefault, which return null or default values when there is nothing to aggregate (see the sketch after this list). [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) ([hcz](https://github.com/hczhcz))
+- Introduce the CustomSeparated data format that supports custom escaping and delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
+- Support Redis as a source of external dictionaries. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton Popov](https://github.com/CurtizJ))
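+
+A minimal sketch of the new combinators, using the `numbers` table function (the filter matches nothing, so there is nothing to aggregate):
+
+```sql
+SELECT maxOrNull(number), maxOrDefault(number)
+FROM numbers(10)
+WHERE number > 100;  -- NULL and 0 instead of values computed over an empty set
+```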
+
+#### Bug Fix {#bug-fix-2}
+
+- Fix wrong query results if the query has a `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton Popov](https://github.com/CurtizJ))
+- Disabled the MariaDB authentication plugin, which depends on files outside of the project. [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy Baranov](https://github.com/yurriy))
+- Fix the exception `Cannot convert column ... because it is constant but values of constants are different in source and result`, which could rarely happen when the functions `now()`, `today()`, `yesterday()`, `randConstant()` are used. [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed using HTTP keep alive timeout instead of TCP keep alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang Yu](https://github.com/yuzhichang))
+- For materialized views, the commit for Kafka is called after all data was written. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Fixed a wrong `duration_ms` value in the `system.part_log` table. It was ten times off. [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir Chebotarev](https://github.com/excitoon))
+- A quick fix to resolve the crash in the LIVE VIEW table and re-enable all LIVE VIEW tests. [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) ([vzakaznikov](https://github.com/vzakaznikov))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don't put virtual columns into the .sql metadata when the table is created as `CREATE TABLE AS`. [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7))
+- Fix a segmentation fault in the `ATTACH PART` query. [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) ([alesapin](https://github.com/alesapin))
+- Fixed wrong results for some queries caused by the optimization of empty IN subqueries and empty INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixing an AddressSanitizer error in the LIVE VIEW getHeader() method. [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) ([vzakaznikov](https://github.com/vzakaznikov))
+
+#### Improvement {#improvement-1}
+
+- Add a message in case a `queue_wait_max_ms` wait takes place. [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat Khuzhin](https://github.com/azat))
+- Made the setting `s3_min_upload_part_size` table-level. [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) ([sundyli](https://github.com/sundy-li))
+- Squash left-hand blocks in partial merge join (optimization). [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not allow non-deterministic functions in mutations of Replicated table engines, because this can introduce inconsistencies between replicas. [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander Kazakov](https://github.com/Akazz))
+- Disable the memory tracker while converting an exception stack trace to a string. It can prevent the loss of error messages of type `Memory limit exceeded` on the server, which caused `Attempt to read after eof` exceptions on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Miscellaneous format improvements. Resolves [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) ([tavplubix](https://github.com/tavplubix))
+- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left side type. Make it work properly for compound types – Array and Tuple. [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Support missing inequalities for ASOF JOIN. It's possible to join the less-or-equal variant and the strict greater and less variants for the ASOF column in ON syntax (see the sketch after this list). [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem Zuikov](https://github.com/4ertus2))
+- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not use more than 98K of memory in the uniqCombined functions. [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat Khuzhin](https://github.com/azat))
+- Flush parts of the right-hand joining table on disk in PartialMergeJoin (if there is not enough memory). Load data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) ([Artem Zuikov](https://github.com/4ertus2))
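+
+A minimal sketch of the ASOF JOIN variants mentioned above, with hypothetical `trades` and `quotes` tables:
+
+```sql
+SELECT t.symbol, t.time, q.price
+FROM trades AS t
+ASOF JOIN quotes AS q
+ON t.symbol = q.symbol AND t.time >= q.time;  -- <=, <, > and >= are now accepted for the ASOF column
+```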
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Speed up `joinGet` with const arguments by avoiding data duplication (see the sketch after this list). [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos Bird](https://github.com/amosbird))
+- Return early if the subquery is empty. [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu))
+- Optimize parsing of SQL expressions in Values. [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) ([tavplubix](https://github.com/tavplubix))
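+
+A minimal sketch of `joinGet` over a hypothetical `Join`-engine table (following the usual `joinGet(table, value_column, key)` shape):
+
+```sql
+CREATE TABLE id_val (id UInt32, val String) ENGINE = Join(ANY, LEFT, id);
+INSERT INTO id_val VALUES (1, 'one'), (2, 'two');
+SELECT joinGet(id_val, 'val', toUInt32(1));  -- 'one'; a constant key benefits from the optimization above
+```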
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Disable some contribs for cross-compilation to Mac OS. [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7))
+- Add missing linking with PocoXML for `clickhouse_common_io`. [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat Khuzhin](https://github.com/azat))
+- Accept multiple test filter arguments in clickhouse-test. [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) ([Amos Bird](https://github.com/amosbird))
+- Added the `--client-option` parameter to `clickhouse-test` to pass additional parameters to the client. [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Preserve existing configs on rpm package upgrade. [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) ([filimonov](https://github.com/filimonov))
+- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) ([Ivan](https://github.com/abyss7))
+- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos Bird](https://github.com/amosbird))
+- Make sure `dh_clean` does not touch potential source files. [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos Bird](https://github.com/amosbird))
+- Attempt to avoid conflicts when updating from an altinity rpm – it has the config file packaged separately in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) ([filimonov](https://github.com/filimonov))
+- Optimize some header files for faster rebuilds. [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily Nemkov](https://github.com/Enmk))
+- Fix some tests that contained non-deterministic mutations. [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander Kazakov](https://github.com/Akazz))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Fix some issues in Fields found by MemorySanitizer. [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) ([tavplubix](https://github.com/tavplubix))
+- Fixed constant expressions folding for external database engines (MySQL, ODBC, JDBC). In previous versions it wasn't working for multiple constant expressions and wasn't working at all for Date, DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixing a ThreadSanitizer data race error in LIVE VIEW when accessing the `no_users_thread` variable. [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) ([vzakaznikov](https://github.com/vzakaznikov))
+- Get rid of malloc symbols in libcommon [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos Bird](https://github.com/amosbird))
+- Add the global flag `ENABLE_LIBRARIES` for disabling all libraries. [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) ([proller](https://github.com/proller))
+
+#### Code Cleanup {#code-cleanup}
+
+- Generalize the configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) ([alesapin](https://github.com/alesapin))
+- Parser for dictionaries DDL without any semantics. [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) ([alesapin](https://github.com/alesapin))
+- Split ParserCreateQuery into different smaller parsers. [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) ([alesapin](https://github.com/alesapin))
+- Small refactoring and renaming near external dictionaries. [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) ([alesapin](https://github.com/alesapin))
+- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Some improvements in DatabaseOrdinary code. [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita Vasilev](https://github.com/nikvas0))
+- Do not use iterators in the find() and emplace() methods of hash tables. [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fix getMultipleValuesFromConfig in case when the parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Remove some copy-paste (TemporaryFile and TemporaryFileStream) [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`).
+ [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Wait for all scheduled jobs that use local objects if `ThreadPool::schedule(...)` throws an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and fix the comments to make it obvious that it may throw. [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+
+## ClickHouse Release 19.15 {#clickhouse-release-19-15}
+
+### ClickHouse Release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31}
+
+#### Bug Fix {#bug-fix-3}
+
+- Added handling of `SQL_TINYINT` and `SQL_BIGINT`, and fixed handling of the `SQL_FLOAT` data source type in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Allowed having some parts on the destination disk or volume in MOVE PARTITION. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed NULL values in nullable columns through the ODBC-bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed INSERT into a Distributed non-local node with MATERIALIZED columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Fixed the function getMultipleValuesFromConfig. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Fixed using HTTP keep alive timeout instead of TCP keep alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Wait for all jobs to finish on exception (fixes rare segfaults). [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+- Don't push to MVs when inserting into a Kafka table. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Disable the memory tracker for the exception stack. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed bad code in transforming queries for external databases. [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Added an example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed bad\_variant in hashed dictionary. ([alesapin](https://github.com/alesapin))
+- Fixed a bug with segmentation fault in the ATTACH PART query. ([alesapin](https://github.com/alesapin))
+- Fixed time calculation in `MergeTreeData`. ([Vladimir Chebotarev](https://github.com/excitoon))
+- Commit to Kafka explicitly after the writing is finalized. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+### ClickHouse Release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01}
+
+#### New Feature {#new-feature-3}
+
+- Tiered storage: support using multiple storage volumes for tables with the MergeTree engine. It's possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)).
+ [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
+- Add the table function `input` for reading incoming data in an `INSERT SELECT` query. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
+- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout, but more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
+- Implement the ability to define a list of users for access to dictionaries. Only the currently connected database is used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add a `LIMIT` clause to the `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Add the `bitmapSubsetLimit(bitmap, range_start, limit)` function that returns a subset of the smallest `limit` values in the set that are not smaller than `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the `bitmapMin` and `bitmapMax` functions (see the sketch after this list). [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648) [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
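+
+A minimal sketch of the new bitmap helpers and `repeat` (values are arbitrary):
+
+```sql
+SELECT bitmapMin(bitmapBuild([3, 5, 9])), bitmapMax(bitmapBuild([3, 5, 9]));  -- 3, 9
+SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([1, 5, 7, 9]), 5, 2));     -- [5, 7]
+SELECT repeat('ab', 3);                                                       -- 'ababab'
+```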
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Implement an (in-memory) Merge Join variant that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
+
+#### Improvement {#improvement-2}
+
+- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
+- Improvements for failover of Distributed queries. Shorten the recovery time; it is also now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
+- Support numeric values for Enums directly in the `IN` section (see the sketch after this list). \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
+- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
+- Add an informational message when a client with an older version connects to the server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Remove the maximum backoff sleep time limit for sending data in Distributed tables [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
+- Add the ability to send profile events (counters) with cumulative values to graphite. It can be enabled in the server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
+- Add automatic cast of type `T` to `LowCardinality(T)` while inserting data into a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add the ability to use the function `hex` without using `reinterpretAsString` for `Float32`, `Float64`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
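+
+A minimal sketch of the Enum-in-`IN` improvement (hypothetical table):
+
+```sql
+CREATE TABLE events (e Enum8('click' = 1, 'view' = 2, 'purchase' = 3)) ENGINE = Memory;
+INSERT INTO events VALUES ('click'), ('purchase');
+SELECT count() FROM events WHERE e IN (1, 3);  -- numeric Enum values are accepted directly: returns 2
+```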
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Add gdb-index to the clickhouse binary with debug info. It will speed up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
+- Speed up deb packaging with a patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
+- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
+- Add a split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Replace `libsparsehash` with `sparsehash-c11` [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
+
+#### Bug Fix {#bug-fix-5}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes #6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a logical error causing segfaults when selecting from a Kafka empty topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Returned support for very old Linux kernels (fixes [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)) [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible data loss in an `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the function `arrayEnumerateUniqRanked` with empty arrays in params [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+- Fix the `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed an `MSan` warning while executing functions with a `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Backward Incompatible Change {#backward-incompatible-change-2}
+
+- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
+
+## ClickHouse Release 19.14 {#clickhouse-release-19-14}
+
+### ClickHouse Release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
+
+#### Bug Fix {#bug-fix-6}
+
+- This release also contains all bug fixes from 19.11.12.69.
+- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
+
+#### Bug Fix {#bug-fix-7}
+
+- Fix the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fixed subquery names in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}
+
+- Fix the [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it as a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed UBSan and MemSan failures in the function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell, because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
+
+### ClickHouse Release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
+
+#### New Feature {#new-feature-4}
+
+- `WITH FILL` modifier for `ORDER BY`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- `WITH TIES` modifier for `LIMIT`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
+- Support for wildcards in paths of the table functions `file` and `hdfs`. If the path contains wildcards, the table will be readonly. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
+- New `system.metric_log` table, which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Speed up symbol name lookup with indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed the function `symbolizeAddress` to `addressToSymbol` for consistency. The function `addressToSymbol` returns a mangled name for performance reasons and you have to apply `demangle`. Added the setting `allow_introspection_functions`, which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Table function `values` (the name is case-insensitive). It allows reading from a `VALUES` list proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984).
+ Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+- Added the ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
+- Support for removing detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
+- Table constraints. Allow adding constraints to a table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
+- Turn on the query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
+- Added two new functions: `sigmoid` and `tanh` (that are useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if a given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the `tokenbf_v1` index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
+- New function `neighbor(value, offset[, default_value])`. Allows reaching the prev/next value within a column in a block of data (see the sketch after this list). [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
+- Created a function `currentUser()` returning the login of the authorized user. Added the alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
+- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive`, which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
+- Function `bitmapRange(bitmap, range_begin, range_end)`, which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
+- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)`, which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
+- Implement support for INSERT queries with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
+- Added support for the `_partition` and `_timestamp` virtual columns to the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
+- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
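+
+A minimal sketch of two of the functions above:
+
+```sql
+SELECT number, neighbor(number, 2) AS next2, neighbor(number, -1, 0) AS prev
+FROM numbers(5);                                -- reaches values two rows ahead / one row back within the block
+SELECT hasToken('Hello World Token', 'World');  -- 1; the token must be a constant string
+```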
+
+#### Experimental Feature {#experimental-feature-2}
+
+- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
+- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in next versions.
+
+#### Bug Fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fix a segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fix per-column TTL with non-trivial column defaults. Previously, in case of a force TTL merge with the `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all – otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fix the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fix a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed wrong code in mutations that could lead to memory corruption. Fixed a segfault with reads of the address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging in the MySQL interface [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix a crash in the `quantile` and `median` functions over `Nullable(Decimal128)`.
+ [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed possible incomplete results returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to a Float type. It was caused by incorrect checking of monotonicity of the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fix a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows) [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the MySQL wire protocol (used while connecting to ClickHouse from a MySQL client). Caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed a memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix a rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
+- Allow protobuf messages with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
+- Resolve a bug with the `nullIf` function when we send a `NULL` argument as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix a rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields, which led to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed Gorilla encoding on small sequences, which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
+- Allow using not-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
+- Disable `Poco::AbstractConfiguration` substitutions in queries in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock in `REPLACE PARTITION`.
+ [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using `arrayReduce` for constant arguments could lead to a segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix inconsistent parts which can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed a hang in the `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug with incorrect skip indices serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
+- Fix the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fix the initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+- Clearing the data buffer from the previous read operation that was completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
+- Fix a bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed a possible crash during server startup in case an exception happened in `libunwind` during an exception on access to an uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix a crash in the `yandexConsistentHash` function. Found by fuzz testing. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a segfault when decoding the symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
+- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`.
+ [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed extra quoting of descriptions in the `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a possible deadlock in `TRUNCATE` of a Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix reading in order of the sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fix a bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. The behaviour was exactly as in the C or C++ languages (integer promotion rules), which may be surprising. Please note that overflow is still possible when dividing a large signed number by a large unsigned number or vice-versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Limit the maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `FormatFactory` behaviour for input streams which are not implemented as processors. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
+- Typo in the error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
+- Fixed an error while parsing a column list from a string if the type contained a comma (this issue was relevant for the `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Security Fix {#security-fix}
+
+- This release also contains all bug security fixes from 19.13 and 19.11.
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-3}
+
+- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
+- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and to filter expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
+- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
+- Remove the `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `ASOF JOIN` with the `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
+- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
+- Allow to `ATTACH` live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
+- When determining the shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
+- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction sets. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support push-down predicates for final subqueries. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better `JOIN ON` keys extraction [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
+- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Optimize the selection of the smallest column for a `SELECT count()` query. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
+- Added a `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only for unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
+- Safer interface of `mysqlxx::Pool`.
+ [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
+- The options line size when executing with the `--help` option now corresponds with the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
+- Disable the “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
+- The HTTP status code for the `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
+- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` iface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Rewrote the MySQL compatibility server to prevent loading the full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
+- Move the AST alias interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
+- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-copier`: Allow using `where_condition` from the config with the `partition_key` alias in queries for checking partition existence (earlier it was used only in reading data queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
+- Added an optional message argument in `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
+- A server exception raised while sending insertion data is now being processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
+- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move most of the JOINs preparation logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
+- Better information messages about the lack of Linux capabilities. Logging fatal errors with the “fatal” level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When dumping temporary data to disk is enabled to restrict memory usage during `GROUP BY`, `ORDER BY`, it didn't check the free disk space. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than the threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`.
[\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([徐伟清](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 通过线程删除递归rwlock。 这是没有意义的,因为线程在查询之间重用。 `SELECT` 查询可以在一个线程中获取锁,从另一个线程持有锁并从第一个线程退出。 在同一时间,第一个线程可以通过重复使用 `DROP` 查询。 这将导致虚假 “Attempt to acquire exclusive lock recursively” 消息 [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 斯普利特 `ExpressionAnalyzer.appendJoin()`. 准备一个地方 `ExpressionAnalyzer` 为 `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2)) +- 已添加 `mysql_native_password` mysql兼容性服务器的身份验证插件。 [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 更少的数量 `clock_gettime` 调用;调试/发布之间的固定ABI兼容性 `Allocator` (微不足道的问题)。 [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 移动 `collectUsedColumns` 从 `ExpressionAnalyzer` 到 `SyntaxAnalyzer`. `SyntaxAnalyzer` 赂眉露\>\> `required_source_columns` 现在本身。 [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2)) +- 添加设置 `joined_subquery_requires_alias` 要求子选择和表函数的别名 `FROM` that more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2)) +- 提取物 `GetAggregatesVisitor` 从类 `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2)) +- `system.query_log`:更改数据类型 `type` 列到 `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 静态链接 `sha256_password` 身份验证插件。 [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 避免对设置的额外依赖 `compile` 去工作 在以前的版本中,用户可能会得到如下错误 `cannot open crti.o`, `unable to find library -lc` 等。 [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 对可能来自恶意副本的输入进行更多验证。 [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在 `clickhouse-obfuscator` 文件是可用的 `clickhouse-client` 包。 在以前的版本中,它可以作为 `clickhouse obfuscator` (带空格)。 [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000)) +- 当我们至少有两个查询以不同的顺序读取至少两个表,另一个查询对其中一个表执行DDL操作时,修复了死锁。 修复了另一个非常罕见的死锁。 [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 已添加 `os_thread_ids` 列到 `system.processes` 和 `system.query_log` 为了更好的调试可能性。 [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 当发生PHP mysqlnd扩展错误的解决方法 `sha256_password` 用作默认身份验证插件(在描述 [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). 
[\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 删除不需要的地方与更改为空列。 [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2)) +- 设置默认值 `queue_max_wait_ms` 为零,因为当前值(五秒)是没有意义的。 在极少数情况下,此设置有任何用途。 添加设置 `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` 和 `connection_pool_max_wait_ms` 用于消除歧义。 [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 提取物 `SelectQueryExpressionAnalyzer` 从 `ExpressionAnalyzer`. 保留最后一个用于非select查询。 [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2)) +- 删除重复输入和输出格式。 [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 允许用户复盖 `poll_interval` 和 `idle_connection_timeout` 连接设置。 [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- `MergeTree` 现在有一个额外的选项 `ttl_only_drop_parts` (默认情况下禁用),以避免部分的部分修剪,以便在部分中的所有行都过期时完全删除它们。 [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([塞尔吉\*弗拉季金](https://github.com/svladykin)) +- 类型检查set索引函数。 如果函数类型错误,则引发异常。 这修复了模糊测试与UBSan。 [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) + +#### 性能改进 {#performance-improvement-2} + +- 优化查询 `ORDER BY expressions` 条款,其中 `expressions` 有重合前缀与排序键 `MergeTree` 桌子 此优化由以下方式控制 `optimize_read_in_order` 设置。 [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([安东\*波波夫](https://github.com/CurtizJ)) +- 允许在零件装载和拆卸期间使用多个螺纹。 [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 实现了更新聚合函数状态的批处理变体。 这可能导致性能优势。 [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用 `FastOps` 函数库 `exp`, `log`, `sigmoid`, `tanh`. FastOps是迈克尔\*帕拉欣(Yandex的首席技术官)的快速矢量数学库。 改进的性能 `exp` 和 `log` 功能超过6倍。 功能 `exp` 和 `log` 从 `Float32` 参数将返回 `Float32` (在以前的版本中,他们总是返回 `Float64`). 现在 `exp(nan)` 可能会回来 `inf`. 的结果 `exp` 和 `log` 函数可能不是最接近机器可代表的数字到真正的答案。 [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov))使用Danila Kutenin变体使fastops工作 [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 禁用连续密钥优化 `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm)) +- 改进的性能 `simdjson` 库通过摆脱动态分配 `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 预故障页分配内存时 `mmap()`. 
[\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm)) +- 修复性能错误 `Decimal` 比较。 [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-4} + +- 删除编译器(运行时模板实例化),因为我们已经赢得了它的性能。 [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了性能测试,以显示gcc-9以更隔离的方式性能下降。 [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加表功能 `numbers_mt`,这是多线程版本 `numbers`. 使用哈希函数更新性能测试。 [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 比较模式 `clickhouse-benchmark` [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000)) +- 尽最大努力打印堆栈痕迹。 还添加了 `SIGPROF` 作为调试信号,打印正在运行的线程的堆栈跟踪。 [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 每个函数都在自己的文件中,第10部分。 [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除两倍常量 `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov)) +- 格式化更改 `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm)) +- 更好的联接创建子查询 `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2)) +- 删除冗余条件(由PVS Studio找到)。 [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm)) +- 分隔散列表接口 `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm)) +- 重构设置。 [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([阿利沙平](https://github.com/alesapin)) +- 添加注释 `set` 索引函数。 [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) +- 在Linux上的调试版本中增加OOM分数。 [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm)) +- HDFS HA现在在调试版本中工作。 [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([徐伟清](https://github.com/weiqxu)) +- 添加了一个测试 `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 为Kafka表添加多个实例化视图的测试。 [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([伊万](https://github.com/abyss7)) +- 制定一个更好的构建计划。 [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([伊万](https://github.com/abyss7)) +- 固定 `test_external_dictionaries` 集成的情况下,它是在非root用户下执行。 [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 当写入的数据包的总大小超过 `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 增加了一个测试 `RENAME` 表竞争条件 [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 避免在设置数据竞赛 `KILL QUERY`. 
[\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 通过缓存字典添加处理错误的集成测试。 [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 在Mac OS上禁用ELF对象文件的解析,因为这是没有意义的。 [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 尝试使更新日志生成器更好。 [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加 `-Wshadow` 切换到海湾合作委员会。 [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +- 删除过时的代码 `mimalloc` 支持。 [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- `zlib-ng` 确定x86功能并将此信息保存到全局变量。 这是在defalteInit调用中完成的,它可以由不同的线程同时进行。 为了避免多线程写入,请在库启动时执行此操作。 [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm)) +- 回归测试一个错误,在连接这是固定的 [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev)) +- 修正MSan报告。 [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复扑ttl测试。 [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修正了虚假数据竞赛 `MergeTreeDataPart::is_frozen` 场。 [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修正了模糊测试中的超时。 在以前的版本中,它设法在查询中找到虚假挂断 `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加了调试检查 `static_cast` 列。 [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在官方RPM软件包中支持Oracle Linux。 [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从更改json perftests `once` 到 `loop` 类型。 [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- `odbc-bridge.cpp` 定义 `main()` 所以它不应该被包括在 `clickhouse-lib`. 
[\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Origej Desh](https://github.com/orivej)) +- 测试碰撞 `FULL|RIGHT JOIN` 右表的键中有空值。 [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2)) +- 为了以防万一,增加了对别名扩展限制的测试。 [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从切换 `boost::filesystem` 到 `std::filesystem` 在适当的情况下。 [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 将RPM包添加到网站。 [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 为固定添加测试 `Unknown identifier` 例外 `IN` 科。 [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2)) +- 简化操作 `shared_ptr_helper` 因为人们面临困难理解它。 [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了固定大猩猩和DoubleDelta编解码器的性能测试。 [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 拆分集成测试 `test_dictionaries` 分成四个单独的测试。 [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复PVS-Studio中的警告 `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 允许使用 `library` 与ASan字典源. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了从Pr列表生成更新日志的选项。 [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 锁定 `TinyLog` 读取时存储。 [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm)) +- 检查CI中损坏的符号链接。 [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加超时时间 “stack overflow” 测试,因为它可能需要很长的时间在调试构建。 [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加了双空格检查。 [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `new/delete` 使用消毒剂构建时的内存跟踪。 跟踪不清楚。 它只防止测试中的内存限制异常。 [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2)) +- 启用链接时对未定义符号的检查。 [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([伊万](https://github.com/abyss7)) +- 避免重建 `hyperscan` 每天 [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在固定的瑞银报告 `ProtobufWriter`. 
[\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 不要允许将查询探查器与消毒器一起使用,因为它不兼容。 [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加测试计时器失败后重新加载字典。 [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复不一致 `PipelineExecutor::prepareProcessor` 参数类型。 [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 添加了对坏Uri的测试。 [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了更多的检查 `CAST` 功能。 这应该获得更多关于模糊测试中分割故障的信息。 [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 已添加 `gcc-9` 支持 `docker/builder` 本地生成映像的容器。 [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([格列布\*诺维科夫](https://github.com/NanoBjorn)) +- 测试主键 `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000)) +- 修复了缓慢堆栈跟踪打印影响的测试。 [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加崩溃的测试用例 `groupUniqArray` 固定在 [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm)) +- 固定指数突变测试。 [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) +- 在性能测试中,不要读取我们没有运行的查询的查询日志。 [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm)) +- 现在可以使用任何低基数类型创建实例化视图,而不考虑关于可疑低基数类型的设置。 [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia)) +- 更新的测试 `send_logs_level` 设置。 [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复gcc-8.2下的构建。 [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober)) +- 修复构建与内部libc++。 [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([伊万](https://github.com/abyss7)) +- 修复共享构建 `rdkafka` 图书馆 [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([伊万](https://github.com/abyss7)) +- 修复Mac OS构建(不完整)。 [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev)) +- 修复 “splitted” 碌莽禄.拢. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 其他构建修复: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([阿莫斯鸟](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([伊万](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller)) + +#### 向后不兼容的更改 {#backward-incompatible-change-3} + +- 删除很少使用的表函数 `catBoostPool` 和存储 `CatBoostPool`. 
如果您使用了此表功能,请写电子邮件至 `clickhouse-feedback@yandex-team.com`. 请注意,CatBoost集成仍然存在,并将受到支持。 [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 禁用 `ANY RIGHT JOIN` 和 `ANY FULL JOIN` 默认情况下。 设置 `any_join_distinct_right_table_keys` 设置启用它们。 [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2)) + +## ClickHouse释放19.13 {#clickhouse-release-19-13} + +### ClickHouse释放19.13.6.51,2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02} + +#### 错误修复 {#bug-fix-9} + +- 此版本还包含19.11.12.69的所有错误修复。 + +### ClickHouse释放19.13.5.44,2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20} + +#### 错误修复 {#bug-fix-10} + +- 此版本还包含19.14.6.12的所有错误修复。 +- 修复了在执行时表的可能不一致的状态 `DROP` 在zookeeper无法访问时查询复制的表。 [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 修复了StorageMerge中的数据竞赛 [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复查询分析器中引入的错误,从而导致套接字无休止的recv。 [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([阿利沙平](https://github.com/alesapin)) +- 修复执行时过多的CPU使用率 `JSONExtractRaw` 函数在一个布尔值。 [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 在推送到实例化视图时修复回归。 [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([伊万](https://github.com/abyss7)) +- 表函数 `url` 该漏洞是否允许攻击者在请求中注入任意HTTP头。 这个问题被发现 [尼基塔\*季霍米罗夫](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复无用 `AST` 检查设置索引。 [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) +- 修正了解析 `AggregateFunction` 查询中嵌入的值。 [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([余志昌](https://github.com/yuzhichang)) +- 修正了错误的行为 `trim` 功能家庭。 [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +### ClickHouse释放19.13.4.32,2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10} + +#### 错误修复 {#bug-fix-11} + +- 此版本还包含19.11.9.52和19.11.10.54的所有错误安全修复。 +- 固定数据竞赛 `system.parts` 表和 `ALTER` 查询。 [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了从带有sample和prewhere的空分布式表中读取流中发生的不匹配标题。 [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([钱丽祥](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复使用时的崩溃 `IN` 子句带有一个元组的子查询。 [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) +- 修复具有相同列名的大小写 `GLOBAL JOIN ON` 科。 [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复强制转换类型时的崩溃 `Decimal` 这不支持它。 抛出异常代替。 [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复了崩溃 `extractAll()` 功能。 [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2)) +- 查询转换 
`MySQL`, `ODBC`, `JDBC` 表函数现在正常工作 `SELECT WHERE` 具有多个查询 `AND` 表达式。 [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000)) +- 添加了以前的声明检查MySQL8集成。 [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([拉斐尔\*大卫\*蒂诺科](https://github.com/rafaeldtinoco)) + +#### 安全修复 {#security-fix-1} + +- 修复解压缩阶段编解码器中的两个漏洞(恶意用户可以制造压缩数据,导致解压缩中的缓冲区溢出)。 [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2)) + +### 碌莽禄,拢,010-68520682\戮漏鹿芦,酶,虏卤赂拢,110102005602 {#clickhouse-release-19-13-3-26-2019-08-22} + +#### 错误修复 {#bug-fix-12} + +- 修复 `ALTER TABLE ... UPDATE` 查询表 `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([阿利沙平](https://github.com/alesapin)) +- 在使用IN子句时修复带有元组的子查询。 [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) +- 修复了一个问题,即如果一个陈旧的副本变为活动的,它可能仍然有被删除分区的数据部分。 [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) +- 修正了解析CSV的问题 [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix)) +- 修正了系统中的数据竞赛。部件表和ALTER查询。 这修复 [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了可能导致内存损坏的突变中的错误代码。 修复了读取地址的段错误 `0x14c0` 这可能发生由于并发 `DROP TABLE` 和 `SELECT` 从 `system.parts` 或 `system.parts_columns`. 在准备突变查询时修复了竞争条件。 修复了由于 `OPTIMIZE` 复制的表和并发修改操作,如改变。 [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复后可能的数据丢失 `ALTER DELETE` 查询表跳过索引。 [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) + +#### 安全修复 {#security-fix-2} + +- 如果攻击者具有对ZooKeeper的写入访问权限,并且能够从ClickHouse运行的网络中运行可用的自定义服务器,则可以创建自定义构建的恶意服务器,该服务器将充当ClickHouse副本并将其注册到ZooKeeper中。 当另一个副本从恶意副本中获取数据部分时,它可以强制clickhouse-server写入文件系统上的任意路径。 由Yandex的信息安全团队Eldar Zaitov发现。 [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-19-13-2-19-2019-08-14} + +#### 新功能 {#new-feature-5} + +- 查询级别上的采样探查器。 [示例](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). 
[\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) +- 允许指定列的列表 `COLUMNS('regexp')` 表达的工作原理就像一个更复杂的变体 `*` 星号 [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- `CREATE TABLE AS table_function()` 现在是可能的 [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000)) +- 亚当优化随机梯度下降默认情况下使用 `stochasticLinearRegression()` 和 `stochasticLogisticRegression()` 聚合函数,因为它显示了良好的质量,几乎没有任何调整。 [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37)) +- Added functions for working with the сustom week number [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([杨小姐](https://github.com/andyyzh)) +- `RENAME` 查询现在适用于所有存储。 [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([伊万](https://github.com/abyss7)) +- 现在客户端通过设置从服务器接收任何所需级别的日志 `send_logs_level` 无论服务器设置中指定的日志级别如何。 [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) + +#### 向后不兼容的更改 {#backward-incompatible-change-4} + +- 设置 `input_format_defaults_for_omitted_fields` 默认情况下启用。 分布式表中的插入需要此设置在集群上相同(您需要在滚动更新之前设置它)。 它允许计算复杂的默认表达式的省略字段 `JSONEachRow` 和 `CSV*` 格式。 它应该是预期的行为,但可能导致可忽略不计的性能差异。 [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm)) + +#### 实验特点 {#experimental-features} + +- 新的查询处理管道。 使用 `experimental_use_processors=1` 选项来启用它。 用你自己的麻烦。 [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) + +#### 错误修复 {#bug-fix-13} + +- Kafka集成已在此版本中修复。 +- 固定 `DoubleDelta` 编码 `Int64` 对于大 `DoubleDelta` 值,改进 `DoubleDelta` 编码为随机数据 `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 固定的高估 `max_rows_to_read` 如果设置 `merge_tree_uniform_read_distribution` 置为0。 [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 改进 {#improvement-4} + +- 如果抛出异常 `config.d` 文件没有相应的根元素作为配置文件 [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) + +#### 性能改进 {#performance-improvement-3} + +- 优化 `count()`. 
现在它使用最小的列(如果可能的话)。 [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([阿莫斯鸟](https://github.com/amosbird)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-5} + +- 在性能测试中报告内存使用情况。 [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm)) +- 修复构建与外部 `libcxx` [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([伊万](https://github.com/abyss7)) +- 修复共享构建 `rdkafka` 图书馆 [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([伊万](https://github.com/abyss7)) + +## ClickHouse释放19.11 {#clickhouse-release-19-11} + +### ClickHouse释放19.11.13.74,2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01} + +#### 错误修复 {#bug-fix-14} + +- 修复了罕见的崩溃 `ALTER MODIFY COLUMN` 当合并/更改部分之一为空(0行)时,垂直合并。 [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([阿利沙平](https://github.com/alesapin)) +- 手动更新 `SIMDJSON`. 这修复了stderr文件可能泛滥的错误json诊断消息。 [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 修正错误 `mrk` 突变的文件扩展名 ([阿利沙平](https://github.com/alesapin)) + +### ClickHouse释放19.11.12.69,2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02} + +#### 错误修复 {#bug-fix-15} + +- 修复了大型表上复杂键的索引分析的性能下降。 这修复 [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用分布式引擎在表中发送数据时避免罕见的SIGSEGV (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat)) +- 修复 `Unknown identifier` 有多个连接。 这修复 [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2)) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-19-11-11-57-2019-09-13} + +- 修复从Kafka空主题中选择时导致段错误的逻辑错误。 [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([伊万](https://github.com/abyss7)) +- 修复功能 `АrrayEnumerateUniqRanked` 在参数中使用空数组。 [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller)) + +### ClickHouse释放19.11.10.54,2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10} + +#### 错误修复 {#bug-fix-16} + +- 手动存储Kafka消息的偏移量,以便能够一次性为所有分区提交它们。 修复潜在的重复 “one consumer - many partitions” 场景。 [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([伊万](https://github.com/abyss7)) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682 {#clickhouse-release-19-11-9-52-2019-09-6} + +- 改进缓存字典中的错误处理。 [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 在功能固定错误 `arrayEnumerateUniqRanked`. 
[\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller)) +- 修复 `JSONExtract` 功能,同时提取 `Tuple` 从JSON。 [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复后可能的数据丢失 `ALTER DELETE` 查询表跳过索引。 [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) +- 固定性能测试。 [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 实木复合地板:修复读取布尔列。 [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修正了错误的行为 `nullIf` 常量参数的函数。 [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复kafka服务器正常重启时的消息重复问题。 [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([伊万](https://github.com/abyss7)) +- 修正了一个问题,当长 `ALTER UPDATE` 或 `ALTER DELETE` 可能会阻止常规合并运行。 如果没有足够的可用线程,则防止突变执行。 [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix)) +- 修正了处理错误 “timezone” 在服务器配置文件中。 [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复卡夫卡测试。 [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([伊万](https://github.com/abyss7)) + +#### 安全修复 {#security-fix-3} + +- 如果攻击者具有对ZooKeeper的写入访问权限,并且能够从运行ClickHouse的网络中运行可用的自定义服务器,则可以创建自定义构建的恶意服务器,该服务器将充当ClickHouse副本并将其注册到ZooKeeper中。 当另一个副本从恶意副本中获取数据部分时,它可以强制clickhouse-server写入文件系统上的任意路径。 由Yandex的信息安全团队Eldar Zaitov发现。 [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-19-11-8-46-2019-08-22} + +#### 错误修复 {#bug-fix-17} + +- 修复 `ALTER TABLE ... UPDATE` 查询表 `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([阿利沙平](https://github.com/alesapin)) +- 在使用IN子句时修复带有元组的子查询。 [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) +- 修复了一个问题,即如果一个陈旧的副本变为活动的,它可能仍然有被删除分区的数据部分。 [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) +- 修正了解析CSV的问题 [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix)) +- 修正了系统中的数据竞赛。部件表和ALTER查询。 这修复 [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了可能导致内存损坏的突变中的错误代码。 修复了读取地址的段错误 `0x14c0` 这可能发生由于并发 `DROP TABLE` 和 `SELECT` 从 `system.parts` 或 `system.parts_columns`. 
在准备突变查询时修复了竞争条件。 修复了由于 `OPTIMIZE` 复制的表和并发修改操作,如改变。 [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-19-11-7-40-2019-08-14} + +#### 错误修复 {#bug-fix-18} + +- Kafka集成已在此版本中修复。 +- 使用时修复段错误 `arrayReduce` 对于不断的参数。 [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 固定 `toFloat()` 单调性。 [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000)) +- 修复已启用的段错误 `optimize_skip_unused_shards` 还丢失了分片钥匙 [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ)) +- 的固定逻辑 `arrayEnumerateUniqRanked` 功能。 [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从MySQL处理程序中删除了额外的详细日志记录。 [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复错误的行为和可能的段错误 `topK` 和 `topKWeighted` 聚合函数。 [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ)) +- 不要公开虚拟列 `system.columns` 桌子 这是向后兼容所必需的。 [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复复杂键缓存字典中字符串字段的内存分配错误。 [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([阿利沙平](https://github.com/alesapin)) +- 修复创建新副本时启用自适应粒度的错误 `Replicated*MergeTree` 桌子 [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([阿利沙平](https://github.com/alesapin)) +- 阅读Kafka消息时修复无限循环。 [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7)) +- 修复了由于SQL解析器中的堆栈溢出和堆栈溢出的可能性而导致服务器崩溃的编造查询的可能性 `Merge` 和 `Distributed` 表 [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在小序列固定大猩猩编码错误。 [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk)) + +#### 改进 {#improvement-5} + +- 允许用户复盖 `poll_interval` 和 `idle_connection_timeout` 连接设置。 [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +### 碌莽禄,拢,010-68520682\戮漏鹿芦,酶,虏卤赂拢,110102003042 {#clickhouse-release-19-11-5-28-2019-08-05} + +#### 错误修复 {#bug-fix-19} + +- 修复了服务器超载时挂起查询的可能性。 [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复yandexConsistentHash函数中的FPE。 这修复 [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修正错误的转换 `LowCardinality` 类型 `AggregateFunctionFactory`. 这修复 [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复解析 `bool` 从设置 `true` 和 `false` 配置文件中的字符串。 [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([阿利沙平](https://github.com/alesapin)) +- 修复查询中不兼容的流头的罕见错误 `Distributed` 桌子结束 `MergeTree` 表时的一部分 `WHERE` 移动到 `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([阿利沙平](https://github.com/alesapin)) +- 在有符号类型的整数划分为无符号类型的固定溢出。 这修复 [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). 
[\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 向后不兼容的更改 {#backward-incompatible-change-5} + +- `Kafka` 还是坏了 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-19-11-4-24-2019-08-01} + +#### 错误修复 {#bug-fix-20} + +- 修复编写具有自适应粒度的二级索引标记的错误。 [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([阿利沙平](https://github.com/alesapin)) +- 修复 `WITH ROLLUP` 和 `WITH CUBE` 修饰符 `GROUP BY` 具有两级聚合。 [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([安东\*波波夫](https://github.com/CurtizJ)) +- 固定挂起 `JSONExtractRaw` 功能。 固定 [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复ExternalLoader::reloadOutdated()中的段错误。 [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复了服务器可能关闭侦听套接字但不关闭并继续提供剩余查询的情况。 您最终可能会有两个正在运行的clickhouse服务器进程。 有时,服务器可能会返回错误 `bad_function_call` 对于剩余的查询。 [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了通过ODBC,MySQL,ClickHouse和HTTP初始加载外部字典的更新字段无用和不正确的条件。 这修复 [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在固定不相关的异常转换 `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复非确定性结果 “uniq” 在极少数情况下聚合函数。 该错误存在于所有ClickHouse版本。 [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- Segfault当我们在函数上设置了一点点太高的CIDR `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 修复了服务器从许多不同上下文中抛出许多异常时的小内存泄漏。 [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复消费者在订阅之前暂停而之后未恢复的情况。 [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([伊万](https://github.com/abyss7))请注意,卡夫卡在这个版本中被打破。 +- 从以前的读取操作中清除Kafka数据缓冲区,并且完成了错误操作 [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([尼古拉](https://github.com/bopohaa))请注意,卡夫卡在这个版本中被打破。 +- 由于 `StorageMergeTree::background_task_handle` 在初始化 `startup()` 该 `MergeTreeBlockOutputStream::write()` 可以尝试在初始化之前使用它。 只需检查它是否被初始化。 [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([伊万](https://github.com/abyss7)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-6} + +- 新增官方 `rpm` 包. 
[\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([阿利沙平](https://github.com/alesapin)) +- 添加构建能力 `.rpm` 和 `.tgz` 包 `packager` 脚本 [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([阿利沙平](https://github.com/alesapin)) +- 修复了 “Arcadia” 构建系统。 [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller)) + +#### 向后不兼容的更改 {#backward-incompatible-change-6} + +- `Kafka` 在这个版本中被打破。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-19-11-3-11-2019-07-18} + +#### 新功能 {#new-feature-6} + +- 增加了对准备好的语句的支持。 [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([亚历山大](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- `DoubleDelta` 和 `Gorilla` 列编解ecs [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 已添加 `os_thread_priority` 设置,允许控制 “nice” 操作系统用于调整动态调度优先级的查询处理线程的值。 它需要 `CAP_SYS_NICE` 能力的工作。 这实现了 [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 执行 `_topic`, `_offset`, `_key` kafka引擎的列 [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([伊万](https://github.com/abyss7))请注意,卡夫卡在这个版本中被打破。 +- 添加聚合函数组合 `-Resample` [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz)) +- 聚合函数 `groupArrayMovingSum(win_size)(x)` 和 `groupArrayMovingAvg(win_size)(x)`,计算移动和/平均有或没有窗口大小限制。 [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004)) +- 添加synonim `arrayFlatten` \<-\> `flatten` [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz)) +- Intergate H3功能 `geoToH3` 从尤伯杯. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 错误修复 {#bug-fix-21} + +- 使用异步更新实现DNS缓存。 单独的线程解析所有主机并更新dns缓存(设置 `dns_cache_update_period`). 当主机的ip频繁更改时,它应该有所帮助。 [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复段错误 `Delta` 影响值小于32位大小的列的编解ec。 该错误导致随机内存损坏。 [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([阿利沙平](https://github.com/alesapin)) +- 修复ttl合并中的段错误与块中的非物理列。 [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复在检查部分罕见的错误 `LowCardinality` 列。 前情提要 `checkDataPart` 总是失败的一部分 `LowCardinality` 列。 [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([阿利沙平](https://github.com/alesapin)) +- 避免在服务器线程池已满时挂起连接。 它是从连接重要 `remote` 当连接超时时,表函数或连接到没有副本的分片。 这修复 [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 支持常量参数 `evalMLModel` 功能。 这修复 [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了ClickHouse将默认时区确定为 `UCT` 而不是 `UTC`. 这修复 [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 固定缓冲区下溢 `visitParamExtractRaw`. 
这修复 [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在分发 `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` 查询将直接在leader副本上执行。 [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([阿利沙平](https://github.com/alesapin)) +- 修复 `coalesce` 为 `ColumnConst` 与 `ColumnNullable` +相关变化. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复 `ReadBufferFromKafkaConsumer` 所以它不断阅读新的消息后 `commit()` 即使它之前停滞不前 [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([伊万](https://github.com/abyss7)) +- 修复 `FULL` 和 `RIGHT` 加入时加入结果 `Nullable` 键在右表. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2)) +- 可能修复低优先级查询的无限休眠。 [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复争用条件,这导致某些查询可能不会出现在query\_log后 `SYSTEM FLUSH LOGS` 查询。 [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([安东\*波波夫](https://github.com/CurtizJ)) +- 固定 `heap-use-after-free` 由手表引起的ClusterCopier中的警告尝试使用已经删除的复印机对象。 [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复错误 `StringRef` 由一些实现返回的指针 `IColumn::deserializeAndInsertFromArena`. 这个错误只影响单元测试。 [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 防止源数组和中间数组连接掩蔽相同名称列的列。 [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复插入并选择查询MySQL引擎与MySQL样式标识符引用。 [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([张冬](https://github.com/zhang2014)) +- 现在 `CHECK TABLE` 查询可以与MergeTree引擎系列一起使用。 它返回检查状态和消息,如果任何为每个部分(或文件在simplier引擎的情况下)。 此外,修复获取损坏部分的错误。 [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([阿利沙平](https://github.com/alesapin)) +- 修复SPLIT\_SHARED\_LIBRARY运行时 [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1)) +- 固定时区初始化时 `/etc/localtime` 是一个相对的符号链接,如 `../usr/share/zoneinfo/Europe/Moscow` [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- clickhouse复印机:修复使用-关机后免费 [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller)) +- 更新 `simdjson`. 
修复了一些无效的零字节Json成功解析的问题。 [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复系统日志的关机 [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复当invalidate\_query中的条件取决于字典时挂起。 [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) + +#### 改进 {#improvement-6} + +- 允许群集配置中的无法解析的地址。 它们将被视为不可用,并尝试在每次连接尝试时解决。 这对Kubernetes特别有用。 这修复 [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 关闭空闲TCP连接(默认情况下为一小时超时)。 这对于每台服务器上具有多个分布式表的大型集群尤其重要,因为每台服务器都可能保留与其他服务器的连接池,并且在高峰查询并发之后,连接将停 这修复 [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更好的质量 `topK` 功能。 如果新元素具有更大的权重,则更改了SavingSpace set行为以删除最后一个元素。 [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 与域一起使用的URL函数现在可以在没有方案的情况下适用于不完整的Url [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([阿利沙平](https://github.com/alesapin)) +- 校验和添加到 `system.parts_columns` 桌子 [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 已添加 `Enum` 数据类型作为synonim `Enum8` 或 `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000)) +- 全位转置变种 `T64` 编解ec 可能会导致更好的压缩 `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2)) +- 条件 `startsWith` 函数现在可以使用主键。 这修复 [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) 和 [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000)) +- 允许使用 `clickhouse-copier` 通过允许空数据库名称来实现具有交叉复制的群集拓扑。 [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([纳瓦托洛梅](https://github.com/nvartolomei)) +- 使用 `UTC` 作为系统上的默认时区,而不 `tzdata` (e.g. bare Docker container). 
Before this patch, error message `Could not determine local time zone` 被打印并且服务器或客户机拒绝启动。 [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 返回对函数中浮点参数的支持 `quantileTiming` 为了向后兼容性。 [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在错误消息中显示哪个表缺少列。 [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([伊万](https://github.com/abyss7)) +- 不允许不同用户使用相同的query\_id运行查询 [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller)) +- 用于向Graphite发送指标的更强大的代码。 它甚至可以在长时间的多重工作 `RENAME TABLE` 操作。 [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 当ThreadPool无法计划执行任务时,将显示更多信息错误消息。 这修复 [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 反转ngramSearch更直观 [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1)) +- 在HDFS引擎生成器中添加用户解析 [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90)) +- 更新默认值 `max_ast_elements parameter` [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit)) +- 增加了过时设置的概念。 过时的设置 `allow_experimental_low_cardinality_type` 可以没有效果使用。 [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov) + +#### 性能改进 {#performance-improvement-4} + +- 增加从合并表中选择的流数量,以便更均匀地分布线程。 添加设置 `max_streams_multiplier_for_merge_tables`. 
这修复 [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-7} + +- 为与不同版本的clickhouse的客户端-服务器交互添加向后兼容性测试。 [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([阿利沙平](https://github.com/alesapin)) +- 每个提交和拉取请求中的测试复盖率信息。 [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([阿利沙平](https://github.com/alesapin)) +- 与address sanitizer合作,支持我们的自定义分alloc (`Arena` 和 `ArenaWithFreeLists`)为了更好地调试 “use-after-free” 错误。 [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm)) +- 切换到 [LLVM libunwind实现](https://github.com/llvm-mirror/libunwind) 用于C++异常处理和堆栈跟踪打印 [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([尼基塔\*拉普科夫](https://github.com/laplab)) +- 添加来自-Weverything的两个警告 [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 允许用内存消毒剂建立ClickHouse。 [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 关于固定的ubsan报告 `bitTest` 在模糊测试功能。 [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- Docker:增加了初始化需要身份验证的ClickHouse实例的可能性。 [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([科尔维亚科夫\*安德烈](https://github.com/shurshun)) +- 将librdkafka更新到版本1.1.0 [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([伊万](https://github.com/abyss7)) +- 为集成测试添加全局超时,并在测试代码中禁用其中一些。 [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([阿利沙平](https://github.com/alesapin)) +- 修复一些ThreadSanitizer故障。 [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm)) +- 该 `--no-undefined` 选项强制链接器在链接时检查所有外部名称是否存在。 在拆分构建模式下跟踪库之间的真实依赖关系非常有用。 [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([伊万](https://github.com/abyss7)) +- 增加了性能测试 [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 与gcc-7固定兼容性。 [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了对gcc-9的支持。 这修复 [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了libunwind链接不正确时的错误。 [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了PVS-Studio发现的一些警告。 [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了初始支持 `clang-tidy` 静态分析仪。 [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 转换BSD/Linux endian宏( ‘be64toh’ 和 ‘htobe64’)到Mac OS x当量 [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([傅辰](https://github.com/fredchenbj)) +- 改进的集成测试指南. 
[\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 在macosx+gcc9修复构建 [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov)) +- 修复难以识别的错字:aggreAGte-\>aggregate。 [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm)) +- 修复freebsd构建 [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller)) +- 添加链接到实验YouTube频道的网站 [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([伊万\*布林科夫](https://github.com/blinkov)) +- CMake:为复盖率标志添加选项:WITH\_COVERAGE [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller)) +- 修复一些内联PODArray的初始大小。 [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm)) +- clickhouse服务器.postinst:修复centos6的操作系统检测 [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller)) +- 添加Arch linux软件包生成。 [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 拆分常见/配置.h by libs(dbms) [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller)) +- 修复了 “Arcadia” 构建平台 [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller)) +- 修复了非常规构建(gcc9,没有子模块) [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller)) +- 在unalignedStore中需要显式类型,因为它被证明容易出现错误 [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm)) +- 修复MacOS构建 [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov)) +- 关于具有更大数据集的新JIT功能的性能测试,请参阅此处 [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 在压力测试中运行有状态测试 [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([阿利沙平](https://github.com/alesapin)) + +#### 向后不兼容的更改 {#backward-incompatible-change-7} + +- `Kafka` 在这个版本中被打破。 +- 启用 `adaptive_index_granularity` =10mb默认为新 `MergeTree` 桌子 如果您在19.11+版本上创建了新的MergeTree表,则不可能降级到19.6之前的版本。 [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([阿利沙平](https://github.com/alesapin)) +- 删除了Yandex使用的过时无证嵌入式字典。梅特里卡 功能 `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` 不再可用。 如果您正在使用这些功能,请写电子邮件至clickhouse-feedback@yandex-team.com注:在最后时刻,我们决定保持这些功能一段时间。 [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +## ClickHouse释放19.10 {#clickhouse-release-19-10} + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-19-10-1-5-2019-07-12} + +#### 新功能 {#new-feature-7} + +- 添加新列编解ec: `T64`. 
为(U)IntX/EnumX/Data(时间)/DecimalX列制作。 它应该适用于具有常量或小范围值的列。 编解码器本身允许放大或缩小数据类型而无需重新压缩。 [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2)) +- 添加数据库引擎 `MySQL` 允许查看远程MySQL服务器中的所有表 [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([张冬](https://github.com/zhang2014)) +- `bitmapContains` 执行。 这是2倍的速度比 `bitmapHasAny` 如果第二个位图包含一个元素。 [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([余志昌](https://github.com/yuzhichang)) +- 支持 `crc32` 功能(与MySQL或PHP中的行为完全相同)。 如果您需要散列函数,请不要使用它。 [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK)) +- 已实施 `SYSTEM START/STOP DISTRIBUTED SENDS` 查询控制异步插入到 `Distributed` 桌子 [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([张冬](https://github.com/zhang2014)) + +#### 错误修复 {#bug-fix-22} + +- 在执行突变时忽略查询执行限制和合并限制的最大部件大小。 [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复可能导致重复数据删除正常块(极其罕见)和插入重复块(更常见)的错误。 [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([阿利沙平](https://github.com/alesapin)) +- 功能修复 `arrayEnumerateUniqRanked` 对于具有空数组的参数 [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller)) +- 不要在没有轮询任何消息的情况下订阅Kafka主题。 [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([伊万](https://github.com/abyss7)) +- 使设置 `join_use_nulls` 对于不能在Nullable内的类型不起作用 [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia)) +- 固定 `Incorrect size of index granularity` 错误 [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster)) +- 修正浮动到十进制转换溢出 [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster)) +- 冲洗缓冲区时 `WriteBufferFromHDFS`的析构函数被调用。 这修复了写入 `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([新东鹏](https://github.com/eejoin)) + +#### 改进 {#improvement-7} + +- 对待空单元格 `CSV` 作为默认值时的设置 `input_format_defaults_for_omitted_fields` 被启用。 [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm)) +- 外部字典的非阻塞加载。 [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 可以根据设置动态更改已建立的连接的网络超时。 [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok)) +- 使用 “public\_suffix\_list” 对于功能 `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. 它使用一个完美的哈希表生成 `gperf` 从文件生成的列表:https://publicsuffix.org/list/public\_suffix\_list.dat(例如,现在我们认识到域 `ac.uk` 作为非显着)。 [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 通过 `IPv6` 系统表中的数据类型;统一客户端信息列 `system.processes` 和 `system.query_log` [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用会话与MySQL兼容性协议的连接。 \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 支持更多 `ALTER` 查询 `ON CLUSTER`. 
+
+#### Performance Improvement {#performance-improvement-5}
+
+- Add the possibility to write the final mark at the end of MergeTree columns. It allows to avoid useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
+- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}
+
+- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing. [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
+- From now on `.sql` tests can be run isolated by server, in parallel, with a random database. This allows to run them faster, add new tests with custom server configurations, and be sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
+- Remove `` and `` from performance tests. [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed the "select\_format" performance test for `Pretty` formats. [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse Release 19.9 {#clickhouse-release-19-9}
+
+### ClickHouse Release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}
+
+#### Bug Fix {#bug-fix-23}
+
+- Fix a segfault in the Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fix a rare bug in the checking of parts with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Fix a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fix potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix how ClickHouse determined the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug where distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries were executed on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fix a race condition which caused some queries to not appear in query\_log instantly after a SYSTEM FLUSH LOGS query (a sketch follows this list). [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
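+
+A minimal illustration of the flush-then-query pattern touched by the race-condition fix above:
+
+``` sql
+SYSTEM FLUSH LOGS;              -- force in-memory log buffers into the *_log tables
+SELECT query, query_duration_ms
+FROM system.query_log
+ORDER BY event_time DESC
+LIMIT 5;
+```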
+
+### ClickHouse Release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}
+
+#### New Feature {#new-feature-8}
+
+- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
+- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments. [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
+- Implement the `dictGet` and `dictGetOrDefault` functions for Decimal types (a sketch follows this list). [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
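+
+A sketch of the Decimal dictionary lookups above, assuming a hypothetical dictionary `prices` with a `Decimal32(2)` attribute `price` keyed by UInt64:
+
+``` sql
+SELECT
+    dictGet('prices', 'price', toUInt64(1))                                AS known,
+    dictGetOrDefault('prices', 'price', toUInt64(999), toDecimal32(0, 2)) AS missing;  -- falls back to 0.00
+```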
+
+#### Improvement {#improvement-8}
+
+- Debian init: add a service stop timeout. [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
+- Add a setting, forbidden by default, for creating tables with suspicious types. [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
+- Regression functions return model weights when not used as State in the `evalMLMethod` function. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
+- Rename and improve regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
+- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Bug Fix {#bug-fix-24}
+
+- Fix potential data loss in Kafka. [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
+- Fix a potential infinite loop in the `PrettySpace` format when it is called with zero columns. [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a UInt32 overflow bug in linear models. Allow eval ML models for non-const model arguments. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix a segfault with `bitmapHasAny` in a scalar subquery. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error where the replication connection pool doesn't retry to resolve a host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
+- Fix INSERT into a Distributed table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+- Fix bad alloc when truncating Join storage. [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
+- In recent versions of the package tzdata some files are symlinks now. The current mechanism for detecting the default timezone got broken and gave wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Fix some extremely rare cases with the MultiVolnitsky searcher when the constant needles are in sum at least 16KB long. The algorithm missed or overwrote previous results, which could lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
+- Fix the issue when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data it can be interpreted as a table format and in the query it can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a bug where parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Remove debug logging from the MySQL protocol. [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Skip ZNONODE during DDL query processing. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix mixing of `UNION ALL` result column types. There were cases with inconsistent data and column types of result columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Throw an exception on wrong integers in `dictGetT` functions instead of a crash. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fix gcc9 warnings in hyperscan (the \#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix the gcc9 ICE and submit it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed linking with lld. [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove unused specializations in dictionaries. [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve performance tests for formatting and parsing tables for different types of files. [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixes for parallel test runs. [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
+- Docker: use configs from clickhouse-test. [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
+- Fix compilation for FreeBSD. [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
+- Upgrade boost to 1.70. [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
+- Fix building clickhouse as a submodule. [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
+- Improve JSONExtract performance tests. [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse Release 19.8 {#clickhouse-release-19-8}
+
+### ClickHouse Release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}
+
+#### New Features {#new-features}
+
+- Added functions to work with JSON. [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add a function basename, with a behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). Works with both a UNIX-like path and a Windows path. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause. [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
+- Added the new data type `SimpleAggregateFunction`, which allows to have columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max` (a sketch follows this list). [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
+- Added support for non-constant arguments in the function `ngramDistance`. [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
+- Added the functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
+- Support the rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a server which allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimal types in the functions `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted` and medianExactWeighted. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `format` function. Formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418) [\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of the `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added the aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([刘杨宽](https://github.com/LiuYangkuan))
+- Added the functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add an X-ClickHouse-Summary header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. Returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
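+
+A minimal sketch of the `SimpleAggregateFunction` type mentioned above (the table and column names are hypothetical):
+
+``` sql
+CREATE TABLE totals
+(
+    k UInt64,
+    hits SimpleAggregateFunction(sum, UInt64)  -- stores plain values; merges apply sum() directly
+)
+ENGINE = AggregatingMergeTree()
+ORDER BY k;
+
+-- Unlike AggregateFunction, no -State/-Merge combinators are needed when reading:
+SELECT k, sum(hits) FROM totals GROUP BY k;
+```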
+
+#### Improvements {#improvements}
+
+- Added the `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the functions `JSONExtractRaw` and `JSONExtractKeyAndValues`. Renamed the functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified the function `JSONExtract`: now it gets the return type from its last parameter and doesn't inject nullables. Implemented a fallback to RapidJSON in case AVX2 instructions are not available. The simdjson library was updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now the `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` functions. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows to break the reading from the Kafka consumer more frequently and to check the time limits for the top-level streams. [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with bundled SASL. It should allow to use SASL SCRAM authentication. [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in ``. [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Add a new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows to set the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations (a sketch follows this list). [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
+- Allow the user to shadow the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Make the `sequenceMatch` aggregate functions support other unsigned integer types. [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge`. [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL wire protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
+- clickhouse-client: allow to run with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed the functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, and `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
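+
+A configuration sketch for the `kafka_commit_every_batch` setting above (the broker, topic and message schema are hypothetical):
+
+``` sql
+CREATE TABLE kafka_queue (message String)
+ENGINE = Kafka
+SETTINGS kafka_broker_list = 'localhost:9092',
+         kafka_topic_list = 'events',
+         kafka_group_name = 'clickhouse-consumer',
+         kafka_format = 'JSONEachRow',
+         kafka_commit_every_batch = 1;  -- commit after each consumed batch instead of after the whole block
+```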
+
+#### Performance Improvements {#performance-improvements}
+
+- Parallelize the processing of parts of non-replicated MergeTree tables in the ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
+- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
+- Do not add right join key columns to the join result if they are used only in the join on section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
+- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
+- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
+- Query optimisation. Allow push-down of IN statements while rewriting comma/cross joins into inner ones. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgrade our LZ4 implementation with the reference one to have faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))
+
+#### Bug Fixes {#bug-fixes}
+
+- Fix push of required columns with join. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
+- Fix http error codes in DataPartsExchange (the interserver http server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
+- Fix SimpleAggregateFunction for Strings longer than MAX\_SMALL\_STRING\_SIZE. [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
+- Fix an error for the `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal-to-Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+- Fix the result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Documentation {#documentation}
+
+- Translate the documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
+- Translate some documentation about table engines to Chinese. [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}
+
+- Fix some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
+- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
+- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340) [\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
+- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
+- Add the ability to make substitutions in create, fill and drop queries in performance tests. [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))
+
+## ClickHouse Release 19.7 {#clickhouse-release-19-7}
+
+### ClickHouse Release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}
+
+#### Bug Fix {#bug-fix-25}
+
+- Fix a performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+
+### ClickHouse Release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}
+
+#### New Features {#new-features-1}
+
+- Added bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (a sketch follows). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
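+
+A quick illustration of the two bitmap predicates above:
+
+``` sql
+SELECT
+    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4])) AS any_shared,  -- 1: both contain 3
+    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4])) AS all_shared;  -- 0: 4 is missing from the first
+```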
+
+#### Bug Fixes {#bug-fixes-1}
+
+- Fix a segfault on a `minmax` INDEX with a Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
+- Mark all input columns in LIMIT BY as required output. It fixes the 'Not found column' error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
+- Fix the "Column '0' already exists" error in `SELECT .. PREWHERE` on a column with DEFAULT. [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
+- Fix the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
+- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
+- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
+- Fix element\_count for hashed dictionaries (do not include duplicates). [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix settings in ExternalData HTTP requests. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a bug where parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Fix a segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error where the replication connection pool doesn't retry to resolve a host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed the `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix the `UNION ALL` supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that didn't process it, but already got the list of children, would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+
+### ClickHouse Release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}
+
+#### New Features {#new-features-2}
+
+- Allow to limit the range of a setting that can be specified by the user. These constraints can be set up in the user's settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behaviour is similar to the `groupArray(max_size)(x)` function (a sketch follows this list). [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
+- For the TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
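+
+A one-line illustration of the size-capped `groupUniqArray` above:
+
+``` sql
+-- Collect at most 3 distinct values, analogous to groupArray(3)(x):
+SELECT groupUniqArray(3)(number % 10) FROM numbers(100);
+```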
+
+#### Bug Fixes {#bug-fixes-2}
+
+- Fix a crash of uncompressed\_cache + JOIN during merge (\#5197). [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
+- Fix data loss on heavy load via KafkaEngine (\#4736). [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
+- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvements {#performance-improvements-1}
+
+- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Documentation {#documentation-1}
+
+- Translate the documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}
+
+- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated LZ4. [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
+- Add gperf to the build requirements for the upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.6 {#clickhouse-release-19-6}
+
+### ClickHouse Release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}
+
+#### Bug Fixes {#bug-fixes-3}
+
+- Fixed IN condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a deadlock in Zookeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
+- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
+- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a data race in the rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
+- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED in allocating UncompressedCache and as a result to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))
+
+### ClickHouse Release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}
+
+#### New Features {#new-features-3}
+
+- TTL expressions for columns and tables (a sketch follows this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
+- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br). [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
+- Added the new function `isValidUTF8` for checking whether a set of bytes is correctly utf-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
+- Add the new load balancing policy `first_or_random`, which sends queries to the first specified host and, if it's inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
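+
+A minimal sketch of the column- and table-level TTL from the first entry above (the table is hypothetical):
+
+``` sql
+CREATE TABLE ttl_example
+(
+    d Date,
+    a UInt64,
+    s String TTL d + INTERVAL 1 MONTH  -- the column value is reset to its default after a month
+)
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(d)
+ORDER BY d
+TTL d + INTERVAL 6 MONTH;              -- whole rows are removed after six months
+```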
+
+#### Experimental Features {#experimental-features-1}
+
+- Add the setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* family of tables. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-1}
+
+- Added support of non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable push-down to the right table in a left join, to the left table in a right join, and to both tables in a full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
+- `clickhouse-copier`: auto upload the task configuration from the `--task-file` option. [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
+- Added a typos handler for the storage factory and the table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
+- Support asterisks and qualified asterisks for multiple joins without subqueries. [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the missing-column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvements {#performance-improvements-2}
+
+- Significant speedup of ASOF JOIN. [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes}
+
+- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))
+
+#### Bug Fixes {#bug-fixes-4}
+
+- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
+- Fixed an error in queries with JOIN + ARRAY JOIN. [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
+- Partially fix distributed\_product\_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It's not possible to access the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a potentially wrong result for `SELECT DISTINCT` with `JOIN`. [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}
+
+- Fixed test failures when running clickhouse-server on a different host. [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
+- clickhouse-test: disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
+- clickhouse-test: allow the use of any test database (remove the `test.` qualification where possible). [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
+- Fix ubsan errors. [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably. [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
+- A Python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse Release 19.5 {#clickhouse-release-19-5}
+
+### ClickHouse Release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}
+
+#### Bug Fixes {#bug-fixes-5}
+
+- Fixed a possible crash in bitmap\* functions. [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
+- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Modification of the retention function: if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+
+### ClickHouse Release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}
+
+#### Bug Fixes {#bug-fixes-6}
+
+- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))
+
+### ClickHouse Release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}
+
+#### New Features {#new-features-4}
+
+- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
+- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+- Implement a predefined per-row expression filter for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
+- A new type of data skipping indices based on bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added `ASOF JOIN`, which allows to run queries that join to the most recent value known (a sketch follows this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
+- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`, then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
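+
+A sketch of the `ASOF JOIN` above, assuming hypothetical `events` and `quotes` tables; in the `USING` form the last listed column is the closest-match (inequality) column:
+
+``` sql
+-- For every event, pick the most recent quote with the same id and ts <= the event's ts.
+SELECT e.id, e.ts, q.price
+FROM events AS e
+ASOF JOIN quotes AS q USING (id, ts);
+```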
+
+#### Improvement {#improvement-9}
+
+- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
+- Allow to use `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
+- Support for `CREATE OR REPLACE VIEW`. Allows to create a view or set a new definition in a single statement (a sketch follows this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
+- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([刘杨宽](https://github.com/LiuYangkuan))
+- Add the ability to start a replicated table without metadata in zookeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
+- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow to disable functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
+- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a number-of-arguments limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
+- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
+- Fill `system.graphite_detentions` from a table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
+- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
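+
+A small sketch of `CREATE OR REPLACE VIEW` from the list above (the `visits` table and its columns are hypothetical):
+
+``` sql
+CREATE VIEW top_paths AS SELECT path, count() AS hits FROM visits GROUP BY path;
+
+-- Later, swap in a new definition without an explicit DROP + CREATE:
+CREATE OR REPLACE VIEW top_paths AS
+    SELECT path, uniq(user_id) AS users FROM visits GROUP BY path;
+```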
+
+#### Bug Fix {#bug-fix-26}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from a buffer. Sometimes files weren't loaded successfully by HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing the `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed the tsan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking of parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a crash in `ALTER ... MODIFY ORDER BY` on `Distributed` tables. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a crash of `JOIN` on a not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that could happen during `RENAME` of a table of the MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function returning a wrong result in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix an incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix duplicates in `GLOBAL JOIN` with an asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurred when a thread had to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-8}
+
+- Renamed the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `max_partitions_per_insert_block` (with a value of 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`); a sketch follows this list. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
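+
+A quick illustration of the renamed multi-search functions above:
+
+``` sql
+SELECT
+    multiSearchAny('clickhouse changelog', ['house', 'nope']) AS found,      -- 1: 'house' occurs
+    multiSearchAllPositions('hello world', ['l', 'world'])    AS positions;  -- [3, 7]
+```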
+
+#### Performance Improvement {#performance-improvement-6}
+
+- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}
+
+- Hardened debug build: more granular memory mappings and ASLR; add memory protection for the mark cache and the index. This allows to find more memory stomping bugs in cases when ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI`, which allow to enable/disable the above features (same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
+- Add the ability to print the process list and stacktraces of all threads if some queries hang after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
+- Add retries on the `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
+- Add a FreeBSD build with vagrant and a build with thread sanitizer to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
+- Now the user is asked for a password for the user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
+- Suppress warnings in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building without ssl. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
+- Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed a TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add test checking of using a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse Release 19.4 {#clickhouse-release-19-4}
+
+### ClickHouse Release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}
+
+#### Bug Fixes {#bug-fixes-7}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from a buffer. Sometimes files weren't loaded successfully by HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing the `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed the tsan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking of parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a crash in `ALTER ... MODIFY ORDER BY` on `Distributed` tables. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that could happen during `RENAME` of a table of the MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function returning a wrong result in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurred when a thread had to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-2}
+
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
+
+### ClickHouse Release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-8}
+
+- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}
+
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse Release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}
+
+#### Bug Fixes {#bug-fixes-9}
+
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse Release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}
+
+#### Bug Fixes {#bug-fixes-10}
+
+- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too-filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
+
+### ClickHouse Release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}
+
+#### New Features {#new-features-5}
+
+- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
+- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
+- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
+- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
+- Implemented the function `flatten` (a sketch follows this list). [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
+- Added the functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (it's like `arrayEnumerateUniq` but allows to fine-tune the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
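+
+A one-liner showing the `flatten` function from the list above:
+
+``` sql
+SELECT flatten([[[1, 2]], [[3]], [[4, 5]]]) AS flat;  -- [1, 2, 3, 4, 5]
+```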
+
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before it, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update\_time on a full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fix the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: fixup for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- The `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
+- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
+- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed an issue where `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a deadlock when the `invalidate_query` for a dictionary with a `clickhouse` source was involving the `system.dictionaries` table or the `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes for CROSS JOIN with an empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a segfault in the function "replicate" when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix lambda functions with the predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
+- Multiple fixes for multiple JOINs. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvements {#improvements-3}
+
+- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
+- The result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+- The result of multiple JOINs needs correct result names to be used in subselects. Flat aliases are replaced with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+
+#### Performance Improvements {#performance-improvements-3}
+
+- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use proper lookup tables that use HashTable’s API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
+- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The distributed DDL queue is cleaned up in a separate thread so that it doesn’t slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
+- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened in O_DIRECT mode, because the data size to read was sometimes underestimated by the size of one compressed block (see the sketch after this list). [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
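+
+A minimal sketch of the `min_bytes_to_use_direct_io` behaviour referenced above (the threshold and the table name are hypothetical):
+
+``` sql
+-- Reads of at least 10 MiB per file use O_DIRECT; smaller reads go through the page cache.
+SET min_bytes_to_use_direct_io = 10485760;
+SELECT count() FROM big_table;
+```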
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}
+
+- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- Added the ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- Added dictionary tests to the integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` does not exist in external lz4, because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating-point argument (this fixes a fuzz test with the undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Spelling error corrections. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fixed compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse Release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fixed a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable columns. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed reading of an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}
+
+- Added a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse Release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### Bug Fixes {#bug-fixes-13}
+
+- Fixed the error in #3920. This error manifested itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. The bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### Bug Fixes {#bug-fixes-14}
+
+- When there are more than 1000 threads in a thread pool, `std::terminate` could happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- It is now possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed the comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- Fixed a crash on JOIN with arrays or tuples. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- Incorrect column numbers were printed in error messages about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}
+
+- Fixed the build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable extended accounting and IO accounting based on a known-good version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
+- Allow skipping the setting of `core_dump.size_limit`, with a warning instead of a throw if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouse Release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-15}
+
+- Fixed a bug with the processing of large HTTP insert queries. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the `remote` table function introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### Improvements {#improvements-4}
+
+- Table index size is not accounted for memory limits when doing an `ATTACH TABLE` query. Avoided the possibility that a table could not be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Slightly raised the limit on the max string and array size received from ZooKeeper. It allows continuing to work with an increased size of `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added one required argument to the `SET` index (the maximum number of stored rows); see the sketch after this section. [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Bug Fixes {#bug-fixes-16}
+
+- Fixed the `WITH ROLLUP` result for GROUP BY on a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}
+
+- Added the ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
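+
+A sketch of the `SET` index argument mentioned in the improvements above (table and column names are hypothetical; in 19.x these indices also required `SET allow_experimental_data_skipping_indices = 1`):
+
+``` sql
+CREATE TABLE t
+(
+    x UInt32,
+    s String,
+    INDEX idx_s s TYPE set(100) GRANULARITY 4 -- store at most 100 distinct values per index granule
+) ENGINE = MergeTree() ORDER BY x;
+```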
+
+### ClickHouse Release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### New Features {#new-features-6}
+
+- Added the `KILL MUTATION` statement that allows removing mutations that are stuck for some reason. Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added the aggregate function `entropy`, which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- Added the ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting them into `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented the `NOT BETWEEN` comparison operator (see the sketch after this list). [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- Implemented `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Added support of `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
+- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values for the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- Added the function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- Added the `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added brotli support for the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail Fandyushin](https://github.com/fandyushin))
+- Added hints when a user makes a typo in a function name or in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- Added `Query-Id` to the server’s HTTP response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail Fandyushin](https://github.com/fandyushin))
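+
+A hedged sketch of a few of the new functions above (expected results as comments):
+
+``` sql
+SELECT 5 NOT BETWEEN 1 AND 3;                                                     -- 1
+SELECT toStartOfInterval(toDateTime('2019-02-13 11:47:15'), INTERVAL 15 minute);  -- 2019-02-13 11:45:00
+SELECT entropy(number % 2) FROM numbers(100);                                     -- 1 (one bit of entropy)
+```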
+
+#### Experimental Features {#experimental-features-2}
+
+- Added `minmax` and `set` data skipping indices for the MergeTree table engine family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-17}
+
+- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Made the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWIthNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a segmentation fault with `use_uncompressed_cache=1` and an exception with a wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed a `compile_expressions` bug with the comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed the `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixed a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for the function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition when selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` could segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where the execution of mutations containing the `IN` operator produced incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Added the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Fixed a segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed a fuzz test under the undefined behaviour sanitizer: added parameter type checks for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare race condition when the removal of old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the install package with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}
+
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
+- Added the ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
+- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
+- A package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Refactoring of performance tests. Better logging and signal handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
+- Added docs to the anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
+- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
+- Added a script which creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai)) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added a puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
+- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller)) ([proller](https://github.com/proller))
+- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
+- The init script will wait for the server until it starts. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reduce the mark cache size and the uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the keyword `INDEX` in `CREATE TABLE` queries. A column with the name `index` must be quoted with backticks or double quotes: `` `index` `` (see the sketch after this list). [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behaviour can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
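+
+Two of the incompatible changes above, sketched (assuming a 19.3+ server):
+
+``` sql
+-- `INDEX` is now a keyword, so a column with that name must be quoted:
+CREATE TABLE kw (`index` UInt32) ENGINE = Memory;
+
+-- sumMap now promotes the result type; the old overflowing behaviour is available as sumMapWithOverflow:
+SELECT sumMap(k, v), sumMapWithOverflow(k, v)
+FROM (SELECT [1, 2] AS k, [toUInt8(200), toUInt8(100)] AS v);
+```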
+
+#### Performance Improvements {#performance-improvements-4}
+
+- `std::sort` was replaced by `pdqsort` for queries without a `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
+- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-5}
+
+- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow the `-C` option of the client to work the same as the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
+- The option `--password` used without a value now requires the password to be read from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD_Conqueror](https://github.com/bsd-conqueror))
+- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added cancelling of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
+- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
+- A slightly better message with the reason for an OPTIMIZE query when the `optimize_throw_if_noop` setting is enabled (see the sketch after this list). [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for the `--version` option for the clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Improved server shutdown time and the waiting time of ALTERs. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added info about the `replicated_can_become_leader` setting to `system.replicas` and added logging if the replica won’t try to become leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
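+
+A hedged example of the `optimize_throw_if_noop` improvement above (the table name is hypothetical):
+
+``` sql
+SET optimize_throw_if_noop = 1;
+OPTIMIZE TABLE visits FINAL; -- if the merge cannot be assigned, the error now explains why
+```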
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1}
+
+### ClickHouse Release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- Fixed the error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (a rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse Release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+This release contains exactly the same set of patches as 19.3.7.
+
+### ClickHouse Release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+This release contains exactly the same set of patches as 19.3.6.
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouse Release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-18}
+
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the `remote` table function introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### Bug Fixes {#bug-fixes-19}
+
+- Fixed the install package with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouse Release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### Bug Fixes {#bug-fixes-20}
+
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where the execution of mutations containing the `IN` operator produced incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` could segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition when selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but they should be loaded within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixed a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `compile_expressions` bug with the comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed a segmentation fault with `uncompressed_cache=1` and an exception with a wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWIthNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Fixed a fuzz test under the undefined behaviour sanitizer: added parameter type checks for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed `Not found column` for duplicate columns in the JOIN section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for the function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### New Features {#new-features-7}
+
+- Custom per-column compression codecs for tables (see the sketch after this list). [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
+- Added the compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- Allow to `ALTER` compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- Support for writing to `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- Pruning of unused shards if the `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- Added support for `CatBoost` multiclass model evaluation. The function `modelEvaluate` returns a tuple with raw per-class predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
+- Added hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- Added the `gccMurmurHash` hashing function (GCC-flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
+- Added hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- Added the table function `remoteSecure`. The function works like `remote`, but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
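+
+A sketch of the per-column codecs introduced above (the table is hypothetical; `Delta` is typically chained with a general-purpose codec):
+
+``` sql
+CREATE TABLE codec_demo
+(
+    ts  DateTime CODEC(Delta, ZSTD),
+    val UInt64   CODEC(LZ4)
+) ENGINE = MergeTree() ORDER BY ts;
+
+-- Codecs can be changed later:
+ALTER TABLE codec_demo MODIFY COLUMN val UInt64 CODEC(Delta, LZ4);
+```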
+
+#### Experimental Features {#experimental-features-3}
+
+- Added multiple JOINs emulation (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-21}
+
+- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
+- Fixed a bug that led to hangups in threads that perform ALTERs of replicated tables and in the thread that updates the configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task, with all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug where `from_zk` config elements weren’t refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
+- Fixed a crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
+- Fixed a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug with closing the `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
+- Fixed String-to-UInt monotonic conversion in case of usage of String in the primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault in the `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
+- Fixed a segfault in the functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: the functions `round`, `floor`, `trunc`, `ceil` could return bogus results when executed on an integer argument with a big negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug induced by ‘kill query sync’ which led to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
+- Fixed a bug with a long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
+- Fixed excessive memory usage when inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed `LowCardinality` serialization for the `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed an incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed specialized aggregation with a LowCardinality key (in case the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed user and password forwarding for replicated table queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
+- Fixed a very rare race condition that could happen when listing tables in a Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an incorrect result when used together with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
+- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-6}
+
+- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements, along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN` (see the sketch after this section). [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
+- Function `parseDateTimeBestEffort`: support for the formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
+- Usability improvement: added a check that the server process is started from the data directory’s owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
+- Better logic of checking required columns during analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
+- Decreased the number of connections in case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
+- Supported the totals row for `WITH TOTALS` queries for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
+- Allowed to use `Enum`s as integers inside the if function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
+- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
+- Added a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
+- Fixed the data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
+
+#### Performance Improvements {#performance-improvements-5}
+
+- Added the MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, replicated tables will store compact part metadata in a single part znode. This can dramatically reduce the ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn’t support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added a DFA-based implementation for the functions `sequenceMatch` and `sequenceCount` in case the pattern doesn’t contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Performance improvement of integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
+- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It is used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
+- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
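+
+Two of the improvements above, sketched (the table name is hypothetical):
+
+``` sql
+SELECT parseDateTimeBestEffort('23-10-2019 12:30:00'); -- 2019-10-23 12:30:00
+ALTER TABLE t ADD COLUMN IF NOT EXISTS c UInt32;       -- no error if `c` already exists
+```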
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-2}
+
+- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY`, because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
+- Removed the function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Forbid using scalar subqueries with a result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}
+
+- Added support for the PowerPC (`ppc64le`) build. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
+- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
+- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added per-commit runs of tests with the UBSan build.
+- Added per-commit runs of the PVS-Studio static analyzer.
+- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
+- Added an env variable for users who do not want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
+- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
+- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
+- The `clickhouse-server` debian package will recommend the `libcap2-bin` package to use the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
+- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
+- Fixed cyclical library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
+- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
+- Added a test script to reproduce the performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed misspells in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
+- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))
+
+## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018}
diff --git a/docs/zh/whats_new/changelog/index.md b/docs/zh/whats_new/changelog/index.md
new file mode 100644
index 00000000000..33bb7bfd5f1
--- /dev/null
+++ b/docs/zh/whats_new/changelog/index.md
@@ -0,0 +1,665 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+---
+
+## ClickHouse Release v20.3 {#clickhouse-release-v20-3}
+
+### ClickHouse Release v20.3.4.10, 2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20}
+
+#### Bug Fix {#bug-fix}
+
+- This release also contains all bug fixes from 20.1.8.41.
+- Fixed missing `rows_before_limit_at_least` for queries over http (with the processors pipeline). This fixes [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse Release v20.3.3.6, 2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17}
+
+#### Bug Fix {#bug-fix-1}
+
+- This release also contains all bug fixes from 20.1.7.38.
+- Fixed a bug in replication that doesn’t allow replication to work if the user has executed mutations on the previous version. This fixes [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
+- Added the setting `use_compact_format_in_distributed_parts_names`, which allows writing files for `INSERT` queries into `Distributed` tables in a more compact format (see the sketch after this list). This fixes [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
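+
+A minimal sketch of enabling the compatibility setting above for a session:
+
+``` sql
+SET use_compact_format_in_distributed_parts_names = 1;
+```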
+
+### ClickHouse Release v20.3.2.1, 2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Fixed the issue `file name too long` when sending data for `Distributed` tables for a large number of replicas. Fixed the issue of replica credentials being exposed in the server log. The format of the directory name on disk was changed to `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) After you upgrade to the new version, you will not be able to downgrade without manual intervention, because the old server version does not recognize the new directory format. If you want to downgrade, you have to manually rename the corresponding directories to the old format. This change is relevant only if you have used asynchronous `INSERT`s into `Distributed` tables. In version 20.3.3 we will introduce a setting that will allow you to enable the new format gradually.
+- Changed the format of replication log entries for mutation commands. You have to wait for old mutations to be processed before installing the new version.
+- Implemented a simple memory profiler that dumps stacktraces to `system.trace_log` every N bytes over the soft allocation limit. [\#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([Ivan](https://github.com/abyss7)) [\#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([alexey-milovidov](https://github.com/alexey-milovidov)) The column of `system.trace_log` was renamed from `timer_type` to `trace_type`. This will require changes in third-party performance analysis and flamegraph processing tools.
+- Use the OS thread id everywhere instead of the internal thread number. This fixes [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). Old `clickhouse-client` cannot receive logs sent from the server when the `send_logs_level` setting is enabled, because the names and types of the structured log messages were changed. On the other hand, different server versions can send logs of different types to each other. When you don’t use the `send_logs_level` setting, you should not care. [\#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed the `indexHint` function. [\#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed the `findClusterIndex`, `findClusterValue` functions. This fixes [\#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com`. [\#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it’s not allowed to create columns or add columns with a `SELECT` subquery as the default expression. [\#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin))
+- Require aliases for subqueries in JOIN. [\#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved the `ALTER MODIFY/ADD` query logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn’t change the type of the column, and `MODIFY` of the type doesn’t lose the default expression value. Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is an internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### New Feature {#new-feature}
+
+- Added the `Avro` and `AvroConfluent` input/output formats. [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added the query `ALTER ... MATERIALIZE TTL`. It runs a mutation that forces the removal of expired data by TTL and recalculates the meta-information about TTL in all parts. [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ))
+- Switch from HashJoin to MergeJoin (on disk) if needed. [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `MOVE PARTITION` command for `ALTER TABLE`. [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Reloading the storage configuration from the configuration file on the fly. [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allowed changing `storage_policy` to a not less rich one. [\#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added support for globs/wildcards for the S3 storage and table function. [\#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Implemented `bitAnd`, `bitOr`, `bitXor`, `bitNot` for the `FixedString(N)` data type. [\#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added the function `bitCount`. This fixes [\#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). [\#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov))
+- Added the `generateRandom` table function to generate random rows with a given schema. Allows populating an arbitrary test table with data (see the sketch after this list). [\#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega))
+- `JSONEachRowFormat`: support the special case when objects are enclosed in a top-level array. [\#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Kruglov Pavel](https://github.com/Avogar))
+- Now it’s possible to create a column with a `DEFAULT` expression which depends on a column with a default `ALIAS` expression. [\#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin))
+- Allow specifying a `--limit` larger than the source data size in `clickhouse-obfuscator`. The data will repeat itself with a different random seed. [\#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `groupArraySample` function (similar to `groupArray`) with a reservoir sampling algorithm. [\#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([Amos Bird](https://github.com/amosbird))
+- Now you can monitor the size of the update queue in `cache`/`complex_key_cache` dictionaries via system metrics. [\#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Allow using CRLF as a line separator in the CSV output format with the setting `output_format_csv_crlf_end_of_line` set to 1. [\#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [\#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [\#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb))
+- Implemented more functions of the [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` and `stringToH3`. [\#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery))
+- Introduced a new setting: `max_parser_depth` to control the maximum stack size and allow big complex queries. This fixes [\#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) and [\#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [\#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH))
+- Added the setting `force_optimize_skip_unused_shards` to throw if skipping of unused shards is not possible. [\#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat))
+- Allow configuring multiple disks/volumes for storing data for send in the `Distributed` engine. [\#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat))
+- Support storage policies (``) for storing temporary data. [\#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat))
+- Added the `X-ClickHouse-Exception-Code` HTTP header that is set if an exception was thrown before sending data. This implements [\#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [\#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb))
+- Added the function `ifNotFinite`. It is just syntactic sugar: `ifNotFinite(x, y) = isFinite(x) ? x : y`. [\#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `last_successful_update_time` column to the `system.dictionaries` table. [\#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added the `blockSerializedSize` function (size on disk without compression). [\#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat))
+- Added the function `moduloOrZero`. [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz))
+- Added the system tables `system.zeros` and `system.zeros_mt`, as well as the table functions `zeros()` and `zeros_mt()`. The tables (and table functions) contain a single column with the name `zero` and the type `UInt8`. This column contains zeros. It is needed for test purposes as the fastest method to generate many rows. This fixes [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604). [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
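+
+A hedged sketch of a few of the new functions and tables above (expected results as comments):
+
+``` sql
+SELECT * FROM generateRandom('id UInt32, name String', 1) LIMIT 3; -- three random rows for the given schema
+SELECT ifNotFinite(1 / 0, -1);                                     -- -1
+SELECT moduloOrZero(7, 0);                                         -- 0 instead of a division error
+SELECT count() FROM zeros(1000000);                                -- 1000000, the fastest way to generate rows
+```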
+
+#### Experimental Feature {#experimental-feature}
+
+- Added a new compact format of parts in `MergeTree`-family tables in which all columns are stored in one file. It helps to increase the performance of small and frequent inserts. The old format (one file per column) is now called wide. The data storage format is controlled by the settings `min_bytes_for_wide_part` and `min_rows_for_wide_part`. [\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ))
+- Support for S3 storage for `Log`, `TinyLog` and `StripeLog` tables. [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser))
+
+#### Bug Fix {#bug-fix-2}
+
+- Fixed inconsistent whitespace in log messages. [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in which arrays of unnamed tuples were flattened as Nested structures on table creation. [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2))
+- Fixed the issue when a “Too many open files” error could happen if there are too many files matching the glob pattern in a `File` table or `file` table function. Now files are opened lazily. This fixes [\#8857](https://github.com/ClickHouse/ClickHouse/issues/8857). [\#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Dropping a temporary table now drops only the temporary table. [\#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar))
+- Remove the outdated partition when we shut down the server or DETACH/ATTACH a table. [\#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Regarding how the default disk calculates free space from the `data` subdirectory: fixed the issue when the amount of free space was not calculated correctly if the `data` directory is mounted on a separate device (a rare case). This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441). [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb))
+- Allow comma (cross) joins with IN() inside. [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow rewriting CROSS to INNER JOIN if there is a \[NOT\] LIKE operator in the WHERE section. [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible incorrect result after `GROUP BY` with the enabled setting `distributed_aggregation_memory_efficient`. Fixes [\#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [\#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Found keys were counted as missed in the metrics of cache dictionaries. [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fixed a race condition on `queue_task_handle` at startup of `ReplicatedMergeTree` tables. [\#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The token `NOT` didn’t work in the `SHOW TABLES NOT LIKE` query. [\#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [\#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a range check to the function `h3EdgeLengthM`. Without this check, a buffer overflow is possible. [\#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in batched calculations of ternary logical operations on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed an error of the PREWHERE optimization, which could lead to segfaults or the `Inconsistent number of columns got from MergeTreeRangeReader` exception. [\#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed an unexpected `Timeout exceeded while reading from socket` exception, which randomly happened on secure connections before the timeout was actually exceeded and when the query profiler is enabled. Also added the `connect_timeout_with_failover_secure_ms` setting (default 100ms), which is similar to `connect_timeout_with_failover_ms` but is used for secure connections (because the SSL handshake is slower than an ordinary TCP connection). [\#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix))
+- Fixed a bug with mutation finalization, when a mutation could hang in a state with `parts_to_do=0` and `is_done=0`. [\#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin))
+- Use the new ANY JOIN logic with the `partial_merge_join` setting. It’s possible to make `ANY|ALL|SEMI LEFT` and `ALL INNER` joins with `partial_merge_join=1` now. [\#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2))
+- The shard now clamps the settings received from the initiator to the shard’s constraints instead of throwing an exception. This fix allows sending queries to a shard with other constraints. [\#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a memory management problem in `MergeTreeReadPool`. [\#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the `toDecimal*OrNull()` family of functions when called with the string `e`. Fixes [\#8312](https://github.com/ClickHouse/ClickHouse/issues/8312). [\#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2))
+- Make sure that `FORMAT Null` sends no data to the client. [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fixed a bug where the timestamp in `LiveViewBlockInputStream` was not updated. `LIVE VIEW` is an experimental feature. [\#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [\#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider))
+- Fixed the `ALTER MODIFY TTL` wrong behaviour which didn’t allow deleting old TTL expressions. [\#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a UBSan report in MergeTreeIndexSet. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250). [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160). [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid throwing from the destructor in the Apache Avro 3rd-party library. [\#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Don’t commit a batch polled from `Kafka` partially, as it can lead to holes in the data. [\#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov))
+- Fixed `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird))
+- Fixed data incompatibility when compressed with the `T64` codec. [\#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fixed data type ids in the `T64` compression codec that led to wrong (de)compression in affected versions. [\#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `enable_early_constant_folding` and disabled it in some cases that led to errors. [\#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the pushdown predicate optimizer with VIEW and enabled the test. [\#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a segfault in `Merge` tables, which can happen when reading from `File` storages. [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix))
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise it could make the data of a part inaccessible after restart and prevent ClickHouse from starting. [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed alters if there is a TTL set for the table. [\#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionary is being modified/added/removed. [\#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar))
+- In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in data directory of database (e.g. `/var/lib/clickhouse/db_name`). [\#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix))
+- Fixed wrong log messages about a missing default disk or policy. [\#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed not(has()) for the `bloom_filter` index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Allow the first column(s) in a table with the `Log` engine to be an alias. [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7))
+- Fixed the order of ranges while reading from a `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ))
+- Made `reinterpretAsFixedString` return `FixedString` instead of `String`. [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Avoid extremely rare cases when the user could get a wrong error message (`Success` instead of a detailed error description). [\#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not crash when using the `Template` format with an empty row template. [\#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Metadata files for system tables could be created in the wrong place. [\#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [\#8581](https://github.com/ClickHouse/ClickHouse/issues/8581).
+- Fixed a data race on `exception_ptr` in the cache dictionary [\#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [\#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
以前它是抛出,如果表已经存在,尽管 `IF NOT EXISTS` 条款 [\#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复了异常消息中丢失的关闭paren。 [\#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 避免消息 `Possible deadlock avoided` 在clickhouse客户端在交互模式下启动。 [\#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了base64编码值末尾填充格式错误的问题。 更新base64库。 这修复 [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491),关闭 [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 防止丢失数据 `Kafka` 在极少数情况下,在读取后缀之后但在提交之前发生异常。 修复 [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov)) +- 在固定的异常 `DROP TABLE IF EXISTS` [\#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) +- 修复当用户尝试崩溃 `ALTER MODIFY SETTING` 对于老格式化 `MergeTree` 表引擎家族. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([阿利沙平](https://github.com/alesapin)) +- 支持在JSON相关函数中不适合Int64的UInt64号码。 更新SIMDJSON掌握。 这修复 [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 当使用非严格单调函数索引时,固定执行反转谓词。 [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 不要试图折叠 `IN` 常量在 `GROUP BY` [\#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([阿莫斯鸟](https://github.com/amosbird)) +- 修复bug `ALTER DELETE` 突变导致索引损坏。 这修复 [\#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) 和 [\#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). 另外修复极其罕见的竞争条件 `ReplicatedMergeTree` `ALTER` 查询。 [\#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([阿利沙平](https://github.com/alesapin)) +- 当设置 `compile_expressions` 被启用,你可以得到 `unexpected column` 在 `LLVMExecutableFunction` 当我们使用 `Nullable` 类型 [\#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 多个修复 `Kafka` 引擎:1)修复在消费者组重新平衡期间出现的重复项。 2)修复罕见 ‘holes’ 当数据从一个轮询的几个分区轮询并部分提交时出现(现在我们总是处理/提交整个轮询的消息块)。 3)通过块大小修复刷新(在此之前,只有超时刷新才能正常工作)。 4)更好的订阅程序(与分配反馈)。 5)使测试工作得更快(默认时间间隔和超时)。 由于数据之前没有被块大小刷新(根据文档),pr可能会导致默认设置的性能下降(由于更频繁和更小的刷新不太理想)。 如果您在更改后遇到性能问题-请增加 `kafka_max_block_size` 在表中的更大的值(例如 `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). 修复 [\#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [\#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov)) +- 修复 `Parameter out of bound` 在PREWHERE优化之后的某些查询中出现异常。 [\#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard)) +- 修正了函数参数混合常量的情况 `arrayZip`. [\#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 执行时 `CREATE` 查询,在存储引擎参数中折叠常量表达式。 将空数据库名称替换为当前数据库。 修复 [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix)) +- 现在不可能创建或添加具有简单循环别名的列,如 `a DEFAULT b, b DEFAULT a`. 
[\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([阿利沙平](https://github.com/alesapin)) +- 修正了双重移动可能会损坏原始部分的错误。 这是相关的,如果你使用 `ALTER TABLE MOVE` [\#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 允许 `interval` 用于正确解析的标识符,而无需反引号。 当一个查询不能被执行,即使固定的问题 `interval` 标识符用反引号或双引号括起来。 这修复 [\#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [\#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修正了模糊测试和不正确的行为 `bitTestAll`/`bitTestAny` 功能。 [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复可能的崩溃/错误的行数 `LIMIT n WITH TIES` 当有很多行等于第n行时。 [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix)) +- 使用enabled编写的部件修复突变 `insert_quorum`. [\#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([阿利沙平](https://github.com/alesapin)) +- 修复数据竞赛破坏 `Poco::HTTPServer`. 当服务器启动并立即关闭时,可能会发生这种情况。 [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复运行时显示误导性错误消息的错误 `SHOW CREATE TABLE a_table_that_does_not_exist`. [\#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2)) +- 固定 `Parameters are out of bound` 例外在一些罕见的情况下,当我们在一个常数 `SELECT` 条款时,我们有一个 `ORDER BY` 和一个 `LIMIT` 条款 [\#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 修复突变定稿,当已经完成突变可以有状态 `is_done=0`. [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([阿利沙平](https://github.com/alesapin)) +- 防止执行 `ALTER ADD INDEX` 对于旧语法的MergeTree表,因为它不起作用。 [\#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 在服务器启动时不要访问表,这 `LIVE VIEW` 取决于,所以服务器将能够启动。 也删除 `LIVE VIEW` 分离时的依赖关系 `LIVE VIEW`. `LIVE VIEW` 是一个实验特征。 [\#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix)) +- 修复可能的段错误 `MergeTreeRangeReader`,同时执行 `PREWHERE`. [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复与列Ttl可能不匹配的校验和。 [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修正了一个错误,当部分没有被移动的情况下,只有一个卷的TTL规则在后台。 [\#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修正了这个问题 `Method createColumn() is not implemented for data type Set`. 这修复 [\#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [\#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在我们将尝试更频繁地完成突变。 [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([阿利沙平](https://github.com/alesapin)) +- 修复 `intDiv` 减一个常数 [\#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz)) +- 修复可能的竞争条件 `BlockIO`. 
[\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复尝试使用/删除时导致服务器终止的错误 `Kafka` 使用错误的参数创建的表。 [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov)) +- 增加了解决方法,如果操作系统返回错误的结果 `timer_create` 功能。 [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在使用固定错误 `min_marks_for_seek` 参数。 修复了分布式表中没有分片键时的错误消息,并且我们尝试跳过未使用的分片。 [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat)) + +#### 改进 {#improvement} + +- 执行 `ALTER MODIFY/DROP` 对突变的顶部查询 `ReplicatedMergeTree*` 引擎家族. 现在 `ALTERS` 仅在元数据更新阶段阻止,之后不阻止。 [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([阿利沙平](https://github.com/alesapin)) +- 添加重写交叉到内部连接的能力 `WHERE` 包含未编译名称的部分。 [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2)) +- 赂眉露\>\> `SHOW TABLES` 和 `SHOW DATABASES` 查询支持 `WHERE` 表达式和 `FROM`/`IN` [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li)) +- 添加了一个设置 `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy)) +- 在最近的变化之后,MySQL客户端开始以十六进制打印二进制字符串,从而使它们不可读 ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). ClickHouse中的解决方法是将字符串列标记为UTF-8,这并不总是如此,但通常是这种情况。 [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 添加对字符串和FixedString键的支持 `sumMap` [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard)) +- 支持SummingMergeTree地图中的字符串键 [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard)) +- 即使线程已抛出异常,也向线程池发送线程终止信号 [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([丁香飞](https://github.com/dingxiangfei2009)) +- 允许设置 `query_id` 在 `clickhouse-benchmark` [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([安东\*波波夫](https://github.com/CurtizJ)) +- 不要让奇怪的表达 `ALTER TABLE ... PARTITION partition` 查询。 这个地址 [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 表 `system.table_engines` 现在提供有关功能支持的信息(如 `supports_ttl` 或 `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober)) +- 启用 `system.metric_log` 默认情况下。 它将包含具有ProfileEvents值的行,CurrentMetrics收集与 “collect\_interval\_milliseconds” 间隔(默认情况下为一秒)。 该表非常小(通常以兆字节为单位),默认情况下收集此数据是合理的。 [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. 
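+
+For illustration of the `LIMIT n WITH TIES` fix above, a minimal sketch (the table and values here are hypothetical, not part of the original release notes):
+
+``` sql
+CREATE TABLE scores (x UInt8) ENGINE = Memory;
+INSERT INTO scores VALUES (1), (2), (2), (2), (3);
+
+-- WITH TIES also returns the rows equal to the last row of the plain
+-- LIMIT result, so this query returns 4 rows: 1, 2, 2, 2.
+SELECT x FROM scores ORDER BY x LIMIT 2 WITH TIES;
+```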
+
+#### Improvement {#improvement}
+
+- Implement `ALTER MODIFY/DROP` queries on top of mutations for the `ReplicatedMergeTree*` engine family. Now `ALTERS` block only at the metadata-update stage, and don't block after that. [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin))
+- Add the ability to rewrite CROSS to INNER JOINs with a `WHERE` section containing unqualified names. [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2))
+- Make `SHOW TABLES` and `SHOW DATABASES` queries support `WHERE` expressions and `FROM`/`IN`. [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li))
+- Added the setting `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy))
+- After recent changes the MySQL client started to print binary strings in hex, thereby making them unreadable ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always the case, but usually is. [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy))
+- Add support for String and FixedString keys for `sumMap`. [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard))
+- Support string keys in SummingMergeTree maps. [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard))
+- Signal termination of a thread to the thread pool even if the thread has thrown an exception. [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Allow setting `query_id` in `clickhouse-benchmark`. [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ))
+- Don't allow strange expressions in the `ALTER TABLE ... PARTITION partition` query. This addresses [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober))
+- Enable `system.metric_log` by default. It contains rows with values of ProfileEvents and CurrentMetrics collected with the “collect\_interval\_milliseconds” interval (one second by default). The table is very small (usually in the order of megabytes) and collecting this data by default is reasonable. [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [\#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [\#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7))
+- Now a temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...`. [\#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix))
+- Add the `text_log.level` configuration parameter to limit the entries that go to the `system.text_log` table. [\#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat))
+- Allow putting downloaded parts to disks/volumes according to TTL rules. [\#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon))
+- For external MySQL dictionaries, allow mutualizing the MySQL connection pool to “share” it among dictionaries. This option significantly reduces the number of connections to MySQL servers. [\#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Show the nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [\#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Possibility to add a key and a timestamp for the message when inserting data into Kafka. Fixes [\#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [\#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov))
+- If the server is run from a terminal, highlight the thread number, query id and log priority by colors. This is for improved readability of correlated log messages for developers. [\#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message while loading tables for `Ordinary` databases. [\#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implement `arraySlice` for arrays with aggregate function states. This fixes [\#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [\#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow constant functions and constant arrays to be used on the right side of the IN operator. [\#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ))
+- If a ZooKeeper exception happened while fetching data for system.replicas, display it in a separate column. This implements [\#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [\#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Atomically remove MergeTree data parts on destroy. [\#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Support row-level security for Distributed tables. [\#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7))
+- Now we recognize suffixes (like KB, KiB…) in settings values. [\#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb))
+- Prevent out of memory while constructing the result of a large JOIN. [\#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the names of clusters to suggestions in the interactive mode of `clickhouse-client`. [\#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [\#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7))
+- Added the column `exception_code` to the `system.query_log` table. [\#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb))
+- Enabled the MySQL compatibility server on port `9004` in the default server configuration file. Fixed the password generation command in the example in the configuration. [\#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy))
+- Prevent abort on shutdown if the filesystem is readonly. This fixes [\#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [\#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message when length is required in an HTTP POST query. [\#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `_path` and `_file` virtual columns to the `HDFS` and `File` engines and the `hdfs` and `file` table functions (see the sketch after this list). [\#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed the error `Cannot find column` while inserting into a `MATERIALIZED VIEW` in the case when a new column was added to the view's internal table. [\#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [\#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [\#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [\#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed progress over the native client-server protocol by sending progress after the final update (like logs). This may be relevant only to some third-party tools that use the native protocol. [\#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat))
+- Added a system metric tracking the number of client connections using the MySQL protocol ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach))
+- From now on, HTTP responses will have the `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon))
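+
+As an illustration of the `_path` and `_file` virtual columns mentioned above, a minimal sketch (the file glob and schema are hypothetical):
+
+``` sql
+-- Each row carries the path and the name of the file it was read from.
+SELECT _path, _file, count()
+FROM file('data/*.csv', 'CSV', 'id UInt32, value String')
+GROUP BY _path, _file;
+```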
+
+#### Performance Improvement {#performance-improvement}
+
+- Improve the performance of analysing indexes with IN. [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ))
+- Simpler and more efficient code in logical functions + code cleanups. A follow-up to [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz))
+- Overall performance improvement (in the range of 5%..200% for affected queries) by ensuring even stricter aliasing with C++20 features. [\#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird))
+- Stricter aliasing for the inner loops of comparison functions. [\#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Stricter aliasing for the inner loops of arithmetic functions. [\#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A ~3 times faster implementation of ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. Also useful in tests when materializing constants. [\#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz))
+- Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher-order functions), an even further improvement over [\#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [\#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz))
+- Improved performance of the `stochasticLinearRegression` aggregate function. This patch is contributed by Intel. [\#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improve performance of the `reinterpretAsFixedString` function. [\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not send blocks to the client for the `Null` format in the processors pipeline (see the sketch below). [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
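+
+A common use of the `Null` format mentioned in the last item is measuring query execution without transferring the result; a minimal sketch:
+
+``` sql
+-- The query is fully executed, but no result blocks are sent to the client.
+SELECT number * 2 FROM system.numbers_mt LIMIT 10000000 FORMAT Null;
+```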
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+- Replaced `readline` with `replxx` for interactive line editing in `clickhouse-client`. [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7))
+- Better build time and fewer template instantiations in FunctionsComparison. [\#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added integration with `clang-tidy` in CI. See also [\#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [\#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we link ClickHouse in CI using `lld` even for `gcc`. [\#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin))
+- Allow randomizing thread scheduling and inserting glitches when the `THREAD_FUZZER_*` environment variables are set. This helps testing. [\#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable secure sockets in stateless tests. [\#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix))
+- Make SPLIT\_SHARED\_LIBRARIES=OFF more robust. [\#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat))
+- Make the “performance\_introspection\_and\_logging” test reliable against the server getting stuck randomly. This may happen in a CI environment. See also [\#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [\#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Validate XML in the style check. [\#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition in the test `00738_lock_for_inner_table`. This test relied on sleep. [\#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (which is more reliable). [\#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for arithmetic functions. [\#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for the `sumMap` and `sumMapWithOverflow` aggregate functions. A follow-up to [\#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [\#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Ensure the style of error codes via the style check. [\#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a script for tests history. [\#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin))
+- Added the GCC warning `-Wsuggest-override` to locate and fix all the places where the `override` keyword must be used. [\#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Ignore a weak symbol under Mac OS X because it must be defined. [\#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([Deleted user](https://github.com/ghost))
+- Normalize the running time of some queries in performance tests. This is done in preparation for running all the performance tests in comparison mode. [\#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed some tests to support pytest with query tests. [\#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7))
+- Enable SSL in builds with MSan, so the server does not fail at startup when running stateless tests. [\#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix))
+- Fixed database substitution in test results. [\#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega))
+- Build fixes for miscellaneous platforms. [\#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [\#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [\#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller))
+- Added a disks section to the stateless-with-coverage test docker image. [\#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser))
+- Get rid of in-source-tree files when building with GRPC. [\#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird))
+- Slightly faster build time by removing SessionCleaner from Context. Made the code of SessionCleaner simpler. [\#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the check for hung queries in the clickhouse-test script. [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz))
+- Removed some useless files from the repository. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the type of math perftests from `once` to `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a docker image which allows building an interactive code browser HTML report for our codebase. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See the [Woboq Code Browser](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/index.html)
+- Suppressed some test failures under MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Sped up the “exception while insert” test. This test often timed out in debug-with-coverage builds. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libcxx` and `libcxxabi` to master. In preparation for [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the flacky test `00910_zookeeper_test_alter_compression_codecs`. [\#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Cleaned up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [\#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
+- Added the `clickhouse-odbc` driver to the test images. This allows testing the interaction of ClickHouse with ClickHouse via its own ODBC driver. [\#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
+- Fixed several bugs in unit tests. [\#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
+- Enabled the `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes, mostly the result of CMake scripting errors. [\#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Describe the reasons if the query profiler cannot work. This is intended for [\#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [\#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated OpenSSL to upstream master. Fixed the issue when TLS connections could fail with the messages `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the Dockerfile for the server. [\#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray))
+- Minor fixes in the build-gcc-from-sources script. [\#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach))
+- Replaced `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [\#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a stack overflow issue when using initializer\_list in column constructors. [\#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([Deleted user](https://github.com/ghost))
+- Upgraded librdkafka to v1.3.0. Enabled the bundled `rdkafka` and `gsasl` libraries on Mac OS X. [\#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Build fix on GCC 9.2.0. [\#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider))
+
+## ClickHouse Release v20.1 {#clickhouse-release-v20-1}
+
+### ClickHouse Release v20.1.8.41, 2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20}
+
+#### Bug Fix {#bug-fix-3}
+
+- Fixed possible permanent `Cannot schedule a task` errors (due to an unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). This fixes [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat))
+- Fixed excessive memory consumption in `ALTER` queries (mutations). This fixes [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) and [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin))
+- Fixed a bug with backquoting in DDL of external dictionaries. This fixes [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse Release v20.1.7.38, 2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. This led to an exception while using these functions in remote queries. [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was present in all ClickHouse releases.
+- Allow `ALTER ON CLUSTER` for `Distributed` tables with internal replication. This fixes [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was present in all ClickHouse releases.
+- Fixed possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed the issue where the timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`); see the sketch after this list. This fixes [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). This issue was present in all ClickHouse releases.
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed the issue when padding at the end of a base64-encoded value could be malformed. Updated the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race at the destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fixed possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a crash when a user tries `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engine family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Now we try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fixed the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fixed not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
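+
+To illustrate the timezone fix above, a sketch of the two expressions it makes consistent (the timestamp and timezone here are hypothetical):
+
+``` sql
+WITH toDateTime('2020-03-18 00:00:00', 'Asia/Istanbul') AS time
+SELECT
+    time + 1,                  -- now keeps the 'Asia/Istanbul' timezone
+    time + INTERVAL 1 SECOND;  -- always kept the timezone
+```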
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+
+### ClickHouse Release v20.1.6.30, 2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05}
+
+#### Bug Fix {#bug-fix-5}
+
+- Fixed data incompatibility when compressed with the `T64` codec.
+  [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7)
+- Fixed the order of ranges while reading from a MergeTree table in one thread. Fixes [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964).
+  [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ)
+- Fixed a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. Fixes [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064).
+  [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ)
+- Fixed `reinterpretAsFixedString` to return `FixedString` instead of `String`.
+  [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew)
+- Fixed `joinGet` with nullable return types. Fixes [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919)
+  [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird)
+- Fixed a fuzz test and incorrect behaviour of the bitTestAll/bitTestAny functions.
+  [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed the behaviour of the match and extract functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. Fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160)
+  [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed the execution of inversed predicates when a non-strictly-monotonic functional index is used. Fixes [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034)
+  [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz)
+- Allow rewriting `CROSS` to `INNER JOIN` if there is a `[NOT] LIKE` operator in the `WHERE` section. Fixes [\#9191](https://github.com/ClickHouse/ClickHouse/issues/9191)
+  [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2)
+- Allow the first column(s) in a table with the Log engine to be an alias.
+  [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7)
+- Allow comma joins with `IN()` inside. Fixes [\#7314](https://github.com/ClickHouse/ClickHouse/issues/7314).
+  [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2)
+- Improved the `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of a type doesn't lose the default expression value. Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669).
+  [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin)
+- Fixed mutations finalization, when an already done mutation could have the status is\_done=0.
+  [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin)
+- Support the “Processors” pipeline for system.numbers and system.numbers\_mt. This also fixes a bug where `max_execution_time` was not respected.
+  [\#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Fixed wrong counting of the `DictCacheKeysRequestedFound` metric.
+  [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov)
+- Added a check for storage policies in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`, which otherwise could make the data of a part inaccessible after restart and prevent ClickHouse from starting.
+  [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon)
+- Fixed a UBSan report in `MergeTreeIndexSet`. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250)
+  [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed a possible datarace in BlockIO.
+  [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Support `UInt64` numbers that don't fit in Int64 in JSON-related functions. Update `SIMDJSON` to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209)
+  [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed the issue when the amount of free space was calculated incorrectly if the data directory was mounted to a separate device. For the default disk, calculate the free space from the data subdirectory. This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441)
+  [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(millb)](https://github.com/millb)
+- Fixed the issue when TLS connections could fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` Updated OpenSSL to upstream master.
+  [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fixed the check for local addresses in ClickHouseDictionarySource.
+  [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+- Fixed a segfault in `StorageMerge`, which can happen when reading from StorageFile.
+  [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tavplubix)](https://github.com/tavplubix)
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+  [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fixed a bug leading to server termination when trying to use/drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+  [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+
+#### New Feature {#new-feature-1}
+
+- Added the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
+  [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
+
+### ClickHouse Release v20.1.2.4, 2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Made the setting `merge_tree_uniform_read_distribution` obsolete. The server still recognizes this setting, but it has no effect. [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the return type of the function `greatCircleDistance` to `Float32` because now the result of the calculation is `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now query parameters are expected to be represented in the “escaped” format. For example, to pass a string containing a tab between `a` and `b` you have to write `a\tb` or `a\b` (with a literal tab), and `a%5Ctb` or `a%5C%09b` respectively in a URL. This is needed to add the possibility of passing NULL as `\N`. This fixes [\#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [\#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enabled the `use_minimalistic_part_header_in_zookeeper` setting for `ReplicatedMergeTree` by default. This significantly reduces the amount of data stored in ZooKeeper. This setting has been supported since version 19.1 and we have already used it in production in multiple services without any issues for more than half a year. Disable this setting if you have a chance to downgrade to versions older than 19.1. [\#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Data skipping indices are production ready and enabled by default. The settings `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` and `allow_experimental_multiple_joins_emulation` are now obsolete and do nothing. [\#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added new `ANY JOIN` logic for `StorageJoin` consistent with the `JOIN` operation. To upgrade without changes in behaviour you need to add `SETTINGS any_join_distinct_right_table_keys = 1` to the Engine Join tables metadata or recreate these tables after the upgrade (see the sketch after this list). [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid a bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
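+
+For the `StorageJoin` change above, a sketch of keeping the old `ANY INNER JOIN` semantics for an Engine Join table recreated after the upgrade (the table and column names are hypothetical):
+
+``` sql
+-- The setting named in the entry above preserves the pre-upgrade behaviour.
+CREATE TABLE join_state (k UInt32, v String)
+ENGINE = Join(ANY, LEFT, k)
+SETTINGS any_join_distinct_right_table_keys = 1;
+```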
+
+#### New Feature {#new-feature-2}
+
+- Added information about part paths to `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added the ability to execute the `SYSTEM RELOAD DICTIONARY` query in `ON CLUSTER` mode. [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added the ability to execute `CREATE DICTIONARY` queries in `ON CLUSTER` mode. [\#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([alesapin](https://github.com/alesapin))
+- Now a user's profile in `users.xml` can inherit multiple profiles. [\#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added the `system.stack_trace` table that allows looking at the stack traces of all server threads. This is useful for developers to introspect the server state. This fixes [\#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). [\#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `DateTime64` datatype with configurable sub-second precision. [\#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the table function `clusterAllReplicas` which allows querying all the nodes in the cluster. [\#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari))
+- Added the aggregate function `categoricalInformationValue` which calculates the information value of a discrete feature. [\#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz))
+- Sped up parsing of data files in the `CSV`, `TSV` and `JSONEachRow` formats by doing it in parallel. [\#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Added the function `bankerRound` which performs banker's rounding. [\#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz))
+- Support more languages in the embedded dictionary for region names: ‘ru’, ‘en’, ‘ua’, ‘uk’, ‘by’, ‘kz’, ‘tr’, ‘de’, ‘uz’, ‘lv’, ‘lt’, ‘et’, ‘pt’, ‘he’, ‘vi’. [\#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved consistency of the `ANY JOIN` logic. Now `t1 ANY LEFT JOIN t2` equals `t2 ANY RIGHT JOIN t1`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `any_join_distinct_right_table_keys` which enables the old behaviour of `ANY INNER JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Added new `SEMI` and `ANTI JOIN`. The old `ANY INNER JOIN` behaviour is now available as `SEMI LEFT JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `Distributed` format for the `File` engine and the `file` table function which allows reading from `.bin` files generated by asynchronous inserts into `Distributed` tables. [\#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added an optional reset-column argument to `runningAccumulate` which allows resetting aggregation results for each new key value. [\#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([Sergey Kononenko](https://github.com/kononencheg))
+- Added the ability to use ClickHouse as a Prometheus endpoint. [\#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([vdimir](https://github.com/Vdimir))
+- Added the section `` in `config.xml` which restricts allowed hosts for remote table engines and the table functions `URL`, `S3`, `HDFS`. [\#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([Mikhail Korotov](https://github.com/millb))
+- Added the function `greatCircleAngle` which calculates the distance on a sphere in degrees. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the Earth radius to be consistent with the H3 library. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `JSONCompactEachRow` and `JSONCompactEachRowWithNamesAndTypes` input and output formats. [\#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([Mikhail Korotov](https://github.com/millb))
+- Added a feature for the file-related table engines and table functions (`File`, `S3`, `URL`, `HDFS`) which allows reading and writing `gzip` files, based on an additional engine parameter or the file extension. [\#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([Andrei Bodrov](https://github.com/apbodrov))
+- Added the `randomASCII(length)` function, which generates a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. [\#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([BayoNet](https://github.com/BayoNet))
+- Added the function `JSONExtractArrayRaw` which returns an array of unparsed JSON array elements from a `JSON` string. [\#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx))
+- Added the `arrayZip` function which allows combining multiple arrays of equal length into one array of tuples (see the sketch after this list). [\#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([Winter Zhang](https://github.com/zhang2014))
+- Added the ability to move data between disks according to configured `TTL` expressions for the `*MergeTree` table engine family. [\#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added the new aggregate function `avgWeighted` which allows calculating the weighted average. [\#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([Andrei Bodrov](https://github.com/apbodrov))
+- Now parallel parsing is enabled by default for the `TSV`, `TSKV`, `CSV` and `JSONEachRow` formats. [\#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added several geo functions from the `H3` library: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` and `h3kRing`. [\#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit))
+- Added brotli (`br`) compression support in file-related storages and table functions. This fixes [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added `groupBit*` functions for the `SimpleAggregationFunction` type. [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE))
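+
+A minimal sketch of two of the new functions above, `arrayZip` and `avgWeighted` (the values are hypothetical):
+
+``` sql
+-- arrayZip combines arrays of equal length into one array of tuples:
+-- [('a',1),('b',2),('c',3)]
+SELECT arrayZip(['a', 'b', 'c'], [1, 2, 3]);
+
+-- avgWeighted(value, weight) is the weighted mean: (4*1 + 10*2) / 3 = 8
+SELECT avgWeighted(x, w)
+FROM (SELECT 4 AS x, 1 AS w UNION ALL SELECT 10 AS x, 2 AS w);
+```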
[\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复罕见的崩溃 `argMin` 和 `argMax` 长字符串参数的函数,当结果被用于 `runningAccumulate` 功能。 这修复 [\#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [\#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([恐龙](https://github.com/769344359)) +- 修复表的内存过度使用 `Buffer` 引擎 [\#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat)) +- 修正了可以采取的功能中的潜在错误 `NULL` 作为参数之一,并返回非NULL。 [\#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在线程池中更好地计算后台进程的指标 `MergeTree` 表引擎. [\#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复功能 `IN` 里面 `WHERE` 存在行级表筛选器时的语句。 修复 [\#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [\#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([伊万](https://github.com/abyss7)) +- 现在,如果整数值没有完全解析设置值,则会引发异常。 [\#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 修复当聚合函数用于查询具有两个以上本地分片的分布式表时出现的异常。 [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) +- 现在,bloom filter可以处理零长度数组,并且不执行冗余计算。 [\#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab)) +- 修正了通过匹配客户端主机来检查客户端主机是否允许 `host_regexp` 在指定 `users.xml`. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 放松不明确的列检查,导致多个误报 `JOIN ON` 科。 [\#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2)) +- 修正了可能的服务器崩溃 (`std::terminate`)当服务器不能发送或写入数据 `JSON` 或 `XML` 格式与值 `String` 数据类型(需要 `UTF-8` 验证)或使用Brotli算法或其他一些罕见情况下压缩结果数据时。 这修复 [\#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复竞争条件 `StorageDistributedDirectoryMonitor` 被线人发现 这修复 [\#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [\#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在背景合并 `*MergeTree` 表引擎家族更准确地保留存储策略卷顺序。 [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 现在表引擎 `Kafka` 与正常工作 `Native` 格式。 这修复 [\#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [\#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [\#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov)) +- 固定格式与标题(如 `CSVWithNames`)这是抛出关于EOF表引擎的异常 `Kafka`. [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov)) +- 修复了从子查询右侧部分制作set的错误 `IN` 科。 这修复 [\#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) 和 [\#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [\#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 从存储读取时修复可能的崩溃 `File`. [\#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 在固定的文件读取 `Parquet` 包含类型列的格式 `list`. [\#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([马苏兰](https://github.com/maxulan)) +- 修复错误 `Not found column` 对于分布式查询 `PREWHERE` 条件取决于采样键if `max_parallel_replicas > 1`. 
[\#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复错误 `Not found column` 如果使用查询 `PREWHERE` 依赖于表的别名,结果集由于主键条件而为空。 [\#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 函数的固定返回类型 `rand` 和 `randConstant` 在情况下 `Nullable` 争论。 现在函数总是返回 `UInt32` 而且从来没有 `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 禁用谓词下推 `WITH FILL` 表达。 这修复 [\#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [\#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([张冬](https://github.com/zhang2014)) +- 修正错误 `count()` 结果 `SummingMergeTree` 当 `FINAL` 部分被使用。 [\#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [\#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 修复来自远程服务器的常量函数可能不正确的结果。 它发生在具有以下功能的查询中 `version()`, `uptime()` 等。 它为不同的服务器返回不同的常量值。 这修复 [\#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [\#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复下推谓词优化中导致错误结果的复杂错误。 这解决了下推谓词优化的很多问题。 [\#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([张冬](https://github.com/zhang2014)) +- 修复崩溃 `CREATE TABLE .. AS dictionary` 查询。 [\#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat)) +- 一些改进ClickHouse语法 `.g4` 文件 [\#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([太阳里](https://github.com/taiyang-li)) +- 修复导致崩溃的错误 `JOIN`s与表与发动机 `Join`. 这修复 [\#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [\#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [\#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [\#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [\#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复冗余字典重新加载 `CREATE DATABASE`. [\#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat)) +- 限制从读取流的最大数量 `StorageFile` 和 `StorageHDFS`. 修复https://github.com/ClickHouse/ClickHouse/issues/7650. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([阿利沙平](https://github.com/alesapin)) +- 修复bug `ALTER ... MODIFY ... CODEC` 查询,当用户同时指定默认表达式和编解ec。 修复 [8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [\#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([阿利沙平](https://github.com/alesapin)) +- 修复列的后台合并错误 `SimpleAggregateFunction(LowCardinality)` 类型。 [\#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 固定类型签入功能 `toDateTime64`. [\#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 现在服务器不崩溃 `LEFT` 或 `FULL JOIN` 与和加入引擎和不支持 `join_use_nulls` 设置。 [\#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2)) +- 现在 `DROP DICTIONARY IF EXISTS db.dict` 查询不会抛出异常,如果 `db` 根本不存在 [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复表函数中可能出现的崩溃 (`file`, `mysql`, `remote`)引用删除引起的 `IStorage` 对象。 修复插入表函数时指定的列的不正确解析。 [\#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix)) +- 确保网络启动前 `clickhouse-server`. 这修复 [\#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). 
[\#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([余志昌](https://github.com/yuzhichang)) +- 修复安全连接的超时处理,因此查询不会无限挂起。 这修复 [\#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [\#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `clickhouse-copier`并发工人之间的冗余争用。 [\#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([丁香飞](https://github.com/dingxiangfei2009)) +- 现在突变不会跳过附加的部分,即使它们的突变版本比当前的突变版本大。 [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([余志昌](https://github.com/yuzhichang)) [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([阿利沙平](https://github.com/alesapin)) +- 忽略冗余副本 `*MergeTree` 数据部分移动到另一个磁盘和服务器重新启动后。 [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复崩溃 `FULL JOIN` 与 `LowCardinality` 在 `JOIN` 钥匙 [\#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2)) +- 禁止在插入查询中多次使用列名,如 `INSERT INTO tbl (x, y, x)`. 这修复 [\#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [\#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [\#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([阿利沙平](https://github.com/alesapin)) +- 增加了回退,用于检测未知Cpu的物理CPU内核数量(使用逻辑CPU内核数量)。 这修复 [\#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [\#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `There's no column` 实例化列和别名列出错。 [\#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2)) +- 固定切断崩溃时 `EXISTS` 查询没有使用 `TABLE` 或 `DICTIONARY` 预选赛 就像 `EXISTS t`. 这修复 [\#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). 此错误在版本19.17中引入。 [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复罕见错误 `"Sizes of columns doesn't match"` 使用时可能会出现 `SimpleAggregateFunction` 列。 [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) +- 修正错误,其中用户空 `allow_databases` 可以访问所有数据库(和相同的 `allow_dictionaries`). [\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod)) +- 修复客户端崩溃时,服务器已经从客户端断开连接。 [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) +- 修复 `ORDER BY` 在按主键前缀和非主键后缀排序的情况下的行为。 [\#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([安东\*波波夫](https://github.com/CurtizJ)) +- 检查表中是否存在合格列。 这修复 [\#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [\#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2)) +- 固定行为 `ALTER MOVE` 合并完成后立即运行移动指定的超部分。 修复 [\#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 使用时修复可能的服务器崩溃 `UNION` 具有不同数量的列。 修复 [\#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [\#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复函数结果子字符串的大小 `substr` 负大小。 [\#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在服务器不执行部分突变 `MergeTree` 如果后台池中没有足够的可用线程。 [\#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix)) +- 修复格式化时的小错字 `UNION ALL` AST. 
[\#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91)) +- 修正了负数不正确的布隆过滤结果。 这修复 [\#8317](https://github.com/ClickHouse/ClickHouse/issues/8317). [\#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([张冬](https://github.com/zhang2014)) +- 在解压缩固定潜在的缓冲区溢出。 恶意用户可以传递捏造的压缩数据,这将导致缓冲区后读取。 这个问题是由Yandex信息安全团队的Eldar Zaitov发现的。 [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复因整数溢出而导致的错误结果 `arrayIntersect`. [\#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在 `OPTIMIZE TABLE` query不会等待脱机副本执行该操作。 [\#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([javi santana](https://github.com/javisantana)) +- 固定 `ALTER TTL` 解析器 `Replicated*MergeTree` 桌子 [\#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复服务器和客户端之间的通信,以便服务器在查询失败后读取临时表信息。 [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) +- 修复 `bitmapAnd` 在聚合位图和标量位图相交时出现函数错误。 [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([黄月](https://github.com/moon03432)) +- 完善的定义 `ZXid` 根据动物园管理员的程序员指南,它修复了错误 `clickhouse-cluster-copier`. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([丁香飞](https://github.com/dingxiangfei2009)) +- `odbc` 表函数现在尊重 `external_table_functions_use_nulls` 设置。 [\#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 修正了导致罕见的数据竞赛的错误。 [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 现在 `SYSTEM RELOAD DICTIONARY` 完全重新加载字典,忽略 `update_field`. 这修复 [\#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). [\#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 添加检查字典是否存在于创建查询的能力。 [\#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([阿利沙平](https://github.com/alesapin)) +- 修复 `Float*` 解析中 `Values` 格式。 这修复 [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) +- 修复崩溃时,我们不能在一些后台操作保留空间 `*MergeTree` 表引擎家族. [\#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复表包含合并操作时的崩溃 `SimpleAggregateFunction(LowCardinality)` 列。 这修复 [\#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [\#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat)) +- 恢复对所有ICU区域设置的支持,并添加对常量表达式应用排序规则的功能。 还添加语言名称 `system.collations` 桌子 [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([阿利沙平](https://github.com/alesapin)) +- 修正错误时,外部字典与零最小寿命 (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)`)不要在后台更新。 [\#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([阿利沙平](https://github.com/alesapin)) +- 修复当clickhouse源外部字典在查询中有子查询时崩溃。 [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复文件扩展名不正确的解析表与引擎 `URL`. 这修复 [\#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [\#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([安德烈\*博德罗夫](https://github.com/apbodrov)) +- 修复 `CHECK TABLE` 查询为 `*MergeTree` 表没有关键. 修复 [\#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). 
[\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin)) +- Fixed conversion of `Float64` to the MySQL type. [\#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([Yuriy Baranov](https://github.com/yurriy)) +- Now, if a table was not completely dropped because of a server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) +- Fixed a crash in the table function `file` while inserting into a file that doesn't exist. Now in this case the file is created and the insert is processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fixed a rare deadlock which can happen when `trace_log` is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) +- Added the ability to work with types other than `Date` in `RangeHashed` external dictionaries created from a DDL query. Fixes [7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) +- Fixed a crash when `now64()` is called with the result of another function. [\#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([Vasily Nemkov](https://github.com/Enmk)) +- Fixed a bug with detecting the client IP for connections through the MySQL wire protocol. [\#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz)) +- Fixed empty array handling in the `arraySplit` function. This fixes [\#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). [\#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz)) +- Fixed the issue when the `pid-file` of another running `clickhouse-server` could be deleted. [\#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([Weiqing Xu](https://github.com/weiqxu)) +- Fixed dictionary reload in the case when the dictionary has `invalidate_query`, which stopped updates after some exception on a previous update attempt. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) +- Fixed a bug in the function `arrayReduce` that may lead to a “double free” and a bug in the aggregate function combinator `Resample` that may lead to a memory leak. Added the aggregate function `aggThrow`. This function can be used for testing purposes. [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov))
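A minimal illustration of the duplicate-column check from [\#7685] above; the table is hypothetical, and the last statement is now rejected instead of producing ambiguous data:

```sql
-- Hypothetical table used only for illustration.
CREATE TABLE tbl (x UInt32, y UInt32) ENGINE = MergeTree() ORDER BY x;

INSERT INTO tbl (x, y) VALUES (1, 2);        -- OK
INSERT INTO tbl (x, y, x) VALUES (1, 2, 3);  -- error: column x is listed twice
```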
+ +#### Improvement {#improvement-1} + +- Improved logging when using the `S3` table engine. [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) +- Print a help message when no arguments are passed when calling `clickhouse-local`. This fixes [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic)) +- Added the setting `mutations_sync`, which allows waiting for `ALTER UPDATE/DELETE` queries synchronously (see the sketch after this list). [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin)) +- Allow setting a relative `user_files_path` in `config.xml` (in the way similar to `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz)) +- Added an exception for illegal types for conversion functions with the `-OrZero` postfix. [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([Andrey Konyaev](https://github.com/akonyaev90)) +- Simplified the format of the header of data sent to a shard in a distributed query. [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([Vitaly Baranov](https://github.com/vitlibar)) +- `Live View` table engine refactoring. [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov)) +- Added additional checks for external dictionaries created from DDL queries. [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin)) +- Fixed the error `Column ... already exists` when using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Now the first argument of the `joinGet` function can be a table identifier. [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird)) +- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov)) +- Now background moves between disks run in a separate thread pool. [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon)) +- `SYSTEM RELOAD DICTIONARY` now executes synchronously. [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar)) +- Stack traces now display physical addresses (offsets in the object file) instead of virtual memory addresses (where the object file was loaded). That allows the use of `addr2line` when the binary is position independent and ASLR is active. This fixes [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Support the new syntax for row-level security filters: `<table name='table_name'>…</table>`.
Fixes [\#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [\#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([Ivan](https://github.com/abyss7)) +- Now the `cityHash` function can work with `Decimal` and `UUID` types. Fixes [\#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [\#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([Mikhail Korotov](https://github.com/millb)) +- Removed the fixed index granularity (it was 1024) from system logs, because it's obsolete after the implementation of adaptive granularity. [\#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Enabled the MySQL compatibility server when ClickHouse is compiled without SSL. [\#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([Yuriy Baranov](https://github.com/yurriy)) +- Now the server checksums distributed batches, which gives more verbose errors in case of corrupted data in a batch. [\#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat)) +- Support `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` and `ATTACH TABLE` for the `MySQL` database engine. [\#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([Winter Zhang](https://github.com/zhang2014)) +- Added authentication in the S3 table function and table engine. [\#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Added a check for extra parts of `MergeTree` on different disks, in order not to miss data parts on undefined disks. [\#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Enabled SSL support for the Mac client and server. [\#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([Ivan](https://github.com/abyss7)) +- Now ClickHouse can work as a MySQL federated server (see https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html). [\#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov)) +- `clickhouse-client` now only enables `bracketed-paste` when multiquery is on and multiline is off. This fixes [\#7757](https://github.com/ClickHouse/ClickHouse/issues/7757). [\#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird)) +- Support `Array(Decimal)` in the `if` function. [\#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2)) +- Support Decimals in the `arrayDifference`, `arrayCumSum` and `arrayCumSumNegative` functions. [\#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2)) +- Added a `lifetime` column to the `system.dictionaries` table. [\#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [\#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule)) +- Improved the check for existing parts on different disks for `*MergeTree` table engines. Addresses [\#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [\#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Integration with the `AWS SDK` for `S3` interactions, which allows using all S3 features out of the box. [\#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([Pavel Kovalenko](https://github.com/Jokser)) +- Added support for subqueries in `Live View` tables. [\#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov)) +- Removed the check for using `Date` or `DateTime` columns in `TTL` expressions. [\#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Information about the disk was added to the `system.detached_parts` table. [\#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Now the settings `max_(table|partition)_size_to_drop` can be changed without a restart. [\#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) +- Slightly better usability of error messages: ask the user not to remove the lines below `Stack trace:`.
[\#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Better reading of messages from the `Kafka` engine in various formats after [\#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [\#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([Ivan](https://github.com/abyss7)) +- Better compatibility with MySQL clients which don't support the `sha2_password` auth plugin. [\#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([Yuriy Baranov](https://github.com/yurriy)) +- Support more column types in the MySQL compatibility server. [\#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([Yuriy Baranov](https://github.com/yurriy)) +- Implemented the `ORDER BY` optimization for `Merge`, `Buffer` and `Materialized View` storages with underlying `MergeTree` tables. [\#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([Anton Popov](https://github.com/CurtizJ)) +- Now we always use the POSIX implementation of `getrandom` for better compatibility with old kernels (< 3.17). [\#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([Amos Bird](https://github.com/amosbird)) +- Better check for a valid destination in a move TTL rule. [\#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Better checks for broken insert batches for the `Distributed` table engine. [\#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat)) +- Added a column with an array of the names of parts which mutations must process in the future to the `system.mutations` table. [\#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([alesapin](https://github.com/alesapin)) +- Parallel merge sort optimization for processors. [\#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- The setting `mark_cache_min_lifetime` is now obsolete and does nothing. In previous versions, the mark cache could grow in memory larger than `mark_cache_size` to accommodate data within `mark_cache_min_lifetime` seconds. That led to confusion and higher memory usage than expected, which is especially bad on memory-constrained systems. If you see performance degradation after installing this release, you should increase `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Preparation to use `tid` everywhere. This is needed for [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov))
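A minimal sketch of the `mutations_sync` setting from [\#8237] above, assuming the usual 0/1/2 semantics (asynchronous / wait for the current server / wait for all replicas); the table name is hypothetical:

```sql
-- Make the ALTER return only after the mutation has finished on all replicas.
SET mutations_sync = 2;
ALTER TABLE hits DELETE WHERE event_date < '2019-01-01';
```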
+ +#### Performance Improvement {#performance-improvement-1} + +- Performance optimization in the processors pipeline. [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Non-blocking updates of expired keys in cache dictionaries (with permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Compile ClickHouse without `-fno-omit-frame-pointer` globally to spare one more register. [\#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([Amos Bird](https://github.com/amosbird)) +- Sped up the `greatCircleDistance` function and added performance tests for it. [\#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia)) +- Improved performance of the function `roundDown`. [\#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Improved performance of `max`, `min`, `argMin`, `argMax` for the `DateTime64` data type. [\#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([Vasily Nemkov](https://github.com/Enmk)) +- Improved performance of sorting without a limit or with a big limit and of external sorting. [\#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Improved performance of formatting floating point numbers by up to 6 times. [\#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Improved performance of the `modulo` function. [\#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([Amos Bird](https://github.com/amosbird)) +- Optimized `ORDER BY` and merging with a single-column key. [\#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Better implementation for `arrayReduce`, `-Array` and `-State` combinators. [\#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([Amos Bird](https://github.com/amosbird)) +- Now `PREWHERE` should be optimized to be at least as efficient as `WHERE`. [\#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([Amos Bird](https://github.com/amosbird)) +- Improved the way `round` and `roundBankers` handle negative numbers. [\#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz)) +- Improved decoding performance of the `DoubleDelta` and `Gorilla` codecs by roughly 30-40%. This fixes [\#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [\#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([Vasily Nemkov](https://github.com/Enmk)) +- Improved performance of `base64`-related functions. [\#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added a function `geoDistance`. It is similar to `greatCircleDistance` but uses an approximation to the WGS-84 ellipsoid model. The performance of both functions is nearly the same. [\#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Faster `min` and `max` aggregation functions for the `Decimal` data type. [\#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2)) +- Vectorized processing of `arrayReduce`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird)) +- `if` chains are now optimized as `multiIf` (see the sketch after this list). [\#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([kamalov-ruslan](https://github.com/kamalov-ruslan)) +- Fixed the performance regression of the `Kafka` table engine introduced in 19.15. This fixes [\#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [\#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov)) +- Removed “pie” code generation that `gcc` from Debian packages occasionally brings by default. [\#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Parallel parsing of data formats. [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Enabled the optimized parser of `Values` with expressions by default (`input_format_values_deduce_templates_of_expressions=1`). [\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix))
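To make the `if`-to-`multiIf` rewrite from [\#8355] concrete, here is a sketch with hypothetical column and table names; the two queries are equivalent, and the first is now executed via the second form internally:

```sql
-- A nested if() chain ...
SELECT if(x < 10, 'small', if(x < 100, 'medium', 'large')) FROM t;
-- ... is optimized into a single multiIf() call:
SELECT multiIf(x < 10, 'small', x < 100, 'medium', 'large') FROM t;
```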
+ +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2} + +- Build fixes for `ARM` and in minimal mode. [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller)) +- Added a coverage file flush for `clickhouse-server` when std::atexit is not called. Also slightly improved logging in stateless tests with coverage. [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin)) +- Updated the LLVM library in contrib. Avoid using LLVM from OS packages. [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Made the bundled `curl` build fully quiet. [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser)) +- Fixed some `MemorySanitizer` warnings. [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Use the `add_warning` and `no_warning` macros in `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([Ivan](https://github.com/abyss7)) +- Added support for the Minio S3-compatible object store (https://min.io/) for better integration tests. [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([Pavel Kovalenko](https://github.com/Jokser)) +- Imported `libc` headers to contrib. It allows making builds more consistent across various systems (only for `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Removed `-fPIC` from some libraries. [\#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Cleaned up `CMakeLists.txt` for curl. See https://github.com/ClickHouse/ClickHouse/pull/8011\#issuecomment-569478910 [\#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Silenced warnings in the `CapNProto` library. [\#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added performance tests for short-string-optimized hash tables. [\#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([Amos Bird](https://github.com/amosbird)) +- Now ClickHouse will build on `AArch64` even if `MADV_FREE` is not available. This fixes [\#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [\#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([Amos Bird](https://github.com/amosbird)) +- Updated `zlib-ng` to fix MemorySanitizer problems. [\#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [\#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Enabled the internal MySQL library on non-Linux systems, because the usage of OS packages is very fragile and usually doesn't work at all. This fixes [\#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [\#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the build on some systems after enabling `libc++`. This supersedes [\#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [\#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Made `Field` methods more type-safe to find more errors. [\#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [\#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Added missing files to the `libc-headers` submodule. [\#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed wrong `JSON` quoting in performance test output. [\#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Now stack traces are displayed for `std::exception` and `Poco::Exception`. In previous versions they were available only for `DB::Exception`.
This improves diagnostics. [\#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Ported `clock_gettime` and `clock_nanosleep` for fresh glibc versions. [\#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([Amos Bird](https://github.com/amosbird)) +- Enabled `part_log` in the example config for developers. [\#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the async nature of reload in `01036_no_superfluous_dict_reload_on_create_database*`. [\#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat)) +- Fixed codec performance tests. [\#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([Vasily Nemkov](https://github.com/Enmk)) +- Added install scripts for the `.tgz` build and documentation for them. [\#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [\#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([alesapin](https://github.com/alesapin)) +- Removed the old `ZSTD` test (it was created in 2016 to reproduce a bug that pre-1.0 versions of zstd had). This fixes [\#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [\#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the build on Mac OS Catalina. [\#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob)) +- Increased the number of rows in codec performance tests to make results noticeable. [\#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([Vasily Nemkov](https://github.com/Enmk)) +- In debug builds, treat `LOGICAL_ERROR` exceptions as assertion failures, so that they are easier to notice. [\#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Made the formats-related performance test more deterministic. [\#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Updated `lz4` to fix a MemorySanitizer failure. [\#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Suppressed a known MemorySanitizer false positive in exception handling. [\#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Updated `gcc` and `g++` to version 9 in `build/docker/build.sh` [\#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([TLightSky](https://github.com/tlightsky)) +- Added a performance test case to test that `PREWHERE` is worse than `WHERE`. [\#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([Amos Bird](https://github.com/amosbird)) +- Progress towards fixing one flaky test. [\#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Avoided MemorySanitizer reports for data coming from `libunwind`. [\#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Updated `libc++` to the latest version. [\#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Build the ICU library from sources. This fixes [\#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [\#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Switched from `libressl` to `openssl`. ClickHouse should support TLS 1.3 and SNI after this change. This fixes [\#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [\#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a UBSan report when using `chacha20_poly1305` from SSL (happens on connect to https://yandex.ru/). [\#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the mode of the default password file for the `.deb` Linux distribution. [\#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller)) +- Improved the expression for getting the `clickhouse-server` PID in `clickhouse-test`.
[\#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([Alexander Kazakov](https://github.com/Akazz)) +- Updated contrib/googletest to v1.10.0. [\#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak)) +- Fixed a ThreadSanitizer report in the `base64` library. Also updated this library to the latest version, but it doesn't matter. This fixes [\#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [\#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed `00600_replace_running_query` for processors. [\#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Removed support for `tcmalloc` to make `CMakeLists.txt` simpler. [\#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Release gcc builds now use `libc++` instead of `libstdc++`. Recently `libc++` was used only with clang. This will improve the consistency and portability of build configurations. [\#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Enabled the ICU library for builds with MemorySanitizer. [\#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Suppressed warnings from the `CapNProto` library. [\#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Removed special cases of code for `tcmalloc`, because it's no longer supported. [\#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- In the CI coverage task, kill the server gracefully to allow it to save the coverage report. This fixes the incomplete coverage reports we've been seeing lately. [\#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([alesapin](https://github.com/alesapin)) +- Performance tests for all codecs against `Float64` and `UInt64` values. [\#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([Vasily Nemkov](https://github.com/Enmk)) +- `termcap` is very much deprecated and leads to various problems (e.g. a missing “up” cap and echoing `^J` instead of multi-line). Favor `terminfo` or the bundled `ncurses`.
[\#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([Amos Bird](https://github.com/amosbird)) +- Fixed the `test_storage_s3` integration test. [\#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Support `StorageFile(<format>, null)` to insert a block into the given format file without actually writing to disk. This is required for performance tests. [\#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([Amos Bird](https://github.com/amosbird)) +- Added the argument `--print-time` to functional tests, which prints the execution time per test. [\#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Added asserts to `KeyCondition` while evaluating RPN. This will fix a warning from gcc-9. [\#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Dump cmake options in CI builds. [\#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Don't generate debug info for some fat libraries. [\#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Made `log_to_console.xml` always log to stderr, regardless of whether it is interactive. [\#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Removed some unused features from the `clickhouse-performance-test` tool. [\#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Now we will also search for `lld-X` with the corresponding `clang-X` version. [\#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([alesapin](https://github.com/alesapin)) +- Parquet build improvement. [\#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([maxulan](https://github.com/maxulan)) +- More gcc warnings. [\#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +- The package for Arch Linux now allows running the ClickHouse server, and not only the client. [\#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fixed tests with processors. Tiny performance fixes. [\#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Updated contrib/protobuf. [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey)) +- In preparation for switching to c++20, as a New Year celebration. “May the C++ force be with ClickHouse.” [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird)) + +#### Experimental Feature {#experimental-feature-1} + +- Added the experimental setting `min_bytes_to_use_mmap_io`. It allows reading big files without copying data from the kernel to userspace. The setting is disabled by default. The recommended threshold is about 64 MB, because mmap/munmap is slow. [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Reworked quotas as a part of the access control system. Added a new table `system.quotas`, new functions `currentQuota`, `currentQuotaKey`, and new SQL syntax `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA` (see the sketch after this list). [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar)) +- Allow skipping unknown settings with a warning instead of throwing an exception. [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar)) +- Reworked row policies as a part of the access control system. Added a new table `system.row_policies`, a new function `currentRowPolicies()`, and new SQL syntax `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar))
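A rough sketch of the experimental features listed above (quotas, row policies, and `min_bytes_to_use_mmap_io`); these statements were experimental at the time, so the exact grammar may differ between releases, and all object and column names here are hypothetical:

```sql
-- Quota: limit a user to 100 queries per hour (sketch of the new CREATE QUOTA syntax).
CREATE QUOTA q1 FOR INTERVAL 1 HOUR MAX QUERIES 100 TO default;
SHOW QUOTA;

-- Row policy: restrict SELECTs on a table to matching rows only.
CREATE POLICY p1 ON mydb.mytable FOR SELECT USING user_id = 42 TO default;
SHOW POLICIES;

-- Experimental mmap IO: read files of ~64 MB and larger via mmap (disabled by default).
SET min_bytes_to_use_mmap_io = 67108864;
```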
+ +#### Security Fix {#security-fix} + +- Fixed the possibility of reading the directory structure in tables with the `File` table engine. This fixes [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019} diff --git a/docs/zh/whats_new/index.md b/docs/zh/whats_new/index.md new file mode 100644 index 00000000000..75a13a72bac --- /dev/null +++ b/docs/zh/whats_new/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u65B0\u589E\u5185\u5BB9" +toc_priority: 72 +--- + + diff --git a/docs/zh/whats_new/roadmap.md b/docs/zh/whats_new/roadmap.md new file mode 100644 index 00000000000..47e09bc3c78 --- /dev/null +++ b/docs/zh/whats_new/roadmap.md @@ -0,0 +1,10 @@ + +# Roadmap {#gui-hua} + +## Q1 2020 {#q1-2020} + +- More precise user resource pools, allowing fair distribution of cluster resources among users +- Fine-grained authorization management +- Integration with external authentication services + +[Original article](https://clickhouse.tech/docs/en/roadmap/) diff --git a/docs/zh/whats_new/security_changelog.md b/docs/zh/whats_new/security_changelog.md new file mode 100644 index 00000000000..6315398371f --- /dev/null +++ b/docs/zh/whats_new/security_changelog.md @@ -0,0 +1,42 @@ + +## Fixed in ClickHouse Release 18.12.13, 2018-09-10 {#xiu-fu-yu-clickhouse-release-18-12-13-2018-09-10} + +### CVE-2018-14672 {#cve-2018-14672} + +Functions for loading CatBoost models allowed path traversal and reading arbitrary files through error messages. + +Credits: Andrey Krasichkov of the Yandex Information Security Team + +## Fixed in ClickHouse Release 18.10.3, 2018-08-13 {#xiu-fu-yu-clickhouse-release-18-10-3-2018-08-13} + +### CVE-2018-14671 {#cve-2018-14671} + +unixODBC allowed loading arbitrary shared objects from the file system, which led to a «Remote Code Execution» vulnerability. + +Credits: Andrey Krasichkov and Evgeny Sidorov of the Yandex Information Security Team + +## Fixed in ClickHouse Release 1.1.54388, 2018-06-28 {#xiu-fu-yu-clickhouse-release-1-1-54388-2018-06-28} + +### CVE-2018-14668 {#cve-2018-14668} + +The remote table function allowed arbitrary symbols in the «user», «password» and «default\_database» fields, which led to Cross Protocol Request Forgery attacks. + +Credits: Andrey Krasichkov of the Yandex Information Security Team + +## Fixed in ClickHouse Release 1.1.54390, 2018-07-06 {#xiu-fu-yu-clickhouse-release-1-1-54390-2018-07-06} + +### CVE-2018-14669 {#cve-2018-14669} + +The ClickHouse MySQL client had «LOAD DATA LOCAL INFILE» functionality enabled, which allowed a malicious MySQL database to read arbitrary files from the connected ClickHouse server. + +Credits: Andrey Krasichkov and Evgeny Sidorov of the Yandex Information Security Team + +## Fixed in ClickHouse Release 1.1.54131, 2017-01-10 {#xiu-fu-yu-clickhouse-release-1-1-54131-2017-01-10} + +### CVE-2018-14670 {#cve-2018-14670} + +Incorrect configuration in the deb package could lead to unauthorized use of the database. + +Credits: the UK's National Cyber Security Centre (NCSC) + +[Original article](https://clickhouse.tech/docs/en/security_changelog/) diff --git a/dbms/programs/CMakeLists.txt b/programs/CMakeLists.txt similarity index 100% rename from dbms/programs/CMakeLists.txt rename to programs/CMakeLists.txt diff --git a/dbms/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp similarity index 100% rename from dbms/programs/benchmark/Benchmark.cpp rename to programs/benchmark/Benchmark.cpp diff --git a/dbms/programs/benchmark/CMakeLists.txt b/programs/benchmark/CMakeLists.txt similarity index 100% rename from dbms/programs/benchmark/CMakeLists.txt rename to programs/benchmark/CMakeLists.txt diff --git a/dbms/programs/benchmark/clickhouse-benchmark.cpp b/programs/benchmark/clickhouse-benchmark.cpp similarity index 100% rename from dbms/programs/benchmark/clickhouse-benchmark.cpp rename to programs/benchmark/clickhouse-benchmark.cpp diff --git a/dbms/programs/clickhouse-split-helper b/programs/clickhouse-split-helper similarity index 100% rename from dbms/programs/clickhouse-split-helper rename to programs/clickhouse-split-helper diff --git a/dbms/programs/client/CMakeLists.txt b/programs/client/CMakeLists.txt similarity index 100% rename from
dbms/programs/client/CMakeLists.txt rename to programs/client/CMakeLists.txt diff --git a/dbms/programs/client/Client.cpp b/programs/client/Client.cpp similarity index 98% rename from dbms/programs/client/Client.cpp rename to programs/client/Client.cpp index 9cd1332b513..b1dda4b4da9 100644 --- a/dbms/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -82,16 +82,8 @@ #endif /// http://en.wikipedia.org/wiki/ANSI_escape_code - -/// Similar codes \e[s, \e[u don't work in VT100 and Mosh. -#define SAVE_CURSOR_POSITION "\033""7" -#define RESTORE_CURSOR_POSITION "\033""8" - #define CLEAR_TO_END_OF_LINE "\033[K" -/// These codes are possibly not supported everywhere. -#define DISABLE_LINE_WRAPPING "\033[?7l" -#define ENABLE_LINE_WRAPPING "\033[?7h" namespace DB { @@ -133,8 +125,6 @@ private: bool stdin_is_a_tty = false; /// stdin is a terminal. bool stdout_is_a_tty = false; /// stdout is a terminal. - uint16_t terminal_width = 0; /// Terminal width is needed to render progress bar. - std::unique_ptr<Connection> connection; /// Connection to DB. String query_id; /// Current query_id. String query; /// Current query. @@ -694,7 +684,7 @@ if (ignore_error) { Tokens tokens(begin, end); - IParser::Pos token_iterator(tokens); + IParser::Pos token_iterator(tokens, context.getSettingsRef().max_parser_depth); while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid()) ++token_iterator; begin = token_iterator->end; @@ -968,10 +958,15 @@ ParserQuery parser(end, true); ASTPtr res; + const auto & settings = context.getSettingsRef(); + size_t max_length = 0; + if (!allow_multi_statements) + max_length = settings.max_query_size; + if (is_interactive || ignore_error) { String message; - res = tryParseQuery(parser, pos, end, message, true, "", allow_multi_statements, 0); + res = tryParseQuery(parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth); if (!res) { @@ -980,7 +975,7 @@ } } else - res = parseQueryAndMovePosition(parser, pos, end, "", allow_multi_statements, 0); + res = parseQueryAndMovePosition(parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth); if (is_interactive) { @@ -1122,11 +1117,16 @@ /// to avoid losing sync. if (!cancelled) { - auto cancel_query = [&] { + auto cancel_query = [&] + { connection->sendCancel(); cancelled = true; if (is_interactive) + { + if (written_progress_chars) + clearProgress(); std::cout << "Cancelling query." << std::endl; + } /// Pressing Ctrl+C twice results in shut down. interrupt_listener.unblock(); @@ -1436,7 +1436,7 @@ { written_progress_chars = 0; if (!send_logs) - std::cerr << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE; + std::cerr << "\r" CLEAR_TO_END_OF_LINE; } @@ -1461,20 +1461,14 @@ "\033[1m↗\033[0m", }; - if (!send_logs) - { - if (written_progress_chars) - message << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE; - else - message << SAVE_CURSOR_POSITION; - } + auto indicator = indicators[increment % 8]; - message << DISABLE_LINE_WRAPPING; + if (!send_logs && written_progress_chars) + message << '\r'; size_t prefix_size = message.count(); - message << indicators[increment % 8] - << " Progress: "; + message << indicator << " Progress: "; message << formatReadableQuantity(progress.read_rows) << " rows, " @@ -1488,7 +1482,7 @@ else message << ". "; - written_progress_chars = message.count() - prefix_size - (increment % 8 == 7 ? 10 : 13); /// Don't count invisible output (escape sequences).
+ written_progress_chars = message.count() - prefix_size - (strlen(indicator) - 2); /// Don't count invisible output (escape sequences). /// If the approximate number of rows to process is known, we can display a progress bar and percentage. if (progress.total_rows_to_read > 0) @@ -1506,7 +1500,7 @@ if (show_progress_bar) { - ssize_t width_of_progress_bar = static_cast<ssize_t>(terminal_width) - written_progress_chars - strlen(" 99%"); + ssize_t width_of_progress_bar = static_cast<ssize_t>(getTerminalWidth()) - written_progress_chars - strlen(" 99%"); if (width_of_progress_bar > 0) { std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.read_rows, 0, total_rows_corrected, width_of_progress_bar)); @@ -1521,7 +1515,8 @@ message << ' ' << (99 * progress.read_rows / total_rows_corrected) << '%'; } - message << ENABLE_LINE_WRAPPING; + message << CLEAR_TO_END_OF_LINE; + if (send_logs) message << '\n'; @@ -1589,7 +1584,11 @@ resetOutput(); if (is_interactive && !written_first_block) + { + if (written_progress_chars) + clearProgress(); std::cout << "Ok." << std::endl; + } } static void showClientVersion() @@ -1687,6 +1686,7 @@ stdin_is_a_tty = isatty(STDIN_FILENO); stdout_is_a_tty = isatty(STDOUT_FILENO); + uint64_t terminal_width = 0; if (stdin_is_a_tty) terminal_width = getTerminalWidth(); @@ -1715,7 +1715,6 @@ ("database,d", po::value<std::string>(), "database") ("pager", po::value<std::string>(), "pager") ("disable_suggestion,A", "Disable loading suggestion data. Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who get used to mysql client.") - ("always_load_suggestion_data", "Load suggestion data even if clickhouse-client is run in non-interactive mode. Used for testing.") ("suggestion_limit", po::value<int>()->default_value(10000), "Suggestion limit for how many databases, tables and columns to fetch.") ("multiline,m", "multiline") diff --git a/dbms/programs/client/ConnectionParameters.cpp b/programs/client/ConnectionParameters.cpp similarity index 100% rename from dbms/programs/client/ConnectionParameters.cpp rename to programs/client/ConnectionParameters.cpp diff --git a/dbms/programs/client/ConnectionParameters.h b/programs/client/ConnectionParameters.h similarity index 100% rename from dbms/programs/client/ConnectionParameters.h rename to programs/client/ConnectionParameters.h diff --git a/dbms/programs/client/Suggest.cpp b/programs/client/Suggest.cpp similarity index 84% rename from dbms/programs/client/Suggest.cpp rename to programs/client/Suggest.cpp index f7141449f54..8fffbec4fab 100644 --- a/dbms/programs/client/Suggest.cpp +++ b/programs/client/Suggest.cpp @@ -67,16 +67,19 @@ void Suggest::load(const ConnectionParameters & connection_parameters, size_t su Suggest::Suggest() { /// Keywords may not be up to date with the ClickHouse parser.
- words = {"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT", - "MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP", - "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT", - "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO", - "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE", - "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", "VALUES", - "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", - "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY", - "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC", - "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE"}; + words = {"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT", + "MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP", + "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT", + "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO", + "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE", + "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", "VALUES", + "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", + "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY", + "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC", + "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE", "USER", "ROLE", + "PROFILE", "QUOTA", "POLICY", "ROW", "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT", "REPLACE", + "IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "FOR", "RANDOMIZED", + "INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP"}; } void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit) diff --git a/dbms/programs/client/Suggest.h b/programs/client/Suggest.h similarity index 100% rename from dbms/programs/client/Suggest.h rename to programs/client/Suggest.h diff --git a/dbms/programs/client/TestHint.h b/programs/client/TestHint.h similarity index 100% rename from dbms/programs/client/TestHint.h rename to programs/client/TestHint.h diff --git a/dbms/programs/client/clickhouse-client.cpp b/programs/client/clickhouse-client.cpp similarity index 100% rename from dbms/programs/client/clickhouse-client.cpp rename to programs/client/clickhouse-client.cpp diff --git a/dbms/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml similarity index 100% rename from dbms/programs/client/clickhouse-client.xml rename to programs/client/clickhouse-client.xml diff --git a/dbms/programs/client/config_client.h.in b/programs/client/config_client.h.in similarity index 100% rename from dbms/programs/client/config_client.h.in rename to programs/client/config_client.h.in diff --git a/dbms/programs/client/readpassphrase/CMakeLists.txt b/programs/client/readpassphrase/CMakeLists.txt similarity index 100% rename from dbms/programs/client/readpassphrase/CMakeLists.txt rename to programs/client/readpassphrase/CMakeLists.txt diff --git 
a/dbms/programs/client/readpassphrase/includes.h.in b/programs/client/readpassphrase/includes.h.in similarity index 100% rename from dbms/programs/client/readpassphrase/includes.h.in rename to programs/client/readpassphrase/includes.h.in diff --git a/programs/client/readpassphrase/readpassphrase.c b/programs/client/readpassphrase/readpassphrase.c new file mode 100644 index 00000000000..243701239bf --- /dev/null +++ b/programs/client/readpassphrase/readpassphrase.c @@ -0,0 +1,211 @@ +/* $OpenBSD: readpassphrase.c,v 1.26 2016/10/18 12:47:18 millert Exp $ */ + +/* + * Copyright (c) 2000-2002, 2007, 2010 + * Todd C. Miller + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Sponsored in part by the Defense Advanced Research Projects + * Agency (DARPA) and Air Force Research Laboratory, Air Force + * Materiel Command, USAF, under agreement number F39502-99-1-0512. + */ + +/* OPENBSD ORIGINAL: lib/libc/gen/readpassphrase.c */ + +#include "includes.h" + +#ifndef HAVE_READPASSPHRASE + +#include <termios.h> +#include <signal.h> +#include <ctype.h> +#include <fcntl.h> +#include <readpassphrase.h> +#include <errno.h> +#include <string.h> +#include <unistd.h> + +#ifndef TCSASOFT +/* If we don't have TCSASOFT define it so that ORing it below is a no-op. */ +# define TCSASOFT 0 +#endif + +/* SunOS 4.x which lacks _POSIX_VDISABLE, but has VDISABLE */ +#if !defined(_POSIX_VDISABLE) && defined(VDISABLE) +# define _POSIX_VDISABLE VDISABLE +#endif + +static volatile sig_atomic_t signo[NSIG]; + +static void handler(int); + +char * +readpassphrase(const char *prompt, char *buf, size_t bufsiz, int flags) +{ + ssize_t nr; + int input, output, save_errno, i, need_restart; + char ch, *p, *end; + struct termios term, oterm; + struct sigaction sa, savealrm, saveint, savehup, savequit, saveterm; + struct sigaction savetstp, savettin, savettou, savepipe; + + /* I suppose we could alloc on demand in this case (XXX). */ + if (bufsiz == 0) { + errno = EINVAL; + return(NULL); + } + +restart: + for (i = 0; i < NSIG; i++) + signo[i] = 0; + nr = -1; + save_errno = 0; + need_restart = 0; + /* + * Read and write to /dev/tty if available. If not, read from + * stdin and write to stderr unless a tty is required. + */ + if ((flags & RPP_STDIN) || + (input = output = open(_PATH_TTY, O_RDWR)) == -1) { + if (flags & RPP_REQUIRE_TTY) { + errno = ENOTTY; + return(NULL); + } + input = STDIN_FILENO; + output = STDERR_FILENO; + } + + /* + * Turn off echo if possible. + * If we are using a tty but are not the foreground pgrp this will + * generate SIGTTOU, so do it *before* installing the signal handlers.
+ */ + if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) { + memcpy(&term, &oterm, sizeof(term)); + if (!(flags & RPP_ECHO_ON)) + term.c_lflag &= ~(ECHO | ECHONL); +#ifdef VSTATUS + if (term.c_cc[VSTATUS] != _POSIX_VDISABLE) + term.c_cc[VSTATUS] = _POSIX_VDISABLE; +#endif + (void)tcsetattr(input, TCSAFLUSH|TCSASOFT, &term); + } else { + memset(&term, 0, sizeof(term)); + term.c_lflag |= ECHO; + memset(&oterm, 0, sizeof(oterm)); + oterm.c_lflag |= ECHO; + } + + /* + * Catch signals that would otherwise cause the user to end + * up with echo turned off in the shell. Don't worry about + * things like SIGXCPU and SIGVTALRM for now. + */ + sigemptyset(&sa.sa_mask); + sa.sa_flags = 0; /* don't restart system calls */ + sa.sa_handler = handler; + (void)sigaction(SIGALRM, &sa, &savealrm); + (void)sigaction(SIGHUP, &sa, &savehup); + (void)sigaction(SIGINT, &sa, &saveint); + (void)sigaction(SIGPIPE, &sa, &savepipe); + (void)sigaction(SIGQUIT, &sa, &savequit); + (void)sigaction(SIGTERM, &sa, &saveterm); + (void)sigaction(SIGTSTP, &sa, &savetstp); + (void)sigaction(SIGTTIN, &sa, &savettin); + (void)sigaction(SIGTTOU, &sa, &savettou); + + if (!(flags & RPP_STDIN)) + (void)write(output, prompt, strlen(prompt)); + end = buf + bufsiz - 1; + p = buf; + while ((nr = read(input, &ch, 1)) == 1 && ch != '\n' && ch != '\r') { + if (p < end) { + if ((flags & RPP_SEVENBIT)) + ch &= 0x7f; + if (isalpha((unsigned char)ch)) { + if ((flags & RPP_FORCELOWER)) + ch = (char)tolower((unsigned char)ch); + if ((flags & RPP_FORCEUPPER)) + ch = (char)toupper((unsigned char)ch); + } + *p++ = ch; + } + } + *p = '\0'; + save_errno = errno; + if (!(term.c_lflag & ECHO)) + (void)write(output, "\n", 1); + + /* Restore old terminal settings and signals. */ + if (memcmp(&term, &oterm, sizeof(term)) != 0) { + const int sigttou = signo[SIGTTOU]; + + /* Ignore SIGTTOU generated when we are not the fg pgrp. */ + while (tcsetattr(input, TCSAFLUSH|TCSASOFT, &oterm) == -1 && + errno == EINTR && !signo[SIGTTOU]) + continue; + signo[SIGTTOU] = sigttou; + } + (void)sigaction(SIGALRM, &savealrm, NULL); + (void)sigaction(SIGHUP, &savehup, NULL); + (void)sigaction(SIGINT, &saveint, NULL); + (void)sigaction(SIGQUIT, &savequit, NULL); + (void)sigaction(SIGPIPE, &savepipe, NULL); + (void)sigaction(SIGTERM, &saveterm, NULL); + (void)sigaction(SIGTSTP, &savetstp, NULL); + (void)sigaction(SIGTTIN, &savettin, NULL); + (void)sigaction(SIGTTOU, &savettou, NULL); + if (input != STDIN_FILENO) + (void)close(input); + + /* + * If we were interrupted by a signal, resend it to ourselves + * now that we have restored the signal handlers. + */ + for (i = 0; i < NSIG; i++) { + if (signo[i]) { + kill(getpid(), i); + switch (i) { + case SIGTSTP: + case SIGTTIN: + case SIGTTOU: + need_restart = 1; + } + } + } + if (need_restart) + goto restart; + + if (save_errno) + errno = save_errno; + return(nr == -1 ? 
NULL : buf); +} +//DEF_WEAK(readpassphrase); + +#if 0 +char * +getpass(const char *prompt) +{ + static char buf[_PASSWORD_LEN + 1]; + + return(readpassphrase(prompt, buf, sizeof(buf), RPP_ECHO_OFF)); +} +#endif + +static void handler(int s) +{ + + signo[s] = 1; +} +#endif /* HAVE_READPASSPHRASE */ diff --git a/dbms/programs/client/readpassphrase/readpassphrase.h b/programs/client/readpassphrase/readpassphrase.h similarity index 96% rename from dbms/programs/client/readpassphrase/readpassphrase.h rename to programs/client/readpassphrase/readpassphrase.h index 272c822423a..0782a1773ea 100644 --- a/dbms/programs/client/readpassphrase/readpassphrase.h +++ b/programs/client/readpassphrase/readpassphrase.h @@ -1,4 +1,4 @@ -// /* $OpenBSD: readpassphrase.h,v 1.5 2003/06/17 21:56:23 millert Exp $ */ +// /* $OpenBSD: readpassphrase.h,v 1.5 2003/06/17 21:56:23 millert Exp $ */ /* * Copyright (c) 2000, 2002 Todd C. Miller diff --git a/dbms/programs/compressor/CMakeLists.txt b/programs/compressor/CMakeLists.txt similarity index 100% rename from dbms/programs/compressor/CMakeLists.txt rename to programs/compressor/CMakeLists.txt diff --git a/dbms/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp similarity index 98% rename from dbms/programs/compressor/Compressor.cpp rename to programs/compressor/Compressor.cpp index 98a3055da28..fecdad9bcea 100644 --- a/dbms/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -14,6 +14,7 @@ #include #include #include +#include <Core/Defines.h> namespace DB @@ -123,7 +124,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) DB::ParserCodec codec_parser; std::string codecs_line = boost::algorithm::join(codecs, ","); - auto ast = DB::parseQuery(codec_parser, "(" + codecs_line + ")", 0); + auto ast = DB::parseQuery(codec_parser, "(" + codecs_line + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); codec = DB::CompressionCodecFactory::instance().get(ast, nullptr); } else diff --git a/dbms/programs/compressor/README.md b/programs/compressor/README.md similarity index 100% rename from dbms/programs/compressor/README.md rename to programs/compressor/README.md diff --git a/dbms/programs/compressor/clickhouse-compressor.cpp b/programs/compressor/clickhouse-compressor.cpp similarity index 100% rename from dbms/programs/compressor/clickhouse-compressor.cpp rename to programs/compressor/clickhouse-compressor.cpp diff --git a/dbms/programs/config_tools.h.in b/programs/config_tools.h.in similarity index 100% rename from dbms/programs/config_tools.h.in rename to programs/config_tools.h.in diff --git a/dbms/programs/copier/Aliases.h b/programs/copier/Aliases.h similarity index 100% rename from dbms/programs/copier/Aliases.h rename to programs/copier/Aliases.h diff --git a/dbms/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt similarity index 100% rename from dbms/programs/copier/CMakeLists.txt rename to programs/copier/CMakeLists.txt diff --git a/dbms/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp similarity index 98% rename from dbms/programs/copier/ClusterCopier.cpp rename to programs/copier/ClusterCopier.cpp index c4714ff201f..cd5b1e2a2cd 100644 --- a/dbms/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -1197,7 +1197,9 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( query += " LIMIT " + limit; ParserQuery p_query(query.data() + query.size()); - return parseQuery(p_query, query, 0); + + const auto & settings = context.getSettingsRef(); + return parseQuery(p_query, query,
settings.max_query_size, settings.max_parser_depth); }; /// Load balancing @@ -1409,7 +1411,8 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( query += "INSERT INTO " + getQuotedTable(split_table_for_current_piece) + " VALUES "; ParserQuery p_query(query.data() + query.size()); - query_insert_ast = parseQuery(p_query, query, 0); + const auto & settings = context.getSettingsRef(); + query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); LOG_DEBUG(log, "Executing INSERT query: " << query); } @@ -1634,7 +1637,8 @@ ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & time &task_cluster->settings_pull); ParserCreateQuery parser_create_query; - return parseQuery(parser_create_query, create_query_pull_str, 0); + const auto & settings = context.getSettingsRef(); + return parseQuery(parser_create_query, create_query_pull_str, settings.max_query_size, settings.max_parser_depth); } /// If it is implicitly asked to create split Distributed table for certain piece on current shard, we will do it. @@ -1712,7 +1716,8 @@ std::set ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti } ParserQuery parser_query(query.data() + query.size()); - ASTPtr query_ast = parseQuery(parser_query, query, 0); + const auto & settings = context.getSettingsRef(); + ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); LOG_DEBUG(log, "Computing destination partition set, executing query: " << query); @@ -1759,7 +1764,8 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts, << partition_quoted_name << " existence, executing query: " << query); ParserQuery parser_query(query.data() + query.size()); - ASTPtr query_ast = parseQuery(parser_query, query, 0); +const auto & settings = context.getSettingsRef(); + ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); Context local_context = context; local_context.setSettings(task_cluster->settings_pull); @@ -1793,7 +1799,8 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi << "existence, executing query: " << query); ParserQuery parser_query(query.data() + query.size()); - ASTPtr query_ast = parseQuery(parser_query, query, 0); + const auto & settings = context.getSettingsRef(); + ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); Context local_context = context; local_context.setSettings(task_cluster->settings_pull); @@ -1826,7 +1833,8 @@ UInt64 ClusterCopier::executeQueryOnCluster( if (query_ast_ == nullptr) { ParserQuery p_query(query.data() + query.size()); - query_ast = parseQuery(p_query, query, 0); + const auto & settings = context.getSettingsRef(); + query_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); } else query_ast = query_ast_; diff --git a/dbms/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h similarity index 100% rename from dbms/programs/copier/ClusterCopier.h rename to programs/copier/ClusterCopier.h diff --git a/dbms/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp similarity index 100% rename from dbms/programs/copier/ClusterCopierApp.cpp rename to programs/copier/ClusterCopierApp.cpp diff --git a/dbms/programs/copier/ClusterCopierApp.h b/programs/copier/ClusterCopierApp.h similarity index 100% rename from dbms/programs/copier/ClusterCopierApp.h rename to 
programs/copier/ClusterCopierApp.h diff --git a/dbms/programs/copier/ClusterPartition.h b/programs/copier/ClusterPartition.h similarity index 100% rename from dbms/programs/copier/ClusterPartition.h rename to programs/copier/ClusterPartition.h diff --git a/dbms/programs/copier/Internals.cpp b/programs/copier/Internals.cpp similarity index 100% rename from dbms/programs/copier/Internals.cpp rename to programs/copier/Internals.cpp diff --git a/dbms/programs/copier/Internals.h b/programs/copier/Internals.h similarity index 100% rename from dbms/programs/copier/Internals.h rename to programs/copier/Internals.h diff --git a/dbms/programs/copier/ShardPartition.h b/programs/copier/ShardPartition.h similarity index 100% rename from dbms/programs/copier/ShardPartition.h rename to programs/copier/ShardPartition.h diff --git a/dbms/programs/copier/ShardPartitionPiece.h b/programs/copier/ShardPartitionPiece.h similarity index 100% rename from dbms/programs/copier/ShardPartitionPiece.h rename to programs/copier/ShardPartitionPiece.h diff --git a/dbms/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h similarity index 100% rename from dbms/programs/copier/TaskCluster.h rename to programs/copier/TaskCluster.h diff --git a/dbms/programs/copier/TaskTableAndShard.h b/programs/copier/TaskTableAndShard.h similarity index 98% rename from dbms/programs/copier/TaskTableAndShard.h rename to programs/copier/TaskTableAndShard.h index 615ad297b79..32841e93a14 100644 --- a/dbms/programs/copier/TaskTableAndShard.h +++ b/programs/copier/TaskTableAndShard.h @@ -4,6 +4,9 @@ #include "Internals.h" #include "ClusterPartition.h" +#include <Core/Defines.h> + + namespace DB { namespace ErrorCodes @@ -260,9 +263,10 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf + "."
+ escapeForFileName(table_push.second); engine_push_str = config.getString(table_prefix + "engine"); + { ParserStorage parser_storage; - engine_push_ast = parseQuery(parser_storage, engine_push_str, 0); + engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); engine_push_partition_key_ast = extractPartitionKey(engine_push_ast); primary_key_comma_separated = createCommaSeparatedStringFrom(extractPrimaryKeyColumnNames(engine_push_ast)); engine_push_zk_path = extractReplicatedTableZookeeperPath(engine_push_ast); @@ -273,7 +277,7 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf auxiliary_engine_split_asts.reserve(number_of_splits); { ParserExpressionWithOptionalAlias parser_expression(false); - sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0); + sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); main_engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second, sharding_key_ast); @@ -291,7 +295,7 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf if (!where_condition_str.empty()) { ParserExpressionWithOptionalAlias parser_expression(false); - where_condition_ast = parseQuery(parser_expression, where_condition_str, 0); + where_condition_ast = parseQuery(parser_expression, where_condition_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); // Will use canonical expression form where_condition_str = queryToString(where_condition_ast); diff --git a/dbms/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h similarity index 100% rename from dbms/programs/copier/ZooKeeperStaff.h rename to programs/copier/ZooKeeperStaff.h diff --git a/dbms/programs/copier/clickhouse-copier.cpp b/programs/copier/clickhouse-copier.cpp similarity index 100% rename from dbms/programs/copier/clickhouse-copier.cpp rename to programs/copier/clickhouse-copier.cpp diff --git a/dbms/programs/extract-from-config/CMakeLists.txt b/programs/extract-from-config/CMakeLists.txt similarity index 100% rename from dbms/programs/extract-from-config/CMakeLists.txt rename to programs/extract-from-config/CMakeLists.txt diff --git a/dbms/programs/extract-from-config/ExtractFromConfig.cpp b/programs/extract-from-config/ExtractFromConfig.cpp similarity index 100% rename from dbms/programs/extract-from-config/ExtractFromConfig.cpp rename to programs/extract-from-config/ExtractFromConfig.cpp diff --git a/dbms/programs/extract-from-config/clickhouse-extract-from-config.cpp b/programs/extract-from-config/clickhouse-extract-from-config.cpp similarity index 100% rename from dbms/programs/extract-from-config/clickhouse-extract-from-config.cpp rename to programs/extract-from-config/clickhouse-extract-from-config.cpp diff --git a/dbms/programs/format/CMakeLists.txt b/programs/format/CMakeLists.txt similarity index 100% rename from dbms/programs/format/CMakeLists.txt rename to programs/format/CMakeLists.txt diff --git a/dbms/programs/format/Format.cpp b/programs/format/Format.cpp similarity index 95% rename from dbms/programs/format/Format.cpp rename to programs/format/Format.cpp index f826d6394bc..b5a4e2d1603 100644 --- a/dbms/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -53,7 +53,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv) const char * end = pos + query.size(); ParserQuery parser(end); - ASTPtr res = parseQuery(parser, pos, end, "query", 0); + ASTPtr res = parseQuery(parser, pos, end, "query", 0, 
DBMS_DEFAULT_MAX_PARSER_DEPTH); if (!quiet) { diff --git a/dbms/programs/format/clickhouse-format.cpp b/programs/format/clickhouse-format.cpp similarity index 100% rename from dbms/programs/format/clickhouse-format.cpp rename to programs/format/clickhouse-format.cpp diff --git a/dbms/programs/local/CMakeLists.txt b/programs/local/CMakeLists.txt similarity index 100% rename from dbms/programs/local/CMakeLists.txt rename to programs/local/CMakeLists.txt diff --git a/dbms/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp similarity index 99% rename from dbms/programs/local/LocalServer.cpp rename to programs/local/LocalServer.cpp index 26752da5d87..1ab07d79401 100644 --- a/dbms/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -267,8 +267,10 @@ void LocalServer::processQueries() String initial_create_query = getInitialCreateTableQuery(); String queries_str = initial_create_query + config().getRawString("query"); + const auto & settings = context->getSettingsRef(); + std::vector<String> queries; - auto parse_res = splitMultipartQuery(queries_str, queries); + auto parse_res = splitMultipartQuery(queries_str, queries, settings.max_query_size, settings.max_parser_depth); if (!parse_res.second) throw Exception("Cannot parse and execute the following part of query: " + String(parse_res.first), ErrorCodes::SYNTAX_ERROR); diff --git a/dbms/programs/local/LocalServer.h b/programs/local/LocalServer.h similarity index 100% rename from dbms/programs/local/LocalServer.h rename to programs/local/LocalServer.h diff --git a/dbms/programs/local/clickhouse-local.cpp b/programs/local/clickhouse-local.cpp similarity index 100% rename from dbms/programs/local/clickhouse-local.cpp rename to programs/local/clickhouse-local.cpp diff --git a/dbms/programs/main.cpp b/programs/main.cpp similarity index 100% rename from dbms/programs/main.cpp rename to programs/main.cpp diff --git a/dbms/programs/obfuscator/CMakeLists.txt b/programs/obfuscator/CMakeLists.txt similarity index 100% rename from dbms/programs/obfuscator/CMakeLists.txt rename to programs/obfuscator/CMakeLists.txt diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp similarity index 100% rename from dbms/programs/obfuscator/Obfuscator.cpp rename to programs/obfuscator/Obfuscator.cpp diff --git a/dbms/programs/obfuscator/clickhouse-obfuscator.cpp b/programs/obfuscator/clickhouse-obfuscator.cpp similarity index 100% rename from dbms/programs/obfuscator/clickhouse-obfuscator.cpp rename to programs/obfuscator/clickhouse-obfuscator.cpp diff --git a/dbms/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt similarity index 100% rename from dbms/programs/odbc-bridge/CMakeLists.txt rename to programs/odbc-bridge/CMakeLists.txt diff --git a/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp similarity index 97% rename from dbms/programs/odbc-bridge/ColumnInfoHandler.cpp rename to programs/odbc-bridge/ColumnInfoHandler.cpp index b89d50569f6..e3c00f48fb5 100644 --- a/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -120,12 +120,14 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques SCOPE_EXIT(SQLFreeStmt(hstmt, SQL_DROP)); + const auto & context_settings = context->getSettingsRef(); + /// TODO Why not do SQLColumns instead? std::string name = schema_name.empty() ? table_name : schema_name + "."
+ table_name; std::stringstream ss; std::string input = "SELECT * FROM " + name + " WHERE 1 = 0"; ParserQueryWithOutput parser; - ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", context_settings.max_query_size, context_settings.max_parser_depth); IAST::FormatSettings settings(ss, true); settings.always_quote_identifiers = true; diff --git a/dbms/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/ColumnInfoHandler.h rename to programs/odbc-bridge/ColumnInfoHandler.h diff --git a/dbms/programs/odbc-bridge/HandlerFactory.cpp b/programs/odbc-bridge/HandlerFactory.cpp similarity index 100% rename from dbms/programs/odbc-bridge/HandlerFactory.cpp rename to programs/odbc-bridge/HandlerFactory.cpp diff --git a/dbms/programs/odbc-bridge/HandlerFactory.h b/programs/odbc-bridge/HandlerFactory.h similarity index 100% rename from dbms/programs/odbc-bridge/HandlerFactory.h rename to programs/odbc-bridge/HandlerFactory.h diff --git a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp similarity index 100% rename from dbms/programs/odbc-bridge/IdentifierQuoteHandler.cpp rename to programs/odbc-bridge/IdentifierQuoteHandler.cpp diff --git a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/IdentifierQuoteHandler.h rename to programs/odbc-bridge/IdentifierQuoteHandler.h diff --git a/dbms/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp similarity index 100% rename from dbms/programs/odbc-bridge/MainHandler.cpp rename to programs/odbc-bridge/MainHandler.cpp diff --git a/dbms/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/MainHandler.h rename to programs/odbc-bridge/MainHandler.h diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp rename to programs/odbc-bridge/ODBCBlockInputStream.cpp diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.h b/programs/odbc-bridge/ODBCBlockInputStream.h similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBlockInputStream.h rename to programs/odbc-bridge/ODBCBlockInputStream.h diff --git a/dbms/programs/odbc-bridge/ODBCBridge.cpp b/programs/odbc-bridge/ODBCBridge.cpp similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBridge.cpp rename to programs/odbc-bridge/ODBCBridge.cpp diff --git a/dbms/programs/odbc-bridge/ODBCBridge.h b/programs/odbc-bridge/ODBCBridge.h similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBridge.h rename to programs/odbc-bridge/ODBCBridge.h diff --git a/dbms/programs/odbc-bridge/PingHandler.cpp b/programs/odbc-bridge/PingHandler.cpp similarity index 100% rename from dbms/programs/odbc-bridge/PingHandler.cpp rename to programs/odbc-bridge/PingHandler.cpp diff --git a/dbms/programs/odbc-bridge/PingHandler.h b/programs/odbc-bridge/PingHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/PingHandler.h rename to programs/odbc-bridge/PingHandler.h diff --git a/dbms/programs/odbc-bridge/README.md b/programs/odbc-bridge/README.md similarity index 100% rename from 
dbms/programs/odbc-bridge/README.md rename to programs/odbc-bridge/README.md diff --git a/dbms/programs/odbc-bridge/getIdentifierQuote.cpp b/programs/odbc-bridge/getIdentifierQuote.cpp similarity index 100% rename from dbms/programs/odbc-bridge/getIdentifierQuote.cpp rename to programs/odbc-bridge/getIdentifierQuote.cpp diff --git a/dbms/programs/odbc-bridge/getIdentifierQuote.h b/programs/odbc-bridge/getIdentifierQuote.h similarity index 100% rename from dbms/programs/odbc-bridge/getIdentifierQuote.h rename to programs/odbc-bridge/getIdentifierQuote.h diff --git a/dbms/programs/odbc-bridge/odbc-bridge.cpp b/programs/odbc-bridge/odbc-bridge.cpp similarity index 100% rename from dbms/programs/odbc-bridge/odbc-bridge.cpp rename to programs/odbc-bridge/odbc-bridge.cpp diff --git a/dbms/programs/odbc-bridge/tests/CMakeLists.txt b/programs/odbc-bridge/tests/CMakeLists.txt similarity index 100% rename from dbms/programs/odbc-bridge/tests/CMakeLists.txt rename to programs/odbc-bridge/tests/CMakeLists.txt diff --git a/dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.cpp b/programs/odbc-bridge/tests/validate-odbc-connection-string.cpp similarity index 100% rename from dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.cpp rename to programs/odbc-bridge/tests/validate-odbc-connection-string.cpp diff --git a/dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.reference b/programs/odbc-bridge/tests/validate-odbc-connection-string.reference similarity index 100% rename from dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.reference rename to programs/odbc-bridge/tests/validate-odbc-connection-string.reference diff --git a/dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.sh b/programs/odbc-bridge/tests/validate-odbc-connection-string.sh similarity index 100% rename from dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.sh rename to programs/odbc-bridge/tests/validate-odbc-connection-string.sh diff --git a/dbms/programs/odbc-bridge/validateODBCConnectionString.cpp b/programs/odbc-bridge/validateODBCConnectionString.cpp similarity index 100% rename from dbms/programs/odbc-bridge/validateODBCConnectionString.cpp rename to programs/odbc-bridge/validateODBCConnectionString.cpp diff --git a/dbms/programs/odbc-bridge/validateODBCConnectionString.h b/programs/odbc-bridge/validateODBCConnectionString.h similarity index 100% rename from dbms/programs/odbc-bridge/validateODBCConnectionString.h rename to programs/odbc-bridge/validateODBCConnectionString.h diff --git a/dbms/programs/server/CMakeLists.txt b/programs/server/CMakeLists.txt similarity index 100% rename from dbms/programs/server/CMakeLists.txt rename to programs/server/CMakeLists.txt diff --git a/dbms/programs/server/HTTPHandler.cpp b/programs/server/HTTPHandler.cpp similarity index 100% rename from dbms/programs/server/HTTPHandler.cpp rename to programs/server/HTTPHandler.cpp diff --git a/dbms/programs/server/HTTPHandler.h b/programs/server/HTTPHandler.h similarity index 100% rename from dbms/programs/server/HTTPHandler.h rename to programs/server/HTTPHandler.h diff --git a/dbms/programs/server/HTTPHandlerFactory.cpp b/programs/server/HTTPHandlerFactory.cpp similarity index 100% rename from dbms/programs/server/HTTPHandlerFactory.cpp rename to programs/server/HTTPHandlerFactory.cpp diff --git a/dbms/programs/server/HTTPHandlerFactory.h b/programs/server/HTTPHandlerFactory.h similarity index 100% rename from dbms/programs/server/HTTPHandlerFactory.h rename to 
programs/server/HTTPHandlerFactory.h diff --git a/dbms/programs/server/IServer.h b/programs/server/IServer.h similarity index 100% rename from dbms/programs/server/IServer.h rename to programs/server/IServer.h diff --git a/dbms/programs/server/InterserverIOHTTPHandler.cpp b/programs/server/InterserverIOHTTPHandler.cpp similarity index 100% rename from dbms/programs/server/InterserverIOHTTPHandler.cpp rename to programs/server/InterserverIOHTTPHandler.cpp diff --git a/dbms/programs/server/InterserverIOHTTPHandler.h b/programs/server/InterserverIOHTTPHandler.h similarity index 100% rename from dbms/programs/server/InterserverIOHTTPHandler.h rename to programs/server/InterserverIOHTTPHandler.h diff --git a/dbms/programs/server/MetricsTransmitter.cpp b/programs/server/MetricsTransmitter.cpp similarity index 100% rename from dbms/programs/server/MetricsTransmitter.cpp rename to programs/server/MetricsTransmitter.cpp diff --git a/dbms/programs/server/MetricsTransmitter.h b/programs/server/MetricsTransmitter.h similarity index 100% rename from dbms/programs/server/MetricsTransmitter.h rename to programs/server/MetricsTransmitter.h diff --git a/dbms/programs/server/MySQLHandler.cpp b/programs/server/MySQLHandler.cpp similarity index 92% rename from dbms/programs/server/MySQLHandler.cpp rename to programs/server/MySQLHandler.cpp index 3e1432dbfce..bfab19061ce 100644 --- a/dbms/programs/server/MySQLHandler.cpp +++ b/programs/server/MySQLHandler.cpp @@ -16,6 +16,7 @@ #include #include #include +#include <regex> #if USE_POCO_NETSSL #include @@ -268,7 +269,8 @@ void MySQLHandler::comPing() packet_sender->sendPacket(OK_Packet(0x0, client_capability_flags, 0, 0, 0), true); } -static bool isFederatedServerSetupCommand(const String & query); +static bool isFederatedServerSetupSetCommand(const String & query); +static bool isFederatedServerSetupSelectVarCommand(const String & query); void MySQLHandler::comQuery(ReadBuffer & payload) { @@ -276,22 +278,25 @@ void MySQLHandler::comQuery(ReadBuffer & payload) // This is a workaround in order to support adding ClickHouse to MySQL using federated server. // As ClickHouse doesn't support these statements, we just send OK packet in response. - if (isFederatedServerSetupCommand(query)) + if (isFederatedServerSetupSetCommand(query)) { packet_sender->sendPacket(OK_Packet(0x00, client_capability_flags, 0, 0, 0), true); } else { - String replacement_query = "select ''"; + String replacement_query = "SELECT ''"; bool should_replace = false; bool with_output = false; // Translate query from MySQL to ClickHouse. - // This is a temporary workaround until ClickHouse supports the syntax "@@var_name". - if (query == "select @@version_comment limit 1") // MariaDB client starts session with that query + // Required parameters when setup: + // * max_allowed_packet, default 64MB, https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_allowed_packet + if (isFederatedServerSetupSelectVarCommand(query)) { should_replace = true; + replacement_query = "SELECT 67108864 AS max_allowed_packet"; } + // This is a workaround in order to support adding ClickHouse to MySQL using federated server.
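+ // Note: the check below is a case-insensitive prefix match (strncasecmp stops after the first 22 bytes, the length of "SHOW TABLE STATUS LIKE"), so any query starting with that text is rewritten (see show_table_status_replacement_query at the end of this file).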
if (0 == strncasecmp("SHOW TABLE STATUS LIKE", query.c_str(), 22)) { @@ -358,11 +363,27 @@ void MySQLHandlerSSL::finishHandshakeSSL(size_t packet_size, char * buf, size_t #endif -static bool isFederatedServerSetupCommand(const String & query) +static bool isFederatedServerSetupSetCommand(const String & query) { - return 0 == strncasecmp("SET NAMES", query.c_str(), 9) || 0 == strncasecmp("SET character_set_results", query.c_str(), 25) - || 0 == strncasecmp("SET FOREIGN_KEY_CHECKS", query.c_str(), 22) || 0 == strncasecmp("SET AUTOCOMMIT", query.c_str(), 14) - || 0 == strncasecmp("SET SESSION TRANSACTION ISOLATION LEVEL", query.c_str(), 39); + static const std::regex expr{ + "(^(SET NAMES(.*)))" + "|(^(SET character_set_results(.*)))" + "|(^(SET FOREIGN_KEY_CHECKS(.*)))" + "|(^(SET AUTOCOMMIT(.*)))" + "|(^(SET sql_mode(.*)))" + "|(^(SET SESSION TRANSACTION ISOLATION LEVEL(.*)))" + , std::regex::icase}; + return 1 == std::regex_match(query, expr); +} + +static bool isFederatedServerSetupSelectVarCommand(const String & query) +{ + static const std::regex expr{ + "(^(SELECT @@(.*)))" + "|(^((/\\*(.*)\\*/)([ \t]*)(SELECT([ \t]*)@@(.*))))" + "|(^((/\\*(.*)\\*/)([ \t]*)(SHOW VARIABLES(.*))))" + , std::regex::icase}; + return 1 == std::regex_match(query, expr); } const String MySQLHandler::show_table_status_replacement_query("SELECT" diff --git a/dbms/programs/server/MySQLHandler.h b/programs/server/MySQLHandler.h similarity index 100% rename from dbms/programs/server/MySQLHandler.h rename to programs/server/MySQLHandler.h diff --git a/dbms/programs/server/MySQLHandlerFactory.cpp b/programs/server/MySQLHandlerFactory.cpp similarity index 100% rename from dbms/programs/server/MySQLHandlerFactory.cpp rename to programs/server/MySQLHandlerFactory.cpp diff --git a/dbms/programs/server/MySQLHandlerFactory.h b/programs/server/MySQLHandlerFactory.h similarity index 100% rename from dbms/programs/server/MySQLHandlerFactory.h rename to programs/server/MySQLHandlerFactory.h diff --git a/dbms/programs/server/NotFoundHandler.cpp b/programs/server/NotFoundHandler.cpp similarity index 100% rename from dbms/programs/server/NotFoundHandler.cpp rename to programs/server/NotFoundHandler.cpp diff --git a/dbms/programs/server/NotFoundHandler.h b/programs/server/NotFoundHandler.h similarity index 100% rename from dbms/programs/server/NotFoundHandler.h rename to programs/server/NotFoundHandler.h diff --git a/dbms/programs/server/PingRequestHandler.cpp b/programs/server/PingRequestHandler.cpp similarity index 100% rename from dbms/programs/server/PingRequestHandler.cpp rename to programs/server/PingRequestHandler.cpp diff --git a/dbms/programs/server/PingRequestHandler.h b/programs/server/PingRequestHandler.h similarity index 100% rename from dbms/programs/server/PingRequestHandler.h rename to programs/server/PingRequestHandler.h diff --git a/dbms/programs/server/PrometheusMetricsWriter.cpp b/programs/server/PrometheusMetricsWriter.cpp similarity index 100% rename from dbms/programs/server/PrometheusMetricsWriter.cpp rename to programs/server/PrometheusMetricsWriter.cpp diff --git a/dbms/programs/server/PrometheusMetricsWriter.h b/programs/server/PrometheusMetricsWriter.h similarity index 100% rename from dbms/programs/server/PrometheusMetricsWriter.h rename to programs/server/PrometheusMetricsWriter.h diff --git a/dbms/programs/server/PrometheusRequestHandler.cpp b/programs/server/PrometheusRequestHandler.cpp similarity index 100% rename from dbms/programs/server/PrometheusRequestHandler.cpp rename to
programs/server/PrometheusRequestHandler.cpp diff --git a/dbms/programs/server/PrometheusRequestHandler.h b/programs/server/PrometheusRequestHandler.h similarity index 100% rename from dbms/programs/server/PrometheusRequestHandler.h rename to programs/server/PrometheusRequestHandler.h diff --git a/dbms/programs/server/ReplicasStatusHandler.cpp b/programs/server/ReplicasStatusHandler.cpp similarity index 100% rename from dbms/programs/server/ReplicasStatusHandler.cpp rename to programs/server/ReplicasStatusHandler.cpp diff --git a/dbms/programs/server/ReplicasStatusHandler.h b/programs/server/ReplicasStatusHandler.h similarity index 100% rename from dbms/programs/server/ReplicasStatusHandler.h rename to programs/server/ReplicasStatusHandler.h diff --git a/dbms/programs/server/RootRequestHandler.cpp b/programs/server/RootRequestHandler.cpp similarity index 100% rename from dbms/programs/server/RootRequestHandler.cpp rename to programs/server/RootRequestHandler.cpp diff --git a/dbms/programs/server/RootRequestHandler.h b/programs/server/RootRequestHandler.h similarity index 100% rename from dbms/programs/server/RootRequestHandler.h rename to programs/server/RootRequestHandler.h diff --git a/dbms/programs/server/Server.cpp b/programs/server/Server.cpp similarity index 100% rename from dbms/programs/server/Server.cpp rename to programs/server/Server.cpp diff --git a/dbms/programs/server/Server.h b/programs/server/Server.h similarity index 100% rename from dbms/programs/server/Server.h rename to programs/server/Server.h diff --git a/dbms/programs/server/TCPHandler.cpp b/programs/server/TCPHandler.cpp similarity index 99% rename from dbms/programs/server/TCPHandler.cpp rename to programs/server/TCPHandler.cpp index 725aa8453b3..d82c6e31528 100644 --- a/dbms/programs/server/TCPHandler.cpp +++ b/programs/server/TCPHandler.cpp @@ -147,10 +147,6 @@ void TCPHandler::runImpl() if (server.isCancelled() || in->eof()) break; - /// receiveHello() has set the default settings for the current user, - /// but this default itself could change while we were waiting for a packet from the client. - connection_context.resetSettingsToDefault(); - /// Set context of request. query_context = connection_context; diff --git a/dbms/programs/server/TCPHandler.h b/programs/server/TCPHandler.h similarity index 100% rename from dbms/programs/server/TCPHandler.h rename to programs/server/TCPHandler.h diff --git a/programs/server/TCPHandlerFactory.h b/programs/server/TCPHandlerFactory.h new file mode 100644 index 00000000000..3b764af96ec --- /dev/null +++ b/programs/server/TCPHandlerFactory.h @@ -0,0 +1,49 @@ +#pragma once + +#include <Poco/Net/TCPServerConnectionFactory.h> +#include <Poco/Net/NetException.h> +#include <common/logger_useful.h> +#include "IServer.h" +#include "TCPHandler.h" + +namespace Poco { class Logger; } + +namespace DB +{ + +class TCPHandlerFactory : public Poco::Net::TCPServerConnectionFactory +{ +private: + IServer & server; + Poco::Logger * log; + + class DummyTCPHandler : public Poco::Net::TCPServerConnection + { + public: + using Poco::Net::TCPServerConnection::TCPServerConnection; + void run() override {} + }; + +public: + explicit TCPHandlerFactory(IServer & server_, bool secure_ = false) + : server(server_) + , log(&Logger::get(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory")) + { + } + + Poco::Net::TCPServerConnection * createConnection(const Poco::Net::StreamSocket & socket) override + { + try + { + LOG_TRACE(log, "TCP Request.
Address: " << socket.peerAddress().toString()); + return new TCPHandler(server, socket); + } + catch (const Poco::Net::NetException & e) + { + LOG_TRACE(log, "TCP Request. Client is not connected (most likely RST packet was sent)."); + return new DummyTCPHandler(socket); + } + } +}; + +} diff --git a/dbms/programs/server/clickhouse-server.cpp b/programs/server/clickhouse-server.cpp similarity index 100% rename from dbms/programs/server/clickhouse-server.cpp rename to programs/server/clickhouse-server.cpp diff --git a/dbms/programs/server/config.d/listen.xml.disabled b/programs/server/config.d/listen.xml.disabled similarity index 100% rename from dbms/programs/server/config.d/listen.xml.disabled rename to programs/server/config.d/listen.xml.disabled diff --git a/dbms/programs/server/config.d/log_to_console.xml b/programs/server/config.d/log_to_console.xml similarity index 100% rename from dbms/programs/server/config.d/log_to_console.xml rename to programs/server/config.d/log_to_console.xml diff --git a/dbms/programs/server/config.d/macros.xml b/programs/server/config.d/macros.xml similarity index 100% rename from dbms/programs/server/config.d/macros.xml rename to programs/server/config.d/macros.xml diff --git a/dbms/programs/server/config.d/metric_log.xml b/programs/server/config.d/metric_log.xml similarity index 100% rename from dbms/programs/server/config.d/metric_log.xml rename to programs/server/config.d/metric_log.xml diff --git a/dbms/programs/server/config.d/more_clusters.xml b/programs/server/config.d/more_clusters.xml similarity index 100% rename from dbms/programs/server/config.d/more_clusters.xml rename to programs/server/config.d/more_clusters.xml diff --git a/dbms/programs/server/config.d/part_log.xml b/programs/server/config.d/part_log.xml similarity index 100% rename from dbms/programs/server/config.d/part_log.xml rename to programs/server/config.d/part_log.xml diff --git a/dbms/programs/server/config.d/path.xml b/programs/server/config.d/path.xml similarity index 77% rename from dbms/programs/server/config.d/path.xml rename to programs/server/config.d/path.xml index 14b7deb9de0..8db1d18e8c7 100644 --- a/dbms/programs/server/config.d/path.xml +++ b/programs/server/config.d/path.xml @@ -3,4 +3,5 @@ ./tmp/ ./user_files/ ./format_schemas/ + ./access/ diff --git a/dbms/programs/server/config.d/query_masking_rules.xml b/programs/server/config.d/query_masking_rules.xml similarity index 100% rename from dbms/programs/server/config.d/query_masking_rules.xml rename to programs/server/config.d/query_masking_rules.xml diff --git a/dbms/programs/server/config.d/text_log.xml b/programs/server/config.d/text_log.xml similarity index 100% rename from dbms/programs/server/config.d/text_log.xml rename to programs/server/config.d/text_log.xml diff --git a/dbms/programs/server/config.d/tls.xml.disabled b/programs/server/config.d/tls.xml.disabled similarity index 100% rename from dbms/programs/server/config.d/tls.xml.disabled rename to programs/server/config.d/tls.xml.disabled diff --git a/dbms/programs/server/config.d/zookeeper.xml b/programs/server/config.d/zookeeper.xml similarity index 100% rename from dbms/programs/server/config.d/zookeeper.xml rename to programs/server/config.d/zookeeper.xml diff --git a/programs/server/config.xml b/programs/server/config.xml new file mode 100644 index 00000000000..fb2f9be6e24 --- /dev/null +++ b/programs/server/config.xml @@ -0,0 +1,531 @@ + + + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + 
[The 531 added lines of the new default programs/server/config.xml are not reproduced here: extraction stripped all XML markup, leaving only bare element values. The recoverable settings are: logger (trace level, /var/log/clickhouse-server/clickhouse-server.log and clickhouse-server.err.log, 1000M rotation size, 10 rotations); listen ports 8123 (HTTP), 9000 (native TCP), 9004 (MySQL protocol) and 9009 (interserver); openSSL paths /etc/clickhouse-server/server.crt, server.key and dhparam.pem, with sslv2,sslv3 disabled and RejectCertificateHandler for clients; max_connections 4096, keep_alive_timeout 3, max_concurrent_queries 100; uncompressed cache 8589934592 bytes and mark cache 5368709120 bytes; data paths under /var/lib/clickhouse/ (tmp/, user_files/, access/, format_schemas/); users.xml with the default profile and default database; sample remote_servers entries (localhost:9000 shards, 127.0.0.1/127.0.0.2 replicas, a secure localhost:9440 entry); builtin_dictionaries_reload_interval 3600, max_session_timeout 3600, default_session_timeout 60; system log tables query_log, trace_log and query_thread_log partitioned by toYYYYMM(event_date) with a 7500 ms flush interval, plus metric_log with 7500 ms flush and 1000 ms collect intervals; the *_dictionary.xml dictionary config mask; distributed_ddl queue path /clickhouse/task_queue/ddl; and a graphite_rollup example (click_cost with any aggregation and 0/3600, 86400/60 retention pairs; max with 0/60, 3600/300, 86400/3600 retention pairs).]
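A pattern repeated throughout this patch is threading an explicit recursion limit (DBMS_DEFAULT_MAX_PARSER_DEPTH, or the user-tunable max_parser_depth setting) through every call to parseQuery, parseQueryAndMovePosition and splitMultipartQuery. The sketch below is illustrative only, with hypothetical names rather than ClickHouse's actual parser API: it shows why a recursive-descent parser needs such a cap. Each nesting level of the input costs one level of C++ recursion, so a hostile query such as SELECT ((((...)))) can otherwise crash the process with a stack overflow; with the cap it fails with an ordinary error.

// Minimal sketch of a depth-capped recursive-descent parser (hypothetical, not ClickHouse code).
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>

/// RAII guard: tracks the current recursion depth and throws once the budget is exceeded.
struct DepthGuard
{
    size_t & depth;
    DepthGuard(size_t & depth_, size_t max_depth) : depth(depth_)
    {
        if (depth >= max_depth)
            throw std::runtime_error("Maximum parse depth exceeded");
        ++depth;
    }
    ~DepthGuard() { --depth; }
};

/// Parses balanced parentheses starting at `pos`; returns the nesting level consumed.
size_t parseParens(const std::string & s, size_t & pos, size_t & depth, size_t max_depth)
{
    DepthGuard guard(depth, max_depth);  /// throws cleanly instead of overflowing the stack
    size_t levels = 0;
    if (pos < s.size() && s[pos] == '(')
    {
        ++pos;
        levels = 1 + parseParens(s, pos, depth, max_depth);
        if (pos >= s.size() || s[pos] != ')')
            throw std::runtime_error("Expected ')'");
        ++pos;
    }
    return levels;
}

int main()
{
    std::string shallow = std::string(10, '(') + std::string(10, ')');
    std::string hostile = std::string(1000000, '(') + std::string(1000000, ')');

    size_t pos = 0, depth = 0;
    std::cout << parseParens(shallow, pos, depth, /* max_depth = */ 1000) << " levels\n";

    pos = 0; depth = 0;
    try
    {
        parseParens(hostile, pos, depth, 1000);  /// rejected long before the real stack limit
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
}

The same reasoning explains the LocalServer hunk earlier in this patch: splitMultipartQuery now receives settings.max_query_size and settings.max_parser_depth from the user's settings instead of hard-coded zeros.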
diff --git a/dbms/programs/server/data/.gitignore b/programs/server/data/.gitignore similarity index 100% rename from dbms/programs/server/data/.gitignore rename to programs/server/data/.gitignore diff --git a/dbms/programs/server/data/default/.gitignore b/programs/server/data/default/.gitignore similarity index 100% rename from dbms/programs/server/data/default/.gitignore rename to programs/server/data/default/.gitignore diff --git a/dbms/programs/server/metadata/default/.gitignore b/programs/server/metadata/default/.gitignore similarity index 100% rename from dbms/programs/server/metadata/default/.gitignore rename to programs/server/metadata/default/.gitignore diff --git a/dbms/programs/server/users.d/allow_only_from_localhost.xml b/programs/server/users.d/allow_only_from_localhost.xml similarity index 100% rename from dbms/programs/server/users.d/allow_only_from_localhost.xml rename to programs/server/users.d/allow_only_from_localhost.xml diff --git a/dbms/programs/server/users.d/log_queries.xml b/programs/server/users.d/log_queries.xml similarity index 100% rename from dbms/programs/server/users.d/log_queries.xml rename to programs/server/users.d/log_queries.xml diff --git a/dbms/programs/server/users.d/readonly.xml b/programs/server/users.d/readonly.xml similarity index 100% rename from dbms/programs/server/users.d/readonly.xml rename to programs/server/users.d/readonly.xml diff --git a/programs/server/users.xml b/programs/server/users.xml new file mode 100644 index 00000000000..3d95269190b --- /dev/null +++ b/programs/server/users.xml @@ -0,0 +1,110 @@ [The 110 added lines of the new programs/server/users.xml are likewise reduced to bare values by extraction. Recoverable settings: a default profile (max_memory_usage 10000000000, use_uncompressed_cache 0, load_balancing random), a readonly profile (readonly 1), a default user with access from ::/0 using the default profile and quota, and a default quota with a single 3600-second interval whose limits (queries, errors, result rows, read rows, execution time) are all 0, i.e. unlimited.] diff --git a/dbms/src/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp similarity index 99% rename from dbms/src/Access/AccessControlManager.cpp rename to src/Access/AccessControlManager.cpp index b5e06549c28..f8f15e425ed 100644 --- a/dbms/src/Access/AccessControlManager.cpp +++ b/src/Access/AccessControlManager.cpp @@ -23,7 +23,10 @@ namespace std::vector<std::unique_ptr<IAccessStorage>> list; list.emplace_back(std::make_unique<UsersConfigAccessStorage>()); list.emplace_back(std::make_unique<DiskAccessStorage>()); + +#if 0 /// Memory access storage is disabled. list.emplace_back(std::make_unique<MemoryAccessStorage>()); +#endif return list; } diff --git a/dbms/src/Access/AccessControlManager.h b/src/Access/AccessControlManager.h similarity index 100% rename from dbms/src/Access/AccessControlManager.h rename to src/Access/AccessControlManager.h diff --git a/src/Access/AccessFlags.h b/src/Access/AccessFlags.h new file mode 100644 index 00000000000..c8f57fcd419 --- /dev/null +++ b/src/Access/AccessFlags.h @@ -0,0 +1,425 @@ +#pragma once + +#include <Access/AccessType.h> +#include <Core/Types.h> +#include <Common/Exception.h> +#include <boost/algorithm/string/case_conv.hpp> +#include <boost/algorithm/string/replace.hpp> +#include <boost/algorithm/string/split.hpp> +#include <boost/algorithm/string/trim.hpp> +#include <bitset> +#include <unordered_map> + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +/// Represents a combination of access types which can be granted globally, on databases, tables, columns, etc. +/// For example "SELECT, CREATE USER" is an access type. +class AccessFlags +{ +public: + AccessFlags(AccessType type); + + /// The same as AccessFlags(AccessType::NONE). + AccessFlags() = default; + + /// Constructs from a string like "SELECT". + AccessFlags(const std::string_view & keyword); + + /// Constructs from a list of strings like "SELECT, UPDATE, INSERT".
+ AccessFlags(const std::vector<std::string_view> & keywords); + AccessFlags(const Strings & keywords); + + AccessFlags(const AccessFlags & src) = default; + AccessFlags(AccessFlags && src) = default; + AccessFlags & operator =(const AccessFlags & src) = default; + AccessFlags & operator =(AccessFlags && src) = default; + + /// Returns the access type which contains two specified access types. + AccessFlags & operator |=(const AccessFlags & other) { flags |= other.flags; return *this; } + friend AccessFlags operator |(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) |= right; } + + /// Returns the access type which contains the common part of two access types. + AccessFlags & operator &=(const AccessFlags & other) { flags &= other.flags; return *this; } + friend AccessFlags operator &(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) &= right; } + + /// Returns the access type which contains only the part of the first access type which is not the part of the second access type. + /// (lhs - rhs) is the same as (lhs & ~rhs). + AccessFlags & operator -=(const AccessFlags & other) { flags &= ~other.flags; return *this; } + friend AccessFlags operator -(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) -= right; } + + AccessFlags operator ~() const { AccessFlags res; res.flags = ~flags; return res; } + + bool isEmpty() const { return flags.none(); } + explicit operator bool() const { return !isEmpty(); } + bool contains(const AccessFlags & other) const { return (flags & other.flags) == other.flags; } + + friend bool operator ==(const AccessFlags & left, const AccessFlags & right) { return left.flags == right.flags; } + friend bool operator !=(const AccessFlags & left, const AccessFlags & right) { return !(left == right); } + + void clear() { flags.reset(); } + + /// Returns a comma-separated list of keywords, like "SELECT, CREATE USER, UPDATE". + String toString() const; + + /// Returns a list of keywords. + std::vector<std::string_view> toKeywords() const; + + /// Returns all the flags. + /// These are the same as (allGlobalFlags() | allDatabaseFlags() | allTableFlags() | allColumnFlags() | allDictionaryFlags()). + static AccessFlags allFlags(); + + /// Returns all the global flags. + static AccessFlags allGlobalFlags(); + + /// Returns all the flags related to a database. + static AccessFlags allDatabaseFlags(); + + /// Returns all the flags related to a table. + static AccessFlags allTableFlags(); + + /// Returns all the flags related to a column. + static AccessFlags allColumnFlags(); + + /// Returns all the flags related to a dictionary.
+ static AccessFlags allDictionaryFlags(); + +private: + static constexpr size_t NUM_FLAGS = 128; + using Flags = std::bitset<NUM_FLAGS>; + Flags flags; + + AccessFlags(const Flags & flags_) : flags(flags_) {} + + template <typename = void> + class Impl; +}; + + +namespace ErrorCodes +{ + extern const int UNKNOWN_ACCESS_TYPE; +} + +template <typename> +class AccessFlags::Impl +{ +public: + static const Impl & instance() + { + static const Impl res; + return res; + } + + Flags accessTypeToFlags(AccessType type) const + { + return access_type_to_flags_mapping[static_cast<size_t>(type)]; + } + + Flags keywordToFlags(const std::string_view & keyword) const + { + auto it = keyword_to_flags_map.find(keyword); + if (it == keyword_to_flags_map.end()) + { + String uppercased_keyword{keyword}; + boost::to_upper(uppercased_keyword); + it = keyword_to_flags_map.find(uppercased_keyword); + if (it == keyword_to_flags_map.end()) + throw Exception("Unknown access type: " + String(keyword), ErrorCodes::UNKNOWN_ACCESS_TYPE); + } + return it->second; + } + + Flags keywordsToFlags(const std::vector<std::string_view> & keywords) const + { + Flags res; + for (const auto & keyword : keywords) + res |= keywordToFlags(keyword); + return res; + } + + Flags keywordsToFlags(const Strings & keywords) const + { + Flags res; + for (const auto & keyword : keywords) + res |= keywordToFlags(keyword); + return res; + } + + std::vector<std::string_view> flagsToKeywords(const Flags & flags_) const + { + std::vector<std::string_view> keywords; + flagsToKeywordsRec(flags_, keywords, *flags_to_keyword_tree); + + if (keywords.empty()) + keywords.push_back("USAGE"); + + return keywords; + } + + String flagsToString(const Flags & flags_) const + { + String str; + for (const auto & keyword : flagsToKeywords(flags_)) + { + if (!str.empty()) + str += ", "; + str += keyword; + } + return str; + } + + const Flags & getAllFlags() const { return all_flags; } + const Flags & getGlobalFlags() const { return all_flags_for_target[GLOBAL]; } + const Flags & getDatabaseFlags() const { return all_flags_for_target[DATABASE]; } + const Flags & getTableFlags() const { return all_flags_for_target[TABLE]; } + const Flags & getColumnFlags() const { return all_flags_for_target[COLUMN]; } + const Flags & getDictionaryFlags() const { return all_flags_for_target[DICTIONARY]; } + +private: + enum NodeType + { + UNKNOWN = -2, + GROUP = -1, + GLOBAL, + DATABASE, + TABLE, + VIEW = TABLE, + COLUMN, + DICTIONARY, + }; + + struct Node; + using NodePtr = std::unique_ptr<Node>; + + struct Node + { + const String keyword; + NodeType node_type; + AccessType type = AccessType::NONE; + Strings aliases; + Flags flags; + std::vector<NodePtr> children; + + Node(String keyword_, NodeType node_type_ = UNKNOWN) : keyword(std::move(keyword_)), node_type(node_type_) {} + + void setFlag(size_t flag) { flags.set(flag); } + + void addChild(NodePtr child) + { + flags |= child->flags; + children.push_back(std::move(child)); + } + }; + + static String replaceUnderscoreWithSpace(const std::string_view & str) + { + String res{str}; + boost::replace_all(res, "_", " "); + return res; + } + + static Strings splitAliases(const std::string_view & str) + { + Strings aliases; + boost::split(aliases, str, boost::is_any_of(",")); + for (auto & alias : aliases) + boost::trim(alias); + return aliases; + } + + static void makeFlagsToKeywordTreeNode( + AccessType type, + const std::string_view & name, + const std::string_view & aliases, + NodeType node_type, + const std::string_view & parent_group_name, + std::unordered_map<std::string_view, Node *> & nodes, + std::unordered_map<std::string_view, NodePtr> & owned_nodes, + size_t & next_flag) + { + NodePtr node;
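+ // If this keyword was already created earlier as a forward-referenced parent group, it is waiting in owned_nodes without a parent; adopt that node here instead of declaring the keyword twice.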
auto keyword = replaceUnderscoreWithSpace(name); + auto it = owned_nodes.find(keyword); + if (it != owned_nodes.end()) + { + node = std::move(it->second); + owned_nodes.erase(it); + } + else + { + if (nodes.contains(keyword)) + throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR); + node = std::make_unique<Node>(keyword, node_type); + nodes[node->keyword] = node.get(); + } + + node->type = type; + node->node_type = node_type; + node->aliases = splitAliases(aliases); + if (node_type != GROUP) + node->setFlag(next_flag++); + + bool has_parent_group = (parent_group_name != "NONE"); + if (!has_parent_group) + { + std::string_view keyword_as_string_view = node->keyword; + owned_nodes[keyword_as_string_view] = std::move(node); + return; + } + + auto parent_keyword = replaceUnderscoreWithSpace(parent_group_name); + auto it_parent = nodes.find(parent_keyword); + if (it_parent == nodes.end()) + { + auto parent_node = std::make_unique<Node>(parent_keyword); + it_parent = nodes.emplace(parent_node->keyword, parent_node.get()).first; + assert(!owned_nodes.contains(parent_node->keyword)); + std::string_view parent_keyword_as_string_view = parent_node->keyword; + owned_nodes[parent_keyword_as_string_view] = std::move(parent_node); + } + it_parent->second->addChild(std::move(node)); + } + + void makeFlagsToKeywordTree() + { + std::unordered_map<std::string_view, NodePtr> owned_nodes; + std::unordered_map<std::string_view, Node *> nodes; + size_t next_flag = 0; + +#define MAKE_ACCESS_FLAGS_TO_KEYWORD_TREE_NODE(name, aliases, node_type, parent_group_name) \ + makeFlagsToKeywordTreeNode(AccessType::name, #name, aliases, node_type, #parent_group_name, nodes, owned_nodes, next_flag); + + APPLY_FOR_ACCESS_TYPES(MAKE_ACCESS_FLAGS_TO_KEYWORD_TREE_NODE) + +#undef MAKE_ACCESS_FLAGS_TO_KEYWORD_TREE_NODE + + if (!owned_nodes.contains("NONE")) + throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR); + if (!owned_nodes.contains("ALL")) + throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR); + + flags_to_keyword_tree = std::move(owned_nodes["ALL"]); + none_node = std::move(owned_nodes["NONE"]); + owned_nodes.erase("ALL"); + owned_nodes.erase("NONE"); + + if (!owned_nodes.empty()) + { + const auto & unused_node = *(owned_nodes.begin()->second); + if (unused_node.node_type == UNKNOWN) + throw Exception("Parent group '" + unused_node.keyword + "' not found", ErrorCodes::LOGICAL_ERROR); + else + throw Exception("Access type '" + unused_node.keyword + "' should have parent group", ErrorCodes::LOGICAL_ERROR); + } + } + + void makeKeywordToFlagsMap(Node * start_node = nullptr) + { + if (!start_node) + { + makeKeywordToFlagsMap(none_node.get()); + start_node = flags_to_keyword_tree.get(); + } + + start_node->aliases.emplace_back(start_node->keyword); + for (auto & alias : start_node->aliases) + { + boost::to_upper(alias); + keyword_to_flags_map[alias] = start_node->flags; + } + + for (auto & child : start_node->children) + makeKeywordToFlagsMap(child.get()); + } + + void makeAccessTypeToFlagsMapping(Node * start_node = nullptr) + { + if (!start_node) + { + makeAccessTypeToFlagsMapping(none_node.get()); + start_node = flags_to_keyword_tree.get(); + } + + size_t index = static_cast<size_t>(start_node->type); + access_type_to_flags_mapping.resize(std::max(index + 1, access_type_to_flags_mapping.size())); + access_type_to_flags_mapping[index] = start_node->flags; + + for (auto & child : start_node->children) + makeAccessTypeToFlagsMapping(child.get()); + } + + void collectAllFlags(const Node * start_node = nullptr) + { + if (!start_node) + { + start_node =
flags_to_keyword_tree.get(); + all_flags = start_node->flags; + } + if (start_node->node_type != GROUP) + { + assert(static_cast<size_t>(start_node->node_type) < std::size(all_flags_for_target)); + all_flags_for_target[start_node->node_type] |= start_node->flags; + } + for (const auto & child : start_node->children) + collectAllFlags(child.get()); + } + + Impl() + { + makeFlagsToKeywordTree(); + makeKeywordToFlagsMap(); + makeAccessTypeToFlagsMapping(); + collectAllFlags(); + } + + static void flagsToKeywordsRec(const Flags & flags_, std::vector<std::string_view> & keywords, const Node & start_node) + { + Flags matching_flags = (flags_ & start_node.flags); + if (matching_flags.any()) + { + if (matching_flags == start_node.flags) + { + keywords.push_back(start_node.keyword); + } + else + { + for (const auto & child : start_node.children) + flagsToKeywordsRec(flags_, keywords, *child); + } + } + } + + NodePtr flags_to_keyword_tree; + NodePtr none_node; + std::unordered_map<std::string_view, Flags> keyword_to_flags_map; + std::vector<Flags> access_type_to_flags_mapping; + Flags all_flags; + Flags all_flags_for_target[static_cast<size_t>(DICTIONARY) + 1]; +}; + + +inline AccessFlags::AccessFlags(AccessType type) : flags(Impl<>::instance().accessTypeToFlags(type)) {} +inline AccessFlags::AccessFlags(const std::string_view & keyword) : flags(Impl<>::instance().keywordToFlags(keyword)) {} +inline AccessFlags::AccessFlags(const std::vector<std::string_view> & keywords) : flags(Impl<>::instance().keywordsToFlags(keywords)) {} +inline AccessFlags::AccessFlags(const Strings & keywords) : flags(Impl<>::instance().keywordsToFlags(keywords)) {} +inline String AccessFlags::toString() const { return Impl<>::instance().flagsToString(flags); } +inline std::vector<std::string_view> AccessFlags::toKeywords() const { return Impl<>::instance().flagsToKeywords(flags); } +inline AccessFlags AccessFlags::allFlags() { return Impl<>::instance().getAllFlags(); } +inline AccessFlags AccessFlags::allGlobalFlags() { return Impl<>::instance().getGlobalFlags(); } +inline AccessFlags AccessFlags::allDatabaseFlags() { return Impl<>::instance().getDatabaseFlags(); } +inline AccessFlags AccessFlags::allTableFlags() { return Impl<>::instance().getTableFlags(); } +inline AccessFlags AccessFlags::allColumnFlags() { return Impl<>::instance().getColumnFlags(); } +inline AccessFlags AccessFlags::allDictionaryFlags() { return Impl<>::instance().getDictionaryFlags(); } + +inline AccessFlags operator |(AccessType left, AccessType right) { return AccessFlags(left) | right; } +inline AccessFlags operator &(AccessType left, AccessType right) { return AccessFlags(left) & right; } +inline AccessFlags operator -(AccessType left, AccessType right) { return AccessFlags(left) - right; } +inline AccessFlags operator ~(AccessType x) { return ~AccessFlags(x); } + +} diff --git a/dbms/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp similarity index 96% rename from dbms/src/Access/AccessRights.cpp rename to src/Access/AccessRights.cpp index 6f94cfac286..9c3b5e36ec8 100644 --- a/dbms/src/Access/AccessRights.cpp +++ b/src/Access/AccessRights.cpp @@ -49,10 +49,13 @@ namespace const AccessFlags create_temporary_table_flag = AccessType::CREATE_TEMPORARY_TABLE; const AccessFlags alter_table_flag = AccessType::ALTER_TABLE; const AccessFlags alter_view_flag = AccessType::ALTER_VIEW; - const AccessFlags truncate_table_flag = AccessType::TRUNCATE_TABLE; - const AccessFlags truncate_view_flag = AccessType::TRUNCATE_VIEW; + const AccessFlags truncate_flag = AccessType::TRUNCATE; const AccessFlags drop_table_flag = AccessType::DROP_TABLE; const
AccessFlags drop_view_flag = AccessType::DROP_VIEW; + const AccessFlags alter_ttl_flag = AccessType::ALTER_TTL; + const AccessFlags alter_materialize_ttl_flag = AccessType::ALTER_MATERIALIZE_TTL; + const AccessFlags system_reload_dictionary = AccessType::SYSTEM_RELOAD_DICTIONARY; + const AccessFlags system_reload_embedded_dictionaries = AccessType::SYSTEM_RELOAD_EMBEDDED_DICTIONARIES; }; std::string_view checkCurrentDatabase(const std::string_view & current_database) @@ -413,8 +416,14 @@ private: implicit_access |= helper.show_tables_flag; } - if ((level == GLOBAL_LEVEL) && ((access | max_access_among_children) & helper.create_table_flag)) - implicit_access |= helper.create_temporary_table_flag; + if (level == GLOBAL_LEVEL) + { + if ((access | max_access_among_children) & helper.create_table_flag) + implicit_access |= helper.create_temporary_table_flag; + + if (access & helper.system_reload_dictionary) + implicit_access |= helper.system_reload_embedded_dictionaries; + } if (level <= TABLE_LEVEL) { @@ -427,8 +436,8 @@ if (access & helper.alter_table_flag) implicit_access |= helper.alter_view_flag; - if (access & helper.truncate_table_flag) - implicit_access |= helper.truncate_view_flag; + if (access & helper.alter_ttl_flag) + implicit_access |= helper.alter_materialize_ttl_flag; } final_access = access | implicit_access; diff --git a/dbms/src/Access/AccessRights.h b/src/Access/AccessRights.h similarity index 100% rename from dbms/src/Access/AccessRights.h rename to src/Access/AccessRights.h diff --git a/dbms/src/Access/AccessRightsElement.cpp b/src/Access/AccessRightsElement.cpp similarity index 100% rename from dbms/src/Access/AccessRightsElement.cpp rename to src/Access/AccessRightsElement.cpp diff --git a/dbms/src/Access/AccessRightsElement.h b/src/Access/AccessRightsElement.h similarity index 100% rename from dbms/src/Access/AccessRightsElement.h rename to src/Access/AccessRightsElement.h diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h new file mode 100644 index 00000000000..d0665a6e55f --- /dev/null +++ b/src/Access/AccessType.h @@ -0,0 +1,216 @@ +#pragma once + +#include <Core/Types.h> +#include <boost/algorithm/string/replace.hpp> +#include <string_view> +#include <vector> + + +namespace DB +{ +/// Represents an access type which can be granted on databases, tables, columns, etc. +enum class AccessType +{ +/// Macro M should be defined as M(name, aliases, node_type, parent_group_name) +/// where name is identifier with underscores (instead of spaces); +/// aliases is a string containing comma-separated list; +/// node_type either specifies access type's level (GLOBAL/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS), +/// or specifies that the access type is a GROUP of other access types; +/// parent_group_name is the name of the group containing this access type (or NONE if there is no such group). +#define APPLY_FOR_ACCESS_TYPES(M) \ + M(SHOW_DATABASES, "", DATABASE, SHOW) /* allows to execute SHOW DATABASES, SHOW CREATE DATABASE, USE <database>; + implicitly enabled by any grant on the database */\ + M(SHOW_TABLES, "", TABLE, SHOW) /* allows to execute SHOW TABLES, EXISTS <table>, CHECK
<table>; + implicitly enabled by any grant on the table */\ + M(SHOW_COLUMNS, "", COLUMN, SHOW) /* allows to execute SHOW CREATE TABLE, DESCRIBE; + implicitly enabled with any grant on the column */\ + M(SHOW_DICTIONARIES, "", DICTIONARY, SHOW) /* allows to execute SHOW DICTIONARIES, SHOW CREATE DICTIONARY, EXISTS <dictionary>; + implicitly enabled by any grant on the dictionary */\ + M(SHOW, "", GROUP, ALL) /* allows to execute SHOW, USE, EXISTS, CHECK, DESCRIBE */\ + \ + M(SELECT, "", COLUMN, ALL) \ + M(INSERT, "", COLUMN, ALL) \ + M(ALTER_UPDATE, "UPDATE", COLUMN, ALTER_TABLE) /* allows to execute ALTER UPDATE */\ + M(ALTER_DELETE, "DELETE", COLUMN, ALTER_TABLE) /* allows to execute ALTER DELETE */\ + \ + M(ALTER_ADD_COLUMN, "ADD COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_MODIFY_COLUMN, "MODIFY COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_DROP_COLUMN, "DROP COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_COMMENT_COLUMN, "COMMENT COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_CLEAR_COLUMN, "CLEAR COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_RENAME_COLUMN, "RENAME COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_COLUMN, "", GROUP, ALTER_TABLE) /* allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN */\ + \ + M(ALTER_ORDER_BY, "ALTER MODIFY ORDER BY, MODIFY ORDER BY", TABLE, ALTER_INDEX) \ + M(ALTER_ADD_INDEX, "ADD INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_DROP_INDEX, "DROP INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_MATERIALIZE_INDEX, "MATERIALIZE INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_CLEAR_INDEX, "CLEAR INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_INDEX, "INDEX", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX */\ + \ + M(ALTER_ADD_CONSTRAINT, "ADD CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ + M(ALTER_DROP_CONSTRAINT, "DROP CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ + M(ALTER_CONSTRAINT, "CONSTRAINT", GROUP, ALTER_TABLE) /* allows to execute ALTER {ADD|DROP} CONSTRAINT */\ + \ + M(ALTER_TTL, "ALTER MODIFY TTL, MODIFY TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY TTL */\ + M(ALTER_MATERIALIZE_TTL, "MATERIALIZE TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MATERIALIZE TTL; + enabled implicitly by the grant ALTER_TABLE */\ + M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ + M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \ + M(ALTER_FETCH_PARTITION, "FETCH PARTITION", TABLE, ALTER_TABLE) \ + M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION", TABLE, ALTER_TABLE) \ + \ + M(ALTER_TABLE, "", GROUP, ALTER) \ + \ + M(ALTER_VIEW_REFRESH, "ALTER LIVE VIEW REFRESH, REFRESH VIEW", VIEW, ALTER_VIEW) \ + M(ALTER_VIEW_MODIFY_QUERY, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \ + M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER VIEW REFRESH, ALTER VIEW MODIFY QUERY; + implicitly enabled by the grant ALTER_TABLE */\ + \ + M(ALTER, "", GROUP, ALL) /* allows to execute ALTER {TABLE|LIVE VIEW} */\ + \ + M(CREATE_DATABASE, "", DATABASE, CREATE) /* allows to execute {CREATE|ATTACH} DATABASE */\ + M(CREATE_TABLE, "", TABLE, CREATE) /* allows to execute {CREATE|ATTACH} {TABLE|VIEW} */\ + M(CREATE_VIEW, "", VIEW, CREATE) /* allows to execute {CREATE|ATTACH} VIEW; + implicitly enabled by the grant CREATE_TABLE */\ + M(CREATE_DICTIONARY, "", DICTIONARY, CREATE) /* allows to execute {CREATE|ATTACH} DICTIONARY */\ + M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables; + implicitly enabled by the grant CREATE_TABLE on any table
*/ \ + M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \ + \ + M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\ + M(DROP_TABLE, "", TABLE, DROP) /* allows to execute {DROP|DETACH} TABLE */\ + M(DROP_VIEW, "", VIEW, DROP) /* allows to execute {DROP|DETACH} TABLE for views; + implicitly enabled by the grant DROP_TABLE */\ + M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\ + M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\ + \ + M(TRUNCATE, "TRUNCATE TABLE", TABLE, ALL) \ + M(OPTIMIZE, "OPTIMIZE TABLE", TABLE, ALL) \ + \ + M(KILL_QUERY, "", GLOBAL, ALL) /* allows to kill a query started by another user + (anyone can kill his own queries) */\ + \ + M(CREATE_USER, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_USER, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_USER, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(CREATE_ROLE, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_ROLE, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_ROLE, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ROLE_ADMIN, "", GLOBAL, ACCESS_MANAGEMENT) /* allows to grant and revoke the roles which are not granted to the current user with admin option */\ + M(CREATE_ROW_POLICY, "CREATE POLICY", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_ROW_POLICY, "ALTER POLICY", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_ROW_POLICY, "DROP POLICY", GLOBAL, ACCESS_MANAGEMENT) \ + M(CREATE_QUOTA, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_QUOTA, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_QUOTA, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(CREATE_SETTINGS_PROFILE, "CREATE PROFILE", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_SETTINGS_PROFILE, "ALTER PROFILE", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_SETTINGS_PROFILE, "DROP PROFILE", GLOBAL, ACCESS_MANAGEMENT) \ + M(SHOW_USERS, "SHOW CREATE USER", GLOBAL, SHOW_ACCESS) \ + M(SHOW_ROLES, "SHOW CREATE ROLE", GLOBAL, SHOW_ACCESS) \ + M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", GLOBAL, SHOW_ACCESS) \ + M(SHOW_QUOTAS, "SHOW CREATE QUOTA", GLOBAL, SHOW_ACCESS) \ + M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \ + M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \ + M(ACCESS_MANAGEMENT, "", GROUP, ALL) \ + \ + M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \ + M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_MARK_CACHE, "SYSTEM DROP MARK, DROP MARK CACHE, DROP MARKS", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_UNCOMPRESSED_CACHE, "SYSTEM DROP UNCOMPRESSED, DROP UNCOMPRESSED CACHE, DROP UNCOMPRESSED", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \ + M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \ + M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \ + M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\ + M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \ + M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP_MERGES, START MERGES", TABLE, SYSTEM) \ + M(SYSTEM_TTL_MERGES, "SYSTEM STOP TTL MERGES, SYSTEM START TTL MERGES, STOP TTL MERGES, START TTL MERGES", TABLE, SYSTEM) \ + 
M(SYSTEM_FETCHES, "SYSTEM STOP FETCHES, SYSTEM START FETCHES, STOP FETCHES, START FETCHES", TABLE, SYSTEM) \ + M(SYSTEM_MOVES, "SYSTEM STOP MOVES, SYSTEM START MOVES, STOP MOVES, START MOVES", TABLE, SYSTEM) \ + M(SYSTEM_DISTRIBUTED_SENDS, "SYSTEM STOP DISTRIBUTED SENDS, SYSTEM START DISTRIBUTED SENDS, STOP DISTRIBUTED SENDS, START DISTRIBUTED SENDS", TABLE, SYSTEM_SENDS) \ + M(SYSTEM_REPLICATED_SENDS, "SYSTEM STOP REPLICATED SENDS, SYSTEM START REPLICATED SENDS, STOP_REPLICATED_SENDS, START REPLICATED SENDS", TABLE, SYSTEM_SENDS) \ + M(SYSTEM_SENDS, "SYSTEM STOP SENDS, SYSTEM START SENDS, STOP SENDS, START SENDS", GROUP, SYSTEM) \ + M(SYSTEM_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES, STOP_REPLICATION_QUEUES, START REPLICATION QUEUES", TABLE, SYSTEM) \ + M(SYSTEM_SYNC_REPLICA, "SYNC REPLICA", TABLE, SYSTEM) \ + M(SYSTEM_RESTART_REPLICA, "RESTART REPLICA", TABLE, SYSTEM) \ + M(SYSTEM_FLUSH_DISTRIBUTED, "FLUSH DISTRIBUTED", TABLE, SYSTEM_FLUSH) \ + M(SYSTEM_FLUSH_LOGS, "FLUSH LOGS", GLOBAL, SYSTEM_FLUSH) \ + M(SYSTEM_FLUSH, "", GROUP, SYSTEM) \ + M(SYSTEM, "", GROUP, ALL) /* allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} */ \ + \ + M(dictGet, "dictHas, dictGetHierarchy, dictIsIn", DICTIONARY, ALL) /* allows to execute functions dictGet(), dictHas(), dictGetHierarchy(), dictIsIn() */\ + \ + M(addressToLine, "", GLOBAL, INTROSPECTION) /* allows to execute function addressToLine() */\ + M(addressToSymbol, "", GLOBAL, INTROSPECTION) /* allows to execute function addressToSymbol() */\ + M(demangle, "", GLOBAL, INTROSPECTION) /* allows to execute function demangle() */\ + M(INTROSPECTION, "INTROSPECTION FUNCTIONS", GROUP, ALL) /* allows to execute functions addressToLine(), addressToSymbol(), demangle()*/\ + \ + M(FILE, "", GLOBAL, SOURCES) \ + M(URL, "", GLOBAL, SOURCES) \ + M(REMOTE, "", GLOBAL, SOURCES) \ + M(MYSQL, "", GLOBAL, SOURCES) \ + M(ODBC, "", GLOBAL, SOURCES) \ + M(JDBC, "", GLOBAL, SOURCES) \ + M(HDFS, "", GLOBAL, SOURCES) \ + M(S3, "", GLOBAL, SOURCES) \ + M(SOURCES, "", GROUP, ALL) \ + \ + M(ALL, "ALL PRIVILEGES", GROUP, NONE) /* full access */ \ + M(NONE, "USAGE, NO PRIVILEGES", GROUP, NONE) /* no access */ + +#define DECLARE_ACCESS_TYPE_ENUM_CONST(name, aliases, node_type, parent_group_name) \ + name, + + APPLY_FOR_ACCESS_TYPES(DECLARE_ACCESS_TYPE_ENUM_CONST) +#undef DECLARE_ACCESS_TYPE_ENUM_CONST +}; + +std::string_view toString(AccessType type); + + +namespace impl +{ + template <typename = void> + class AccessTypeToKeywordConverter + { + public: + static const AccessTypeToKeywordConverter & instance() + { + static const AccessTypeToKeywordConverter res; + return res; + } + + std::string_view convert(AccessType type) const + { + return access_type_to_keyword_mapping[static_cast<size_t>(type)]; + } + + private: + AccessTypeToKeywordConverter() + { +#define INSERT_ACCESS_TYPE_KEYWORD_PAIR_TO_MAPPING(name, aliases, node_type, parent_group_name) \ + insertToMapping(AccessType::name, #name); + + APPLY_FOR_ACCESS_TYPES(INSERT_ACCESS_TYPE_KEYWORD_PAIR_TO_MAPPING) + +#undef INSERT_ACCESS_TYPE_KEYWORD_PAIR_TO_MAPPING + } + + void insertToMapping(AccessType type, const std::string_view & str) + { + String str2{str}; + boost::replace_all(str2, "_", " "); + size_t index = static_cast<size_t>(type); + access_type_to_keyword_mapping.resize(std::max(index + 1, access_type_to_keyword_mapping.size())); + access_type_to_keyword_mapping[index] = str2; + } + + Strings access_type_to_keyword_mapping; + }; +} + +inline std::string_view toKeyword(AccessType type) { return
impl::AccessTypeToKeywordConverter<>::instance().convert(type); } + +} diff --git a/dbms/src/Access/AllowedClientHosts.cpp b/src/Access/AllowedClientHosts.cpp similarity index 100% rename from dbms/src/Access/AllowedClientHosts.cpp rename to src/Access/AllowedClientHosts.cpp diff --git a/dbms/src/Access/AllowedClientHosts.h b/src/Access/AllowedClientHosts.h similarity index 98% rename from dbms/src/Access/AllowedClientHosts.h rename to src/Access/AllowedClientHosts.h index 9e89c2b92a1..2baafb2e04a 100644 --- a/dbms/src/Access/AllowedClientHosts.h +++ b/src/Access/AllowedClientHosts.h @@ -91,7 +91,7 @@ public: /// Allows IP addresses or host names using LIKE pattern. /// This pattern can contain % and _ wildcard characters. - /// For example, addLikePattern("@") will allow all addresses. + /// For example, addLikePattern("%") will allow all addresses. void addLikePattern(const String & pattern); void removeLikePattern(const String & like_pattern); const std::vector<String> & getLikePatterns() const { return like_patterns; } @@ -298,7 +298,7 @@ inline void AllowedClientHosts::addLikePattern(const String & pattern) { if (boost::iequals(pattern, "localhost") || (pattern == "127.0.0.1") || (pattern == "::1")) local_host = true; - else if ((pattern == "@") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) + else if ((pattern == "%") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) any_host = true; else if (boost::range::find(like_patterns, pattern) == like_patterns.end()) like_patterns.push_back(pattern); @@ -308,7 +308,7 @@ inline void AllowedClientHosts::removeLikePattern(const String & pattern) { if (boost::iequals(pattern, "localhost") || (pattern == "127.0.0.1") || (pattern == "::1")) local_host = false; - else if ((pattern == "@") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) + else if ((pattern == "%") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) any_host = false; else boost::range::remove_erase(like_patterns, pattern); diff --git a/dbms/src/Access/Authentication.cpp b/src/Access/Authentication.cpp similarity index 100% rename from dbms/src/Access/Authentication.cpp rename to src/Access/Authentication.cpp diff --git a/dbms/src/Access/Authentication.h b/src/Access/Authentication.h similarity index 100% rename from dbms/src/Access/Authentication.h rename to src/Access/Authentication.h diff --git a/dbms/src/Access/CMakeLists.txt b/src/Access/CMakeLists.txt similarity index 100% rename from dbms/src/Access/CMakeLists.txt rename to src/Access/CMakeLists.txt diff --git a/dbms/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp similarity index 93% rename from dbms/src/Access/ContextAccess.cpp rename to src/Access/ContextAccess.cpp index 4c690956358..915593f58f0 100644 --- a/dbms/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -196,7 +196,7 @@ bool ContextAccess::isClientHostAllowed() const template <bool grant_option, typename... Args> -bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const +bool ContextAccess::calculateResultAccessAndCheck(Poco::Logger * log_, const AccessFlags & flags, const Args &...
diff --git a/dbms/src/Access/Authentication.cpp b/src/Access/Authentication.cpp
similarity index 100%
rename from dbms/src/Access/Authentication.cpp
rename to src/Access/Authentication.cpp
diff --git a/dbms/src/Access/Authentication.h b/src/Access/Authentication.h
similarity index 100%
rename from dbms/src/Access/Authentication.h
rename to src/Access/Authentication.h
diff --git a/dbms/src/Access/CMakeLists.txt b/src/Access/CMakeLists.txt
similarity index 100%
rename from dbms/src/Access/CMakeLists.txt
rename to src/Access/CMakeLists.txt
diff --git a/dbms/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp
similarity index 93%
rename from dbms/src/Access/ContextAccess.cpp
rename to src/Access/ContextAccess.cpp
index 4c690956358..915593f58f0 100644
--- a/dbms/src/Access/ContextAccess.cpp
+++ b/src/Access/ContextAccess.cpp
@@ -196,7 +196,7 @@ bool ContextAccess::isClientHostAllowed() const
 
 template <bool grant_option, typename... Args>
-bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const
+bool ContextAccess::calculateResultAccessAndCheck(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const
 {
     auto access = calculateResultAccess(grant_option);
     bool is_granted = access->isGranted(flags, args...);
@@ -267,6 +267,22 @@ bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & fla
 }
 
+template <bool grant_option>
+bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags) const
+{
+    return calculateResultAccessAndCheck(log_, flags);
+}
+
+template <bool grant_option, typename... Args>
+bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const std::string_view & database, const Args &... args) const
+{
+    if (database.empty())
+        return calculateResultAccessAndCheck(log_, flags, params.current_database, args...);
+    else
+        return calculateResultAccessAndCheck(log_, flags, database, args...);
+}
+
+
 template <bool grant_option>
 bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessRightsElement & element) const
 {
@@ -276,24 +292,15 @@ bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessRightsEleme
     }
     else if (element.any_table)
     {
-        if (element.database.empty())
-            return checkAccessImpl(log_, element.access_flags, params.current_database);
-        else
-            return checkAccessImpl(log_, element.access_flags, element.database);
+        return checkAccessImpl(log_, element.access_flags, element.database);
     }
     else if (element.any_column)
     {
-        if (element.database.empty())
-            return checkAccessImpl(log_, element.access_flags, params.current_database, element.table);
-        else
-            return checkAccessImpl(log_, element.access_flags, element.database, element.table);
+        return checkAccessImpl(log_, element.access_flags, element.database, element.table);
     }
     else
     {
-        if (element.database.empty())
-            return checkAccessImpl(log_, element.access_flags, params.current_database, element.table, element.columns);
-        else
-            return checkAccessImpl(log_, element.access_flags, element.database, element.table, element.columns);
+        return checkAccessImpl(log_, element.access_flags, element.database, element.table, element.columns);
     }
 }
@@ -404,23 +411,20 @@ boost::shared_ptr<const AccessRights> ContextAccess::calculateResultAccess(bool
     static const AccessFlags table_ddl = AccessType::CREATE_DATABASE | AccessType::CREATE_TABLE | AccessType::CREATE_VIEW | AccessType::ALTER_TABLE | AccessType::ALTER_VIEW | AccessType::DROP_DATABASE | AccessType::DROP_TABLE | AccessType::DROP_VIEW | AccessType::TRUNCATE;
+
     static const AccessFlags dictionary_ddl = AccessType::CREATE_DICTIONARY | AccessType::DROP_DICTIONARY;
     static const AccessFlags table_and_dictionary_ddl = table_ddl | dictionary_ddl;
     static const AccessFlags write_table_access = AccessType::INSERT | AccessType::OPTIMIZE;
-    static const AccessFlags all_dcl = AccessType::CREATE_USER | AccessType::CREATE_ROLE | AccessType::CREATE_POLICY
-        | AccessType::CREATE_QUOTA | AccessType::CREATE_SETTINGS_PROFILE | AccessType::ALTER_USER | AccessType::ALTER_ROLE
-        | AccessType::ALTER_POLICY | AccessType::ALTER_QUOTA | AccessType::ALTER_SETTINGS_PROFILE | AccessType::DROP_USER
-        | AccessType::DROP_ROLE | AccessType::DROP_POLICY | AccessType::DROP_QUOTA | AccessType::DROP_SETTINGS_PROFILE
-        | AccessType::ROLE_ADMIN;
+    static const AccessFlags write_dcl_access = AccessType::ACCESS_MANAGEMENT - AccessType::SHOW_ACCESS;
 
     if (readonly_)
-        merged_access->revoke(write_table_access | all_dcl | table_and_dictionary_ddl | AccessType::SYSTEM | AccessType::KILL_QUERY);
+        merged_access->revoke(write_table_access | table_and_dictionary_ddl | write_dcl_access | AccessType::SYSTEM | AccessType::KILL_QUERY);
 
     if (readonly_ == 1)
     {
         /// Table functions are forbidden in readonly mode.
         /// For example, for readonly = 2 - allowed.
-        merged_access->revoke(AccessType::CREATE_TEMPORARY_TABLE | AccessType::TABLE_FUNCTIONS);
+        merged_access->revoke(AccessType::CREATE_TEMPORARY_TABLE);
     }
 
     if (!allow_ddl_ && !grant_option)
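The new `checkAccessImpl` overload above centralizes a pattern that was previously duplicated at three call sites: when the access-rights element names no database, substitute the session's current database exactly once, at the entry point. A condensed standalone sketch of that defaulting rule (types and names simplified; not the actual ClickHouse API):

    #include <iostream>
    #include <string>
    #include <string_view>

    // Stand-in for the real check. An empty database name in the request is
    // resolved against the session's current database in one place, so the
    // per-shape dispatch below it no longer needs the empty/non-empty branch.
    bool isGrantedOn(std::string_view database, std::string_view table)
    {
        std::cout << "checking " << database << "." << table << '\n';
        return true;
    }

    bool checkAccess(std::string_view database, std::string_view table,
                     const std::string & current_database)
    {
        if (database.empty())
            return isGrantedOn(current_database, table);
        return isGrantedOn(database, table);
    }

    int main()
    {
        checkAccess("", "events", "default");       // checking default.events
        checkAccess("system", "tables", "default"); // checking system.tables
    }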
diff --git a/dbms/src/Access/ContextAccess.h b/src/Access/ContextAccess.h
similarity index 96%
rename from dbms/src/Access/ContextAccess.h
rename to src/Access/ContextAccess.h
index bee63103793..e0fbf58dbe8 100644
--- a/dbms/src/Access/ContextAccess.h
+++ b/src/Access/ContextAccess.h
@@ -130,8 +130,11 @@ private:
     void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & roles_info_) const;
     void setSettingsAndConstraints() const;
 
+    template <bool grant_option>
+    bool checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags) const;
+
     template <bool grant_option, typename... Args>
-    bool checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const;
+    bool checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const std::string_view & database, const Args &... args) const;
 
     template <bool grant_option>
     bool checkAccessImpl(Poco::Logger * log_, const AccessRightsElement & element) const;
@@ -139,6 +142,9 @@ private:
     template <bool grant_option>
     bool checkAccessImpl(Poco::Logger * log_, const AccessRightsElements & elements) const;
 
+    template <bool grant_option, typename... Args>
+    bool calculateResultAccessAndCheck(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const;
+
     boost::shared_ptr<const AccessRights> calculateResultAccess(bool grant_option) const;
     boost::shared_ptr<const AccessRights> calculateResultAccess(bool grant_option, UInt64 readonly_, bool allow_ddl_, bool allow_introspection_) const;
diff --git a/dbms/src/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp
similarity index 99%
rename from dbms/src/Access/DiskAccessStorage.cpp
rename to src/Access/DiskAccessStorage.cpp
index 12c65e7df1e..d7ba8563f5a 100644
--- a/dbms/src/Access/DiskAccessStorage.cpp
+++ b/src/Access/DiskAccessStorage.cpp
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -93,7 +94,7 @@ namespace
         const char * end = begin + file_contents.size();
         while (pos < end)
         {
-            queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0));
+            queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
             while (isWhitespaceASCII(*pos) || *pos == ';')
                 ++pos;
         }
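The extra `DBMS_DEFAULT_MAX_PARSER_DEPTH` argument threads a recursion-depth cap through the parser, so pathologically nested input fails with a clean error instead of overflowing the stack. A minimal sketch of the guard technique (hypothetical parser, not the ClickHouse one):

    #include <stdexcept>
    #include <string>

    // Depth guard in the spirit of DBMS_DEFAULT_MAX_PARSER_DEPTH: every
    // recursive-descent step checks the remaining budget before descending.
    struct ParseError : std::runtime_error { using std::runtime_error::runtime_error; };

    void parseExpression(const char *& pos, const char * end, size_t depth, size_t max_depth)
    {
        if (depth > max_depth)
            throw ParseError("Maximum parse depth exceeded");
        if (pos < end && *pos == '(')
        {
            ++pos;                                           // consume '('
            parseExpression(pos, end, depth + 1, max_depth); // nested expression
            if (pos < end && *pos == ')')
                ++pos;                                       // consume ')'
        }
        // (a real parser would handle atoms, operators, etc. here)
    }

    int main()
    {
        std::string s(100000, '(');  // pathological input
        const char * pos = s.data();
        try { parseExpression(pos, pos + s.size(), 0, 1000); }
        catch (const ParseError &) { /* fails cleanly instead of crashing */ }
    }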
src/Access/EnabledRolesInfo.h
diff --git a/dbms/src/Access/DiskAccessStorage.h b/src/Access/DiskAccessStorage.h
similarity index 100%
rename from dbms/src/Access/DiskAccessStorage.h
rename to src/Access/DiskAccessStorage.h
diff --git a/dbms/src/Access/EnabledQuota.cpp b/src/Access/EnabledQuota.cpp
similarity index 100%
rename from dbms/src/Access/EnabledQuota.cpp
rename to src/Access/EnabledQuota.cpp
diff --git a/dbms/src/Access/EnabledQuota.h b/src/Access/EnabledQuota.h
similarity index 100%
rename from dbms/src/Access/EnabledQuota.h
rename to src/Access/EnabledQuota.h
diff --git a/dbms/src/Access/EnabledRoles.cpp b/src/Access/EnabledRoles.cpp
similarity index 100%
rename from dbms/src/Access/EnabledRoles.cpp
rename to src/Access/EnabledRoles.cpp
diff --git a/dbms/src/Access/EnabledRoles.h b/src/Access/EnabledRoles.h
similarity index 100%
rename from dbms/src/Access/EnabledRoles.h
rename to src/Access/EnabledRoles.h
diff --git a/dbms/src/Access/EnabledRolesInfo.cpp b/src/Access/EnabledRolesInfo.cpp
similarity index 100%
rename from dbms/src/Access/EnabledRolesInfo.cpp
rename to src/Access/EnabledRolesInfo.cpp
diff --git a/src/Access/EnabledRowPolicies.cpp b/src/Access/EnabledRowPolicies.cpp
new file mode 100644
index 00000000000..56c73aaf40d
--- /dev/null
+++ b/src/Access/EnabledRowPolicies.cpp
@@ -0,0 +1,71 @@
+#include
+#include
+#include
+#include
+
+
+namespace DB
+{
+size_t EnabledRowPolicies::Hash::operator()(const DatabaseAndTableNameRef & database_and_table_name) const
+{
+    return std::hash<std::string_view>{}(database_and_table_name.first) - std::hash<std::string_view>{}(database_and_table_name.second);
+}
+
+
+EnabledRowPolicies::EnabledRowPolicies(const Params & params_)
+    : params(params_)
+{
+}
+
+EnabledRowPolicies::~EnabledRowPolicies() = default;
+
+
+ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType type) const
+{
+    /// We don't lock `mutex` here.
+    auto loaded = map_of_mixed_conditions.load();
+    auto it = loaded->find({database, table_name});
+    if (it == loaded->end())
+        return {};
+    return it->second.mixed_conditions[type];
+}
+
+
+ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType type, const ASTPtr & extra_condition) const
+{
+    ASTPtr condition = getCondition(database, table_name, type);
+    if (condition && extra_condition)
+        condition = makeASTForLogicalAnd({condition, extra_condition});
+    else if (!condition)
+        condition = extra_condition;
+
+    bool value;
+    if (tryGetLiteralBool(condition.get(), value) && value)
+        condition = nullptr;  /// The condition is always true, no need to check it.
+
+    return condition;
+}
+
+
+std::vector<UUID> EnabledRowPolicies::getCurrentPolicyIDs() const
+{
+    /// We don't lock `mutex` here.
+    auto loaded = map_of_mixed_conditions.load();
+    std::vector<UUID> policy_ids;
+    for (const auto & mixed_conditions : *loaded | boost::adaptors::map_values)
+        boost::range::copy(mixed_conditions.policy_ids, std::back_inserter(policy_ids));
+    return policy_ids;
+}
+
+
+std::vector<UUID> EnabledRowPolicies::getCurrentPolicyIDs(const String & database, const String & table_name) const
+{
+    /// We don't lock `mutex` here.
+    auto loaded = map_of_mixed_conditions.load();
+    auto it = loaded->find({database, table_name});
+    if (it == loaded->end())
+        return {};
+    return it->second.policy_ids;
+}
+
+}
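`map_of_mixed_conditions.load()` above reads an atomically published snapshot, which is why the query path takes no mutex: the cache builds a complete replacement map and swaps the pointer in one step, while readers keep using whichever snapshot they already loaded. A compact sketch of that read-copy-update pattern using `std::shared_ptr` (illustrative only; the patch uses its own atomic holder type):

    #include <map>
    #include <memory>
    #include <string>

    // Readers call snapshot() with no lock; the writer builds a whole new map
    // and swaps the pointer atomically. Snapshots held by in-flight readers
    // stay alive until their last shared_ptr is dropped.
    class ConditionMap
    {
        using Map = std::map<std::string, std::string>;

    public:
        std::shared_ptr<const Map> snapshot() const
        {
            return std::atomic_load(&current);
        }

        void publish(Map fresh)
        {
            auto next = std::make_shared<const Map>(std::move(fresh));
            std::atomic_store(&current, std::shared_ptr<const Map>(next));
        }

    private:
        std::shared_ptr<const Map> current = std::make_shared<const Map>();
    };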
diff --git a/dbms/src/Access/EnabledRowPolicies.h b/src/Access/EnabledRowPolicies.h
similarity index 100%
rename from dbms/src/Access/EnabledRowPolicies.h
rename to src/Access/EnabledRowPolicies.h
diff --git a/dbms/src/Access/EnabledSettings.cpp b/src/Access/EnabledSettings.cpp
similarity index 100%
rename from dbms/src/Access/EnabledSettings.cpp
rename to src/Access/EnabledSettings.cpp
diff --git a/dbms/src/Access/EnabledSettings.h b/src/Access/EnabledSettings.h
similarity index 100%
rename from dbms/src/Access/EnabledSettings.h
rename to src/Access/EnabledSettings.h
diff --git a/dbms/src/Access/ExtendedRoleSet.cpp b/src/Access/ExtendedRoleSet.cpp
similarity index 95%
rename from dbms/src/Access/ExtendedRoleSet.cpp
rename to src/Access/ExtendedRoleSet.cpp
index b59dc7ac232..eed475bc3cc 100644
--- a/dbms/src/Access/ExtendedRoleSet.cpp
+++ b/src/Access/ExtendedRoleSet.cpp
@@ -51,25 +51,25 @@ ExtendedRoleSet::ExtendedRoleSet(const boost::container::flat_set<UUID> & ids_)
 
 ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast)
 {
-    init(ast, nullptr, nullptr);
+    init(ast, nullptr);
 }
 
-ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const UUID & current_user_id)
+ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const std::optional<UUID> & current_user_id)
 {
-    init(ast, nullptr, &current_user_id);
+    init(ast, nullptr, current_user_id);
 }
 
 ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager)
 {
-    init(ast, &manager, nullptr);
+    init(ast, &manager);
 }
 
-ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const UUID & current_user_id)
+ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const std::optional<UUID> & current_user_id)
 {
-    init(ast, &manager, &current_user_id);
+    init(ast, &manager, current_user_id);
 }
 
-void ExtendedRoleSet::init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager, const UUID * current_user_id)
+void ExtendedRoleSet::init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager, const std::optional<UUID> & current_user_id)
 {
     all = ast.all;
diff --git a/dbms/src/Access/ExtendedRoleSet.h b/src/Access/ExtendedRoleSet.h
similarity index 91%
rename from dbms/src/Access/ExtendedRoleSet.h
rename to src/Access/ExtendedRoleSet.h
index 61a4db6e0ae..486b4277337 100644
--- a/dbms/src/Access/ExtendedRoleSet.h
+++ b/src/Access/ExtendedRoleSet.h
@@ -32,9 +32,9 @@ struct ExtendedRoleSet
     /// The constructor from AST requires the AccessControlManager if `ast.id_mode == false`.
     ExtendedRoleSet(const ASTExtendedRoleSet & ast);
-    ExtendedRoleSet(const ASTExtendedRoleSet & ast, const UUID & current_user_id);
+    ExtendedRoleSet(const ASTExtendedRoleSet & ast, const std::optional<UUID> & current_user_id);
     ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager);
-    ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const UUID & current_user_id);
+    ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const std::optional<UUID> & current_user_id);
 
     std::shared_ptr<ASTExtendedRoleSet> toAST() const;
     String toString() const;
@@ -69,7 +69,7 @@ struct ExtendedRoleSet
     boost::container::flat_set<UUID> except_ids;
 private:
-    void init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager = nullptr, const UUID * current_user_id = nullptr);
+    void init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager = nullptr, const std::optional<UUID> & current_user_id = {});
 };
 
 }
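Replacing the `const UUID *` parameter and its paired overloads with `std::optional<UUID>` lets one signature express "no current user" without pointer juggling or taking the address of a temporary. A tiny illustration of the refactor's shape (hypothetical function, not the patch's API):

    #include <iostream>
    #include <optional>

    // Before: callers passed either &id or nullptr.
    // After: std::optional carries the same "maybe absent" meaning by value,
    // and a defaulted argument covers the "absent" case.
    void applyRoles(int role_set, const std::optional<int> & current_user_id = {})
    {
        if (current_user_id)
            std::cout << "resolving CURRENT_USER to " << *current_user_id << '\n';
        std::cout << "applying role set " << role_set << '\n';
    }

    int main()
    {
        applyRoles(1);      // no current user
        applyRoles(2, 42);  // implicit conversion int -> optional<int>
    }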
diff --git a/dbms/src/Access/IAccessEntity.cpp b/src/Access/IAccessEntity.cpp
similarity index 100%
rename from dbms/src/Access/IAccessEntity.cpp
rename to src/Access/IAccessEntity.cpp
diff --git a/dbms/src/Access/IAccessEntity.h b/src/Access/IAccessEntity.h
similarity index 100%
rename from dbms/src/Access/IAccessEntity.h
rename to src/Access/IAccessEntity.h
diff --git a/dbms/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp
similarity index 100%
rename from dbms/src/Access/IAccessStorage.cpp
rename to src/Access/IAccessStorage.cpp
diff --git a/dbms/src/Access/IAccessStorage.h b/src/Access/IAccessStorage.h
similarity index 100%
rename from dbms/src/Access/IAccessStorage.h
rename to src/Access/IAccessStorage.h
diff --git a/dbms/src/Access/MemoryAccessStorage.cpp b/src/Access/MemoryAccessStorage.cpp
similarity index 100%
rename from dbms/src/Access/MemoryAccessStorage.cpp
rename to src/Access/MemoryAccessStorage.cpp
diff --git a/dbms/src/Access/MemoryAccessStorage.h b/src/Access/MemoryAccessStorage.h
similarity index 100%
rename from dbms/src/Access/MemoryAccessStorage.h
rename to src/Access/MemoryAccessStorage.h
diff --git a/dbms/src/Access/MultipleAccessStorage.cpp b/src/Access/MultipleAccessStorage.cpp
similarity index 100%
rename from dbms/src/Access/MultipleAccessStorage.cpp
rename to src/Access/MultipleAccessStorage.cpp
diff --git a/dbms/src/Access/MultipleAccessStorage.h b/src/Access/MultipleAccessStorage.h
similarity index 100%
rename from dbms/src/Access/MultipleAccessStorage.h
rename to src/Access/MultipleAccessStorage.h
diff --git a/dbms/src/Access/Quota.cpp b/src/Access/Quota.cpp
similarity index 100%
rename from dbms/src/Access/Quota.cpp
rename to src/Access/Quota.cpp
diff --git a/dbms/src/Access/Quota.h b/src/Access/Quota.h
similarity index 100%
rename from dbms/src/Access/Quota.h
rename to src/Access/Quota.h
diff --git a/dbms/src/Access/QuotaCache.cpp b/src/Access/QuotaCache.cpp
similarity index 98%
rename from dbms/src/Access/QuotaCache.cpp
rename to src/Access/QuotaCache.cpp
index 6db3eb66c5d..2cfffc7f667 100644
--- a/dbms/src/Access/QuotaCache.cpp
+++ b/src/Access/QuotaCache.cpp
@@ -167,7 +167,12 @@ QuotaCache::QuotaCache(const AccessControlManager & access_control_manager_)
 
 QuotaCache::~QuotaCache() = default;
 
-std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(const UUID & user_id, const String & user_name, const std::vector<UUID> & enabled_roles, const Poco::Net::IPAddress & client_address, const String & client_key)
+std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(
+    const UUID & user_id,
+    const String & user_name,
+    const std::vector<UUID> & enabled_roles,
+    const Poco::Net::IPAddress & client_address,
+    const String & client_key)
 {
     std::lock_guard lock{mutex};
     ensureAllQuotasRead();
diff --git a/dbms/src/Access/QuotaCache.h b/src/Access/QuotaCache.h
similarity index 87%
rename from dbms/src/Access/QuotaCache.h
rename to src/Access/QuotaCache.h
index 81734f385c1..8399c5f73eb 100644
--- a/dbms/src/Access/QuotaCache.h
+++ b/src/Access/QuotaCache.h
@@ -20,7 +20,13 @@ public:
     QuotaCache(const AccessControlManager & access_control_manager_);
     ~QuotaCache();
 
-    std::shared_ptr<const EnabledQuota> getEnabledQuota(const UUID & user_id, const String & user_name, const std::vector<UUID> & enabled_roles, const Poco::Net::IPAddress & address, const String & client_key);
+    std::shared_ptr<const EnabledQuota> getEnabledQuota(
+        const UUID & user_id,
+        const String & user_name,
+        const std::vector<UUID> & enabled_roles,
+        const Poco::Net::IPAddress & address,
+        const String & client_key);
+
     std::vector<QuotaUsageInfo> getUsageInfo() const;
 
 private:
diff --git a/dbms/src/Access/QuotaUsageInfo.cpp b/src/Access/QuotaUsageInfo.cpp
similarity index 100%
rename from dbms/src/Access/QuotaUsageInfo.cpp
rename to src/Access/QuotaUsageInfo.cpp
diff --git a/dbms/src/Access/QuotaUsageInfo.h b/src/Access/QuotaUsageInfo.h
similarity index 100%
rename from dbms/src/Access/QuotaUsageInfo.h
rename to src/Access/QuotaUsageInfo.h
diff --git a/dbms/src/Access/Role.cpp b/src/Access/Role.cpp
similarity index 100%
rename from dbms/src/Access/Role.cpp
rename to src/Access/Role.cpp
diff --git a/dbms/src/Access/Role.h b/src/Access/Role.h
similarity index 100%
rename from dbms/src/Access/Role.h
rename to src/Access/Role.h
diff --git a/dbms/src/Access/RoleCache.cpp b/src/Access/RoleCache.cpp
similarity index 100%
rename from dbms/src/Access/RoleCache.cpp
rename to src/Access/RoleCache.cpp
diff --git a/dbms/src/Access/RoleCache.h b/src/Access/RoleCache.h
similarity index 100%
rename from dbms/src/Access/RoleCache.h
rename to src/Access/RoleCache.h
diff --git a/dbms/src/Access/RowPolicy.cpp b/src/Access/RowPolicy.cpp
similarity index 100%
rename from dbms/src/Access/RowPolicy.cpp
rename to src/Access/RowPolicy.cpp
diff --git a/dbms/src/Access/RowPolicy.h b/src/Access/RowPolicy.h
similarity index 100%
rename from dbms/src/Access/RowPolicy.h
rename to src/Access/RowPolicy.h
diff --git a/src/Access/RowPolicyCache.cpp b/src/Access/RowPolicyCache.cpp
new file mode 100644
index 00000000000..fc67b7e9b86
--- /dev/null
+++ b/src/Access/RowPolicyCache.cpp
@@ -0,0 +1,240 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace DB
+{
+namespace
+{
+    using ConditionType = RowPolicy::ConditionType;
+    constexpr size_t MAX_CONDITION_TYPE = RowPolicy::MAX_CONDITION_TYPE;
+
+
+    /// Accumulates conditions from multiple row policies and joins them using the AND logical operation.
+    class ConditionsMixer
+    {
+    public:
+        void add(const ASTPtr & condition, bool is_restrictive)
+        {
+            if (is_restrictive)
+                restrictions.push_back(condition);
+            else
+                permissions.push_back(condition);
+        }
+
+        ASTPtr getResult() &&
+        {
+            /// Process permissive conditions.
+            restrictions.push_back(makeASTForLogicalOr(std::move(permissions)));
+
+            /// Process restrictive conditions.
+            auto condition = makeASTForLogicalAnd(std::move(restrictions));
+
+            bool value;
+            if (tryGetLiteralBool(condition.get(), value) && value)
+                condition = nullptr;  /// The condition is always true, no need to check it.
+
+            return condition;
+        }
+
+    private:
+        ASTs permissions;
+        ASTs restrictions;
+    };
+}
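The mixer above encodes the row-policy semantics: permissive policies widen access, so their conditions combine with OR; restrictive policies narrow it, so they combine with AND on top; and a result that folds to a constant true is dropped so no filter is applied at all. A boolean sketch of the same combination rule (plain bools standing in for the ASTs):

    #include <iostream>
    #include <vector>

    // Same combination rule as ConditionsMixer, on plain bools:
    //   result = (p1 OR p2 OR ...) AND r1 AND r2 AND ...
    // With no permissive policies, the OR over an empty set is false,
    // i.e. nothing is permitted.
    bool mix(const std::vector<bool> & permissions, const std::vector<bool> & restrictions)
    {
        bool permitted = false;
        for (bool p : permissions)
            permitted = permitted || p;
        for (bool r : restrictions)
            permitted = permitted && r;
        return permitted;
    }

    int main()
    {
        std::cout << mix({true, false}, {true}) << '\n';  // 1: one permissive matched, restriction holds
        std::cout << mix({true}, {true, false}) << '\n';  // 0: a restrictive policy vetoes
    }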
+
+
+void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_)
+{
+    policy = policy_;
+    roles = &policy->to_roles;
+
+    for (auto type : ext::range_with_static_cast<ConditionType>(0, MAX_CONDITION_TYPE))
+    {
+        parsed_conditions[type] = nullptr;
+        const String & condition = policy->conditions[type];
+        if (condition.empty())
+            continue;
+
+        auto previous_range = std::pair(std::begin(policy->conditions), std::begin(policy->conditions) + type);
+        auto previous_it = std::find(previous_range.first, previous_range.second, condition);
+        if (previous_it != previous_range.second)
+        {
+            /// The condition is already parsed before.
+            parsed_conditions[type] = parsed_conditions[previous_it - previous_range.first];
+            continue;
+        }
+
+        /// Try to parse the condition.
+        try
+        {
+            ParserExpression parser;
+            parsed_conditions[type] = parseQuery(parser, condition, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
+        }
+        catch (...)
+        {
+            tryLogCurrentException(
+                &Poco::Logger::get("RowPolicy"),
+                String("Could not parse the condition ") + RowPolicy::conditionTypeToString(type) + " of row policy "
+                    + backQuote(policy->getFullName()));
+        }
+    }
+}
+
+
+RowPolicyCache::RowPolicyCache(const AccessControlManager & access_control_manager_)
+    : access_control_manager(access_control_manager_)
+{
+}
+
+RowPolicyCache::~RowPolicyCache() = default;
+
+
+std::shared_ptr<const EnabledRowPolicies> RowPolicyCache::getEnabledRowPolicies(const UUID & user_id, const std::vector<UUID> & enabled_roles)
+{
+    std::lock_guard lock{mutex};
+    ensureAllRowPoliciesRead();
+
+    EnabledRowPolicies::Params params;
+    params.user_id = user_id;
+    params.enabled_roles = enabled_roles;
+    auto it = enabled_row_policies.find(params);
+    if (it != enabled_row_policies.end())
+    {
+        auto from_cache = it->second.lock();
+        if (from_cache)
+            return from_cache;
+        enabled_row_policies.erase(it);
+    }
+
+    auto res = std::shared_ptr<EnabledRowPolicies>(new EnabledRowPolicies(params));
+    enabled_row_policies.emplace(std::move(params), res);
+    mixConditionsFor(*res);
+    return res;
+}
+
+
+void RowPolicyCache::ensureAllRowPoliciesRead()
+{
+    /// `mutex` is already locked.
+    if (all_policies_read)
+        return;
+    all_policies_read = true;
+
+    subscription = access_control_manager.subscribeForChanges<RowPolicy>(
+        [&](const UUID & id, const AccessEntityPtr & entity)
+        {
+            if (entity)
+                rowPolicyAddedOrChanged(id, typeid_cast<RowPolicyPtr>(entity));
+            else
+                rowPolicyRemoved(id);
+        });
+
+    for (const UUID & id : access_control_manager.findAll<RowPolicy>())
+    {
+        auto quota = access_control_manager.tryRead<RowPolicy>(id);
+        if (quota)
+            all_policies.emplace(id, PolicyInfo(quota));
+    }
+}
+
+
+void RowPolicyCache::rowPolicyAddedOrChanged(const UUID & policy_id, const RowPolicyPtr & new_policy)
+{
+    std::lock_guard lock{mutex};
+    auto it = all_policies.find(policy_id);
+    if (it == all_policies.end())
+    {
+        it = all_policies.emplace(policy_id, PolicyInfo(new_policy)).first;
+    }
+    else
+    {
+        if (it->second.policy == new_policy)
+            return;
+    }
+
+    auto & info = it->second;
+    info.setPolicy(new_policy);
+    mixConditions();
+}
+
+
+void RowPolicyCache::rowPolicyRemoved(const UUID & policy_id)
+{
+    std::lock_guard lock{mutex};
+    all_policies.erase(policy_id);
+    mixConditions();
+}
+
+
+void RowPolicyCache::mixConditions()
+{
+    /// `mutex` is already locked.
+    std::erase_if(
+        enabled_row_policies,
+        [&](const std::pair<EnabledRowPolicies::Params, std::weak_ptr<EnabledRowPolicies>> & pr)
+        {
+            auto elem = pr.second.lock();
+            if (!elem)
+                return true; // remove from the `enabled_row_policies` map.
+            mixConditionsFor(*elem);
+            return false; // keep in the `enabled_row_policies` map.
+        });
+}
+
+
+void RowPolicyCache::mixConditionsFor(EnabledRowPolicies & enabled)
+{
+    /// `mutex` is already locked.
+    struct Mixers
+    {
+        ConditionsMixer mixers[MAX_CONDITION_TYPE];
+        std::vector<UUID> policy_ids;
+    };
+    using MapOfMixedConditions = EnabledRowPolicies::MapOfMixedConditions;
+    using DatabaseAndTableName = EnabledRowPolicies::DatabaseAndTableName;
+    using DatabaseAndTableNameRef = EnabledRowPolicies::DatabaseAndTableNameRef;
+    using Hash = EnabledRowPolicies::Hash;
+
+    std::unordered_map<DatabaseAndTableName, Mixers, Hash> map_of_mixers;
+
+    for (const auto & [policy_id, info] : all_policies)
+    {
+        const auto & policy = *info.policy;
+        auto & mixers = map_of_mixers[std::pair{policy.getDatabase(), policy.getTableName()}];
+        if (info.roles->match(enabled.params.user_id, enabled.params.enabled_roles))
+        {
+            mixers.policy_ids.push_back(policy_id);
+            for (auto type : ext::range(0, MAX_CONDITION_TYPE))
+                if (info.parsed_conditions[type])
+                    mixers.mixers[type].add(info.parsed_conditions[type], policy.isRestrictive());
+        }
+    }
+
+    auto map_of_mixed_conditions = boost::make_shared<MapOfMixedConditions>();
+    for (auto & [database_and_table_name, mixers] : map_of_mixers)
+    {
+        auto database_and_table_name_keeper = std::make_unique<DatabaseAndTableName>();
+        database_and_table_name_keeper->first = database_and_table_name.first;
+        database_and_table_name_keeper->second = database_and_table_name.second;
+        auto & mixed_conditions = (*map_of_mixed_conditions)[DatabaseAndTableNameRef{database_and_table_name_keeper->first,
+                                                                                     database_and_table_name_keeper->second}];
+        mixed_conditions.database_and_table_name_keeper = std::move(database_and_table_name_keeper);
+        mixed_conditions.policy_ids = std::move(mixers.policy_ids);
+        for (auto type : ext::range(0, MAX_CONDITION_TYPE))
+            mixed_conditions.mixed_conditions[type] = std::move(mixers.mixers[type]).getResult();
+    }
+
+    enabled.map_of_mixed_conditions.store(map_of_mixed_conditions);
+}
+
+}
diff --git a/dbms/src/Access/RowPolicyCache.h b/src/Access/RowPolicyCache.h
similarity index 100%
rename from dbms/src/Access/RowPolicyCache.h
rename to src/Access/RowPolicyCache.h
diff --git a/dbms/src/Access/SettingsConstraints.cpp b/src/Access/SettingsConstraints.cpp
similarity index 100%
rename from dbms/src/Access/SettingsConstraints.cpp
rename to src/Access/SettingsConstraints.cpp
diff --git a/dbms/src/Access/SettingsConstraints.h b/src/Access/SettingsConstraints.h
similarity index 100%
rename from dbms/src/Access/SettingsConstraints.h
rename to src/Access/SettingsConstraints.h
diff --git a/dbms/src/Access/SettingsProfile.cpp b/src/Access/SettingsProfile.cpp
similarity index 100%
rename from dbms/src/Access/SettingsProfile.cpp
rename to src/Access/SettingsProfile.cpp
diff --git a/dbms/src/Access/SettingsProfile.h b/src/Access/SettingsProfile.h
similarity index 100%
rename from dbms/src/Access/SettingsProfile.h
rename to src/Access/SettingsProfile.h
diff --git a/dbms/src/Access/SettingsProfileElement.cpp b/src/Access/SettingsProfileElement.cpp
similarity index 100%
rename from dbms/src/Access/SettingsProfileElement.cpp
rename to src/Access/SettingsProfileElement.cpp
diff --git a/dbms/src/Access/SettingsProfileElement.h b/src/Access/SettingsProfileElement.h
similarity index 100%
rename from dbms/src/Access/SettingsProfileElement.h
rename to src/Access/SettingsProfileElement.h
diff --git a/dbms/src/Access/SettingsProfilesCache.cpp b/src/Access/SettingsProfilesCache.cpp
similarity index 100%
rename from dbms/src/Access/SettingsProfilesCache.cpp
rename to
src/Access/SettingsProfilesCache.cpp diff --git a/dbms/src/Access/SettingsProfilesCache.h b/src/Access/SettingsProfilesCache.h similarity index 100% rename from dbms/src/Access/SettingsProfilesCache.h rename to src/Access/SettingsProfilesCache.h diff --git a/dbms/src/Access/User.cpp b/src/Access/User.cpp similarity index 100% rename from dbms/src/Access/User.cpp rename to src/Access/User.cpp diff --git a/dbms/src/Access/User.h b/src/Access/User.h similarity index 100% rename from dbms/src/Access/User.h rename to src/Access/User.h diff --git a/dbms/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp similarity index 98% rename from dbms/src/Access/UsersConfigAccessStorage.cpp rename to src/Access/UsersConfigAccessStorage.cpp index 13102528108..0842839dec8 100644 --- a/dbms/src/Access/UsersConfigAccessStorage.cpp +++ b/src/Access/UsersConfigAccessStorage.cpp @@ -168,7 +168,14 @@ namespace user->access.grant(AccessFlags::allDictionaryFlags(), IDictionary::NO_DATABASE_TAG, dictionary); } - user->access_with_grant_option = user->access; + user->access_with_grant_option = user->access; /// By default the user can grant everything he has. + + bool access_management = config.getBool(user_config + ".access_management", false); + if (!access_management) + { + user->access.revoke(AccessType::ACCESS_MANAGEMENT); + user->access_with_grant_option.clear(); + } return user; } diff --git a/dbms/src/Access/UsersConfigAccessStorage.h b/src/Access/UsersConfigAccessStorage.h similarity index 100% rename from dbms/src/Access/UsersConfigAccessStorage.h rename to src/Access/UsersConfigAccessStorage.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAggThrow.cpp b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAggThrow.cpp rename to src/AggregateFunctions/AggregateFunctionAggThrow.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h b/src/AggregateFunctions/AggregateFunctionArgMinMax.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h rename to src/AggregateFunctions/AggregateFunctionArgMinMax.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp b/src/AggregateFunctions/AggregateFunctionArray.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionArray.cpp rename to src/AggregateFunctions/AggregateFunctionArray.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArray.h b/src/AggregateFunctions/AggregateFunctionArray.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionArray.h rename to src/AggregateFunctions/AggregateFunctionArray.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp b/src/AggregateFunctions/AggregateFunctionAvg.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp rename to src/AggregateFunctions/AggregateFunctionAvg.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAvg.h rename to src/AggregateFunctions/AggregateFunctionAvg.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp b/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp rename to src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp diff --git 
a/dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.h rename to src/AggregateFunctions/AggregateFunctionAvgWeighted.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp b/src/AggregateFunctions/AggregateFunctionBitwise.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp rename to src/AggregateFunctions/AggregateFunctionBitwise.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.h b/src/AggregateFunctions/AggregateFunctionBitwise.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBitwise.h rename to src/AggregateFunctions/AggregateFunctionBitwise.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp b/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp rename to src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.h b/src/AggregateFunctions/AggregateFunctionBoundingRatio.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.h rename to src/AggregateFunctions/AggregateFunctionBoundingRatio.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp b/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp rename to src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h b/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h rename to src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp rename to src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h rename to src/AggregateFunctions/AggregateFunctionCombinatorFactory.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCount.cpp b/src/AggregateFunctions/AggregateFunctionCount.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCount.cpp rename to src/AggregateFunctions/AggregateFunctionCount.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCount.h b/src/AggregateFunctions/AggregateFunctionCount.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCount.h rename to src/AggregateFunctions/AggregateFunctionCount.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionEntropy.cpp b/src/AggregateFunctions/AggregateFunctionEntropy.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionEntropy.cpp rename to src/AggregateFunctions/AggregateFunctionEntropy.cpp 
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionEntropy.h b/src/AggregateFunctions/AggregateFunctionEntropy.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionEntropy.h rename to src/AggregateFunctions/AggregateFunctionEntropy.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp b/src/AggregateFunctions/AggregateFunctionFactory.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp rename to src/AggregateFunctions/AggregateFunctionFactory.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.h b/src/AggregateFunctions/AggregateFunctionFactory.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionFactory.h rename to src/AggregateFunctions/AggregateFunctionFactory.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionForEach.cpp b/src/AggregateFunctions/AggregateFunctionForEach.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionForEach.cpp rename to src/AggregateFunctions/AggregateFunctionForEach.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionForEach.h b/src/AggregateFunctions/AggregateFunctionForEach.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionForEach.h rename to src/AggregateFunctions/AggregateFunctionForEach.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArray.cpp rename to src/AggregateFunctions/AggregateFunctionGroupArray.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h b/src/AggregateFunctions/AggregateFunctionGroupArray.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h rename to src/AggregateFunctions/AggregateFunctionGroupArray.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp rename to src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h b/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h rename to src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp rename to src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h rename to src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp b/src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp rename to src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.h 
b/src/AggregateFunctions/AggregateFunctionGroupBitmap.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.h rename to src/AggregateFunctions/AggregateFunctionGroupBitmap.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h rename to src/AggregateFunctions/AggregateFunctionGroupBitmapData.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp rename to src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h rename to src/AggregateFunctions/AggregateFunctionGroupUniqArray.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.cpp b/src/AggregateFunctions/AggregateFunctionHistogram.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionHistogram.cpp rename to src/AggregateFunctions/AggregateFunctionHistogram.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h b/src/AggregateFunctions/AggregateFunctionHistogram.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionHistogram.h rename to src/AggregateFunctions/AggregateFunctionHistogram.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionIf.cpp rename to src/AggregateFunctions/AggregateFunctionIf.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionIf.h b/src/AggregateFunctions/AggregateFunctionIf.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionIf.h rename to src/AggregateFunctions/AggregateFunctionIf.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp b/src/AggregateFunctions/AggregateFunctionMLMethod.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp rename to src/AggregateFunctions/AggregateFunctionMLMethod.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h b/src/AggregateFunctions/AggregateFunctionMLMethod.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h rename to src/AggregateFunctions/AggregateFunctionMLMethod.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp b/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp rename to src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h b/src/AggregateFunctions/AggregateFunctionMaxIntersections.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h rename to src/AggregateFunctions/AggregateFunctionMaxIntersections.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMerge.cpp b/src/AggregateFunctions/AggregateFunctionMerge.cpp similarity index 100% rename from 
dbms/src/AggregateFunctions/AggregateFunctionMerge.cpp rename to src/AggregateFunctions/AggregateFunctionMerge.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMerge.h b/src/AggregateFunctions/AggregateFunctionMerge.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionMerge.h rename to src/AggregateFunctions/AggregateFunctionMerge.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp b/src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp rename to src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h rename to src/AggregateFunctions/AggregateFunctionMinMaxAny.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNothing.h b/src/AggregateFunctions/AggregateFunctionNothing.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionNothing.h rename to src/AggregateFunctions/AggregateFunctionNothing.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp b/src/AggregateFunctions/AggregateFunctionNull.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionNull.cpp rename to src/AggregateFunctions/AggregateFunctionNull.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionNull.h rename to src/AggregateFunctions/AggregateFunctionNull.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionOrFill.cpp b/src/AggregateFunctions/AggregateFunctionOrFill.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionOrFill.cpp rename to src/AggregateFunctions/AggregateFunctionOrFill.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionOrFill.h b/src/AggregateFunctions/AggregateFunctionOrFill.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionOrFill.h rename to src/AggregateFunctions/AggregateFunctionOrFill.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp b/src/AggregateFunctions/AggregateFunctionQuantile.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp rename to src/AggregateFunctions/AggregateFunctionQuantile.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h b/src/AggregateFunctions/AggregateFunctionQuantile.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionQuantile.h rename to src/AggregateFunctions/AggregateFunctionQuantile.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionResample.cpp b/src/AggregateFunctions/AggregateFunctionResample.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionResample.cpp rename to src/AggregateFunctions/AggregateFunctionResample.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionResample.h b/src/AggregateFunctions/AggregateFunctionResample.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionResample.h rename to src/AggregateFunctions/AggregateFunctionResample.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionRetention.cpp b/src/AggregateFunctions/AggregateFunctionRetention.cpp similarity index 100% rename from 
dbms/src/AggregateFunctions/AggregateFunctionRetention.cpp rename to src/AggregateFunctions/AggregateFunctionRetention.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionRetention.h b/src/AggregateFunctions/AggregateFunctionRetention.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionRetention.h rename to src/AggregateFunctions/AggregateFunctionRetention.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp b/src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp rename to src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h b/src/AggregateFunctions/AggregateFunctionSequenceMatch.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h rename to src/AggregateFunctions/AggregateFunctionSequenceMatch.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp b/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp rename to src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h b/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h rename to src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionState.cpp b/src/AggregateFunctions/AggregateFunctionState.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionState.cpp rename to src/AggregateFunctions/AggregateFunctionState.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionState.h b/src/AggregateFunctions/AggregateFunctionState.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionState.h rename to src/AggregateFunctions/AggregateFunctionState.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatistics.cpp b/src/AggregateFunctions/AggregateFunctionStatistics.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionStatistics.cpp rename to src/AggregateFunctions/AggregateFunctionStatistics.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatistics.h b/src/AggregateFunctions/AggregateFunctionStatistics.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionStatistics.h rename to src/AggregateFunctions/AggregateFunctionStatistics.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp rename to src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h rename to src/AggregateFunctions/AggregateFunctionStatisticsSimple.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSum.cpp b/src/AggregateFunctions/AggregateFunctionSum.cpp similarity index 100% rename from 
dbms/src/AggregateFunctions/AggregateFunctionSum.cpp rename to src/AggregateFunctions/AggregateFunctionSum.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionSum.h rename to src/AggregateFunctions/AggregateFunctionSum.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.cpp b/src/AggregateFunctions/AggregateFunctionSumMap.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionSumMap.cpp rename to src/AggregateFunctions/AggregateFunctionSumMap.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h b/src/AggregateFunctions/AggregateFunctionSumMap.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionSumMap.h rename to src/AggregateFunctions/AggregateFunctionSumMap.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp rename to src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h rename to src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTopK.cpp b/src/AggregateFunctions/AggregateFunctionTopK.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionTopK.cpp rename to src/AggregateFunctions/AggregateFunctionTopK.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTopK.h b/src/AggregateFunctions/AggregateFunctionTopK.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionTopK.h rename to src/AggregateFunctions/AggregateFunctionTopK.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp b/src/AggregateFunctions/AggregateFunctionUniq.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp rename to src/AggregateFunctions/AggregateFunctionUniq.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionUniq.h rename to src/AggregateFunctions/AggregateFunctionUniq.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp b/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp rename to src/AggregateFunctions/AggregateFunctionUniqCombined.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/src/AggregateFunctions/AggregateFunctionUniqCombined.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.h rename to src/AggregateFunctions/AggregateFunctionUniqCombined.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp b/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp rename to src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h 
b/src/AggregateFunctions/AggregateFunctionUniqUpTo.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h rename to src/AggregateFunctions/AggregateFunctionUniqUpTo.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp b/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp rename to src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h b/src/AggregateFunctions/AggregateFunctionWindowFunnel.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h rename to src/AggregateFunctions/AggregateFunctionWindowFunnel.h diff --git a/dbms/src/AggregateFunctions/CMakeLists.txt b/src/AggregateFunctions/CMakeLists.txt similarity index 100% rename from dbms/src/AggregateFunctions/CMakeLists.txt rename to src/AggregateFunctions/CMakeLists.txt diff --git a/dbms/src/AggregateFunctions/FactoryHelpers.h b/src/AggregateFunctions/FactoryHelpers.h similarity index 100% rename from dbms/src/AggregateFunctions/FactoryHelpers.h rename to src/AggregateFunctions/FactoryHelpers.h diff --git a/dbms/src/AggregateFunctions/Helpers.h b/src/AggregateFunctions/Helpers.h similarity index 100% rename from dbms/src/AggregateFunctions/Helpers.h rename to src/AggregateFunctions/Helpers.h diff --git a/dbms/src/AggregateFunctions/HelpersMinMaxAny.h b/src/AggregateFunctions/HelpersMinMaxAny.h similarity index 100% rename from dbms/src/AggregateFunctions/HelpersMinMaxAny.h rename to src/AggregateFunctions/HelpersMinMaxAny.h diff --git a/dbms/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h similarity index 100% rename from dbms/src/AggregateFunctions/IAggregateFunction.h rename to src/AggregateFunctions/IAggregateFunction.h diff --git a/dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h b/src/AggregateFunctions/IAggregateFunctionCombinator.h similarity index 100% rename from dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h rename to src/AggregateFunctions/IAggregateFunctionCombinator.h diff --git a/dbms/src/AggregateFunctions/QuantileExact.h b/src/AggregateFunctions/QuantileExact.h similarity index 100% rename from dbms/src/AggregateFunctions/QuantileExact.h rename to src/AggregateFunctions/QuantileExact.h diff --git a/dbms/src/AggregateFunctions/QuantileExactWeighted.h b/src/AggregateFunctions/QuantileExactWeighted.h similarity index 100% rename from dbms/src/AggregateFunctions/QuantileExactWeighted.h rename to src/AggregateFunctions/QuantileExactWeighted.h diff --git a/dbms/src/AggregateFunctions/QuantileReservoirSampler.h b/src/AggregateFunctions/QuantileReservoirSampler.h similarity index 100% rename from dbms/src/AggregateFunctions/QuantileReservoirSampler.h rename to src/AggregateFunctions/QuantileReservoirSampler.h diff --git a/dbms/src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h b/src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h similarity index 100% rename from dbms/src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h rename to src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h diff --git a/dbms/src/AggregateFunctions/QuantileTDigest.h b/src/AggregateFunctions/QuantileTDigest.h similarity index 100% rename from dbms/src/AggregateFunctions/QuantileTDigest.h rename to src/AggregateFunctions/QuantileTDigest.h diff --git 
a/dbms/src/AggregateFunctions/QuantileTiming.h b/src/AggregateFunctions/QuantileTiming.h similarity index 100% rename from dbms/src/AggregateFunctions/QuantileTiming.h rename to src/AggregateFunctions/QuantileTiming.h diff --git a/dbms/src/AggregateFunctions/QuantilesCommon.h b/src/AggregateFunctions/QuantilesCommon.h similarity index 100% rename from dbms/src/AggregateFunctions/QuantilesCommon.h rename to src/AggregateFunctions/QuantilesCommon.h diff --git a/dbms/src/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h similarity index 100% rename from dbms/src/AggregateFunctions/ReservoirSampler.h rename to src/AggregateFunctions/ReservoirSampler.h diff --git a/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h similarity index 100% rename from dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h rename to src/AggregateFunctions/ReservoirSamplerDeterministic.h diff --git a/dbms/src/AggregateFunctions/UniqCombinedBiasData.cpp b/src/AggregateFunctions/UniqCombinedBiasData.cpp similarity index 100% rename from dbms/src/AggregateFunctions/UniqCombinedBiasData.cpp rename to src/AggregateFunctions/UniqCombinedBiasData.cpp diff --git a/dbms/src/AggregateFunctions/UniqCombinedBiasData.h b/src/AggregateFunctions/UniqCombinedBiasData.h similarity index 89% rename from dbms/src/AggregateFunctions/UniqCombinedBiasData.h rename to src/AggregateFunctions/UniqCombinedBiasData.h index 0a69a211206..2b009bfdfd8 100644 --- a/dbms/src/AggregateFunctions/UniqCombinedBiasData.h +++ b/src/AggregateFunctions/UniqCombinedBiasData.h @@ -8,11 +8,11 @@ namespace DB /** Data for HyperLogLogBiasEstimator in the uniqCombined function. * The development plan is as follows: * 1. Assemble ClickHouse. - * 2. Run the script src/dbms/scripts/gen-bias-data.py, which returns one array for getRawEstimates() + * 2. Run the script src/src/scripts/gen-bias-data.py, which returns one array for getRawEstimates() * and another array for getBiases(). * 3. Update `raw_estimates` and `biases` arrays. Also update the size of arrays in InterpolatedData. * 4. Assemble ClickHouse. - * 5. Run the script src/dbms/scripts/linear-counting-threshold.py, which creates 3 files: + * 5. 
Run the script src/src/scripts/linear-counting-threshold.py, which creates 3 files: * - raw_graph.txt (1st column: the present number of unique values; * 2nd column: relative error in the case of HyperLogLog without applying any corrections) * - linear_counting_graph.txt (1st column: the present number of unique values; diff --git a/dbms/src/AggregateFunctions/UniqVariadicHash.cpp b/src/AggregateFunctions/UniqVariadicHash.cpp similarity index 100% rename from dbms/src/AggregateFunctions/UniqVariadicHash.cpp rename to src/AggregateFunctions/UniqVariadicHash.cpp diff --git a/dbms/src/AggregateFunctions/UniqVariadicHash.h b/src/AggregateFunctions/UniqVariadicHash.h similarity index 100% rename from dbms/src/AggregateFunctions/UniqVariadicHash.h rename to src/AggregateFunctions/UniqVariadicHash.h diff --git a/dbms/src/AggregateFunctions/UniquesHashSet.h b/src/AggregateFunctions/UniquesHashSet.h similarity index 100% rename from dbms/src/AggregateFunctions/UniquesHashSet.h rename to src/AggregateFunctions/UniquesHashSet.h diff --git a/dbms/src/AggregateFunctions/parseAggregateFunctionParameters.cpp b/src/AggregateFunctions/parseAggregateFunctionParameters.cpp similarity index 97% rename from dbms/src/AggregateFunctions/parseAggregateFunctionParameters.cpp rename to src/AggregateFunctions/parseAggregateFunctionParameters.cpp index bcb73f1e9d9..2a6b9e3b499 100644 --- a/dbms/src/AggregateFunctions/parseAggregateFunctionParameters.cpp +++ b/src/AggregateFunctions/parseAggregateFunctionParameters.cpp @@ -2,6 +2,7 @@ #include #include #include +#include namespace DB @@ -65,7 +66,7 @@ void getAggregateFunctionNameAndParametersArray( ParserExpressionList params_parser(false); ASTPtr args_ast = parseQuery(params_parser, parameters_str.data(), parameters_str.data() + parameters_str.size(), - "parameters of aggregate function in " + error_context, 0); + "parameters of aggregate function in " + error_context, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); if (args_ast->children.empty()) throw Exception("Incorrect list of parameters to aggregate function " diff --git a/dbms/src/AggregateFunctions/parseAggregateFunctionParameters.h b/src/AggregateFunctions/parseAggregateFunctionParameters.h similarity index 100% rename from dbms/src/AggregateFunctions/parseAggregateFunctionParameters.h rename to src/AggregateFunctions/parseAggregateFunctionParameters.h diff --git a/dbms/src/AggregateFunctions/registerAggregateFunctions.cpp b/src/AggregateFunctions/registerAggregateFunctions.cpp similarity index 100% rename from dbms/src/AggregateFunctions/registerAggregateFunctions.cpp rename to src/AggregateFunctions/registerAggregateFunctions.cpp diff --git a/dbms/src/AggregateFunctions/registerAggregateFunctions.h b/src/AggregateFunctions/registerAggregateFunctions.h similarity index 100% rename from dbms/src/AggregateFunctions/registerAggregateFunctions.h rename to src/AggregateFunctions/registerAggregateFunctions.h diff --git a/dbms/src/AggregateFunctions/tests/CMakeLists.txt b/src/AggregateFunctions/tests/CMakeLists.txt similarity index 100% rename from dbms/src/AggregateFunctions/tests/CMakeLists.txt rename to src/AggregateFunctions/tests/CMakeLists.txt diff --git a/dbms/src/AggregateFunctions/tests/quantile-t-digest.cpp b/src/AggregateFunctions/tests/quantile-t-digest.cpp similarity index 100% rename from dbms/src/AggregateFunctions/tests/quantile-t-digest.cpp rename to src/AggregateFunctions/tests/quantile-t-digest.cpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 00000000000..2393e0be18d --- 
/dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,601 @@ +if (USE_INCLUDE_WHAT_YOU_USE) + set (CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH}) +endif () + +if (USE_CLANG_TIDY) + set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") +endif () + +if(COMPILER_PIPE) + set(MAX_COMPILER_MEMORY 2500) +else() + set(MAX_COMPILER_MEMORY 1500) +endif() +if(MAKE_STATIC_LIBRARIES) + set(MAX_LINKER_MEMORY 3500) +else() + set(MAX_LINKER_MEMORY 2500) +endif() +include(../cmake/limit_jobs.cmake) + +set (CONFIG_VERSION ${CMAKE_CURRENT_BINARY_DIR}/Common/config_version.h) +set (CONFIG_COMMON ${CMAKE_CURRENT_BINARY_DIR}/Common/config.h) + +include (../cmake/version.cmake) +message (STATUS "Will build ${VERSION_FULL} revision ${VERSION_REVISION} ${VERSION_OFFICIAL}") +configure_file (Common/config.h.in ${CONFIG_COMMON}) +configure_file (Common/config_version.h.in ${CONFIG_VERSION}) +configure_file (Core/config_core.h.in ${CMAKE_CURRENT_BINARY_DIR}/Core/include/config_core.h) + +if (NOT MSVC) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra") +endif () + +if (USE_DEBUG_HELPERS) + set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") +endif () + +# Add some warnings that are not available even with -Wall -Wextra -Wpedantic. + +option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." ON) + +if (COMPILER_CLANG) + add_warning(pedantic) + no_warning(gnu-anonymous-struct) + no_warning(nested-anon-types) + no_warning(vla-extension) + no_warning(zero-length-array) + + add_warning(comma) + add_warning(conditional-uninitialized) + add_warning(covered-switch-default) + add_warning(deprecated) + add_warning(embedded-directive) + add_warning(empty-init-stmt) # linux-only + add_warning(extra-semi-stmt) # linux-only + add_warning(extra-semi) + add_warning(gnu-case-range) + add_warning(inconsistent-missing-destructor-override) + add_warning(newline-eof) + add_warning(old-style-cast) + add_warning(range-loop-analysis) + add_warning(redundant-parens) + add_warning(reserved-id-macro) + add_warning(shadow-field) # clang 8+ + add_warning(shadow-uncaptured-local) + add_warning(shadow) + add_warning(string-plus-int) # clang 8+ + add_warning(undef) + add_warning(unreachable-code-return) + add_warning(unreachable-code) + add_warning(unused-exception-parameter) + add_warning(unused-macros) + add_warning(unused-member-function) + add_warning(zero-as-null-pointer-constant) + + if (WEVERYTHING) + add_warning(everything) + no_warning(c++98-compat-pedantic) + no_warning(c++98-compat) + no_warning(c99-extensions) + no_warning(conversion) + no_warning(ctad-maybe-unsupported) # clang 9+, linux-only + no_warning(deprecated-dynamic-exception-spec) + no_warning(disabled-macro-expansion) + no_warning(documentation-unknown-command) + no_warning(double-promotion) + no_warning(exit-time-destructors) + no_warning(float-equal) + no_warning(global-constructors) + no_warning(gnu-anonymous-struct) + no_warning(missing-prototypes) + no_warning(missing-variable-declarations) + no_warning(nested-anon-types) + no_warning(packed) + no_warning(padded) + no_warning(return-std-move-in-c++11) # clang 7+ + no_warning(shift-sign-overflow) + no_warning(sign-conversion) + no_warning(switch-enum) + no_warning(undefined-func-template) + no_warning(unused-template) + no_warning(vla-extension) + 
no_warning(vla) + no_warning(weak-template-vtables) + no_warning(weak-vtables) + no_warning(zero-length-array) + + # TODO Enable conversion, sign-conversion, double-promotion warnings. + endif () +elseif (COMPILER_GCC) + # Add compiler options only to c++ compiler + function(add_cxx_compile_options option) + add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${option}>") + endfunction() + # Warn about boolean expression compared with an integer value different from true/false + add_cxx_compile_options(-Wbool-compare) + # Warn whenever a pointer is cast such that the required alignment of the target is increased. + add_cxx_compile_options(-Wcast-align) + # Warn whenever a pointer is cast so as to remove a type qualifier from the target type. + add_cxx_compile_options(-Wcast-qual) + # Warn when deleting a pointer to incomplete type, which may cause undefined behavior at runtime + add_cxx_compile_options(-Wdelete-incomplete) + # Warn if a requested optimization pass is disabled. Code is too big or too complex + add_cxx_compile_options(-Wdisabled-optimization) + # Warn about duplicated conditions in an if-else-if chain + add_cxx_compile_options(-Wduplicated-cond) + # Warn about a comparison between values of different enumerated types + add_cxx_compile_options(-Wenum-compare) + # Warn about uninitialized variables that are initialized with themselves + add_cxx_compile_options(-Winit-self) + # Warn about logical not used on the left hand side operand of a comparison + add_cxx_compile_options(-Wlogical-not-parentheses) + # Warn about suspicious uses of logical operators in expressions + add_cxx_compile_options(-Wlogical-op) + # Warn if there exists a path from the function entry to a use of the variable that is uninitialized. + add_cxx_compile_options(-Wmaybe-uninitialized) + # Warn when the indentation of the code does not reflect the block structure + add_cxx_compile_options(-Wmisleading-indentation) + # Warn if a global function is defined without a previous declaration - disabled because of build times + # add_cxx_compile_options(-Wmissing-declarations) + # Warn if a user-supplied include directory does not exist + add_cxx_compile_options(-Wmissing-include-dirs) + # Obvious + add_cxx_compile_options(-Wnon-virtual-dtor) + # Obvious + add_cxx_compile_options(-Wno-return-local-addr) + # This warning is disabled due to false positives if compiled with libc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90037 + #add_cxx_compile_options(-Wnull-dereference) + # Obvious + add_cxx_compile_options(-Wodr) + # Obvious + add_cxx_compile_options(-Wold-style-cast) + # Warn when a function declaration hides virtual functions from a base class + # add_cxx_compile_options(-Woverloaded-virtual) + # Warn about placement new expressions with undefined behavior + add_cxx_compile_options(-Wplacement-new=2) + # Warn about anything that depends on the “size of” a function type or of void + add_cxx_compile_options(-Wpointer-arith) + # Warn if anything is declared more than once in the same scope + add_cxx_compile_options(-Wredundant-decls) + # Member initialization reordering + add_cxx_compile_options(-Wreorder) + # Obvious + add_cxx_compile_options(-Wshadow) + # Warn if left shifting a negative value + add_cxx_compile_options(-Wshift-negative-value) + # Warn about a definition of an unsized deallocation function + add_cxx_compile_options(-Wsized-deallocation) + # Warn when the sizeof operator is applied to a parameter that is declared as an array in a function definition + add_cxx_compile_options(-Wsizeof-array-argument) + # Warn for 
suspicious length parameters to certain string and memory built-in functions if the argument uses sizeof + add_cxx_compile_options(-Wsizeof-pointer-memaccess) + + if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9) + # Warn about overriding virtual functions that are not marked with the override keyword + add_cxx_compile_options(-Wsuggest-override) + endif () + + # Warn whenever a switch statement has an index of boolean type and the case values are outside the range of a boolean type + add_cxx_compile_options(-Wswitch-bool) + # Warn if a self-comparison always evaluates to true or false + add_cxx_compile_options(-Wtautological-compare) + # Warn about trampolines generated for pointers to nested functions + add_cxx_compile_options(-Wtrampolines) + # Obvious + add_cxx_compile_options(-Wunused) + # Warn if vector operation is not implemented via SIMD capabilities of the architecture + add_cxx_compile_options(-Wvector-operation-performance) +endif () + +if (COMPILER_GCC) + # If we leave this optimization enabled, gcc-7 replaces a pair of SSE intrinsics (16 byte load, store) with a call to memcpy. + # It leads to slow code. This is a compiler bug. It looks like this: + # + # (gdb) bt + #0 memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11 + #1 0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=<optimized out>) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720 + #2 memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../src/Common/memcpySmall.h:37 + + add_definitions ("-fno-tree-loop-distribute-patterns") +endif () + +add_subdirectory (Access) +add_subdirectory (Columns) +add_subdirectory (Common) +add_subdirectory (Core) +add_subdirectory (DataStreams) +add_subdirectory (DataTypes) +add_subdirectory (Dictionaries) +add_subdirectory (Disks) +add_subdirectory (Storages) +add_subdirectory (Parsers) +add_subdirectory (IO) +add_subdirectory (Functions) +add_subdirectory (Interpreters) +add_subdirectory (AggregateFunctions) +add_subdirectory (Client) +add_subdirectory (TableFunctions) +add_subdirectory (Processors) +add_subdirectory (Formats) +add_subdirectory (Compression) + + +set(dbms_headers) +set(dbms_sources) + +add_headers_and_sources(clickhouse_common_io Common) +add_headers_and_sources(clickhouse_common_io Common/HashTable) +add_headers_and_sources(clickhouse_common_io IO) +list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp) + +if(USE_RDKAFKA) + add_headers_and_sources(dbms Storages/Kafka) +endif() + + +list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD}) +list (APPEND clickhouse_common_io_headers ${CONFIG_VERSION} ${CONFIG_COMMON}) + +list (APPEND dbms_sources Functions/IFunction.cpp Functions/FunctionFactory.cpp Functions/FunctionHelpers.cpp Functions/extractTimeZoneFromFunctionArguments.cpp) +list (APPEND dbms_headers Functions/IFunctionImpl.h Functions/FunctionFactory.h Functions/FunctionHelpers.h Functions/extractTimeZoneFromFunctionArguments.h) + +list (APPEND dbms_sources + AggregateFunctions/AggregateFunctionFactory.cpp + AggregateFunctions/AggregateFunctionCombinatorFactory.cpp + AggregateFunctions/AggregateFunctionState.cpp + AggregateFunctions/parseAggregateFunctionParameters.cpp) + +list (APPEND dbms_headers + AggregateFunctions/IAggregateFunction.h + AggregateFunctions/IAggregateFunctionCombinator.h + AggregateFunctions/AggregateFunctionFactory.h + AggregateFunctions/AggregateFunctionCombinatorFactory.h + AggregateFunctions/AggregateFunctionState.h + 
AggregateFunctions/FactoryHelpers.h + AggregateFunctions/parseAggregateFunctionParameters.h) + +list (APPEND dbms_sources TableFunctions/ITableFunction.cpp TableFunctions/TableFunctionFactory.cpp) +list (APPEND dbms_headers TableFunctions/ITableFunction.h TableFunctions/TableFunctionFactory.h) +list (APPEND dbms_sources Dictionaries/DictionaryFactory.cpp Dictionaries/DictionarySourceFactory.cpp Dictionaries/DictionaryStructure.cpp Dictionaries/getDictionaryConfigurationFromAST.cpp) +list (APPEND dbms_headers Dictionaries/DictionaryFactory.h Dictionaries/DictionarySourceFactory.h Dictionaries/DictionaryStructure.h Dictionaries/getDictionaryConfigurationFromAST.h) + +if (NOT ENABLE_SSL) + list (REMOVE_ITEM clickhouse_common_io_sources Common/OpenSSLHelpers.cpp) + list (REMOVE_ITEM clickhouse_common_io_headers Common/OpenSSLHelpers.h) +endif () + +add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources}) + +add_library (clickhouse_malloc OBJECT Common/malloc.cpp) +set_source_files_properties(Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin") + +add_library (clickhouse_new_delete STATIC Common/new_delete.cpp) +target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io jemalloc) + +add_subdirectory(Common/ZooKeeper) +add_subdirectory(Common/Config) + +set (all_modules) +macro(add_object_library name common_path) + if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) + add_headers_and_sources(dbms ${common_path}) + else () + list (APPEND all_modules ${name}) + add_headers_and_sources(${name} ${common_path}) + add_library(${name} SHARED ${${name}_sources} ${${name}_headers}) + target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all) + endif () +endmacro() + +add_object_library(clickhouse_access Access) +add_object_library(clickhouse_core Core) +add_object_library(clickhouse_compression Compression) +add_object_library(clickhouse_datastreams DataStreams) +add_object_library(clickhouse_datatypes DataTypes) +add_object_library(clickhouse_databases Databases) +add_object_library(clickhouse_disks Disks) +add_object_library(clickhouse_interpreters Interpreters) +add_object_library(clickhouse_interpreters_clusterproxy Interpreters/ClusterProxy) +add_object_library(clickhouse_columns Columns) +add_object_library(clickhouse_storages Storages) +add_object_library(clickhouse_storages_distributed Storages/Distributed) +add_object_library(clickhouse_storages_mergetree Storages/MergeTree) +add_object_library(clickhouse_storages_liveview Storages/LiveView) +add_object_library(clickhouse_client Client) +add_object_library(clickhouse_formats Formats) +add_object_library(clickhouse_processors Processors) +add_object_library(clickhouse_processors_executors Processors/Executors) +add_object_library(clickhouse_processors_formats Processors/Formats) +add_object_library(clickhouse_processors_formats_impl Processors/Formats/Impl) +add_object_library(clickhouse_processors_transforms Processors/Transforms) +add_object_library(clickhouse_processors_sources Processors/Sources) + + +if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) + add_library (dbms STATIC ${dbms_headers} ${dbms_sources}) + target_link_libraries (dbms PRIVATE jemalloc) + set (all_modules dbms) +else() + add_library (dbms SHARED ${dbms_headers} ${dbms_sources}) + target_link_libraries (dbms PUBLIC ${all_modules}) + target_link_libraries (clickhouse_interpreters PRIVATE jemalloc) + list (APPEND all_modules dbms) + # force all split libs to be linked + set 
(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed") +endif () + +macro (dbms_target_include_directories) + foreach (module ${all_modules}) + target_include_directories (${module} ${ARGN}) + endforeach () +endmacro () + +macro (dbms_target_link_libraries) + foreach (module ${all_modules}) + target_link_libraries (${module} ${ARGN}) + endforeach () +endmacro () + +if (USE_EMBEDDED_COMPILER) + dbms_target_link_libraries (PRIVATE ${REQUIRED_LLVM_LIBRARIES}) + dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS}) +endif () + +if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL") + # Won't generate debug info for files with heavy template instantiation to achieve faster linking and lower size. + set_source_files_properties( + Dictionaries/FlatDictionary.cpp + Dictionaries/HashedDictionary.cpp + Dictionaries/CacheDictionary.cpp + Dictionaries/TrieDictionary.cpp + Dictionaries/RangeHashedDictionary.cpp + Dictionaries/ComplexKeyHashedDictionary.cpp + Dictionaries/ComplexKeyCacheDictionary.cpp + Dictionaries/ComplexKeyCacheDictionary_generate1.cpp + Dictionaries/ComplexKeyCacheDictionary_generate2.cpp + Dictionaries/ComplexKeyCacheDictionary_generate3.cpp + Dictionaries/ODBCBlockInputStream.cpp + Dictionaries/HTTPDictionarySource.cpp + Dictionaries/LibraryDictionarySource.cpp + Dictionaries/ExecutableDictionarySource.cpp + Dictionaries/ClickHouseDictionarySource.cpp + PROPERTIES COMPILE_FLAGS -g0) +endif () + +# Otherwise it will slow down stack traces printing too much. +set_source_files_properties( + Common/Elf.cpp + Common/Dwarf.cpp + Common/SymbolIndex.cpp + PROPERTIES COMPILE_FLAGS "-O3 ${WITHOUT_COVERAGE}") + +target_link_libraries (clickhouse_common_io + PUBLIC + common + PRIVATE + string_utils + widechar_width + ${LINK_LIBRARIES_ONLY_ON_X86_64} + PUBLIC + ${DOUBLE_CONVERSION_LIBRARIES} + ryu + PUBLIC + ${Poco_Net_LIBRARY} + ${Poco_Util_LIBRARY} + ${Poco_Foundation_LIBRARY} + ${Poco_XML_LIBRARY} +) + +if(RE2_LIBRARY) + target_link_libraries(clickhouse_common_io PUBLIC ${RE2_LIBRARY}) +endif() +if(RE2_ST_LIBRARY) + target_link_libraries(clickhouse_common_io PUBLIC ${RE2_ST_LIBRARY}) +endif() + +target_link_libraries(clickhouse_common_io + PUBLIC + ${CITYHASH_LIBRARIES} + PRIVATE + ${Poco_XML_LIBRARY} + ${ZLIB_LIBRARIES} + ${EXECINFO_LIBRARIES} + PUBLIC + ${Boost_SYSTEM_LIBRARY} + ${Boost_PROGRAM_OPTIONS_LIBRARY} + PUBLIC + roaring +) + +if (USE_RDKAFKA) + dbms_target_link_libraries(PRIVATE ${CPPKAFKA_LIBRARY} ${RDKAFKA_LIBRARY}) + if(NOT USE_INTERNAL_RDKAFKA_LIBRARY) + dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR}) + endif() +endif() + + +if(RE2_INCLUDE_DIR) + target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${RE2_INCLUDE_DIR}) +endif() + +if(CPUID_LIBRARY) + target_link_libraries(clickhouse_common_io PRIVATE ${CPUID_LIBRARY}) +endif() + +if(CPUINFO_LIBRARY) + target_link_libraries(clickhouse_common_io PRIVATE ${CPUINFO_LIBRARY}) +endif() + +dbms_target_link_libraries ( + PRIVATE + clickhouse_parsers + clickhouse_common_config + clickhouse_common_zookeeper + string_utils # FIXME: not sure if it's private + PUBLIC + clickhouse_common_io + PRIVATE + clickhouse_dictionaries_embedded + ${LZ4_LIBRARY} + PUBLIC + ${MYSQLXX_LIBRARY} + PRIVATE + ${BTRIE_LIBRARIES} + ${Boost_PROGRAM_OPTIONS_LIBRARY} + ${Boost_FILESYSTEM_LIBRARY} + PUBLIC + ${Boost_SYSTEM_LIBRARY} +) + +target_include_directories(clickhouse_common_io PUBLIC 
${CMAKE_CURRENT_BINARY_DIR}/Core/include) # uses some includes from core +dbms_target_include_directories(PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/Core/include) + +target_include_directories(clickhouse_common_io SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) +dbms_target_include_directories(SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) + +dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR}) + +if (NOT USE_INTERNAL_LZ4_LIBRARY AND LZ4_INCLUDE_DIR) + dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR}) +endif () + +if (ZSTD_LIBRARY) + dbms_target_link_libraries(PRIVATE ${ZSTD_LIBRARY}) + if (NOT USE_INTERNAL_ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR) + dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${ZSTD_INCLUDE_DIR}) + endif () +endif() + +if (NOT USE_INTERNAL_BOOST_LIBRARY) + target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) +endif () + +if (Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY) + target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) + dbms_target_include_directories (SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) +endif() + +if (USE_POCO_SQLODBC) + target_link_libraries (clickhouse_common_io PRIVATE ${Poco_SQL_LIBRARY}) + dbms_target_link_libraries (PRIVATE ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY}) + if (NOT USE_INTERNAL_POCO_LIBRARY) + target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQL_INCLUDE_DIR}) + dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR} SYSTEM PUBLIC ${Poco_SQL_INCLUDE_DIR}) + endif() +endif() + +if (Poco_Data_FOUND) + target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) + dbms_target_include_directories (SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) +endif() + +if (USE_POCO_DATAODBC) + target_link_libraries (clickhouse_common_io PRIVATE ${Poco_Data_LIBRARY}) + dbms_target_link_libraries (PRIVATE ${Poco_DataODBC_LIBRARY}) + if (NOT USE_INTERNAL_POCO_LIBRARY) + dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR}) + endif() +endif() + +if (USE_POCO_MONGODB) + dbms_target_link_libraries (PRIVATE ${Poco_MongoDB_LIBRARY}) +endif() + +if (USE_POCO_REDIS) + dbms_target_link_libraries (PRIVATE ${Poco_Redis_LIBRARY}) +endif() + +if (USE_POCO_NETSSL) + target_link_libraries (clickhouse_common_io PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) + dbms_target_link_libraries (PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) +endif() + +if (USE_POCO_JSON) + dbms_target_link_libraries (PRIVATE ${Poco_JSON_LIBRARY}) +endif() + +dbms_target_link_libraries (PRIVATE ${Poco_Foundation_LIBRARY}) + +if (USE_ICU) + dbms_target_link_libraries (PRIVATE ${ICU_LIBRARIES}) + dbms_target_include_directories (SYSTEM PRIVATE ${ICU_INCLUDE_DIRS}) +endif () + +if (USE_CAPNP) + dbms_target_link_libraries (PRIVATE ${CAPNP_LIBRARIES}) +endif () + +if (USE_PARQUET) + dbms_target_link_libraries(PRIVATE ${PARQUET_LIBRARY}) + if (NOT USE_INTERNAL_PARQUET_LIBRARY OR USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE) + dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${PARQUET_INCLUDE_DIR} ${ARROW_INCLUDE_DIR}) + endif () +endif () + +if (USE_AVRO) + dbms_target_link_libraries(PRIVATE ${AVROCPP_LIBRARY}) + dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${AVROCPP_INCLUDE_DIR}) +endif () + +if (OPENSSL_CRYPTO_LIBRARY) + dbms_target_link_libraries (PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) + target_link_libraries 
(clickhouse_common_io PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) +endif () + +dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR}) +dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) + +if (USE_PROTOBUF) + dbms_target_link_libraries (PRIVATE ${Protobuf_LIBRARY}) + dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR}) +endif () + +if (USE_HDFS) + target_link_libraries (clickhouse_common_io PUBLIC ${HDFS3_LIBRARY}) + target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR}) +endif() + +if (USE_AWS_S3) + target_link_libraries (clickhouse_common_io PUBLIC ${AWS_S3_LIBRARY}) + target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_CORE_INCLUDE_DIR}) + target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_INCLUDE_DIR}) +endif() + +if (USE_BROTLI) + target_link_libraries (clickhouse_common_io PRIVATE ${BROTLI_LIBRARY}) + target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR}) +endif() + +dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR}) +target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR}) + +target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR}) + +target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR}) + +if (ENABLE_TESTS AND USE_GTEST) + macro (grep_gtest_sources BASE_DIR DST_VAR) + # Cold match files that are not in tests/ directories + file(GLOB_RECURSE "${DST_VAR}" RELATIVE "${BASE_DIR}" "gtest*.cpp") + endmacro() + + # attach all dbms gtest sources + grep_gtest_sources(${ClickHouse_SOURCE_DIR}/dbms dbms_gtest_sources) + add_executable(unit_tests_dbms ${dbms_gtest_sources}) + + # gtest framework has substandard code + target_compile_options(unit_tests_dbms PRIVATE + -Wno-zero-as-null-pointer-constant + -Wno-undef + -Wno-sign-compare + -Wno-used-but-marked-unused + -Wno-missing-noreturn + -Wno-gnu-zero-variadic-macro-arguments + ) + + target_link_libraries(unit_tests_dbms PRIVATE ${GTEST_BOTH_LIBRARIES} clickhouse_functions clickhouse_parsers dbms clickhouse_common_zookeeper string_utils) + add_check(unit_tests_dbms) +endif () diff --git a/src/Client/CMakeLists.txt b/src/Client/CMakeLists.txt new file mode 100644 index 00000000000..88c05163602 --- /dev/null +++ b/src/Client/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(tests) diff --git a/dbms/src/Client/Connection.cpp b/src/Client/Connection.cpp similarity index 100% rename from dbms/src/Client/Connection.cpp rename to src/Client/Connection.cpp diff --git a/dbms/src/Client/Connection.h b/src/Client/Connection.h similarity index 100% rename from dbms/src/Client/Connection.h rename to src/Client/Connection.h diff --git a/dbms/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h similarity index 100% rename from dbms/src/Client/ConnectionPool.h rename to src/Client/ConnectionPool.h diff --git a/dbms/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp similarity index 100% rename from dbms/src/Client/ConnectionPoolWithFailover.cpp rename to src/Client/ConnectionPoolWithFailover.cpp diff --git a/dbms/src/Client/ConnectionPoolWithFailover.h b/src/Client/ConnectionPoolWithFailover.h similarity index 100% rename from dbms/src/Client/ConnectionPoolWithFailover.h rename to src/Client/ConnectionPoolWithFailover.h diff --git a/dbms/src/Client/MultiplexedConnections.cpp 
b/src/Client/MultiplexedConnections.cpp similarity index 100% rename from dbms/src/Client/MultiplexedConnections.cpp rename to src/Client/MultiplexedConnections.cpp diff --git a/dbms/src/Client/MultiplexedConnections.h b/src/Client/MultiplexedConnections.h similarity index 100% rename from dbms/src/Client/MultiplexedConnections.h rename to src/Client/MultiplexedConnections.h diff --git a/dbms/src/Client/TimeoutSetter.cpp b/src/Client/TimeoutSetter.cpp similarity index 100% rename from dbms/src/Client/TimeoutSetter.cpp rename to src/Client/TimeoutSetter.cpp diff --git a/dbms/src/Client/TimeoutSetter.h b/src/Client/TimeoutSetter.h similarity index 100% rename from dbms/src/Client/TimeoutSetter.h rename to src/Client/TimeoutSetter.h diff --git a/dbms/src/Client/tests/CMakeLists.txt b/src/Client/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Client/tests/CMakeLists.txt rename to src/Client/tests/CMakeLists.txt diff --git a/dbms/src/Client/tests/test_connect.cpp b/src/Client/tests/test_connect.cpp similarity index 100% rename from dbms/src/Client/tests/test_connect.cpp rename to src/Client/tests/test_connect.cpp diff --git a/dbms/src/Columns/CMakeLists.txt b/src/Columns/CMakeLists.txt similarity index 100% rename from dbms/src/Columns/CMakeLists.txt rename to src/Columns/CMakeLists.txt diff --git a/dbms/src/Columns/Collator.cpp b/src/Columns/Collator.cpp similarity index 100% rename from dbms/src/Columns/Collator.cpp rename to src/Columns/Collator.cpp diff --git a/dbms/src/Columns/Collator.h b/src/Columns/Collator.h similarity index 100% rename from dbms/src/Columns/Collator.h rename to src/Columns/Collator.h diff --git a/dbms/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp similarity index 100% rename from dbms/src/Columns/ColumnAggregateFunction.cpp rename to src/Columns/ColumnAggregateFunction.cpp diff --git a/dbms/src/Columns/ColumnAggregateFunction.h b/src/Columns/ColumnAggregateFunction.h similarity index 100% rename from dbms/src/Columns/ColumnAggregateFunction.h rename to src/Columns/ColumnAggregateFunction.h diff --git a/dbms/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp similarity index 100% rename from dbms/src/Columns/ColumnArray.cpp rename to src/Columns/ColumnArray.cpp diff --git a/dbms/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h similarity index 100% rename from dbms/src/Columns/ColumnArray.h rename to src/Columns/ColumnArray.h diff --git a/dbms/src/Columns/ColumnConst.cpp b/src/Columns/ColumnConst.cpp similarity index 100% rename from dbms/src/Columns/ColumnConst.cpp rename to src/Columns/ColumnConst.cpp diff --git a/dbms/src/Columns/ColumnConst.h b/src/Columns/ColumnConst.h similarity index 100% rename from dbms/src/Columns/ColumnConst.h rename to src/Columns/ColumnConst.h diff --git a/dbms/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp similarity index 100% rename from dbms/src/Columns/ColumnDecimal.cpp rename to src/Columns/ColumnDecimal.cpp diff --git a/dbms/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h similarity index 100% rename from dbms/src/Columns/ColumnDecimal.h rename to src/Columns/ColumnDecimal.h diff --git a/dbms/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp similarity index 100% rename from dbms/src/Columns/ColumnFixedString.cpp rename to src/Columns/ColumnFixedString.cpp diff --git a/dbms/src/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h similarity index 100% rename from dbms/src/Columns/ColumnFixedString.h rename to 
src/Columns/ColumnFixedString.h diff --git a/dbms/src/Columns/ColumnFunction.cpp b/src/Columns/ColumnFunction.cpp similarity index 100% rename from dbms/src/Columns/ColumnFunction.cpp rename to src/Columns/ColumnFunction.cpp diff --git a/dbms/src/Columns/ColumnFunction.h b/src/Columns/ColumnFunction.h similarity index 100% rename from dbms/src/Columns/ColumnFunction.h rename to src/Columns/ColumnFunction.h diff --git a/dbms/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp similarity index 100% rename from dbms/src/Columns/ColumnLowCardinality.cpp rename to src/Columns/ColumnLowCardinality.cpp diff --git a/dbms/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h similarity index 100% rename from dbms/src/Columns/ColumnLowCardinality.h rename to src/Columns/ColumnLowCardinality.h diff --git a/dbms/src/Columns/ColumnNothing.h b/src/Columns/ColumnNothing.h similarity index 100% rename from dbms/src/Columns/ColumnNothing.h rename to src/Columns/ColumnNothing.h diff --git a/dbms/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp similarity index 100% rename from dbms/src/Columns/ColumnNullable.cpp rename to src/Columns/ColumnNullable.cpp diff --git a/dbms/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h similarity index 100% rename from dbms/src/Columns/ColumnNullable.h rename to src/Columns/ColumnNullable.h diff --git a/dbms/src/Columns/ColumnSet.h b/src/Columns/ColumnSet.h similarity index 100% rename from dbms/src/Columns/ColumnSet.h rename to src/Columns/ColumnSet.h diff --git a/dbms/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp similarity index 100% rename from dbms/src/Columns/ColumnString.cpp rename to src/Columns/ColumnString.cpp diff --git a/dbms/src/Columns/ColumnString.h b/src/Columns/ColumnString.h similarity index 100% rename from dbms/src/Columns/ColumnString.h rename to src/Columns/ColumnString.h diff --git a/dbms/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp similarity index 100% rename from dbms/src/Columns/ColumnTuple.cpp rename to src/Columns/ColumnTuple.cpp diff --git a/dbms/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h similarity index 100% rename from dbms/src/Columns/ColumnTuple.h rename to src/Columns/ColumnTuple.h diff --git a/dbms/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h similarity index 100% rename from dbms/src/Columns/ColumnUnique.h rename to src/Columns/ColumnUnique.h diff --git a/dbms/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp similarity index 100% rename from dbms/src/Columns/ColumnVector.cpp rename to src/Columns/ColumnVector.cpp diff --git a/dbms/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h similarity index 100% rename from dbms/src/Columns/ColumnVector.h rename to src/Columns/ColumnVector.h diff --git a/dbms/src/Columns/ColumnVectorHelper.h b/src/Columns/ColumnVectorHelper.h similarity index 100% rename from dbms/src/Columns/ColumnVectorHelper.h rename to src/Columns/ColumnVectorHelper.h diff --git a/dbms/src/Columns/ColumnsCommon.cpp b/src/Columns/ColumnsCommon.cpp similarity index 100% rename from dbms/src/Columns/ColumnsCommon.cpp rename to src/Columns/ColumnsCommon.cpp diff --git a/dbms/src/Columns/ColumnsCommon.h b/src/Columns/ColumnsCommon.h similarity index 100% rename from dbms/src/Columns/ColumnsCommon.h rename to src/Columns/ColumnsCommon.h diff --git a/dbms/src/Columns/ColumnsNumber.h b/src/Columns/ColumnsNumber.h similarity index 100% rename from dbms/src/Columns/ColumnsNumber.h rename to 
src/Columns/ColumnsNumber.h diff --git a/dbms/src/Columns/FilterDescription.cpp b/src/Columns/FilterDescription.cpp similarity index 100% rename from dbms/src/Columns/FilterDescription.cpp rename to src/Columns/FilterDescription.cpp diff --git a/dbms/src/Columns/FilterDescription.h b/src/Columns/FilterDescription.h similarity index 100% rename from dbms/src/Columns/FilterDescription.h rename to src/Columns/FilterDescription.h diff --git a/dbms/src/Columns/IColumn.cpp b/src/Columns/IColumn.cpp similarity index 100% rename from dbms/src/Columns/IColumn.cpp rename to src/Columns/IColumn.cpp diff --git a/dbms/src/Columns/IColumn.h b/src/Columns/IColumn.h similarity index 100% rename from dbms/src/Columns/IColumn.h rename to src/Columns/IColumn.h diff --git a/dbms/src/Columns/IColumnDummy.h b/src/Columns/IColumnDummy.h similarity index 100% rename from dbms/src/Columns/IColumnDummy.h rename to src/Columns/IColumnDummy.h diff --git a/dbms/src/Columns/IColumnImpl.h b/src/Columns/IColumnImpl.h similarity index 100% rename from dbms/src/Columns/IColumnImpl.h rename to src/Columns/IColumnImpl.h diff --git a/dbms/src/Columns/IColumnUnique.h b/src/Columns/IColumnUnique.h similarity index 100% rename from dbms/src/Columns/IColumnUnique.h rename to src/Columns/IColumnUnique.h diff --git a/dbms/src/Columns/ReverseIndex.h b/src/Columns/ReverseIndex.h similarity index 100% rename from dbms/src/Columns/ReverseIndex.h rename to src/Columns/ReverseIndex.h diff --git a/dbms/src/Columns/getLeastSuperColumn.cpp b/src/Columns/getLeastSuperColumn.cpp similarity index 100% rename from dbms/src/Columns/getLeastSuperColumn.cpp rename to src/Columns/getLeastSuperColumn.cpp diff --git a/dbms/src/Columns/getLeastSuperColumn.h b/src/Columns/getLeastSuperColumn.h similarity index 100% rename from dbms/src/Columns/getLeastSuperColumn.h rename to src/Columns/getLeastSuperColumn.h diff --git a/dbms/src/Columns/tests/CMakeLists.txt b/src/Columns/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Columns/tests/CMakeLists.txt rename to src/Columns/tests/CMakeLists.txt diff --git a/dbms/src/Columns/tests/gtest_column_unique.cpp b/src/Columns/tests/gtest_column_unique.cpp similarity index 100% rename from dbms/src/Columns/tests/gtest_column_unique.cpp rename to src/Columns/tests/gtest_column_unique.cpp diff --git a/dbms/src/Columns/tests/gtest_weak_hash_32.cpp b/src/Columns/tests/gtest_weak_hash_32.cpp similarity index 98% rename from dbms/src/Columns/tests/gtest_weak_hash_32.cpp rename to src/Columns/tests/gtest_weak_hash_32.cpp index 4fa420db678..c79188e9e88 100644 --- a/dbms/src/Columns/tests/gtest_weak_hash_32.cpp +++ b/src/Columns/tests/gtest_weak_hash_32.cpp @@ -17,8 +17,10 @@ #include #include +#include <sstream> #include + using namespace DB; template @@ -69,6 +71,8 @@ void checkColumn( std::unordered_map map; size_t num_collisions = 0; + std::stringstream collisions_str; + for (size_t i = 0; i < eq_class.size(); ++i) { auto & val = eq_class[i]; @@ -82,12 +86,16 @@ if (num_collisions <= max_collisions_to_print) { - std::cout << "Collision:\n"; - std::cout << print_for_row(it->second) << '\n'; - std::cout << print_for_row(i) << std::endl; + collisions_str << "Collision:\n"; + collisions_str << print_for_row(it->second) << '\n'; + collisions_str << print_for_row(i) << std::endl; } - else if (num_collisions > allowed_collisions) + + if (num_collisions > allowed_collisions) + { + std::cerr << collisions_str.rdbuf(); break; + } } } diff --git a/dbms/src/Common/ActionBlocker.h b/src/Common/ActionBlocker.h 
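
The gtest_weak_hash_32.cpp hunk above stops printing every collision to stdout as it is found; instead it buffers the reports in a std::stringstream and dumps them to stderr only once the number of collisions exceeds the allowed budget, so passing runs stay quiet. A condensed sketch of that buffering pattern (the container and names here are illustrative, not the test's actual types):

```cpp
#include <iostream>
#include <sstream>
#include <unordered_map>
#include <vector>

// Collect collision reports into a buffer and print them only if the check
// actually fails (collision count exceeds the allowed budget).
int main()
{
    std::vector<unsigned> hashes = {1, 2, 2, 3, 3, 3};
    const size_t allowed_collisions = 1;

    std::unordered_map<unsigned, size_t> first_row; // hash -> first row with it
    std::stringstream collisions_str;
    size_t num_collisions = 0;

    for (size_t i = 0; i < hashes.size(); ++i)
    {
        auto [it, inserted] = first_row.try_emplace(hashes[i], i);
        if (!inserted)
        {
            ++num_collisions;
            collisions_str << "Collision: rows " << it->second << " and " << i << '\n';
            if (num_collisions > allowed_collisions)
            {
                std::cerr << collisions_str.rdbuf(); // dump everything at once
                break;
            }
        }
    }
}
```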
similarity index 100% rename from dbms/src/Common/ActionBlocker.h rename to src/Common/ActionBlocker.h diff --git a/dbms/src/Common/ActionLock.cpp b/src/Common/ActionLock.cpp similarity index 100% rename from dbms/src/Common/ActionLock.cpp rename to src/Common/ActionLock.cpp diff --git a/dbms/src/Common/ActionLock.h b/src/Common/ActionLock.h similarity index 100% rename from dbms/src/Common/ActionLock.h rename to src/Common/ActionLock.h diff --git a/dbms/src/Common/AlignedBuffer.cpp b/src/Common/AlignedBuffer.cpp similarity index 100% rename from dbms/src/Common/AlignedBuffer.cpp rename to src/Common/AlignedBuffer.cpp diff --git a/dbms/src/Common/AlignedBuffer.h b/src/Common/AlignedBuffer.h similarity index 100% rename from dbms/src/Common/AlignedBuffer.h rename to src/Common/AlignedBuffer.h diff --git a/dbms/src/Common/Allocator.h b/src/Common/Allocator.h similarity index 100% rename from dbms/src/Common/Allocator.h rename to src/Common/Allocator.h diff --git a/dbms/src/Common/Allocator_fwd.h b/src/Common/Allocator_fwd.h similarity index 100% rename from dbms/src/Common/Allocator_fwd.h rename to src/Common/Allocator_fwd.h diff --git a/dbms/src/Common/Arena.h b/src/Common/Arena.h similarity index 98% rename from dbms/src/Common/Arena.h rename to src/Common/Arena.h index e1556ef73c5..32c0f4c12d1 100644 --- a/dbms/src/Common/Arena.h +++ b/src/Common/Arena.h @@ -179,8 +179,8 @@ public: /** Rollback just performed allocation. * Must pass size not more that was just allocated. - * Return the resulting head pointer, so that the caller can assert that - * the allocation it intended to roll back was indeed the last one. + * Return the resulting head pointer, so that the caller can assert that + * the allocation it intended to roll back was indeed the last one. 
*/ void * rollback(size_t size) { diff --git a/dbms/src/Common/ArenaAllocator.h b/src/Common/ArenaAllocator.h similarity index 100% rename from dbms/src/Common/ArenaAllocator.h rename to src/Common/ArenaAllocator.h diff --git a/dbms/src/Common/ArenaWithFreeLists.h b/src/Common/ArenaWithFreeLists.h similarity index 100% rename from dbms/src/Common/ArenaWithFreeLists.h rename to src/Common/ArenaWithFreeLists.h diff --git a/dbms/src/Common/ArrayCache.h b/src/Common/ArrayCache.h similarity index 100% rename from dbms/src/Common/ArrayCache.h rename to src/Common/ArrayCache.h diff --git a/dbms/src/Common/AutoArray.h b/src/Common/AutoArray.h similarity index 100% rename from dbms/src/Common/AutoArray.h rename to src/Common/AutoArray.h diff --git a/dbms/src/Common/BitHelpers.h b/src/Common/BitHelpers.h similarity index 89% rename from dbms/src/Common/BitHelpers.h rename to src/Common/BitHelpers.h index ba6a4c60a49..bc6d7413def 100644 --- a/dbms/src/Common/BitHelpers.h +++ b/src/Common/BitHelpers.h @@ -53,12 +53,10 @@ inline size_t getLeadingZeroBits(T x) } } +// Unsafe since the __builtin_ctz() family explicitly states that the result is undefined on x == 0 template <typename T> -inline size_t getTrailingZeroBits(T x) +inline size_t getTrailingZeroBitsUnsafe(T x) { - if (!x) - return sizeof(x) * 8; - if constexpr (sizeof(T) <= sizeof(unsigned int)) { return __builtin_ctz(x); @@ -73,6 +71,15 @@ inline size_t getTrailingZeroBits(T x) } } +template <typename T> +inline size_t getTrailingZeroBits(T x) +{ + if (!x) + return sizeof(x) * 8; + + return getTrailingZeroBitsUnsafe(x); +} + /** Returns a mask that has '1' for `bits` LSB set: * maskLowBits<UInt8>(3) => 00000111 */ diff --git a/dbms/src/Common/CMakeLists.txt b/src/Common/CMakeLists.txt similarity index 100% rename from dbms/src/Common/CMakeLists.txt rename to src/Common/CMakeLists.txt diff --git a/dbms/src/Common/COW.h b/src/Common/COW.h similarity index 100% rename from dbms/src/Common/COW.h rename to src/Common/COW.h diff --git a/dbms/src/Common/ClickHouseRevision.cpp b/src/Common/ClickHouseRevision.cpp similarity index 100% rename from dbms/src/Common/ClickHouseRevision.cpp rename to src/Common/ClickHouseRevision.cpp diff --git a/dbms/src/Common/ClickHouseRevision.h b/src/Common/ClickHouseRevision.h similarity index 100% rename from dbms/src/Common/ClickHouseRevision.h rename to src/Common/ClickHouseRevision.h diff --git a/dbms/src/Common/ColumnsHashing.h b/src/Common/ColumnsHashing.h similarity index 100% rename from dbms/src/Common/ColumnsHashing.h rename to src/Common/ColumnsHashing.h diff --git a/dbms/src/Common/ColumnsHashingImpl.h b/src/Common/ColumnsHashingImpl.h similarity index 100% rename from dbms/src/Common/ColumnsHashingImpl.h rename to src/Common/ColumnsHashingImpl.h diff --git a/dbms/src/Common/CombinedCardinalityEstimator.h b/src/Common/CombinedCardinalityEstimator.h similarity index 100% rename from dbms/src/Common/CombinedCardinalityEstimator.h rename to src/Common/CombinedCardinalityEstimator.h diff --git a/dbms/src/Common/CompactArray.h b/src/Common/CompactArray.h similarity index 100% rename from dbms/src/Common/CompactArray.h rename to src/Common/CompactArray.h diff --git a/dbms/src/Common/ConcurrentBoundedQueue.h b/src/Common/ConcurrentBoundedQueue.h similarity index 100% rename from dbms/src/Common/ConcurrentBoundedQueue.h rename to src/Common/ConcurrentBoundedQueue.h diff --git a/dbms/src/Common/Config/AbstractConfigurationComparison.cpp b/src/Common/Config/AbstractConfigurationComparison.cpp similarity index 100% rename from 
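
The BitHelpers.h hunk above splits getTrailingZeroBits in two because the __builtin_ctz() family of GCC/Clang builtins is undefined for a zero argument: the zero check lives only in the safe wrapper, and callers that already know the value is non-zero can call the unsafe variant without paying for the branch. A condensed sketch of the resulting shape (the real header also dispatches on unsigned long; the assert is an illustration of the precondition, not part of the hunk):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

template <typename T>
inline size_t getTrailingZeroBitsUnsafe(T x)
{
    assert(x != 0); // precondition: __builtin_ctz*(0) is undefined
    if constexpr (sizeof(T) <= sizeof(unsigned int))
        return __builtin_ctz(x);
    else
        return __builtin_ctzll(x);
}

template <typename T>
inline size_t getTrailingZeroBits(T x)
{
    if (!x)
        return sizeof(x) * 8; // convention: all bits count as trailing zeros
    return getTrailingZeroBitsUnsafe(x);
}

int main()
{
    return (getTrailingZeroBits(uint64_t{0}) == 64 && getTrailingZeroBits(8u) == 3) ? 0 : 1;
}
```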
dbms/src/Common/Config/AbstractConfigurationComparison.cpp rename to src/Common/Config/AbstractConfigurationComparison.cpp diff --git a/dbms/src/Common/Config/AbstractConfigurationComparison.h b/src/Common/Config/AbstractConfigurationComparison.h similarity index 100% rename from dbms/src/Common/Config/AbstractConfigurationComparison.h rename to src/Common/Config/AbstractConfigurationComparison.h diff --git a/dbms/src/Common/Config/CMakeLists.txt b/src/Common/Config/CMakeLists.txt similarity index 100% rename from dbms/src/Common/Config/CMakeLists.txt rename to src/Common/Config/CMakeLists.txt diff --git a/dbms/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp similarity index 100% rename from dbms/src/Common/Config/ConfigProcessor.cpp rename to src/Common/Config/ConfigProcessor.cpp diff --git a/dbms/src/Common/Config/ConfigProcessor.h b/src/Common/Config/ConfigProcessor.h similarity index 100% rename from dbms/src/Common/Config/ConfigProcessor.h rename to src/Common/Config/ConfigProcessor.h diff --git a/dbms/src/Common/Config/ConfigReloader.cpp b/src/Common/Config/ConfigReloader.cpp similarity index 100% rename from dbms/src/Common/Config/ConfigReloader.cpp rename to src/Common/Config/ConfigReloader.cpp diff --git a/dbms/src/Common/Config/ConfigReloader.h b/src/Common/Config/ConfigReloader.h similarity index 100% rename from dbms/src/Common/Config/ConfigReloader.h rename to src/Common/Config/ConfigReloader.h diff --git a/dbms/src/Common/Config/configReadClient.cpp b/src/Common/Config/configReadClient.cpp similarity index 100% rename from dbms/src/Common/Config/configReadClient.cpp rename to src/Common/Config/configReadClient.cpp diff --git a/dbms/src/Common/Config/configReadClient.h b/src/Common/Config/configReadClient.h similarity index 100% rename from dbms/src/Common/Config/configReadClient.h rename to src/Common/Config/configReadClient.h diff --git a/dbms/src/Common/CounterInFile.h b/src/Common/CounterInFile.h similarity index 100% rename from dbms/src/Common/CounterInFile.h rename to src/Common/CounterInFile.h diff --git a/dbms/src/Common/CpuId.h b/src/Common/CpuId.h similarity index 100% rename from dbms/src/Common/CpuId.h rename to src/Common/CpuId.h diff --git a/dbms/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp similarity index 100% rename from dbms/src/Common/CurrentMetrics.cpp rename to src/Common/CurrentMetrics.cpp diff --git a/dbms/src/Common/CurrentMetrics.h b/src/Common/CurrentMetrics.h similarity index 100% rename from dbms/src/Common/CurrentMetrics.h rename to src/Common/CurrentMetrics.h diff --git a/dbms/src/Common/CurrentThread.cpp b/src/Common/CurrentThread.cpp similarity index 100% rename from dbms/src/Common/CurrentThread.cpp rename to src/Common/CurrentThread.cpp diff --git a/dbms/src/Common/CurrentThread.h b/src/Common/CurrentThread.h similarity index 100% rename from dbms/src/Common/CurrentThread.h rename to src/Common/CurrentThread.h diff --git a/dbms/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp similarity index 100% rename from dbms/src/Common/DNSResolver.cpp rename to src/Common/DNSResolver.cpp diff --git a/dbms/src/Common/DNSResolver.h b/src/Common/DNSResolver.h similarity index 100% rename from dbms/src/Common/DNSResolver.h rename to src/Common/DNSResolver.h diff --git a/dbms/src/Common/Dwarf.cpp b/src/Common/Dwarf.cpp similarity index 100% rename from dbms/src/Common/Dwarf.cpp rename to src/Common/Dwarf.cpp diff --git a/dbms/src/Common/Dwarf.h b/src/Common/Dwarf.h similarity index 100% rename from 
dbms/src/Common/Dwarf.h rename to src/Common/Dwarf.h diff --git a/dbms/src/Common/Elf.cpp b/src/Common/Elf.cpp similarity index 100% rename from dbms/src/Common/Elf.cpp rename to src/Common/Elf.cpp diff --git a/dbms/src/Common/Elf.h b/src/Common/Elf.h similarity index 100% rename from dbms/src/Common/Elf.h rename to src/Common/Elf.h diff --git a/dbms/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp similarity index 100% rename from dbms/src/Common/ErrorCodes.cpp rename to src/Common/ErrorCodes.cpp diff --git a/dbms/src/Common/EventCounter.h b/src/Common/EventCounter.h similarity index 100% rename from dbms/src/Common/EventCounter.h rename to src/Common/EventCounter.h diff --git a/dbms/src/Common/Exception.cpp b/src/Common/Exception.cpp similarity index 100% rename from dbms/src/Common/Exception.cpp rename to src/Common/Exception.cpp diff --git a/dbms/src/Common/Exception.h b/src/Common/Exception.h similarity index 100% rename from dbms/src/Common/Exception.h rename to src/Common/Exception.h diff --git a/dbms/src/Common/ExternalLoaderStatus.cpp b/src/Common/ExternalLoaderStatus.cpp similarity index 100% rename from dbms/src/Common/ExternalLoaderStatus.cpp rename to src/Common/ExternalLoaderStatus.cpp diff --git a/dbms/src/Common/ExternalLoaderStatus.h b/src/Common/ExternalLoaderStatus.h similarity index 100% rename from dbms/src/Common/ExternalLoaderStatus.h rename to src/Common/ExternalLoaderStatus.h diff --git a/dbms/src/Common/FieldVisitors.cpp b/src/Common/FieldVisitors.cpp similarity index 100% rename from dbms/src/Common/FieldVisitors.cpp rename to src/Common/FieldVisitors.cpp diff --git a/dbms/src/Common/FieldVisitors.h b/src/Common/FieldVisitors.h similarity index 100% rename from dbms/src/Common/FieldVisitors.h rename to src/Common/FieldVisitors.h diff --git a/dbms/src/Common/FileChecker.cpp b/src/Common/FileChecker.cpp similarity index 100% rename from dbms/src/Common/FileChecker.cpp rename to src/Common/FileChecker.cpp diff --git a/dbms/src/Common/FileChecker.h b/src/Common/FileChecker.h similarity index 100% rename from dbms/src/Common/FileChecker.h rename to src/Common/FileChecker.h diff --git a/dbms/src/Common/FileUpdatesTracker.h b/src/Common/FileUpdatesTracker.h similarity index 100% rename from dbms/src/Common/FileUpdatesTracker.h rename to src/Common/FileUpdatesTracker.h diff --git a/dbms/src/Common/HTMLForm.h b/src/Common/HTMLForm.h similarity index 100% rename from dbms/src/Common/HTMLForm.h rename to src/Common/HTMLForm.h diff --git a/dbms/src/Common/HashTable/ClearableHashMap.h b/src/Common/HashTable/ClearableHashMap.h similarity index 100% rename from dbms/src/Common/HashTable/ClearableHashMap.h rename to src/Common/HashTable/ClearableHashMap.h diff --git a/dbms/src/Common/HashTable/ClearableHashSet.h b/src/Common/HashTable/ClearableHashSet.h similarity index 100% rename from dbms/src/Common/HashTable/ClearableHashSet.h rename to src/Common/HashTable/ClearableHashSet.h diff --git a/dbms/src/Common/HashTable/FixedClearableHashMap.h b/src/Common/HashTable/FixedClearableHashMap.h similarity index 100% rename from dbms/src/Common/HashTable/FixedClearableHashMap.h rename to src/Common/HashTable/FixedClearableHashMap.h diff --git a/dbms/src/Common/HashTable/FixedClearableHashSet.h b/src/Common/HashTable/FixedClearableHashSet.h similarity index 100% rename from dbms/src/Common/HashTable/FixedClearableHashSet.h rename to src/Common/HashTable/FixedClearableHashSet.h diff --git a/dbms/src/Common/HashTable/FixedHashMap.h b/src/Common/HashTable/FixedHashMap.h similarity 
index 100% rename from dbms/src/Common/HashTable/FixedHashMap.h rename to src/Common/HashTable/FixedHashMap.h diff --git a/dbms/src/Common/HashTable/FixedHashSet.h b/src/Common/HashTable/FixedHashSet.h similarity index 100% rename from dbms/src/Common/HashTable/FixedHashSet.h rename to src/Common/HashTable/FixedHashSet.h diff --git a/dbms/src/Common/HashTable/FixedHashTable.h b/src/Common/HashTable/FixedHashTable.h similarity index 100% rename from dbms/src/Common/HashTable/FixedHashTable.h rename to src/Common/HashTable/FixedHashTable.h diff --git a/dbms/src/Common/HashTable/Hash.h b/src/Common/HashTable/Hash.h similarity index 100% rename from dbms/src/Common/HashTable/Hash.h rename to src/Common/HashTable/Hash.h diff --git a/dbms/src/Common/HashTable/HashMap.h b/src/Common/HashTable/HashMap.h similarity index 100% rename from dbms/src/Common/HashTable/HashMap.h rename to src/Common/HashTable/HashMap.h diff --git a/dbms/src/Common/HashTable/HashSet.h b/src/Common/HashTable/HashSet.h similarity index 100% rename from dbms/src/Common/HashTable/HashSet.h rename to src/Common/HashTable/HashSet.h diff --git a/dbms/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h similarity index 100% rename from dbms/src/Common/HashTable/HashTable.h rename to src/Common/HashTable/HashTable.h diff --git a/dbms/src/Common/HashTable/HashTableAllocator.h b/src/Common/HashTable/HashTableAllocator.h similarity index 100% rename from dbms/src/Common/HashTable/HashTableAllocator.h rename to src/Common/HashTable/HashTableAllocator.h diff --git a/dbms/src/Common/HashTable/HashTableKeyHolder.h b/src/Common/HashTable/HashTableKeyHolder.h similarity index 100% rename from dbms/src/Common/HashTable/HashTableKeyHolder.h rename to src/Common/HashTable/HashTableKeyHolder.h diff --git a/dbms/src/Common/HashTable/SmallTable.h b/src/Common/HashTable/SmallTable.h similarity index 100% rename from dbms/src/Common/HashTable/SmallTable.h rename to src/Common/HashTable/SmallTable.h diff --git a/dbms/src/Common/HashTable/StringHashMap.h b/src/Common/HashTable/StringHashMap.h similarity index 93% rename from dbms/src/Common/HashTable/StringHashMap.h rename to src/Common/HashTable/StringHashMap.h index 3ee59c89a36..fe4eab16092 100644 --- a/dbms/src/Common/HashTable/StringHashMap.h +++ b/src/Common/HashTable/StringHashMap.h @@ -25,9 +25,13 @@ struct StringHashMapCell : public HashMapCellvalue.first, state); } - // Assuming String does not contain zero bytes. NOTE: Cannot be used in serialized method - static bool isZero(const StringKey16 & key, const HashTableNoState & /*state*/) { return key.low == 0; } - void setZero() { this->value.first.low = 0; } + + // Zero means unoccupied cells in hash table. Use key with last word = 0 as + // zero keys, because such keys are unrepresentable (no way to encode length). + static bool isZero(const StringKey16 & key, const HashTableNoState &) + { return key.high == 0; } + void setZero() { this->value.first.high = 0; } + // external const StringRef getKey() const { return toStringRef(this->value.first); } // internal @@ -42,9 +46,13 @@ struct StringHashMapCell : public HashMapCellvalue.first, state); } - // Assuming String does not contain zero bytes. NOTE: Cannot be used in serialized method - static bool isZero(const StringKey24 & key, const HashTableNoState & /*state*/) { return key.a == 0; } - void setZero() { this->value.first.a = 0; } + + // Zero means unoccupied cells in hash table. 
Use key with last word = 0 as + // zero keys, because such keys are unrepresentable (no way to encode length). + static bool isZero(const StringKey24 & key, const HashTableNoState &) + { return key.c == 0; } + void setZero() { this->value.first.c = 0; } + // external const StringRef getKey() const { return toStringRef(this->value.first); } // internal diff --git a/dbms/src/Common/HashTable/StringHashTable.h b/src/Common/HashTable/StringHashTable.h similarity index 97% rename from dbms/src/Common/HashTable/StringHashTable.h rename to src/Common/HashTable/StringHashTable.h index d80b26c6a7c..101327ed809 100644 --- a/dbms/src/Common/HashTable/StringHashTable.h +++ b/src/Common/HashTable/StringHashTable.h @@ -18,14 +18,17 @@ struct StringKey24 inline StringRef ALWAYS_INLINE toStringRef(const StringKey8 & n) { + assert(n != 0); return {reinterpret_cast<const char *>(&n), 8ul - (__builtin_clzll(n) >> 3)}; } inline StringRef ALWAYS_INLINE toStringRef(const StringKey16 & n) { + assert(n.high != 0); return {reinterpret_cast<const char *>(&n), 16ul - (__builtin_clzll(n.high) >> 3)}; } inline StringRef ALWAYS_INLINE toStringRef(const StringKey24 & n) { + assert(n.c != 0); return {reinterpret_cast<const char *>(&n), 24ul - (__builtin_clzll(n.c) >> 3)}; } @@ -229,6 +232,7 @@ public: template <typename Self, typename KeyHolder, typename Func> static auto ALWAYS_INLINE dispatch(Self & self, KeyHolder && key_holder, Func && func) { + StringHashTableHash hash; const StringRef & x = keyHolderGetKey(key_holder); const size_t sz = x.size; if (sz == 0) @@ -237,6 +241,13 @@ return func(self.m0, VoidKey{}, 0); } + if (x.data[sz - 1] == 0) + { + // Strings with trailing zeros are not representable as fixed-size + // string keys. Put them to the generic table. + return func(self.ms, std::forward<KeyHolder>(key_holder), hash(x)); + } + const char * p = x.data; // pending bits that needs to be shifted out const char s = (-sz & 7) * 8; @@ -247,7 +258,6 @@ StringKey24 k24; UInt64 n[3]; }; - StringHashTableHash hash; switch ((sz - 1) >> 3) { case 0: // 1..8 bytes diff --git a/dbms/src/Common/HashTable/TwoLevelHashMap.h b/src/Common/HashTable/TwoLevelHashMap.h similarity index 100% rename from dbms/src/Common/HashTable/TwoLevelHashMap.h rename to src/Common/HashTable/TwoLevelHashMap.h diff --git a/dbms/src/Common/HashTable/TwoLevelHashTable.h b/src/Common/HashTable/TwoLevelHashTable.h similarity index 100% rename from dbms/src/Common/HashTable/TwoLevelHashTable.h rename to src/Common/HashTable/TwoLevelHashTable.h diff --git a/dbms/src/Common/HashTable/TwoLevelStringHashMap.h b/src/Common/HashTable/TwoLevelStringHashMap.h similarity index 100% rename from dbms/src/Common/HashTable/TwoLevelStringHashMap.h rename to src/Common/HashTable/TwoLevelStringHashMap.h diff --git a/dbms/src/Common/HashTable/TwoLevelStringHashTable.h b/src/Common/HashTable/TwoLevelStringHashTable.h similarity index 94% rename from dbms/src/Common/HashTable/TwoLevelStringHashTable.h rename to src/Common/HashTable/TwoLevelStringHashTable.h index 88241c6c5fe..93bbcb2835d 100644 --- a/dbms/src/Common/HashTable/TwoLevelStringHashTable.h +++ b/src/Common/HashTable/TwoLevelStringHashTable.h @@ -77,6 +77,7 @@ public: template <typename Self, typename KeyHolder, typename Func> static auto ALWAYS_INLINE dispatch(Self & self, KeyHolder && key_holder, Func && func) { + StringHashTableHash hash; const StringRef & x = keyHolderGetKey(key_holder); const size_t sz = x.size; if (sz == 0) @@ -85,6 +86,16 @@ return func(self.impls[0].m0, VoidKey{}, 0); } + if (x.data[x.size - 1] == 0) + { + // Strings with trailing zeros are not representable as fixed-size + // string keys. 
Put them to the generic table. + auto res = hash(x); + auto buck = getBucketFromHash(res); + return func(self.impls[buck].ms, std::forward<KeyHolder>(key_holder), + res); + } + const char * p = x.data; // pending bits that needs to be shifted out const char s = (-sz & 7) * 8; @@ -95,7 +106,6 @@ StringKey24 k24; UInt64 n[3]; }; - StringHashTableHash hash; switch ((sz - 1) >> 3) { case 0: diff --git a/dbms/src/Common/HyperLogLogBiasEstimator.h b/src/Common/HyperLogLogBiasEstimator.h similarity index 100% rename from dbms/src/Common/HyperLogLogBiasEstimator.h rename to src/Common/HyperLogLogBiasEstimator.h diff --git a/dbms/src/Common/HyperLogLogCounter.h b/src/Common/HyperLogLogCounter.h similarity index 100% rename from dbms/src/Common/HyperLogLogCounter.h rename to src/Common/HyperLogLogCounter.h diff --git a/dbms/src/Common/HyperLogLogWithSmallSetOptimization.h b/src/Common/HyperLogLogWithSmallSetOptimization.h similarity index 100% rename from dbms/src/Common/HyperLogLogWithSmallSetOptimization.h rename to src/Common/HyperLogLogWithSmallSetOptimization.h diff --git a/dbms/src/Common/IFactoryWithAliases.h b/src/Common/IFactoryWithAliases.h similarity index 100% rename from dbms/src/Common/IFactoryWithAliases.h rename to src/Common/IFactoryWithAliases.h diff --git a/dbms/src/Common/IPv6ToBinary.cpp b/src/Common/IPv6ToBinary.cpp similarity index 100% rename from dbms/src/Common/IPv6ToBinary.cpp rename to src/Common/IPv6ToBinary.cpp diff --git a/dbms/src/Common/IPv6ToBinary.h b/src/Common/IPv6ToBinary.h similarity index 100% rename from dbms/src/Common/IPv6ToBinary.h rename to src/Common/IPv6ToBinary.h diff --git a/dbms/src/Common/Increment.h b/src/Common/Increment.h similarity index 100% rename from dbms/src/Common/Increment.h rename to src/Common/Increment.h diff --git a/dbms/src/Common/InterruptListener.h b/src/Common/InterruptListener.h similarity index 100% rename from dbms/src/Common/InterruptListener.h rename to src/Common/InterruptListener.h diff --git a/dbms/src/Common/IntervalKind.cpp b/src/Common/IntervalKind.cpp similarity index 100% rename from dbms/src/Common/IntervalKind.cpp rename to src/Common/IntervalKind.cpp diff --git a/dbms/src/Common/IntervalKind.h b/src/Common/IntervalKind.h similarity index 100% rename from dbms/src/Common/IntervalKind.h rename to src/Common/IntervalKind.h diff --git a/dbms/src/Common/LRUCache.h b/src/Common/LRUCache.h similarity index 100% rename from dbms/src/Common/LRUCache.h rename to src/Common/LRUCache.h diff --git a/dbms/src/Common/Macros.cpp b/src/Common/Macros.cpp similarity index 100% rename from dbms/src/Common/Macros.cpp rename to src/Common/Macros.cpp diff --git a/dbms/src/Common/Macros.h b/src/Common/Macros.h similarity index 100% rename from dbms/src/Common/Macros.h rename to src/Common/Macros.h diff --git a/dbms/src/Common/MemorySanitizer.h b/src/Common/MemorySanitizer.h similarity index 100% rename from dbms/src/Common/MemorySanitizer.h rename to src/Common/MemorySanitizer.h diff --git a/dbms/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp similarity index 100% rename from dbms/src/Common/MemoryTracker.cpp rename to src/Common/MemoryTracker.cpp diff --git a/dbms/src/Common/MemoryTracker.h b/src/Common/MemoryTracker.h similarity index 100% rename from dbms/src/Common/MemoryTracker.h rename to src/Common/MemoryTracker.h diff --git a/dbms/src/Common/MultiVersion.h b/src/Common/MultiVersion.h similarity index 100% rename from dbms/src/Common/MultiVersion.h rename to src/Common/MultiVersion.h diff --git 
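
The StringHashTable.h and TwoLevelStringHashTable.h hunks above all guard the same invariant: short strings are packed into fixed-size integer keys (StringKey8/16/24) whose length is recovered from the count of leading zero bytes via __builtin_clzll, so a string whose last byte is NUL has no fixed-size representation and must be routed to the generic (serialized) sub-table. A small self-contained sketch of the packing trick and of the ambiguity the new branch avoids (little-endian layout assumed; pack/unpack are illustrative names, not ClickHouse's):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string_view>

using StringKey8 = uint64_t;

// Packs a 1..8 byte string into a UInt64, zero-padded at the top.
static StringKey8 pack(std::string_view s)
{
    assert(!s.empty() && s.size() <= 8);
    StringKey8 key = 0;
    std::memcpy(&key, s.data(), s.size()); // little-endian layout assumed
    return key;
}

// Recovers the string: leading zero bytes of the integer are the padding.
static std::string_view unpack(const StringKey8 & key)
{
    assert(key != 0); // mirrors the assert added in toStringRef above
    size_t len = 8 - (__builtin_clzll(key) >> 3);
    return {reinterpret_cast<const char *>(&key), len};
}

int main()
{
    StringKey8 k = pack("abc");
    std::cout << unpack(k).size() << '\n'; // 3

    // "ab\0" packs to the same key as "ab": the trailing zero byte is lost,
    // which is exactly why such strings go to the generic table instead.
    StringKey8 ambiguous = pack(std::string_view("ab\0", 3));
    std::cout << (ambiguous == pack("ab")) << '\n'; // 1
}
```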
a/dbms/src/Common/NaNUtils.h b/src/Common/NaNUtils.h similarity index 100% rename from dbms/src/Common/NaNUtils.h rename to src/Common/NaNUtils.h diff --git a/dbms/src/Common/NamePrompter.h b/src/Common/NamePrompter.h similarity index 100% rename from dbms/src/Common/NamePrompter.h rename to src/Common/NamePrompter.h diff --git a/dbms/src/Common/NetException.h b/src/Common/NetException.h similarity index 100% rename from dbms/src/Common/NetException.h rename to src/Common/NetException.h diff --git a/dbms/src/Common/ObjectPool.h b/src/Common/ObjectPool.h similarity index 100% rename from dbms/src/Common/ObjectPool.h rename to src/Common/ObjectPool.h diff --git a/dbms/src/Common/OpenSSLHelpers.cpp b/src/Common/OpenSSLHelpers.cpp similarity index 100% rename from dbms/src/Common/OpenSSLHelpers.cpp rename to src/Common/OpenSSLHelpers.cpp diff --git a/dbms/src/Common/OpenSSLHelpers.h b/src/Common/OpenSSLHelpers.h similarity index 100% rename from dbms/src/Common/OpenSSLHelpers.h rename to src/Common/OpenSSLHelpers.h diff --git a/dbms/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp similarity index 100% rename from dbms/src/Common/OptimizedRegularExpression.cpp rename to src/Common/OptimizedRegularExpression.cpp diff --git a/dbms/src/Common/OptimizedRegularExpression.h b/src/Common/OptimizedRegularExpression.h similarity index 100% rename from dbms/src/Common/OptimizedRegularExpression.h rename to src/Common/OptimizedRegularExpression.h diff --git a/dbms/src/Common/PODArray.cpp b/src/Common/PODArray.cpp similarity index 100% rename from dbms/src/Common/PODArray.cpp rename to src/Common/PODArray.cpp diff --git a/dbms/src/Common/PODArray.h b/src/Common/PODArray.h similarity index 100% rename from dbms/src/Common/PODArray.h rename to src/Common/PODArray.h diff --git a/dbms/src/Common/PODArray_fwd.h b/src/Common/PODArray_fwd.h similarity index 100% rename from dbms/src/Common/PODArray_fwd.h rename to src/Common/PODArray_fwd.h diff --git a/dbms/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp similarity index 100% rename from dbms/src/Common/PipeFDs.cpp rename to src/Common/PipeFDs.cpp diff --git a/dbms/src/Common/PipeFDs.h b/src/Common/PipeFDs.h similarity index 100% rename from dbms/src/Common/PipeFDs.h rename to src/Common/PipeFDs.h diff --git a/dbms/src/Common/PoolBase.h b/src/Common/PoolBase.h similarity index 100% rename from dbms/src/Common/PoolBase.h rename to src/Common/PoolBase.h diff --git a/dbms/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h similarity index 100% rename from dbms/src/Common/PoolWithFailoverBase.h rename to src/Common/PoolWithFailoverBase.h diff --git a/dbms/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp similarity index 100% rename from dbms/src/Common/ProfileEvents.cpp rename to src/Common/ProfileEvents.cpp diff --git a/dbms/src/Common/ProfileEvents.h b/src/Common/ProfileEvents.h similarity index 100% rename from dbms/src/Common/ProfileEvents.h rename to src/Common/ProfileEvents.h diff --git a/dbms/src/Common/ProfilingScopedRWLock.h b/src/Common/ProfilingScopedRWLock.h similarity index 100% rename from dbms/src/Common/ProfilingScopedRWLock.h rename to src/Common/ProfilingScopedRWLock.h diff --git a/dbms/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp similarity index 99% rename from dbms/src/Common/QueryProfiler.cpp rename to src/Common/QueryProfiler.cpp index ac2987a3795..dd9f36fb3ae 100644 --- a/dbms/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -23,7 +23,9 @@ 
namespace DB namespace { +#if defined(OS_LINUX) thread_local size_t write_trace_iteration = 0; +#endif void writeTraceInfo(TraceType trace_type, int /* sig */, siginfo_t * info, void * context) { @@ -53,7 +55,6 @@ namespace } #else UNUSED(info); - UNUSED(write_trace_iteration); #endif const auto signal_context = *reinterpret_cast(context); @@ -110,7 +111,7 @@ QueryProfilerBase::QueryProfilerBase(const UInt64 thread_id, const sev.sigev_notify = SIGEV_THREAD_ID; sev.sigev_signo = pause_signal; -# if defined(__FreeBSD__) +# if defined(OS_FREEBSD) sev._sigev_un._threadid = thread_id; # else sev._sigev_un._tid = thread_id; diff --git a/dbms/src/Common/QueryProfiler.h b/src/Common/QueryProfiler.h similarity index 100% rename from dbms/src/Common/QueryProfiler.h rename to src/Common/QueryProfiler.h diff --git a/src/Common/RWLock.cpp b/src/Common/RWLock.cpp new file mode 100644 index 00000000000..a8dba490fac --- /dev/null +++ b/src/Common/RWLock.cpp @@ -0,0 +1,307 @@ +#include "RWLock.h" +#include +#include +#include +#include + + +namespace ProfileEvents +{ + extern const Event RWLockAcquiredReadLocks; + extern const Event RWLockAcquiredWriteLocks; + extern const Event RWLockReadersWaitMilliseconds; + extern const Event RWLockWritersWaitMilliseconds; +} + + +namespace CurrentMetrics +{ + extern const Metric RWLockWaitingReaders; + extern const Metric RWLockWaitingWriters; + extern const Metric RWLockActiveReaders; + extern const Metric RWLockActiveWriters; +} + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +/** A one-time-use-object that represents lock ownership + * For the purpose of exception safety guarantees LockHolder is to be used in two steps: + * 1. Create an instance (allocating all the needed memory) + * 2. Associate the instance with the lock (attach to the lock and locking request group) + */ +class RWLockImpl::LockHolderImpl +{ + bool bound{false}; + String query_id; + CurrentMetrics::Increment active_client_increment; + RWLock parent; + GroupsContainer::iterator it_group; + +public: + LockHolderImpl(const LockHolderImpl & other) = delete; + LockHolderImpl& operator=(const LockHolderImpl & other) = delete; + + /// Implicit memory allocation for query_id is done here + LockHolderImpl(const String & query_id_, Type type) + : query_id{query_id_} + , active_client_increment{ + type == Type::Read ? CurrentMetrics::RWLockActiveReaders : CurrentMetrics::RWLockActiveWriters} + { + } + + ~LockHolderImpl() + { + if (bound && parent != nullptr) + parent->unlock(it_group, query_id); + else + active_client_increment.destroy(); + } + +private: + /// A separate method which binds the lock holder to the owned lock + /// N.B. It is very important that this method produces no allocations + bool bindWith(RWLock && parent_, GroupsContainer::iterator it_group_) noexcept + { + if (bound || parent_ == nullptr) + return false; + it_group = it_group_; + parent = std::move(parent_); + ++it_group->requests; + bound = true; + return true; + } + + friend class RWLockImpl; +}; + + +/** General algorithm: + * Step 1. Try the FastPath (for both Reads/Writes) + * Step 2. Find ourselves request group: attach to existing or create a new one + * Step 3. Wait/timed wait for ownership signal + * Step 3a. Check if we must handle timeout and exit + * Step 4. Persist lock ownership + * + * To guarantee that we do not get any piece of our data corrupted: + * 1. Perform all actions that include allocations before changing lock's internal state + * 2. 
Roll back any changes that make the state inconsistent + * + * Note: "SM" in the commentaries below stands for STATE MODIFICATION + */ +RWLockImpl::LockHolder +RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id, const std::chrono::milliseconds & lock_timeout_ms) +{ + const auto lock_deadline_tp = + (lock_timeout_ms == std::chrono::milliseconds(0)) + ? std::chrono::time_point::max() + : std::chrono::steady_clock::now() + lock_timeout_ms; + + const bool request_has_query_id = query_id != NO_QUERY; + + Stopwatch watch(CLOCK_MONOTONIC_COARSE); + CurrentMetrics::Increment waiting_client_increment((type == Read) ? CurrentMetrics::RWLockWaitingReaders + : CurrentMetrics::RWLockWaitingWriters); + auto finalize_metrics = [type, &watch] () + { + ProfileEvents::increment((type == Read) ? ProfileEvents::RWLockAcquiredReadLocks + : ProfileEvents::RWLockAcquiredWriteLocks); + ProfileEvents::increment((type == Read) ? ProfileEvents::RWLockReadersWaitMilliseconds + : ProfileEvents::RWLockWritersWaitMilliseconds, watch.elapsedMilliseconds()); + }; + + /// This object is placed above unique_lock, because it may lock in destructor. + auto lock_holder = std::make_shared(query_id, type); + + std::unique_lock state_lock(internal_state_mtx); + + /// The FastPath: + /// Check if the same query_id already holds the required lock in which case we can proceed without waiting + if (request_has_query_id) + { + const auto owner_query_it = owner_queries.find(query_id); + if (owner_query_it != owner_queries.end()) + { + if (wrlock_owner != writers_queue.end()) + throw Exception( + "RWLockImpl::getLock(): RWLock is already locked in exclusive mode", + ErrorCodes::LOGICAL_ERROR); + + /// Lock upgrading is not supported + if (type == Write) + throw Exception( + "RWLockImpl::getLock(): Cannot acquire exclusive lock while RWLock is already locked", + ErrorCodes::LOGICAL_ERROR); + + /// N.B. Type is Read here, query_id is not empty and it_query is a valid iterator + ++owner_query_it->second; /// SM1: nothrow + lock_holder->bindWith(shared_from_this(), rdlock_owner); /// SM2: nothrow + + finalize_metrics(); + return lock_holder; + } + } + + if (type == Type::Write) + { + writers_queue.emplace_back(type); /// SM1: may throw (nothing to roll back) + } + else if (readers_queue.empty() || + (rdlock_owner == readers_queue.begin() && readers_queue.size() == 1 && !writers_queue.empty())) + { + readers_queue.emplace_back(type); /// SM1: may throw (nothing to roll back) + } + GroupsContainer::iterator it_group = + (type == Type::Write) ? std::prev(writers_queue.end()) : std::prev(readers_queue.end()); + + /// Lock is free to acquire + if (rdlock_owner == readers_queue.end() && wrlock_owner == writers_queue.end()) + { + (type == Read ? rdlock_owner : wrlock_owner) = it_group; /// SM2: nothrow + } + else + { + /// Wait until our group becomes the lock owner + const auto predicate = [&] () { return it_group == (type == Read ? rdlock_owner : wrlock_owner); }; + + if (lock_deadline_tp == std::chrono::time_point::max()) + { + ++it_group->requests; + it_group->cv.wait(state_lock, predicate); + --it_group->requests; + } + else + { + ++it_group->requests; + const auto wait_result = it_group->cv.wait_until(state_lock, lock_deadline_tp, predicate); + --it_group->requests; + + /// Step 3a. Check if we must handle timeout and exit + if (!wait_result) /// Wait timed out! + { + /// Rollback(SM1): nothrow + if (it_group->requests == 0) + { + (type == Read ? 
readers_queue : writers_queue).erase(it_group); + } + + return nullptr; + } + } + } + + if (request_has_query_id) + { + try + { + const auto emplace_res = + owner_queries.emplace(query_id, 1); /// SM2: may throw on insertion + if (!emplace_res.second) + ++emplace_res.first->second; /// SM3: nothrow + } + catch (...) + { + /// Methods std::list<>::emplace_back() and std::unordered_map<>::emplace() provide strong exception safety + /// We only need to roll back the changes to these objects: owner_queries and the readers/writers queue + if (it_group->requests == 0) + dropOwnerGroupAndPassOwnership(it_group); /// Rollback(SM1): nothrow + + throw; + } + } + + lock_holder->bindWith(shared_from_this(), it_group); /// SM: nothrow + + finalize_metrics(); + return lock_holder; +} + + +/** The sequence points of acquiring lock ownership by an instance of LockHolderImpl: + * 1. owner_queries is updated + * 2. request group is updated by LockHolderImpl which in turn becomes "bound" + * + * If by the time when destructor of LockHolderImpl is called the instance has been "bound", + * it is guaranteed that all three steps have been executed successfully and the resulting state is consistent. + * With the mutex locked the order of steps to restore the lock's state can be arbitrary + * + * We do not employ try-catch: if something bad happens, there is nothing we can do =( + */ +void RWLockImpl::unlock(GroupsContainer::iterator group_it, const String & query_id) noexcept +{ + std::lock_guard state_lock(internal_state_mtx); + + /// All of theses are Undefined behavior and nothing we can do! + if (rdlock_owner == readers_queue.end() && wrlock_owner == writers_queue.end()) + return; + if (rdlock_owner != readers_queue.end() && group_it != rdlock_owner) + return; + if (wrlock_owner != writers_queue.end() && group_it != wrlock_owner) + return; + + /// If query_id is not empty it must be listed in parent->owner_queries + if (query_id != NO_QUERY) + { + const auto owner_query_it = owner_queries.find(query_id); + if (owner_query_it != owner_queries.end()) + { + if (--owner_query_it->second == 0) /// SM: nothrow + owner_queries.erase(owner_query_it); /// SM: nothrow + } + } + + /// If we are the last remaining referrer, remove this QNode and notify the next one + if (--group_it->requests == 0) /// SM: nothrow + dropOwnerGroupAndPassOwnership(group_it); +} + + +void RWLockImpl::dropOwnerGroupAndPassOwnership(GroupsContainer::iterator group_it) noexcept +{ + rdlock_owner = readers_queue.end(); + wrlock_owner = writers_queue.end(); + + if (group_it->type == Read) + { + readers_queue.erase(group_it); + /// Prepare next phase + if (!writers_queue.empty()) + { + wrlock_owner = writers_queue.begin(); + } + else + { + rdlock_owner = readers_queue.begin(); + } + } + else + { + writers_queue.erase(group_it); + /// Prepare next phase + if (!readers_queue.empty()) + { + rdlock_owner = readers_queue.begin(); + } + else + { + wrlock_owner = writers_queue.begin(); + } + } + + if (rdlock_owner != readers_queue.end()) + { + rdlock_owner->cv.notify_all(); + } + else if (wrlock_owner != writers_queue.end()) + { + wrlock_owner->cv.notify_one(); + } +} +} diff --git a/src/Common/RWLock.h b/src/Common/RWLock.h new file mode 100644 index 00000000000..ad0a3f139fc --- /dev/null +++ b/src/Common/RWLock.h @@ -0,0 +1,93 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +class RWLockImpl; +using RWLock = std::shared_ptr; + + +/// Implements Readers-Writers 
locking algorithm that serves requests in "Phase Fair" order. +/// (Phase Fair RWLock as suggested in https://www.cs.unc.edu/~anderson/papers/rtsj10-for-web.pdf) +/// It is used for synchronizing access to various objects on query level (i.e. Storages). +/// +/// In general, ClickHouse processes queries by multiple threads of execution in parallel. +/// As opposed to the standard OS synchronization primitives (mutexes), this implementation allows +/// unlock() to be called by a thread other than the one that called lock(). +/// It is also possible to acquire RWLock in Read mode without waiting (FastPath) by multiple threads +/// that execute the same query (share the same query_id). +/// +/// NOTE: it is important to allow acquiring the same lock in Read mode without waiting if it is already +/// acquired by another thread of the same query. Otherwise the following deadlock is possible: +/// - SELECT thread 1 locks in the Read mode +/// - ALTER tries to lock in the Write mode (waits for SELECT thread 1) +/// - SELECT thread 2 tries to lock in the Read mode (waits for ALTER) +class RWLockImpl : public std::enable_shared_from_this<RWLockImpl> +{ +public: + enum Type + { + Read, + Write, + }; + + static RWLock create() { return RWLock(new RWLockImpl); } + + /// Just use LockHolder::reset() to release the lock + class LockHolderImpl; + friend class LockHolderImpl; + using LockHolder = std::shared_ptr<LockHolderImpl>; + + /// Empty query_id means the lock is acquired from outside of a query context (e.g. in a background thread). + LockHolder getLock(Type type, const String & query_id, + const std::chrono::milliseconds & lock_timeout_ms = std::chrono::milliseconds(0)); + + /// Use NO_QUERY as query_id to acquire a lock outside the query context. + inline static const String NO_QUERY = String(); + inline static const auto default_locking_timeout_ms = std::chrono::milliseconds(120000);
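A hypothetical caller of the API above, to make the contract concrete (only create(), getLock(), Read and default_locking_timeout_ms come from this header; the lock object and the error handling are invented for the example):

    #include <stdexcept>

    RWLock table_lock = RWLockImpl::create();

    void readUnderLock(const String & current_query_id)
    {
        // getLock() returns nullptr if the timeout expires before the lock is
        // granted; a timeout of 0 ms means "wait indefinitely".
        RWLockImpl::LockHolder holder = table_lock->getLock(
            RWLockImpl::Read, current_query_id, RWLockImpl::default_locking_timeout_ms);
        if (!holder)
            throw std::runtime_error("Cannot acquire read lock within timeout");

        // ... read the protected object ...
        // The lock is released when `holder` (a shared_ptr) is reset or goes
        // out of scope, possibly on a different thread than the acquiring one.
    }

+ +private: + /// Group of locking requests that should be granted simultaneously + /// i.e.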
one or several readers or a single writer + struct Group + { + const Type type; + size_t requests; + + std::condition_variable cv; /// all locking requests of the group wait on this condvar + + explicit Group(Type type_) : type{type_}, requests{0} {} + }; + + using GroupsContainer = std::list; + using OwnerQueryIds = std::unordered_map; + +private: + mutable std::mutex internal_state_mtx; + + GroupsContainer readers_queue; + GroupsContainer writers_queue; + GroupsContainer::iterator rdlock_owner{readers_queue.end()}; /// equals to readers_queue.begin() in read phase + /// or readers_queue.end() otherwise + GroupsContainer::iterator wrlock_owner{writers_queue.end()}; /// equals to writers_queue.begin() in write phase + /// or writers_queue.end() otherwise + OwnerQueryIds owner_queries; + +private: + RWLockImpl() = default; + void unlock(GroupsContainer::iterator group_it, const String & query_id) noexcept; + void dropOwnerGroupAndPassOwnership(GroupsContainer::iterator group_it) noexcept; +}; +} diff --git a/dbms/src/Common/RadixSort.h b/src/Common/RadixSort.h similarity index 100% rename from dbms/src/Common/RadixSort.h rename to src/Common/RadixSort.h diff --git a/dbms/src/Common/RemoteHostFilter.cpp b/src/Common/RemoteHostFilter.cpp similarity index 100% rename from dbms/src/Common/RemoteHostFilter.cpp rename to src/Common/RemoteHostFilter.cpp diff --git a/dbms/src/Common/RemoteHostFilter.h b/src/Common/RemoteHostFilter.h similarity index 100% rename from dbms/src/Common/RemoteHostFilter.h rename to src/Common/RemoteHostFilter.h diff --git a/dbms/src/Common/SensitiveDataMasker.cpp b/src/Common/SensitiveDataMasker.cpp similarity index 100% rename from dbms/src/Common/SensitiveDataMasker.cpp rename to src/Common/SensitiveDataMasker.cpp diff --git a/dbms/src/Common/SensitiveDataMasker.h b/src/Common/SensitiveDataMasker.h similarity index 100% rename from dbms/src/Common/SensitiveDataMasker.h rename to src/Common/SensitiveDataMasker.h diff --git a/dbms/src/Common/SettingsChanges.h b/src/Common/SettingsChanges.h similarity index 100% rename from dbms/src/Common/SettingsChanges.h rename to src/Common/SettingsChanges.h diff --git a/dbms/src/Common/SharedBlockRowRef.h b/src/Common/SharedBlockRowRef.h similarity index 100% rename from dbms/src/Common/SharedBlockRowRef.h rename to src/Common/SharedBlockRowRef.h diff --git a/dbms/src/Common/SharedLibrary.cpp b/src/Common/SharedLibrary.cpp similarity index 100% rename from dbms/src/Common/SharedLibrary.cpp rename to src/Common/SharedLibrary.cpp diff --git a/dbms/src/Common/SharedLibrary.h b/src/Common/SharedLibrary.h similarity index 100% rename from dbms/src/Common/SharedLibrary.h rename to src/Common/SharedLibrary.h diff --git a/dbms/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp similarity index 100% rename from dbms/src/Common/ShellCommand.cpp rename to src/Common/ShellCommand.cpp diff --git a/dbms/src/Common/ShellCommand.h b/src/Common/ShellCommand.h similarity index 100% rename from dbms/src/Common/ShellCommand.h rename to src/Common/ShellCommand.h diff --git a/dbms/src/Common/SimpleActionBlocker.h b/src/Common/SimpleActionBlocker.h similarity index 100% rename from dbms/src/Common/SimpleActionBlocker.h rename to src/Common/SimpleActionBlocker.h diff --git a/dbms/src/Common/SimpleIncrement.h b/src/Common/SimpleIncrement.h similarity index 100% rename from dbms/src/Common/SimpleIncrement.h rename to src/Common/SimpleIncrement.h diff --git a/dbms/src/Common/SipHash.h b/src/Common/SipHash.h similarity index 100% rename from 
dbms/src/Common/SipHash.h rename to src/Common/SipHash.h diff --git a/dbms/src/Common/SmallObjectPool.h b/src/Common/SmallObjectPool.h similarity index 100% rename from dbms/src/Common/SmallObjectPool.h rename to src/Common/SmallObjectPool.h diff --git a/dbms/src/Common/SpaceSaving.h b/src/Common/SpaceSaving.h similarity index 100% rename from dbms/src/Common/SpaceSaving.h rename to src/Common/SpaceSaving.h diff --git a/dbms/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp similarity index 100% rename from dbms/src/Common/StackTrace.cpp rename to src/Common/StackTrace.cpp diff --git a/dbms/src/Common/StackTrace.h b/src/Common/StackTrace.h similarity index 100% rename from dbms/src/Common/StackTrace.h rename to src/Common/StackTrace.h diff --git a/dbms/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp similarity index 100% rename from dbms/src/Common/StatusFile.cpp rename to src/Common/StatusFile.cpp diff --git a/dbms/src/Common/StatusFile.h b/src/Common/StatusFile.h similarity index 100% rename from dbms/src/Common/StatusFile.h rename to src/Common/StatusFile.h diff --git a/dbms/src/Common/StatusInfo.cpp b/src/Common/StatusInfo.cpp similarity index 100% rename from dbms/src/Common/StatusInfo.cpp rename to src/Common/StatusInfo.cpp diff --git a/dbms/src/Common/StatusInfo.h b/src/Common/StatusInfo.h similarity index 100% rename from dbms/src/Common/StatusInfo.h rename to src/Common/StatusInfo.h diff --git a/dbms/src/Common/Stopwatch.cpp b/src/Common/Stopwatch.cpp similarity index 100% rename from dbms/src/Common/Stopwatch.cpp rename to src/Common/Stopwatch.cpp diff --git a/dbms/src/Common/Stopwatch.h b/src/Common/Stopwatch.h similarity index 100% rename from dbms/src/Common/Stopwatch.h rename to src/Common/Stopwatch.h diff --git a/dbms/src/Common/StringSearcher.h b/src/Common/StringSearcher.h similarity index 100% rename from dbms/src/Common/StringSearcher.h rename to src/Common/StringSearcher.h diff --git a/dbms/src/Common/StringUtils/CMakeLists.txt b/src/Common/StringUtils/CMakeLists.txt similarity index 100% rename from dbms/src/Common/StringUtils/CMakeLists.txt rename to src/Common/StringUtils/CMakeLists.txt diff --git a/dbms/src/Common/StringUtils/StringUtils.cpp b/src/Common/StringUtils/StringUtils.cpp similarity index 100% rename from dbms/src/Common/StringUtils/StringUtils.cpp rename to src/Common/StringUtils/StringUtils.cpp diff --git a/dbms/src/Common/StringUtils/StringUtils.h b/src/Common/StringUtils/StringUtils.h similarity index 100% rename from dbms/src/Common/StringUtils/StringUtils.h rename to src/Common/StringUtils/StringUtils.h diff --git a/dbms/src/Common/StudentTTest.cpp b/src/Common/StudentTTest.cpp similarity index 100% rename from dbms/src/Common/StudentTTest.cpp rename to src/Common/StudentTTest.cpp diff --git a/dbms/src/Common/StudentTTest.h b/src/Common/StudentTTest.h similarity index 100% rename from dbms/src/Common/StudentTTest.h rename to src/Common/StudentTTest.h diff --git a/dbms/src/Common/SymbolIndex.cpp b/src/Common/SymbolIndex.cpp similarity index 100% rename from dbms/src/Common/SymbolIndex.cpp rename to src/Common/SymbolIndex.cpp diff --git a/dbms/src/Common/SymbolIndex.h b/src/Common/SymbolIndex.h similarity index 100% rename from dbms/src/Common/SymbolIndex.h rename to src/Common/SymbolIndex.h diff --git a/dbms/src/Common/TaskStatsInfoGetter.cpp b/src/Common/TaskStatsInfoGetter.cpp similarity index 100% rename from dbms/src/Common/TaskStatsInfoGetter.cpp rename to src/Common/TaskStatsInfoGetter.cpp diff --git 
a/dbms/src/Common/TaskStatsInfoGetter.h b/src/Common/TaskStatsInfoGetter.h similarity index 100% rename from dbms/src/Common/TaskStatsInfoGetter.h rename to src/Common/TaskStatsInfoGetter.h diff --git a/dbms/src/Common/TerminalSize.cpp b/src/Common/TerminalSize.cpp similarity index 100% rename from dbms/src/Common/TerminalSize.cpp rename to src/Common/TerminalSize.cpp diff --git a/dbms/src/Common/TerminalSize.h b/src/Common/TerminalSize.h similarity index 100% rename from dbms/src/Common/TerminalSize.h rename to src/Common/TerminalSize.h diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp new file mode 100644 index 00000000000..60766e32361 --- /dev/null +++ b/src/Common/ThreadFuzzer.cpp @@ -0,0 +1,273 @@ +#include +#include +#include +#if defined(OS_LINUX) +# include +#endif +#include + +#include + +#include +#include +#include + +#include + +#include +#include + +#include + + +/// We will also wrap some thread synchronization functions to inject sleep/migration before or after. +#if defined(OS_LINUX) && !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) + #define THREAD_FUZZER_WRAP_PTHREAD 1 +#else + #define THREAD_FUZZER_WRAP_PTHREAD 0 +#endif + +#if THREAD_FUZZER_WRAP_PTHREAD +# define FOR_EACH_WRAPPED_FUNCTION(M) \ + M(int, pthread_mutex_lock, pthread_mutex_t * arg) \ + M(int, pthread_mutex_unlock, pthread_mutex_t * arg) +#endif + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int CANNOT_MANIPULATE_SIGSET; + extern const int CANNOT_SET_SIGNAL_HANDLER; + extern const int CANNOT_CREATE_TIMER; +} + + +ThreadFuzzer::ThreadFuzzer() +{ + initConfiguration(); + if (!isEffective()) + return; + setup(); +} + + +template +static void initFromEnv(T & what, const char * name) +{ + const char * env = getenv(name); + if (!env) + return; + what = parse(env); +} + +template +static void initFromEnv(std::atomic & what, const char * name) +{ + const char * env = getenv(name); + if (!env) + return; + what.store(parse(env), std::memory_order_relaxed); +} + + +static std::atomic num_cpus = 0; + +#if THREAD_FUZZER_WRAP_PTHREAD +# define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) \ + static std::atomic NAME##_before_yield_probability = 0; \ + static std::atomic NAME##_before_migrate_probability = 0; \ + static std::atomic NAME##_before_sleep_probability = 0; \ + static std::atomic NAME##_before_sleep_time_us = 0; \ +\ + static std::atomic NAME##_after_yield_probability = 0; \ + static std::atomic NAME##_after_migrate_probability = 0; \ + static std::atomic NAME##_after_sleep_probability = 0; \ + static std::atomic NAME##_after_sleep_time_us = 0; + +FOR_EACH_WRAPPED_FUNCTION(DEFINE_WRAPPER_PARAMS) + +# undef DEFINE_WRAPPER_PARAMS +#endif + +void ThreadFuzzer::initConfiguration() +{ +#if defined(OS_LINUX) + num_cpus.store(get_nprocs(), std::memory_order_relaxed); +#else + (void)num_cpus; +#endif + + initFromEnv(cpu_time_period_us, "THREAD_FUZZER_CPU_TIME_PERIOD_US"); + initFromEnv(yield_probability, "THREAD_FUZZER_YIELD_PROBABILITY"); + initFromEnv(migrate_probability, "THREAD_FUZZER_MIGRATE_PROBABILITY"); + initFromEnv(sleep_probability, "THREAD_FUZZER_SLEEP_PROBABILITY"); + initFromEnv(sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US"); + +#if THREAD_FUZZER_WRAP_PTHREAD +# define INIT_WRAPPER_PARAMS(RET, NAME, ...) 
\ + initFromEnv(NAME##_before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \ + initFromEnv(NAME##_before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \ + initFromEnv(NAME##_before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \ + initFromEnv(NAME##_before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \ +\ + initFromEnv(NAME##_after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \ + initFromEnv(NAME##_after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \ + initFromEnv(NAME##_after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \ + initFromEnv(NAME##_after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US"); + + FOR_EACH_WRAPPED_FUNCTION(INIT_WRAPPER_PARAMS) + +# undef INIT_WRAPPER_PARAMS +#endif +} + + +bool ThreadFuzzer::isEffective() const +{ +#if THREAD_FUZZER_WRAP_PTHREAD +# define CHECK_WRAPPER_PARAMS(RET, NAME, ...) \ + if (NAME##_before_yield_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_before_migrate_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_before_sleep_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_before_sleep_time_us.load(std::memory_order_relaxed)) \ + return true; \ +\ + if (NAME##_after_yield_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_after_migrate_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_after_sleep_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_after_sleep_time_us.load(std::memory_order_relaxed)) \ + return true; + + FOR_EACH_WRAPPED_FUNCTION(CHECK_WRAPPER_PARAMS) + +# undef INIT_WRAPPER_PARAMS +#endif + + return cpu_time_period_us != 0 + && (yield_probability > 0 + || migrate_probability > 0 + || (sleep_probability > 0 && sleep_time_us > 0)); +} + + +static void injection( + double yield_probability, + double migrate_probability, + double sleep_probability, + double sleep_time_us [[maybe_unused]]) +{ + if (yield_probability > 0 + && std::bernoulli_distribution(yield_probability)(thread_local_rng)) + { + sched_yield(); + } + +#if defined(OS_LINUX) + int num_cpus_loaded = num_cpus.load(std::memory_order_relaxed); + if (num_cpus_loaded > 0 + && migrate_probability > 0 + && std::bernoulli_distribution(migrate_probability)(thread_local_rng)) + { + int migrate_to = std::uniform_int_distribution<>(0, num_cpus_loaded - 1)(thread_local_rng); + + cpu_set_t set{}; + CPU_ZERO(&set); + CPU_SET(migrate_to, &set); + + (void)sched_setaffinity(0, sizeof(set), &set); + } +#else + UNUSED(migrate_probability); +#endif + + if (sleep_probability > 0 + && sleep_time_us > 0 + && std::bernoulli_distribution(sleep_probability)(thread_local_rng)) + { + sleepForNanoseconds(sleep_time_us * 1000); + } +} + + +void ThreadFuzzer::signalHandler(int) +{ + auto saved_errno = errno; + + auto & fuzzer = ThreadFuzzer::instance(); + injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.sleep_probability, fuzzer.sleep_time_us); + + errno = saved_errno; +} + +void ThreadFuzzer::setup() +{ + struct sigaction sa{}; + sa.sa_handler = signalHandler; + sa.sa_flags = SA_RESTART; + +#if defined(OS_LINUX) + if (sigemptyset(&sa.sa_mask)) + throwFromErrno("Failed to clean signal mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET); + + if (sigaddset(&sa.sa_mask, SIGPROF)) + throwFromErrno("Failed to add signal to mask 
for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET); +#else + // the two following functions always return 0 on macOS + sigemptyset(&sa.sa_mask); + sigaddset(&sa.sa_mask, SIGPROF); +#endif + + if (sigaction(SIGPROF, &sa, nullptr)) + throwFromErrno("Failed to setup signal handler for thread fuzzer", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + + static constexpr UInt32 timer_precision = 1000000; + + struct timeval interval; + interval.tv_sec = cpu_time_period_us / timer_precision; + interval.tv_usec = cpu_time_period_us % timer_precision; + + struct itimerval timer = {.it_interval = interval, .it_value = interval}; + + if (0 != setitimer(ITIMER_PROF, &timer, nullptr)) + throwFromErrno("Failed to create profiling timer", ErrorCodes::CANNOT_CREATE_TIMER); +} + + +/// We expect that for every function like pthread_mutex_lock there is the same function with two underscores prefix. +/// NOTE We cannot use dlsym(... RTLD_NEXT), because it will call pthread_mutex_lock and it will lead to infinite recursion. + +#if THREAD_FUZZER_WRAP_PTHREAD +# define MAKE_WRAPPER(RET, NAME, ...) \ + extern "C" RET __##NAME(__VA_ARGS__); /* NOLINT */ \ + extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \ + { \ + injection( \ + NAME##_before_yield_probability.load(std::memory_order_relaxed), \ + NAME##_before_migrate_probability.load(std::memory_order_relaxed), \ + NAME##_before_sleep_probability.load(std::memory_order_relaxed), \ + NAME##_before_sleep_time_us.load(std::memory_order_relaxed)); \ +\ + auto && ret{__##NAME(arg)}; \ +\ + injection( \ + NAME##_after_yield_probability.load(std::memory_order_relaxed), \ + NAME##_after_migrate_probability.load(std::memory_order_relaxed), \ + NAME##_after_sleep_probability.load(std::memory_order_relaxed), \ + NAME##_after_sleep_time_us.load(std::memory_order_relaxed)); \ +\ + return ret; \ + } + +FOR_EACH_WRAPPED_FUNCTION(MAKE_WRAPPER) + +# undef MAKE_WRAPPER +#endif +} diff --git a/dbms/src/Common/ThreadFuzzer.h b/src/Common/ThreadFuzzer.h similarity index 100% rename from dbms/src/Common/ThreadFuzzer.h rename to src/Common/ThreadFuzzer.h diff --git a/dbms/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp similarity index 100% rename from dbms/src/Common/ThreadPool.cpp rename to src/Common/ThreadPool.cpp diff --git a/dbms/src/Common/ThreadPool.h b/src/Common/ThreadPool.h similarity index 100% rename from dbms/src/Common/ThreadPool.h rename to src/Common/ThreadPool.h diff --git a/dbms/src/Common/ThreadProfileEvents.h b/src/Common/ThreadProfileEvents.h similarity index 100% rename from dbms/src/Common/ThreadProfileEvents.h rename to src/Common/ThreadProfileEvents.h diff --git a/dbms/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp similarity index 100% rename from dbms/src/Common/ThreadStatus.cpp rename to src/Common/ThreadStatus.cpp diff --git a/dbms/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h similarity index 100% rename from dbms/src/Common/ThreadStatus.h rename to src/Common/ThreadStatus.h diff --git a/dbms/src/Common/Throttler.h b/src/Common/Throttler.h similarity index 100% rename from dbms/src/Common/Throttler.h rename to src/Common/Throttler.h diff --git a/dbms/src/Common/TraceCollector.cpp b/src/Common/TraceCollector.cpp similarity index 100% rename from dbms/src/Common/TraceCollector.cpp rename to src/Common/TraceCollector.cpp diff --git a/dbms/src/Common/TraceCollector.h b/src/Common/TraceCollector.h similarity index 100% rename from dbms/src/Common/TraceCollector.h rename to src/Common/TraceCollector.h
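De-macroed, MAKE_WRAPPER expands to roughly the following for one wrapped function (a sketch with placeholder injection stubs; it relies on glibc exporting the double-underscore alias, which is one reason THREAD_FUZZER_WRAP_PTHREAD is only enabled for Linux builds):

    #include <pthread.h>

    // Placeholders for the probabilistic yield/migrate/sleep injection above.
    static void injectBefore() {}
    static void injectAfter() {}

    // glibc's internal entry point; declaring it lets us delegate without dlsym.
    extern "C" int __pthread_mutex_lock(pthread_mutex_t * arg);

    // Our definition interposes the public symbol at link time.
    extern "C" int pthread_mutex_lock(pthread_mutex_t * arg)
    {
        injectBefore();
        int ret = __pthread_mutex_lock(arg);
        injectAfter();
        return ret;
    }

Because the wrapper delegates through the aliased symbol rather than a dlsym(RTLD_NEXT) lookup, nothing inside it re-enters pthread_mutex_lock, avoiding the infinite recursion the comment warns about.

diff --git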
a/dbms/src/Common/TypeList.h b/src/Common/TypeList.h similarity index 100% rename from dbms/src/Common/TypeList.h rename to src/Common/TypeList.h diff --git a/dbms/src/Common/TypePromotion.h b/src/Common/TypePromotion.h similarity index 100% rename from dbms/src/Common/TypePromotion.h rename to src/Common/TypePromotion.h diff --git a/dbms/src/Common/UInt128.h b/src/Common/UInt128.h similarity index 100% rename from dbms/src/Common/UInt128.h rename to src/Common/UInt128.h diff --git a/dbms/src/Common/UTF8Helpers.cpp b/src/Common/UTF8Helpers.cpp similarity index 100% rename from dbms/src/Common/UTF8Helpers.cpp rename to src/Common/UTF8Helpers.cpp diff --git a/dbms/src/Common/UTF8Helpers.h b/src/Common/UTF8Helpers.h similarity index 100% rename from dbms/src/Common/UTF8Helpers.h rename to src/Common/UTF8Helpers.h diff --git a/dbms/src/Common/UnicodeBar.h b/src/Common/UnicodeBar.h similarity index 100% rename from dbms/src/Common/UnicodeBar.h rename to src/Common/UnicodeBar.h diff --git a/dbms/src/Common/VariableContext.h b/src/Common/VariableContext.h similarity index 100% rename from dbms/src/Common/VariableContext.h rename to src/Common/VariableContext.h diff --git a/dbms/src/Common/Visitor.h b/src/Common/Visitor.h similarity index 100% rename from dbms/src/Common/Visitor.h rename to src/Common/Visitor.h diff --git a/dbms/src/Common/Volnitsky.h b/src/Common/Volnitsky.h similarity index 100% rename from dbms/src/Common/Volnitsky.h rename to src/Common/Volnitsky.h diff --git a/dbms/src/Common/WeakHash.cpp b/src/Common/WeakHash.cpp similarity index 100% rename from dbms/src/Common/WeakHash.cpp rename to src/Common/WeakHash.cpp diff --git a/dbms/src/Common/WeakHash.h b/src/Common/WeakHash.h similarity index 100% rename from dbms/src/Common/WeakHash.h rename to src/Common/WeakHash.h diff --git a/dbms/src/Common/XDBCBridgeHelper.h b/src/Common/XDBCBridgeHelper.h similarity index 97% rename from dbms/src/Common/XDBCBridgeHelper.h rename to src/Common/XDBCBridgeHelper.h index 613d1bed8d7..b9d1f2cdcdf 100644 --- a/dbms/src/Common/XDBCBridgeHelper.h +++ b/src/Common/XDBCBridgeHelper.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -230,6 +231,10 @@ struct JDBCBridgeMixin { return "JDBC"; } + static AccessType getSourceAccessType() + { + return AccessType::JDBC; + } static std::unique_ptr startBridge(const Poco::Util::AbstractConfiguration &, const Poco::Logger *, const Poco::Timespan &) { @@ -253,6 +258,10 @@ struct ODBCBridgeMixin { return "ODBC"; } + static AccessType getSourceAccessType() + { + return AccessType::ODBC; + } static std::unique_ptr startBridge(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout) { diff --git a/src/Common/ZooKeeper/CMakeLists.txt b/src/Common/ZooKeeper/CMakeLists.txt new file mode 100644 index 00000000000..4dbf999419e --- /dev/null +++ b/src/Common/ZooKeeper/CMakeLists.txt @@ -0,0 +1,16 @@ +include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) + +add_headers_and_sources(clickhouse_common_zookeeper .) 
+ +add_library(clickhouse_common_zookeeper ${clickhouse_common_zookeeper_headers} ${clickhouse_common_zookeeper_sources}) + +target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY}) +target_include_directories(clickhouse_common_zookeeper PUBLIC ${DBMS_INCLUDE_DIR}) + +if (USE_POCO_NETSSL) + target_link_libraries (clickhouse_common_zookeeper PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) +endif() + +if (ENABLE_TESTS) + add_subdirectory (tests) +endif () diff --git a/dbms/src/Common/ZooKeeper/Common.h b/src/Common/ZooKeeper/Common.h similarity index 100% rename from dbms/src/Common/ZooKeeper/Common.h rename to src/Common/ZooKeeper/Common.h diff --git a/dbms/src/Common/ZooKeeper/IKeeper.cpp b/src/Common/ZooKeeper/IKeeper.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/IKeeper.cpp rename to src/Common/ZooKeeper/IKeeper.cpp diff --git a/dbms/src/Common/ZooKeeper/IKeeper.h b/src/Common/ZooKeeper/IKeeper.h similarity index 100% rename from dbms/src/Common/ZooKeeper/IKeeper.h rename to src/Common/ZooKeeper/IKeeper.h diff --git a/dbms/src/Common/ZooKeeper/Increment.h b/src/Common/ZooKeeper/Increment.h similarity index 100% rename from dbms/src/Common/ZooKeeper/Increment.h rename to src/Common/ZooKeeper/Increment.h diff --git a/dbms/src/Common/ZooKeeper/KeeperException.h b/src/Common/ZooKeeper/KeeperException.h similarity index 100% rename from dbms/src/Common/ZooKeeper/KeeperException.h rename to src/Common/ZooKeeper/KeeperException.h diff --git a/dbms/src/Common/ZooKeeper/LeaderElection.h b/src/Common/ZooKeeper/LeaderElection.h similarity index 100% rename from dbms/src/Common/ZooKeeper/LeaderElection.h rename to src/Common/ZooKeeper/LeaderElection.h diff --git a/dbms/src/Common/ZooKeeper/Lock.cpp b/src/Common/ZooKeeper/Lock.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/Lock.cpp rename to src/Common/ZooKeeper/Lock.cpp diff --git a/dbms/src/Common/ZooKeeper/Lock.h b/src/Common/ZooKeeper/Lock.h similarity index 100% rename from dbms/src/Common/ZooKeeper/Lock.h rename to src/Common/ZooKeeper/Lock.h diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.cpp b/src/Common/ZooKeeper/TestKeeper.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/TestKeeper.cpp rename to src/Common/ZooKeeper/TestKeeper.cpp diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.h b/src/Common/ZooKeeper/TestKeeper.h similarity index 100% rename from dbms/src/Common/ZooKeeper/TestKeeper.h rename to src/Common/ZooKeeper/TestKeeper.h diff --git a/dbms/src/Common/ZooKeeper/Types.h b/src/Common/ZooKeeper/Types.h similarity index 100% rename from dbms/src/Common/ZooKeeper/Types.h rename to src/Common/ZooKeeper/Types.h diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp similarity index 96% rename from dbms/src/Common/ZooKeeper/ZooKeeper.cpp rename to src/Common/ZooKeeper/ZooKeeper.cpp index 99c3f115021..032d1e90ff5 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -59,30 +59,35 @@ void ZooKeeper::init(const std::string & implementation_, const std::string & ho if (implementation == "zookeeper") { if (hosts.empty()) - throw KeeperException("No addresses passed to ZooKeeper constructor.", Coordination::ZBADARGUMENTS); + throw KeeperException("No hosts passed to ZooKeeper constructor.", Coordination::ZBADARGUMENTS); - std::vector addresses_strings; - splitInto<','>(addresses_strings, hosts); - Coordination::ZooKeeper::Addresses 
addresses; - addresses.reserve(addresses_strings.size()); + std::vector hosts_strings; + splitInto<','>(hosts_strings, hosts); + Coordination::ZooKeeper::Nodes nodes; + nodes.reserve(hosts_strings.size()); - for (const auto & address_string : addresses_strings) + for (auto & host_string : hosts_strings) { try { - addresses.emplace_back(address_string); + bool secure = bool(startsWith(host_string, "secure://")); + + if (secure) + host_string.erase(0, strlen("secure://")); + + nodes.emplace_back(Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{host_string}, secure}); } catch (const Poco::Net::DNSException & e) { - LOG_ERROR(log, "Cannot use ZooKeeper address " << address_string << ", reason: " << e.displayText()); + LOG_ERROR(log, "Cannot use ZooKeeper host " << host_string << ", reason: " << e.displayText()); } } - if (addresses.empty()) - throw KeeperException("Cannot use any of provided ZooKeeper addresses", Coordination::ZBADARGUMENTS); + if (nodes.empty()) + throw KeeperException("Cannot use any of provided ZooKeeper nodes", Coordination::ZBADARGUMENTS); impl = std::make_unique( - addresses, + nodes, chroot, identity_.empty() ? "" : "digest", identity_, @@ -130,6 +135,7 @@ struct ZooKeeperArgs if (startsWith(key, "node")) { hosts_strings.push_back( + (config.getBool(config_name + "." + key + ".secure", false) ? "secure://" : "") + config.getString(config_name + "." + key + ".host") + ":" + config.getString(config_name + "." + key + ".port", "2181") ); diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h similarity index 98% rename from dbms/src/Common/ZooKeeper/ZooKeeper.h rename to src/Common/ZooKeeper/ZooKeeper.h index 2d4d449b1a6..db166314a07 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -63,10 +63,14 @@ public: example1 2181 + + 1 example2 2181 + + 1 30000 10000 diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp b/src/Common/ZooKeeper/ZooKeeperHolder.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp rename to src/Common/ZooKeeper/ZooKeeperHolder.cpp diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.h b/src/Common/ZooKeeper/ZooKeeperHolder.h similarity index 100% rename from dbms/src/Common/ZooKeeper/ZooKeeperHolder.h rename to src/Common/ZooKeeper/ZooKeeperHolder.h diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp similarity index 97% rename from dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp rename to src/Common/ZooKeeper/ZooKeeperImpl.cpp index b8700a93e35..2fba10b20e9 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -11,6 +11,11 @@ #include #include +#include +#if USE_POCO_NETSSL +#include +#endif + #include @@ -44,6 +49,13 @@ namespace CurrentMetrics extern const Metric ZooKeeperWatch; } +namespace DB +{ + namespace ErrorCodes + { + extern const int SUPPORT_IS_DISABLED; + } +} /** ZooKeeper wire protocol. 
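The "secure://" scheme used in the hunks above is an in-band flag on the host string rather than a separate field in the hosts list. A sketch of the convention (illustrative names, not the patch's types):

    #include <string>

    struct ZooKeeperHost
    {
        std::string host_port;  // e.g. "example1:2181"
        bool secure = false;    // if true, connect via a TLS socket
    };

    ZooKeeperHost parseHostString(std::string host_string)
    {
        static const std::string prefix = "secure://";
        ZooKeeperHost result;
        result.secure = host_string.compare(0, prefix.size(), prefix) == 0;
        if (result.secure)
            host_string.erase(0, prefix.size());  // strip the marker, keep host:port
        result.host_port = std::move(host_string);
        return result;
    }

In the server configuration the flag is written per node as a secure element set to 1; ZooKeeperArgs (see the ZooKeeper.cpp hunk above) translates it into this prefix before the comma-separated hosts string is split and parsed.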
@@ -817,7 +829,7 @@ ZooKeeper::~ZooKeeper() ZooKeeper::ZooKeeper( - const Addresses & addresses, + const Nodes & nodes, const String & root_path_, const String & auth_scheme, const String & auth_data, @@ -851,7 +863,7 @@ ZooKeeper::ZooKeeper( default_acls.emplace_back(std::move(acl)); } - connect(addresses, connection_timeout); + connect(nodes, connection_timeout); if (!auth_scheme.empty()) sendAuth(auth_scheme, auth_data); @@ -864,11 +876,11 @@ ZooKeeper::ZooKeeper( void ZooKeeper::connect( - const Addresses & addresses, + const Nodes & nodes, Poco::Timespan connection_timeout) { - if (addresses.empty()) - throw Exception("No addresses passed to ZooKeeper constructor", ZBADARGUMENTS); + if (nodes.empty()) + throw Exception("No nodes passed to ZooKeeper constructor", ZBADARGUMENTS); static constexpr size_t num_tries = 3; bool connected = false; @@ -876,12 +888,25 @@ void ZooKeeper::connect( WriteBufferFromOwnString fail_reasons; for (size_t try_no = 0; try_no < num_tries; ++try_no) { - for (const auto & address : addresses) + for (const auto & node : nodes) { try { - socket = Poco::Net::StreamSocket(); /// Reset the state of previous attempt. - socket.connect(address, connection_timeout); + /// Reset the state of previous attempt. + if (node.secure) + { +#if USE_POCO_NETSSL + socket = Poco::Net::SecureStreamSocket(); +#else + throw Exception{"Communication with ZooKeeper over SSL is disabled because poco library was built without NetSSL support.", ErrorCodes::SUPPORT_IS_DISABLED}; +#endif + } + else + { + socket = Poco::Net::StreamSocket(); + } + + socket.connect(node.address, connection_timeout); socket.setReceiveTimeout(operation_timeout); socket.setSendTimeout(operation_timeout); @@ -915,7 +940,7 @@ void ZooKeeper::connect( } catch (...) { - fail_reasons << "\n" << getCurrentExceptionMessage(false) << ", " << address.toString(); + fail_reasons << "\n" << getCurrentExceptionMessage(false) << ", " << node.address.toString(); } } @@ -926,15 +951,19 @@ void ZooKeeper::connect( if (!connected) { WriteBufferFromOwnString message; - message << "All connection tries failed while connecting to ZooKeeper. Addresses: "; + message << "All connection tries failed while connecting to ZooKeeper. nodes: "; bool first = true; - for (const auto & address : addresses) + for (const auto & node : nodes) { if (first) first = false; else message << ", "; - message << address.toString(); + + if (node.secure) + message << "secure://"; + + message << node.address.toString(); } message << fail_reasons.str() << "\n"; diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h b/src/Common/ZooKeeper/ZooKeeperImpl.h similarity index 96% rename from dbms/src/Common/ZooKeeper/ZooKeeperImpl.h rename to src/Common/ZooKeeper/ZooKeeperImpl.h index 88e949dbd45..840cbdbde3f 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -93,17 +93,23 @@ struct ZooKeeperRequest; class ZooKeeper : public IKeeper { public: - using Addresses = std::vector; + struct Node + { + Poco::Net::SocketAddress address; + bool secure; + }; + + using Nodes = std::vector; using XID = int32_t; using OpNum = int32_t; - /** Connection to addresses is performed in order. If you want, shuffle them manually. + /** Connection to nodes is performed in order. If you want, shuffle them manually. * Operation timeout couldn't be greater than session timeout. * Operation timeout applies independently for network read, network write, waiting for events and synchronization. 
*/ ZooKeeper( - const Addresses & addresses, + const Nodes & nodes, const String & root_path, const String & auth_scheme, const String & auth_data, @@ -213,7 +219,7 @@ private: ThreadFromGlobalPool receive_thread; void connect( - const Addresses & addresses, + const Nodes & node, Poco::Timespan connection_timeout); void sendHandshake(); diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.cpp b/src/Common/ZooKeeper/ZooKeeperNodeCache.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.cpp rename to src/Common/ZooKeeper/ZooKeeperNodeCache.cpp diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.h b/src/Common/ZooKeeper/ZooKeeperNodeCache.h similarity index 100% rename from dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.h rename to src/Common/ZooKeeper/ZooKeeperNodeCache.h diff --git a/src/Common/ZooKeeper/tests/CMakeLists.txt b/src/Common/ZooKeeper/tests/CMakeLists.txt new file mode 100644 index 00000000000..45a48ddc7a9 --- /dev/null +++ b/src/Common/ZooKeeper/tests/CMakeLists.txt @@ -0,0 +1,23 @@ +add_executable(zkutil_test_commands zkutil_test_commands.cpp) +target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper) + +add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp) +target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper string_utils) + +add_executable(zkutil_test_lock zkutil_test_lock.cpp) +target_link_libraries(zkutil_test_lock PRIVATE clickhouse_common_zookeeper) + +add_executable(zkutil_expiration_test zkutil_expiration_test.cpp) +target_link_libraries(zkutil_expiration_test PRIVATE clickhouse_common_zookeeper) + +add_executable(zkutil_test_async zkutil_test_async.cpp) +target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper) + +add_executable(zkutil_zookeeper_holder zkutil_zookeeper_holder.cpp) +target_link_libraries(zkutil_zookeeper_holder PRIVATE clickhouse_common_zookeeper) + +add_executable (zk_many_watches_reconnect zk_many_watches_reconnect.cpp) +target_link_libraries (zk_many_watches_reconnect PRIVATE clickhouse_common_zookeeper clickhouse_common_config) + +add_executable (zookeeper_impl zookeeper_impl.cpp) +target_link_libraries (zookeeper_impl PRIVATE clickhouse_common_zookeeper) diff --git a/dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp b/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp similarity index 97% rename from dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp rename to src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp index a07c1ae8983..cd4c6e0a159 100644 --- a/dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp +++ b/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp @@ -123,14 +123,17 @@ TEST(zkutil, MultiAsync) ops.clear(); auto res = fut.get(); - ASSERT_EQ(res.error, Coordination::ZNODEEXISTS); - ASSERT_EQ(res.responses.size(), 2); + + /// The test is quite heavy. It is normal if session is expired during this test. + /// If we don't check that, the test will be flacky. + if (res.error != Coordination::ZSESSIONEXPIRED && res.error != Coordination::ZCONNECTIONLOSS) + { + ASSERT_EQ(res.error, Coordination::ZNODEEXISTS); + ASSERT_EQ(res.responses.size(), 2); + } } catch (const Coordination::Exception & e) { - /// The test is quite heavy. It is normal if session is expired during this test. - /// If we don't check that, the test will be flacky. 
- if (e.code != Coordination::ZSESSIONEXPIRED && e.code != Coordination::ZCONNECTIONLOSS) throw; } diff --git a/dbms/src/Common/ZooKeeper/tests/nozk.sh b/src/Common/ZooKeeper/tests/nozk.sh similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/nozk.sh rename to src/Common/ZooKeeper/tests/nozk.sh diff --git a/dbms/src/Common/ZooKeeper/tests/yeszk.sh b/src/Common/ZooKeeper/tests/yeszk.sh similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/yeszk.sh rename to src/Common/ZooKeeper/tests/yeszk.sh diff --git a/dbms/src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp b/src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp rename to src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp b/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp rename to src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_async.cpp b/src/Common/ZooKeeper/tests/zkutil_test_async.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_async.cpp rename to src/Common/ZooKeeper/tests/zkutil_test_async.cpp diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp b/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp rename to src/Common/ZooKeeper/tests/zkutil_test_commands.cpp diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp b/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp similarity index 90% rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp rename to src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp index aa348163adf..d9d3402fa32 100644 --- a/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp +++ b/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -23,15 +24,23 @@ try Poco::Logger::root().setChannel(channel); Poco::Logger::root().setLevel("trace"); - std::string addresses_arg = argv[1]; - std::vector addresses_strings; - splitInto<','>(addresses_strings, addresses_arg); - ZooKeeper::Addresses addresses; - addresses.reserve(addresses_strings.size()); - for (const auto & address_string : addresses_strings) - addresses.emplace_back(address_string); + std::string hosts_arg = argv[1]; + std::vector hosts_strings; + splitInto<','>(hosts_strings, hosts_arg); + ZooKeeper::Nodes nodes; + nodes.reserve(hosts_strings.size()); + for (auto & host_string : hosts_strings) + { + bool secure = bool(startsWith(host_string, "secure://")); - ZooKeeper zk(addresses, {}, {}, {}, {5, 0}, {0, 50000}, {0, 50000}); + if (secure) + host_string.erase(0, strlen("secure://")); + + nodes.emplace_back(ZooKeeper::Node{Poco::Net::SocketAddress{host_string},secure}); + } + + + ZooKeeper zk(nodes, {}, {}, {}, {5, 0}, {0, 50000}, {0, 50000}); Poco::Event event(true); diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_lock.cpp b/src/Common/ZooKeeper/tests/zkutil_test_lock.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_lock.cpp rename to src/Common/ZooKeeper/tests/zkutil_test_lock.cpp diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp 
b/src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp similarity index 100% rename from dbms/src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp rename to src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp diff --git a/dbms/src/Common/ZooKeeper/tests/zookeeper_impl.cpp b/src/Common/ZooKeeper/tests/zookeeper_impl.cpp similarity index 76% rename from dbms/src/Common/ZooKeeper/tests/zookeeper_impl.cpp rename to src/Common/ZooKeeper/tests/zookeeper_impl.cpp index da609a7bc72..74ba63514f2 100644 --- a/dbms/src/Common/ZooKeeper/tests/zookeeper_impl.cpp +++ b/src/Common/ZooKeeper/tests/zookeeper_impl.cpp @@ -5,7 +5,7 @@ int main() try { - Coordination::ZooKeeper zookeeper({Poco::Net::SocketAddress{"localhost:2181"}}, "", "", "", {30, 0}, {0, 50000}, {0, 50000}); + Coordination::ZooKeeper zookeeper({Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{"localhost:2181"}, false}}, "", "", "", {30, 0}, {0, 50000}, {0, 50000}); zookeeper.create("/test", "hello", false, false, {}, [](const Coordination::CreateResponse & response) { diff --git a/dbms/src/Common/assert_cast.h b/src/Common/assert_cast.h similarity index 100% rename from dbms/src/Common/assert_cast.h rename to src/Common/assert_cast.h diff --git a/dbms/src/Common/checkStackSize.cpp b/src/Common/checkStackSize.cpp similarity index 100% rename from dbms/src/Common/checkStackSize.cpp rename to src/Common/checkStackSize.cpp diff --git a/dbms/src/Common/checkStackSize.h b/src/Common/checkStackSize.h similarity index 100% rename from dbms/src/Common/checkStackSize.h rename to src/Common/checkStackSize.h diff --git a/dbms/src/Common/config.h.in b/src/Common/config.h.in similarity index 100% rename from dbms/src/Common/config.h.in rename to src/Common/config.h.in diff --git a/dbms/src/Common/config_version.h.in b/src/Common/config_version.h.in similarity index 100% rename from dbms/src/Common/config_version.h.in rename to src/Common/config_version.h.in diff --git a/dbms/src/Common/createHardLink.cpp b/src/Common/createHardLink.cpp similarity index 100% rename from dbms/src/Common/createHardLink.cpp rename to src/Common/createHardLink.cpp diff --git a/dbms/src/Common/createHardLink.h b/src/Common/createHardLink.h similarity index 100% rename from dbms/src/Common/createHardLink.h rename to src/Common/createHardLink.h diff --git a/dbms/src/Common/escapeForFileName.cpp b/src/Common/escapeForFileName.cpp similarity index 100% rename from dbms/src/Common/escapeForFileName.cpp rename to src/Common/escapeForFileName.cpp diff --git a/dbms/src/Common/escapeForFileName.h b/src/Common/escapeForFileName.h similarity index 100% rename from dbms/src/Common/escapeForFileName.h rename to src/Common/escapeForFileName.h diff --git a/dbms/src/Common/filesystemHelpers.cpp b/src/Common/filesystemHelpers.cpp similarity index 100% rename from dbms/src/Common/filesystemHelpers.cpp rename to src/Common/filesystemHelpers.cpp diff --git a/dbms/src/Common/filesystemHelpers.h b/src/Common/filesystemHelpers.h similarity index 100% rename from dbms/src/Common/filesystemHelpers.h rename to src/Common/filesystemHelpers.h diff --git a/dbms/src/Common/formatIPv6.cpp b/src/Common/formatIPv6.cpp similarity index 100% rename from dbms/src/Common/formatIPv6.cpp rename to src/Common/formatIPv6.cpp diff --git a/dbms/src/Common/formatIPv6.h b/src/Common/formatIPv6.h similarity index 100% rename from dbms/src/Common/formatIPv6.h rename to src/Common/formatIPv6.h diff --git a/dbms/src/Common/formatReadable.cpp b/src/Common/formatReadable.cpp similarity index 100% rename from 
dbms/src/Common/formatReadable.cpp rename to src/Common/formatReadable.cpp diff --git a/dbms/src/Common/formatReadable.h b/src/Common/formatReadable.h similarity index 100% rename from dbms/src/Common/formatReadable.h rename to src/Common/formatReadable.h diff --git a/dbms/src/Common/getExecutablePath.cpp b/src/Common/getExecutablePath.cpp similarity index 100% rename from dbms/src/Common/getExecutablePath.cpp rename to src/Common/getExecutablePath.cpp diff --git a/dbms/src/Common/getExecutablePath.h b/src/Common/getExecutablePath.h similarity index 100% rename from dbms/src/Common/getExecutablePath.h rename to src/Common/getExecutablePath.h diff --git a/dbms/src/Common/getMultipleKeysFromConfig.cpp b/src/Common/getMultipleKeysFromConfig.cpp similarity index 100% rename from dbms/src/Common/getMultipleKeysFromConfig.cpp rename to src/Common/getMultipleKeysFromConfig.cpp diff --git a/dbms/src/Common/getMultipleKeysFromConfig.h b/src/Common/getMultipleKeysFromConfig.h similarity index 100% rename from dbms/src/Common/getMultipleKeysFromConfig.h rename to src/Common/getMultipleKeysFromConfig.h diff --git a/dbms/src/Common/getNumberOfPhysicalCPUCores.cpp b/src/Common/getNumberOfPhysicalCPUCores.cpp similarity index 97% rename from dbms/src/Common/getNumberOfPhysicalCPUCores.cpp rename to src/Common/getNumberOfPhysicalCPUCores.cpp index 625c309bde5..32b70b76fbd 100644 --- a/dbms/src/Common/getNumberOfPhysicalCPUCores.cpp +++ b/src/Common/getNumberOfPhysicalCPUCores.cpp @@ -4,7 +4,6 @@ #include #if USE_CPUID # include -# include #elif USE_CPUINFO # include #endif diff --git a/dbms/src/Common/getNumberOfPhysicalCPUCores.h b/src/Common/getNumberOfPhysicalCPUCores.h similarity index 100% rename from dbms/src/Common/getNumberOfPhysicalCPUCores.h rename to src/Common/getNumberOfPhysicalCPUCores.h diff --git a/dbms/src/Common/hasLinuxCapability.cpp b/src/Common/hasLinuxCapability.cpp similarity index 100% rename from dbms/src/Common/hasLinuxCapability.cpp rename to src/Common/hasLinuxCapability.cpp diff --git a/dbms/src/Common/hasLinuxCapability.h b/src/Common/hasLinuxCapability.h similarity index 100% rename from dbms/src/Common/hasLinuxCapability.h rename to src/Common/hasLinuxCapability.h diff --git a/dbms/src/Common/hex.cpp b/src/Common/hex.cpp similarity index 100% rename from dbms/src/Common/hex.cpp rename to src/Common/hex.cpp diff --git a/dbms/src/Common/hex.h b/src/Common/hex.h similarity index 100% rename from dbms/src/Common/hex.h rename to src/Common/hex.h diff --git a/dbms/src/Common/intExp.h b/src/Common/intExp.h similarity index 100% rename from dbms/src/Common/intExp.h rename to src/Common/intExp.h diff --git a/dbms/src/Common/interpolate.h b/src/Common/interpolate.h similarity index 100% rename from dbms/src/Common/interpolate.h rename to src/Common/interpolate.h diff --git a/dbms/src/Common/isLocalAddress.cpp b/src/Common/isLocalAddress.cpp similarity index 100% rename from dbms/src/Common/isLocalAddress.cpp rename to src/Common/isLocalAddress.cpp diff --git a/dbms/src/Common/isLocalAddress.h b/src/Common/isLocalAddress.h similarity index 100% rename from dbms/src/Common/isLocalAddress.h rename to src/Common/isLocalAddress.h diff --git a/dbms/src/Common/malloc.cpp b/src/Common/malloc.cpp similarity index 100% rename from dbms/src/Common/malloc.cpp rename to src/Common/malloc.cpp diff --git a/dbms/src/Common/memcmpSmall.h b/src/Common/memcmpSmall.h similarity index 100% rename from dbms/src/Common/memcmpSmall.h rename to src/Common/memcmpSmall.h diff --git 
a/dbms/src/Common/memcpySmall.h b/src/Common/memcpySmall.h similarity index 100% rename from dbms/src/Common/memcpySmall.h rename to src/Common/memcpySmall.h diff --git a/dbms/src/Common/new_delete.cpp b/src/Common/new_delete.cpp similarity index 100% rename from dbms/src/Common/new_delete.cpp rename to src/Common/new_delete.cpp diff --git a/dbms/src/Common/parseAddress.cpp b/src/Common/parseAddress.cpp similarity index 100% rename from dbms/src/Common/parseAddress.cpp rename to src/Common/parseAddress.cpp diff --git a/dbms/src/Common/parseAddress.h b/src/Common/parseAddress.h similarity index 100% rename from dbms/src/Common/parseAddress.h rename to src/Common/parseAddress.h diff --git a/dbms/src/Common/parseGlobs.cpp b/src/Common/parseGlobs.cpp similarity index 86% rename from dbms/src/Common/parseGlobs.cpp rename to src/Common/parseGlobs.cpp index 79b441441bc..71ddbbd92ea 100644 --- a/dbms/src/Common/parseGlobs.cpp +++ b/src/Common/parseGlobs.cpp @@ -4,6 +4,7 @@ #include #include #include +#include <iomanip> namespace DB @@ -45,11 +46,18 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob char point; std::istringstream iss_range(buffer); iss_range >> range_begin >> point >> point >> range_end; - assert(iss_range.good()); + assert(!iss_range.fail()); + bool leading_zeros = buffer[0] == '0'; + size_t num_len = std::to_string(range_end).size(); + if (leading_zeros) + oss_for_replacing << std::setfill('0') << std::setw(num_len); oss_for_replacing << range_begin; for (size_t i = range_begin + 1; i <= range_end; ++i) { - oss_for_replacing << '|' << i; + oss_for_replacing << '|'; + if (leading_zeros) + oss_for_replacing << std::setfill('0') << std::setw(num_len); + oss_for_replacing << i; } } else diff --git a/dbms/src/Common/parseGlobs.h b/src/Common/parseGlobs.h similarity index 100% rename from dbms/src/Common/parseGlobs.h rename to src/Common/parseGlobs.h diff --git a/dbms/src/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp similarity index 100% rename from dbms/src/Common/parseRemoteDescription.cpp rename to src/Common/parseRemoteDescription.cpp diff --git a/dbms/src/Common/parseRemoteDescription.h b/src/Common/parseRemoteDescription.h similarity index 100% rename from dbms/src/Common/parseRemoteDescription.h rename to src/Common/parseRemoteDescription.h diff --git a/dbms/src/Common/quoteString.cpp b/src/Common/quoteString.cpp similarity index 100% rename from dbms/src/Common/quoteString.cpp rename to src/Common/quoteString.cpp diff --git a/dbms/src/Common/quoteString.h b/src/Common/quoteString.h similarity index 100% rename from dbms/src/Common/quoteString.h rename to src/Common/quoteString.h diff --git a/dbms/src/Common/randomSeed.cpp b/src/Common/randomSeed.cpp similarity index 100% rename from dbms/src/Common/randomSeed.cpp rename to src/Common/randomSeed.cpp diff --git a/dbms/src/Common/randomSeed.h b/src/Common/randomSeed.h similarity index 100% rename from dbms/src/Common/randomSeed.h rename to src/Common/randomSeed.h
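[Editorial note] The parseGlobs.cpp hunk above makes numeric glob ranges preserve leading zeros: `{01..12}` should expand to `(01|02|...|12)`, not `(1|2|...|12)`. A minimal self-contained sketch of exactly that expansion logic (names taken from the hunk; the surrounding regexp machinery is omitted):

```cpp
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    std::string buffer = "01..12";   // contents of a "{01..12}" glob range
    size_t range_begin = 0, range_end = 0;
    char point;
    std::istringstream iss_range(buffer);
    iss_range >> range_begin >> point >> point >> range_end;

    // As in the patch: pad with zeros iff the range starts with '0',
    // to the width of the upper bound.
    bool leading_zeros = buffer[0] == '0';
    size_t num_len = std::to_string(range_end).size();

    std::ostringstream oss_for_replacing;
    if (leading_zeros)
        oss_for_replacing << std::setfill('0') << std::setw(num_len);
    oss_for_replacing << range_begin;
    for (size_t i = range_begin + 1; i <= range_end; ++i)
    {
        oss_for_replacing << '|';
        if (leading_zeros)
            oss_for_replacing << std::setfill('0') << std::setw(num_len);
        oss_for_replacing << i;
    }
    // Prints (01|02|03|04|05|06|07|08|09|10|11|12)
    std::cout << '(' << oss_for_replacing.str() << ")\n";
}
```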
diff --git a/dbms/src/Common/setThreadName.cpp b/src/Common/setThreadName.cpp similarity index 96% rename from dbms/src/Common/setThreadName.cpp rename to src/Common/setThreadName.cpp index 9774d9b4b86..3c20711a761 100644 --- a/dbms/src/Common/setThreadName.cpp +++ b/src/Common/setThreadName.cpp @@ -29,11 +29,10 @@ void setThreadName(const char * name) throw DB::Exception("Thread name cannot be longer than 15 bytes", DB::ErrorCodes::PTHREAD_ERROR); #endif -#if defined(__FreeBSD__) +#if defined(OS_FREEBSD) pthread_set_name_np(pthread_self(), name); - return; - -#elif defined(__APPLE__) + if ((false)) +#elif defined(OS_DARWIN) if (0 != pthread_setname_np(name)) #else if (0 != prctl(PR_SET_NAME, name, 0, 0, 0)) diff --git a/dbms/src/Common/setThreadName.h b/src/Common/setThreadName.h similarity index 100% rename from dbms/src/Common/setThreadName.h rename to src/Common/setThreadName.h diff --git a/dbms/src/Common/tests/CMakeLists.txt b/src/Common/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Common/tests/CMakeLists.txt rename to src/Common/tests/CMakeLists.txt diff --git a/dbms/src/Common/tests/arena_with_free_lists.cpp b/src/Common/tests/arena_with_free_lists.cpp similarity index 100% rename from dbms/src/Common/tests/arena_with_free_lists.cpp rename to src/Common/tests/arena_with_free_lists.cpp diff --git a/dbms/src/Common/tests/array_cache.cpp b/src/Common/tests/array_cache.cpp similarity index 100% rename from dbms/src/Common/tests/array_cache.cpp rename to src/Common/tests/array_cache.cpp diff --git a/dbms/src/Common/tests/auto_array.cpp b/src/Common/tests/auto_array.cpp similarity index 100% rename from dbms/src/Common/tests/auto_array.cpp rename to src/Common/tests/auto_array.cpp diff --git a/dbms/src/Common/tests/chaos_sanitizer.cpp b/src/Common/tests/chaos_sanitizer.cpp similarity index 100% rename from dbms/src/Common/tests/chaos_sanitizer.cpp rename to src/Common/tests/chaos_sanitizer.cpp diff --git a/dbms/src/Common/tests/compact_array.cpp b/src/Common/tests/compact_array.cpp similarity index 100% rename from dbms/src/Common/tests/compact_array.cpp rename to src/Common/tests/compact_array.cpp diff --git a/dbms/src/Common/tests/cow_columns.cpp b/src/Common/tests/cow_columns.cpp similarity index 100% rename from dbms/src/Common/tests/cow_columns.cpp rename to src/Common/tests/cow_columns.cpp diff --git a/dbms/src/Common/tests/cow_compositions.cpp b/src/Common/tests/cow_compositions.cpp similarity index 100% rename from dbms/src/Common/tests/cow_compositions.cpp rename to src/Common/tests/cow_compositions.cpp diff --git a/dbms/src/Common/tests/gtest_getMultipleValuesFromConfig.cpp b/src/Common/tests/gtest_getMultipleValuesFromConfig.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_getMultipleValuesFromConfig.cpp rename to src/Common/tests/gtest_getMultipleValuesFromConfig.cpp diff --git a/dbms/src/Common/tests/gtest_global_context.h b/src/Common/tests/gtest_global_context.h similarity index 100% rename from dbms/src/Common/tests/gtest_global_context.h rename to src/Common/tests/gtest_global_context.h diff --git a/dbms/src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp b/src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp rename to src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp diff --git a/dbms/src/Common/tests/gtest_pod_array.cpp b/src/Common/tests/gtest_pod_array.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_pod_array.cpp rename to src/Common/tests/gtest_pod_array.cpp diff --git a/dbms/src/Common/tests/gtest_rw_lock.cpp b/src/Common/tests/gtest_rw_lock.cpp similarity index 90% rename from dbms/src/Common/tests/gtest_rw_lock.cpp rename to src/Common/tests/gtest_rw_lock.cpp index dec4c732fd5..73987a25508 100644 --- a/dbms/src/Common/tests/gtest_rw_lock.cpp +++ b/src/Common/tests/gtest_rw_lock.cpp @@ -150,9 +150,16 @@ TEST(Common, RWLockDeadlock) usleep(100000); usleep(100000); usleep(100000); + usleep(100000); try { - auto
holder2 = lock2->getLock(RWLockImpl::Read, "q1"); + auto holder2 = lock2->getLock(RWLockImpl::Read, "q1", std::chrono::milliseconds(100)); + if (!holder2) + { + throw Exception( + "Locking attempt timed out! Possible deadlock avoided. Client should retry.", + ErrorCodes::DEADLOCK_AVOIDED); + } } catch (const Exception & e) { @@ -174,9 +181,16 @@ TEST(Common, RWLockDeadlock) auto holder2 = lock2->getLock(RWLockImpl::Read, "q3"); usleep(100000); usleep(100000); + usleep(100000); try { - auto holder1 = lock1->getLock(RWLockImpl::Read, "q3"); + auto holder1 = lock1->getLock(RWLockImpl::Read, "q3", std::chrono::milliseconds(100)); + if (!holder1) + { + throw Exception( + "Locking attempt timed out! Possible deadlock avoided. Client should retry.", + ErrorCodes::DEADLOCK_AVOIDED); + } } catch (const Exception & e) { diff --git a/dbms/src/Common/tests/gtest_sensitive_data_masker.cpp b/src/Common/tests/gtest_sensitive_data_masker.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_sensitive_data_masker.cpp rename to src/Common/tests/gtest_sensitive_data_masker.cpp diff --git a/dbms/src/Common/tests/gtest_shell_command.cpp b/src/Common/tests/gtest_shell_command.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_shell_command.cpp rename to src/Common/tests/gtest_shell_command.cpp diff --git a/dbms/src/Common/tests/gtest_thread_pool_concurrent_wait.cpp b/src/Common/tests/gtest_thread_pool_concurrent_wait.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_thread_pool_concurrent_wait.cpp rename to src/Common/tests/gtest_thread_pool_concurrent_wait.cpp diff --git a/dbms/src/Common/tests/gtest_thread_pool_global_full.cpp b/src/Common/tests/gtest_thread_pool_global_full.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_thread_pool_global_full.cpp rename to src/Common/tests/gtest_thread_pool_global_full.cpp diff --git a/dbms/src/Common/tests/gtest_thread_pool_limit.cpp b/src/Common/tests/gtest_thread_pool_limit.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_thread_pool_limit.cpp rename to src/Common/tests/gtest_thread_pool_limit.cpp diff --git a/dbms/src/Common/tests/gtest_thread_pool_loop.cpp b/src/Common/tests/gtest_thread_pool_loop.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_thread_pool_loop.cpp rename to src/Common/tests/gtest_thread_pool_loop.cpp diff --git a/dbms/src/Common/tests/gtest_thread_pool_schedule_exception.cpp b/src/Common/tests/gtest_thread_pool_schedule_exception.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_thread_pool_schedule_exception.cpp rename to src/Common/tests/gtest_thread_pool_schedule_exception.cpp diff --git a/dbms/src/Common/tests/gtest_unescapeForFileName.cpp b/src/Common/tests/gtest_unescapeForFileName.cpp similarity index 100% rename from dbms/src/Common/tests/gtest_unescapeForFileName.cpp rename to src/Common/tests/gtest_unescapeForFileName.cpp diff --git a/dbms/src/Common/tests/hash_table.cpp b/src/Common/tests/hash_table.cpp similarity index 100% rename from dbms/src/Common/tests/hash_table.cpp rename to src/Common/tests/hash_table.cpp diff --git a/dbms/src/Common/tests/hashes_test.cpp b/src/Common/tests/hashes_test.cpp similarity index 100% rename from dbms/src/Common/tests/hashes_test.cpp rename to src/Common/tests/hashes_test.cpp diff --git a/dbms/src/Common/tests/int_hashes_perf.cpp b/src/Common/tests/int_hashes_perf.cpp similarity index 100% rename from dbms/src/Common/tests/int_hashes_perf.cpp rename to src/Common/tests/int_hashes_perf.cpp diff 
--git a/dbms/src/Common/tests/integer_hash_tables_and_hashes.cpp b/src/Common/tests/integer_hash_tables_and_hashes.cpp similarity index 100% rename from dbms/src/Common/tests/integer_hash_tables_and_hashes.cpp rename to src/Common/tests/integer_hash_tables_and_hashes.cpp diff --git a/dbms/src/Common/tests/parallel_aggregation.cpp b/src/Common/tests/parallel_aggregation.cpp similarity index 100% rename from dbms/src/Common/tests/parallel_aggregation.cpp rename to src/Common/tests/parallel_aggregation.cpp diff --git a/dbms/src/Common/tests/parallel_aggregation2.cpp b/src/Common/tests/parallel_aggregation2.cpp similarity index 100% rename from dbms/src/Common/tests/parallel_aggregation2.cpp rename to src/Common/tests/parallel_aggregation2.cpp diff --git a/dbms/src/Common/tests/pod_array.cpp b/src/Common/tests/pod_array.cpp similarity index 100% rename from dbms/src/Common/tests/pod_array.cpp rename to src/Common/tests/pod_array.cpp diff --git a/dbms/src/Common/tests/radix_sort.cpp b/src/Common/tests/radix_sort.cpp similarity index 100% rename from dbms/src/Common/tests/radix_sort.cpp rename to src/Common/tests/radix_sort.cpp diff --git a/dbms/src/Common/tests/simple_cache.cpp b/src/Common/tests/simple_cache.cpp similarity index 100% rename from dbms/src/Common/tests/simple_cache.cpp rename to src/Common/tests/simple_cache.cpp diff --git a/dbms/src/Common/tests/sip_hash_perf.cpp b/src/Common/tests/sip_hash_perf.cpp similarity index 100% rename from dbms/src/Common/tests/sip_hash_perf.cpp rename to src/Common/tests/sip_hash_perf.cpp diff --git a/dbms/src/Common/tests/small_table.cpp b/src/Common/tests/small_table.cpp similarity index 100% rename from dbms/src/Common/tests/small_table.cpp rename to src/Common/tests/small_table.cpp diff --git a/dbms/src/Common/tests/space_saving.cpp b/src/Common/tests/space_saving.cpp similarity index 100% rename from dbms/src/Common/tests/space_saving.cpp rename to src/Common/tests/space_saving.cpp diff --git a/dbms/src/Common/tests/stopwatch.cpp b/src/Common/tests/stopwatch.cpp similarity index 100% rename from dbms/src/Common/tests/stopwatch.cpp rename to src/Common/tests/stopwatch.cpp diff --git a/dbms/src/Common/tests/symbol_index.cpp b/src/Common/tests/symbol_index.cpp similarity index 100% rename from dbms/src/Common/tests/symbol_index.cpp rename to src/Common/tests/symbol_index.cpp diff --git a/dbms/src/Common/tests/thread_creation_latency.cpp b/src/Common/tests/thread_creation_latency.cpp similarity index 100% rename from dbms/src/Common/tests/thread_creation_latency.cpp rename to src/Common/tests/thread_creation_latency.cpp diff --git a/dbms/src/Common/thread_local_rng.cpp b/src/Common/thread_local_rng.cpp similarity index 100% rename from dbms/src/Common/thread_local_rng.cpp rename to src/Common/thread_local_rng.cpp diff --git a/dbms/src/Common/thread_local_rng.h b/src/Common/thread_local_rng.h similarity index 100% rename from dbms/src/Common/thread_local_rng.h rename to src/Common/thread_local_rng.h diff --git a/dbms/src/Common/typeid_cast.h b/src/Common/typeid_cast.h similarity index 100% rename from dbms/src/Common/typeid_cast.h rename to src/Common/typeid_cast.h diff --git a/dbms/src/Compression/CMakeLists.txt b/src/Compression/CMakeLists.txt similarity index 100% rename from dbms/src/Compression/CMakeLists.txt rename to src/Compression/CMakeLists.txt diff --git a/dbms/src/Compression/CachedCompressedReadBuffer.cpp b/src/Compression/CachedCompressedReadBuffer.cpp similarity index 100% rename from dbms/src/Compression/CachedCompressedReadBuffer.cpp 
rename to src/Compression/CachedCompressedReadBuffer.cpp diff --git a/dbms/src/Compression/CachedCompressedReadBuffer.h b/src/Compression/CachedCompressedReadBuffer.h similarity index 100% rename from dbms/src/Compression/CachedCompressedReadBuffer.h rename to src/Compression/CachedCompressedReadBuffer.h diff --git a/dbms/src/Compression/CompressedReadBuffer.cpp b/src/Compression/CompressedReadBuffer.cpp similarity index 100% rename from dbms/src/Compression/CompressedReadBuffer.cpp rename to src/Compression/CompressedReadBuffer.cpp diff --git a/dbms/src/Compression/CompressedReadBuffer.h b/src/Compression/CompressedReadBuffer.h similarity index 100% rename from dbms/src/Compression/CompressedReadBuffer.h rename to src/Compression/CompressedReadBuffer.h diff --git a/dbms/src/Compression/CompressedReadBufferBase.cpp b/src/Compression/CompressedReadBufferBase.cpp similarity index 100% rename from dbms/src/Compression/CompressedReadBufferBase.cpp rename to src/Compression/CompressedReadBufferBase.cpp diff --git a/dbms/src/Compression/CompressedReadBufferBase.h b/src/Compression/CompressedReadBufferBase.h similarity index 100% rename from dbms/src/Compression/CompressedReadBufferBase.h rename to src/Compression/CompressedReadBufferBase.h diff --git a/dbms/src/Compression/CompressedReadBufferFromFile.cpp b/src/Compression/CompressedReadBufferFromFile.cpp similarity index 100% rename from dbms/src/Compression/CompressedReadBufferFromFile.cpp rename to src/Compression/CompressedReadBufferFromFile.cpp diff --git a/dbms/src/Compression/CompressedReadBufferFromFile.h b/src/Compression/CompressedReadBufferFromFile.h similarity index 100% rename from dbms/src/Compression/CompressedReadBufferFromFile.h rename to src/Compression/CompressedReadBufferFromFile.h diff --git a/dbms/src/Compression/CompressedWriteBuffer.cpp b/src/Compression/CompressedWriteBuffer.cpp similarity index 100% rename from dbms/src/Compression/CompressedWriteBuffer.cpp rename to src/Compression/CompressedWriteBuffer.cpp diff --git a/dbms/src/Compression/CompressedWriteBuffer.h b/src/Compression/CompressedWriteBuffer.h similarity index 100% rename from dbms/src/Compression/CompressedWriteBuffer.h rename to src/Compression/CompressedWriteBuffer.h diff --git a/dbms/src/Compression/CompressionCodecDelta.cpp b/src/Compression/CompressionCodecDelta.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecDelta.cpp rename to src/Compression/CompressionCodecDelta.cpp diff --git a/dbms/src/Compression/CompressionCodecDelta.h b/src/Compression/CompressionCodecDelta.h similarity index 100% rename from dbms/src/Compression/CompressionCodecDelta.h rename to src/Compression/CompressionCodecDelta.h diff --git a/dbms/src/Compression/CompressionCodecDoubleDelta.cpp b/src/Compression/CompressionCodecDoubleDelta.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecDoubleDelta.cpp rename to src/Compression/CompressionCodecDoubleDelta.cpp diff --git a/dbms/src/Compression/CompressionCodecDoubleDelta.h b/src/Compression/CompressionCodecDoubleDelta.h similarity index 100% rename from dbms/src/Compression/CompressionCodecDoubleDelta.h rename to src/Compression/CompressionCodecDoubleDelta.h diff --git a/dbms/src/Compression/CompressionCodecGorilla.cpp b/src/Compression/CompressionCodecGorilla.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecGorilla.cpp rename to src/Compression/CompressionCodecGorilla.cpp diff --git a/dbms/src/Compression/CompressionCodecGorilla.h 
b/src/Compression/CompressionCodecGorilla.h similarity index 100% rename from dbms/src/Compression/CompressionCodecGorilla.h rename to src/Compression/CompressionCodecGorilla.h diff --git a/dbms/src/Compression/CompressionCodecLZ4.cpp b/src/Compression/CompressionCodecLZ4.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecLZ4.cpp rename to src/Compression/CompressionCodecLZ4.cpp diff --git a/dbms/src/Compression/CompressionCodecLZ4.h b/src/Compression/CompressionCodecLZ4.h similarity index 100% rename from dbms/src/Compression/CompressionCodecLZ4.h rename to src/Compression/CompressionCodecLZ4.h diff --git a/dbms/src/Compression/CompressionCodecMultiple.cpp b/src/Compression/CompressionCodecMultiple.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecMultiple.cpp rename to src/Compression/CompressionCodecMultiple.cpp diff --git a/dbms/src/Compression/CompressionCodecMultiple.h b/src/Compression/CompressionCodecMultiple.h similarity index 100% rename from dbms/src/Compression/CompressionCodecMultiple.h rename to src/Compression/CompressionCodecMultiple.h diff --git a/dbms/src/Compression/CompressionCodecNone.cpp b/src/Compression/CompressionCodecNone.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecNone.cpp rename to src/Compression/CompressionCodecNone.cpp diff --git a/dbms/src/Compression/CompressionCodecNone.h b/src/Compression/CompressionCodecNone.h similarity index 100% rename from dbms/src/Compression/CompressionCodecNone.h rename to src/Compression/CompressionCodecNone.h diff --git a/dbms/src/Compression/CompressionCodecT64.cpp b/src/Compression/CompressionCodecT64.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecT64.cpp rename to src/Compression/CompressionCodecT64.cpp diff --git a/dbms/src/Compression/CompressionCodecT64.h b/src/Compression/CompressionCodecT64.h similarity index 100% rename from dbms/src/Compression/CompressionCodecT64.h rename to src/Compression/CompressionCodecT64.h diff --git a/dbms/src/Compression/CompressionCodecZSTD.cpp b/src/Compression/CompressionCodecZSTD.cpp similarity index 100% rename from dbms/src/Compression/CompressionCodecZSTD.cpp rename to src/Compression/CompressionCodecZSTD.cpp diff --git a/dbms/src/Compression/CompressionCodecZSTD.h b/src/Compression/CompressionCodecZSTD.h similarity index 100% rename from dbms/src/Compression/CompressionCodecZSTD.h rename to src/Compression/CompressionCodecZSTD.h diff --git a/dbms/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp similarity index 100% rename from dbms/src/Compression/CompressionFactory.cpp rename to src/Compression/CompressionFactory.cpp diff --git a/dbms/src/Compression/CompressionFactory.h b/src/Compression/CompressionFactory.h similarity index 100% rename from dbms/src/Compression/CompressionFactory.h rename to src/Compression/CompressionFactory.h diff --git a/dbms/src/Compression/CompressionInfo.h b/src/Compression/CompressionInfo.h similarity index 100% rename from dbms/src/Compression/CompressionInfo.h rename to src/Compression/CompressionInfo.h diff --git a/dbms/src/Compression/ICompressionCodec.cpp b/src/Compression/ICompressionCodec.cpp similarity index 100% rename from dbms/src/Compression/ICompressionCodec.cpp rename to src/Compression/ICompressionCodec.cpp diff --git a/dbms/src/Compression/ICompressionCodec.h b/src/Compression/ICompressionCodec.h similarity index 100% rename from dbms/src/Compression/ICompressionCodec.h rename to 
src/Compression/ICompressionCodec.h diff --git a/dbms/src/Compression/LZ4_decompress_faster.cpp b/src/Compression/LZ4_decompress_faster.cpp similarity index 100% rename from dbms/src/Compression/LZ4_decompress_faster.cpp rename to src/Compression/LZ4_decompress_faster.cpp diff --git a/dbms/src/Compression/LZ4_decompress_faster.h b/src/Compression/LZ4_decompress_faster.h similarity index 100% rename from dbms/src/Compression/LZ4_decompress_faster.h rename to src/Compression/LZ4_decompress_faster.h diff --git a/dbms/src/Compression/tests/CMakeLists.txt b/src/Compression/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Compression/tests/CMakeLists.txt rename to src/Compression/tests/CMakeLists.txt diff --git a/dbms/src/Compression/tests/cached_compressed_read_buffer.cpp b/src/Compression/tests/cached_compressed_read_buffer.cpp similarity index 100% rename from dbms/src/Compression/tests/cached_compressed_read_buffer.cpp rename to src/Compression/tests/cached_compressed_read_buffer.cpp diff --git a/dbms/src/Compression/tests/compressed_buffer.cpp b/src/Compression/tests/compressed_buffer.cpp similarity index 100% rename from dbms/src/Compression/tests/compressed_buffer.cpp rename to src/Compression/tests/compressed_buffer.cpp diff --git a/dbms/src/Compression/tests/compressed_buffer_fuzz.cpp b/src/Compression/tests/compressed_buffer_fuzz.cpp similarity index 100% rename from dbms/src/Compression/tests/compressed_buffer_fuzz.cpp rename to src/Compression/tests/compressed_buffer_fuzz.cpp diff --git a/dbms/src/Compression/tests/gtest_compressionCodec.cpp b/src/Compression/tests/gtest_compressionCodec.cpp similarity index 99% rename from dbms/src/Compression/tests/gtest_compressionCodec.cpp rename to src/Compression/tests/gtest_compressionCodec.cpp index a6bfdaebb14..f3652366a24 100644 --- a/dbms/src/Compression/tests/gtest_compressionCodec.cpp +++ b/src/Compression/tests/gtest_compressionCodec.cpp @@ -462,7 +462,7 @@ CompressionCodecPtr makeCodec(const std::string & codec_string, const DataTypePt { const std::string codec_statement = "(" + codec_string + ")"; Tokens tokens(codec_statement.begin().base(), codec_statement.end().base()); - IParser::Pos token_iterator(tokens); + IParser::Pos token_iterator(tokens, 0); Expected expected; ASTPtr codec_ast; diff --git a/dbms/src/Core/AccurateComparison.h b/src/Core/AccurateComparison.h similarity index 100% rename from dbms/src/Core/AccurateComparison.h rename to src/Core/AccurateComparison.h diff --git a/dbms/src/Core/BackgroundSchedulePool.cpp b/src/Core/BackgroundSchedulePool.cpp similarity index 100% rename from dbms/src/Core/BackgroundSchedulePool.cpp rename to src/Core/BackgroundSchedulePool.cpp diff --git a/dbms/src/Core/BackgroundSchedulePool.h b/src/Core/BackgroundSchedulePool.h similarity index 100% rename from dbms/src/Core/BackgroundSchedulePool.h rename to src/Core/BackgroundSchedulePool.h diff --git a/dbms/src/Core/Block.cpp b/src/Core/Block.cpp similarity index 100% rename from dbms/src/Core/Block.cpp rename to src/Core/Block.cpp diff --git a/dbms/src/Core/Block.h b/src/Core/Block.h similarity index 100% rename from dbms/src/Core/Block.h rename to src/Core/Block.h diff --git a/dbms/src/Core/BlockInfo.cpp b/src/Core/BlockInfo.cpp similarity index 100% rename from dbms/src/Core/BlockInfo.cpp rename to src/Core/BlockInfo.cpp diff --git a/dbms/src/Core/BlockInfo.h b/src/Core/BlockInfo.h similarity index 100% rename from dbms/src/Core/BlockInfo.h rename to src/Core/BlockInfo.h diff --git a/dbms/src/Core/CMakeLists.txt 
b/src/Core/CMakeLists.txt similarity index 100% rename from dbms/src/Core/CMakeLists.txt rename to src/Core/CMakeLists.txt diff --git a/dbms/src/Core/ColumnNumbers.h b/src/Core/ColumnNumbers.h similarity index 100% rename from dbms/src/Core/ColumnNumbers.h rename to src/Core/ColumnNumbers.h diff --git a/dbms/src/Core/ColumnWithTypeAndName.cpp b/src/Core/ColumnWithTypeAndName.cpp similarity index 100% rename from dbms/src/Core/ColumnWithTypeAndName.cpp rename to src/Core/ColumnWithTypeAndName.cpp diff --git a/dbms/src/Core/ColumnWithTypeAndName.h b/src/Core/ColumnWithTypeAndName.h similarity index 100% rename from dbms/src/Core/ColumnWithTypeAndName.h rename to src/Core/ColumnWithTypeAndName.h diff --git a/dbms/src/Core/ColumnsWithTypeAndName.h b/src/Core/ColumnsWithTypeAndName.h similarity index 100% rename from dbms/src/Core/ColumnsWithTypeAndName.h rename to src/Core/ColumnsWithTypeAndName.h diff --git a/dbms/src/Core/DecimalComparison.h b/src/Core/DecimalComparison.h similarity index 100% rename from dbms/src/Core/DecimalComparison.h rename to src/Core/DecimalComparison.h diff --git a/dbms/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h similarity index 100% rename from dbms/src/Core/DecimalFunctions.h rename to src/Core/DecimalFunctions.h diff --git a/dbms/src/Core/Defines.h b/src/Core/Defines.h similarity index 93% rename from dbms/src/Core/Defines.h rename to src/Core/Defines.h index f2d4a517712..fe614cec6bd 100644 --- a/dbms/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -91,3 +91,10 @@ # define ASAN_UNPOISON_MEMORY_REGION(a, b) # define ASAN_POISON_MEMORY_REGION(a, b) #endif + +/// Actually, there may be multiple acquisitions of different locks for a given table within one query. /// See the IStorage class for the list of possible locks +#define DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC 120 + +/// Default limit on recursion depth of recursive descent parser.
+#define DBMS_DEFAULT_MAX_PARSER_DEPTH 1000 diff --git a/dbms/src/Core/ExternalResultDescription.cpp b/src/Core/ExternalResultDescription.cpp similarity index 100% rename from dbms/src/Core/ExternalResultDescription.cpp rename to src/Core/ExternalResultDescription.cpp diff --git a/dbms/src/Core/ExternalResultDescription.h b/src/Core/ExternalResultDescription.h similarity index 100% rename from dbms/src/Core/ExternalResultDescription.h rename to src/Core/ExternalResultDescription.h diff --git a/dbms/src/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp similarity index 100% rename from dbms/src/Core/ExternalTable.cpp rename to src/Core/ExternalTable.cpp diff --git a/dbms/src/Core/ExternalTable.h b/src/Core/ExternalTable.h similarity index 100% rename from dbms/src/Core/ExternalTable.h rename to src/Core/ExternalTable.h diff --git a/dbms/src/Core/Field.cpp b/src/Core/Field.cpp similarity index 100% rename from dbms/src/Core/Field.cpp rename to src/Core/Field.cpp diff --git a/dbms/src/Core/Field.h b/src/Core/Field.h similarity index 100% rename from dbms/src/Core/Field.h rename to src/Core/Field.h diff --git a/dbms/src/Core/MySQLProtocol.cpp b/src/Core/MySQLProtocol.cpp similarity index 100% rename from dbms/src/Core/MySQLProtocol.cpp rename to src/Core/MySQLProtocol.cpp diff --git a/dbms/src/Core/MySQLProtocol.h b/src/Core/MySQLProtocol.h similarity index 97% rename from dbms/src/Core/MySQLProtocol.h rename to src/Core/MySQLProtocol.h index 1fae57517c1..5255c6f263e 100644 --- a/dbms/src/Core/MySQLProtocol.h +++ b/src/Core/MySQLProtocol.h @@ -914,8 +914,17 @@ public: scramble.resize(SCRAMBLE_LENGTH + 1, 0); Poco::RandomInputStream generator; - for (size_t i = 0; i < SCRAMBLE_LENGTH; i++) + /** Generate a random string using ASCII characters, but avoid the separator character; + * produce pseudo-random numbers with about 7 bits of entropy each, in the range 1-127.
+ * https://github.com/mysql/mysql-server/blob/8.0/mysys/crypt_genhash_impl.cc#L427 + */ + for (size_t i = 0; i < SCRAMBLE_LENGTH; ++i) + { generator >> scramble[i]; + scramble[i] &= 0x7f; + if (scramble[i] == '\0' || scramble[i] == '$') + scramble[i] = scramble[i] + 1; + } } String getName() override @@ -993,8 +1002,13 @@ public: scramble.resize(SCRAMBLE_LENGTH + 1, 0); Poco::RandomInputStream generator; - for (size_t i = 0; i < SCRAMBLE_LENGTH; i++) + for (size_t i = 0; i < SCRAMBLE_LENGTH; ++i) + { generator >> scramble[i]; + scramble[i] &= 0x7f; + if (scramble[i] == '\0' || scramble[i] == '$') + scramble[i] = scramble[i] + 1; + } } String getName() override diff --git a/dbms/src/Core/Names.h b/src/Core/Names.h similarity index 100% rename from dbms/src/Core/Names.h rename to src/Core/Names.h diff --git a/dbms/src/Core/NamesAndTypes.cpp b/src/Core/NamesAndTypes.cpp similarity index 100% rename from dbms/src/Core/NamesAndTypes.cpp rename to src/Core/NamesAndTypes.cpp diff --git a/dbms/src/Core/NamesAndTypes.h b/src/Core/NamesAndTypes.h similarity index 100% rename from dbms/src/Core/NamesAndTypes.h rename to src/Core/NamesAndTypes.h diff --git a/dbms/src/Core/Protocol.h b/src/Core/Protocol.h similarity index 100% rename from dbms/src/Core/Protocol.h rename to src/Core/Protocol.h diff --git a/dbms/src/Core/QualifiedTableName.h b/src/Core/QualifiedTableName.h similarity index 100% rename from dbms/src/Core/QualifiedTableName.h rename to src/Core/QualifiedTableName.h diff --git a/dbms/src/Core/QueryProcessingStage.h b/src/Core/QueryProcessingStage.h similarity index 100% rename from dbms/src/Core/QueryProcessingStage.h rename to src/Core/QueryProcessingStage.h diff --git a/dbms/src/Core/Row.h b/src/Core/Row.h similarity index 100% rename from dbms/src/Core/Row.h rename to src/Core/Row.h diff --git a/dbms/src/Core/Settings.cpp b/src/Core/Settings.cpp similarity index 100% rename from dbms/src/Core/Settings.cpp rename to src/Core/Settings.cpp diff --git a/dbms/src/Core/Settings.h b/src/Core/Settings.h similarity index 98% rename from dbms/src/Core/Settings.h rename to src/Core/Settings.h index 753231603b2..325abc16f3f 100644 --- a/dbms/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -149,7 +149,7 @@ struct Settings : public SettingsCollection M(SettingInt64, os_thread_priority, 0, "If non zero - set corresponding 'nice' value for query processing threads. Can be used to adjust query priority for OS scheduler.", 0) \ \ M(SettingBool, log_queries, 0, "Log requests and write the log to the system table.", 0) \ - \ + M(SettingLogQueriesType, log_queries_min_type, QueryLogElementType::QUERY_START, "query_log minimal type to log, possible values (from low to high): QUERY_START, QUERY_FINISH, EXCEPTION_BEFORE_START, EXCEPTION_WHILE_PROCESSING.", 0) \ M(SettingUInt64, log_queries_cut_to_length, 100000, "If query length is greater than specified threshold (in bytes), then cut query when writing to query log. Also limit length of printed query in ordinary text log.", 0) \ \ M(SettingDistributedProductMode, distributed_product_mode, DistributedProductMode::DENY, "How are distributed subqueries performed inside IN or JOIN sections?", IMPORTANT) \ @@ -404,8 +404,11 @@ struct Settings : public SettingsCollection M(SettingBool, use_compact_format_in_distributed_parts_names, false, "Changes format of directories names for distributed table insert parts.", 0) \ M(SettingUInt64, multiple_joins_rewriter_version, 1, "1 or 2. 
Second rewriter version knows about table columns and keeps non-clashing names as is.", 0) \ M(SettingBool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ - M(SettingUInt64, max_parser_depth, 1000, "Maximum parser depth.", 0) \ + M(SettingUInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descent parser).", 0) \ M(SettingSeconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \ + M(SettingBool, transform_null_in, false, "If enabled, NULL values will be matched with 'IN' operator as if they are considered equal.", 0) \ + M(SettingBool, allow_nondeterministic_mutations, false, "Allow non-deterministic functions in ALTER UPDATE/ALTER DELETE statements", 0) \ + M(SettingSeconds, lock_acquire_timeout, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "How long a locking request should wait before failing", 0) \ \ /** Obsolete settings that do nothing but are left for compatibility reasons. Remove each one after half a year of obsolescence. */ \ \ @@ -419,7 +422,6 @@ struct Settings : public SettingsCollection M(SettingUInt64, mark_cache_min_lifetime, 0, "Obsolete setting, does nothing. Will be removed after 2020-05-31", 0) \ M(SettingBool, partial_merge_join, false, "Obsolete. Use join_algorithm='prefer_partial_merge' instead.", 0) \ - DECLARE_SETTINGS_COLLECTION(LIST_OF_SETTINGS) /** Set multiple settings from "profile" (in server configuration file (users.xml), profiles contain groups of multiple settings). diff --git a/dbms/src/Core/SettingsCollection.cpp b/src/Core/SettingsCollection.cpp similarity index 97% rename from dbms/src/Core/SettingsCollection.cpp rename to src/Core/SettingsCollection.cpp index 6d879b27181..238ac1c3c62 100644 --- a/dbms/src/Core/SettingsCollection.cpp +++ b/src/Core/SettingsCollection.cpp @@ -542,6 +542,13 @@ IMPLEMENT_SETTING_ENUM(FormatSettings::DateTimeInputFormat, DATE_TIME_INPUT_FORM M(trace, "trace") IMPLEMENT_SETTING_ENUM(LogsLevel, LOGS_LEVEL_LIST_OF_NAMES, ErrorCodes::BAD_ARGUMENTS) +#define LOG_QUERIES_TYPE_LIST_OF_NAMES(M) \ + M(QUERY_START, "QUERY_START") \ + M(QUERY_FINISH, "QUERY_FINISH") \ + M(EXCEPTION_BEFORE_START, "EXCEPTION_BEFORE_START") \ + M(EXCEPTION_WHILE_PROCESSING, "EXCEPTION_WHILE_PROCESSING") +IMPLEMENT_SETTING_ENUM(QueryLogElementType, LOG_QUERIES_TYPE_LIST_OF_NAMES, ErrorCodes::BAD_ARGUMENTS) + namespace details { diff --git a/dbms/src/Core/SettingsCollection.h b/src/Core/SettingsCollection.h similarity index 98% rename from dbms/src/Core/SettingsCollection.h rename to src/Core/SettingsCollection.h index da21412b7c1..d93772e86ed 100644 --- a/dbms/src/Core/SettingsCollection.h +++ b/src/Core/SettingsCollection.h @@ -298,6 +298,16 @@ enum class LogsLevel }; using SettingLogsLevel = SettingEnum<LogsLevel>; +// Make it signed for compatibility with DataTypeEnum8 +enum QueryLogElementType : int8_t +{ + QUERY_START = 1, + QUERY_FINISH = 2, + EXCEPTION_BEFORE_START = 3, + EXCEPTION_WHILE_PROCESSING = 4, +}; +using SettingLogQueriesType = SettingEnum<QueryLogElementType>; + enum class SettingsBinaryFormat { diff --git a/dbms/src/Core/SettingsCollectionImpl.h b/src/Core/SettingsCollectionImpl.h similarity index 100% rename from dbms/src/Core/SettingsCollectionImpl.h rename to src/Core/SettingsCollectionImpl.h
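[Editorial note] The Settings.h and SettingsCollection hunks above introduce log_queries_min_type on top of a new QueryLogElementType enum whose values are ordered "from low to high". A minimal self-contained sketch of how such a threshold can work; the should_log helper is hypothetical, not from the patch:

```cpp
#include <cstdint>

// Same values as the enum added in the SettingsCollection.h hunk above.
enum QueryLogElementType : int8_t
{
    QUERY_START = 1,
    QUERY_FINISH = 2,
    EXCEPTION_BEFORE_START = 3,
    EXCEPTION_WHILE_PROCESSING = 4,
};

// Hypothetical threshold check: because the values are ordered, a
// "minimal type to log" reduces to a single comparison.
static bool should_log(QueryLogElementType element_type, QueryLogElementType min_type)
{
    return element_type >= min_type;
}

// should_log(QUERY_START, QUERY_FINISH) is false, so with
// log_queries_min_type = QUERY_FINISH the start events are skipped.
```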
diff --git a/dbms/src/Core/SortCursor.h b/src/Core/SortCursor.h similarity index 100% rename from dbms/src/Core/SortCursor.h rename to src/Core/SortCursor.h diff --git a/dbms/src/Core/SortDescription.h b/src/Core/SortDescription.h similarity index 100% rename from dbms/src/Core/SortDescription.h rename to src/Core/SortDescription.h diff --git a/dbms/src/Core/TypeListNumber.h b/src/Core/TypeListNumber.h similarity index 100% rename from dbms/src/Core/TypeListNumber.h rename to src/Core/TypeListNumber.h diff --git a/dbms/src/Core/Types.h b/src/Core/Types.h similarity index 100% rename from dbms/src/Core/Types.h rename to src/Core/Types.h diff --git a/dbms/src/Core/UUID.h b/src/Core/UUID.h similarity index 100% rename from dbms/src/Core/UUID.h rename to src/Core/UUID.h diff --git a/dbms/src/Core/callOnTypeIndex.h b/src/Core/callOnTypeIndex.h similarity index 100% rename from dbms/src/Core/callOnTypeIndex.h rename to src/Core/callOnTypeIndex.h diff --git a/dbms/src/Core/config_core.h.in b/src/Core/config_core.h.in similarity index 100% rename from dbms/src/Core/config_core.h.in rename to src/Core/config_core.h.in diff --git a/dbms/src/Core/iostream_debug_helpers.cpp b/src/Core/iostream_debug_helpers.cpp similarity index 100% rename from dbms/src/Core/iostream_debug_helpers.cpp rename to src/Core/iostream_debug_helpers.cpp diff --git a/dbms/src/Core/iostream_debug_helpers.h b/src/Core/iostream_debug_helpers.h similarity index 100% rename from dbms/src/Core/iostream_debug_helpers.h rename to src/Core/iostream_debug_helpers.h diff --git a/dbms/src/Core/tests/CMakeLists.txt b/src/Core/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Core/tests/CMakeLists.txt rename to src/Core/tests/CMakeLists.txt diff --git a/dbms/src/Core/tests/field.cpp b/src/Core/tests/field.cpp similarity index 100% rename from dbms/src/Core/tests/field.cpp rename to src/Core/tests/field.cpp diff --git a/dbms/src/Core/tests/gtest_DecimalFunctions.cpp b/src/Core/tests/gtest_DecimalFunctions.cpp similarity index 100% rename from dbms/src/Core/tests/gtest_DecimalFunctions.cpp rename to src/Core/tests/gtest_DecimalFunctions.cpp diff --git a/dbms/src/Core/tests/move_field.cpp b/src/Core/tests/move_field.cpp similarity index 100% rename from dbms/src/Core/tests/move_field.cpp rename to src/Core/tests/move_field.cpp diff --git a/dbms/src/Core/tests/string_pool.cpp b/src/Core/tests/string_pool.cpp similarity index 100% rename from dbms/src/Core/tests/string_pool.cpp rename to src/Core/tests/string_pool.cpp diff --git a/dbms/src/Core/tests/string_ref_hash.cpp b/src/Core/tests/string_ref_hash.cpp similarity index 100% rename from dbms/src/Core/tests/string_ref_hash.cpp rename to src/Core/tests/string_ref_hash.cpp diff --git a/dbms/src/DataStreams/AddingConstColumnBlockInputStream.h b/src/DataStreams/AddingConstColumnBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/AddingConstColumnBlockInputStream.h rename to src/DataStreams/AddingConstColumnBlockInputStream.h diff --git a/dbms/src/DataStreams/AddingDefaultBlockOutputStream.cpp b/src/DataStreams/AddingDefaultBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/AddingDefaultBlockOutputStream.cpp rename to src/DataStreams/AddingDefaultBlockOutputStream.cpp diff --git a/dbms/src/DataStreams/AddingDefaultBlockOutputStream.h b/src/DataStreams/AddingDefaultBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/AddingDefaultBlockOutputStream.h rename to src/DataStreams/AddingDefaultBlockOutputStream.h diff --git
a/dbms/src/DataStreams/AddingDefaultsBlockInputStream.cpp b/src/DataStreams/AddingDefaultsBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/AddingDefaultsBlockInputStream.cpp rename to src/DataStreams/AddingDefaultsBlockInputStream.cpp diff --git a/dbms/src/DataStreams/AddingDefaultsBlockInputStream.h b/src/DataStreams/AddingDefaultsBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/AddingDefaultsBlockInputStream.h rename to src/DataStreams/AddingDefaultsBlockInputStream.h diff --git a/dbms/src/DataStreams/AggregatingBlockInputStream.cpp b/src/DataStreams/AggregatingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/AggregatingBlockInputStream.cpp rename to src/DataStreams/AggregatingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/AggregatingBlockInputStream.h b/src/DataStreams/AggregatingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/AggregatingBlockInputStream.h rename to src/DataStreams/AggregatingBlockInputStream.h diff --git a/dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp b/src/DataStreams/AggregatingSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp rename to src/DataStreams/AggregatingSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/AggregatingSortedBlockInputStream.h b/src/DataStreams/AggregatingSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/AggregatingSortedBlockInputStream.h rename to src/DataStreams/AggregatingSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/AsynchronousBlockInputStream.cpp b/src/DataStreams/AsynchronousBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/AsynchronousBlockInputStream.cpp rename to src/DataStreams/AsynchronousBlockInputStream.cpp diff --git a/dbms/src/DataStreams/AsynchronousBlockInputStream.h b/src/DataStreams/AsynchronousBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/AsynchronousBlockInputStream.h rename to src/DataStreams/AsynchronousBlockInputStream.h diff --git a/dbms/src/DataStreams/BlockIO.cpp b/src/DataStreams/BlockIO.cpp similarity index 100% rename from dbms/src/DataStreams/BlockIO.cpp rename to src/DataStreams/BlockIO.cpp diff --git a/dbms/src/DataStreams/BlockIO.h b/src/DataStreams/BlockIO.h similarity index 100% rename from dbms/src/DataStreams/BlockIO.h rename to src/DataStreams/BlockIO.h diff --git a/dbms/src/DataStreams/BlockStreamProfileInfo.cpp b/src/DataStreams/BlockStreamProfileInfo.cpp similarity index 100% rename from dbms/src/DataStreams/BlockStreamProfileInfo.cpp rename to src/DataStreams/BlockStreamProfileInfo.cpp diff --git a/dbms/src/DataStreams/BlockStreamProfileInfo.h b/src/DataStreams/BlockStreamProfileInfo.h similarity index 100% rename from dbms/src/DataStreams/BlockStreamProfileInfo.h rename to src/DataStreams/BlockStreamProfileInfo.h diff --git a/dbms/src/DataStreams/BlocksBlockInputStream.h b/src/DataStreams/BlocksBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/BlocksBlockInputStream.h rename to src/DataStreams/BlocksBlockInputStream.h diff --git a/dbms/src/DataStreams/BlocksListBlockInputStream.h b/src/DataStreams/BlocksListBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/BlocksListBlockInputStream.h rename to src/DataStreams/BlocksListBlockInputStream.h diff --git a/dbms/src/DataStreams/CMakeLists.txt b/src/DataStreams/CMakeLists.txt similarity index 100% rename from 
dbms/src/DataStreams/CMakeLists.txt rename to src/DataStreams/CMakeLists.txt diff --git a/dbms/src/DataStreams/CheckConstraintsBlockOutputStream.cpp b/src/DataStreams/CheckConstraintsBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CheckConstraintsBlockOutputStream.cpp rename to src/DataStreams/CheckConstraintsBlockOutputStream.cpp diff --git a/dbms/src/DataStreams/CheckConstraintsBlockOutputStream.h b/src/DataStreams/CheckConstraintsBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/CheckConstraintsBlockOutputStream.h rename to src/DataStreams/CheckConstraintsBlockOutputStream.h diff --git a/dbms/src/DataStreams/CheckSortedBlockInputStream.cpp b/src/DataStreams/CheckSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CheckSortedBlockInputStream.cpp rename to src/DataStreams/CheckSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/CheckSortedBlockInputStream.h b/src/DataStreams/CheckSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/CheckSortedBlockInputStream.h rename to src/DataStreams/CheckSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.cpp b/src/DataStreams/CollapsingFinalBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CollapsingFinalBlockInputStream.cpp rename to src/DataStreams/CollapsingFinalBlockInputStream.cpp diff --git a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h b/src/DataStreams/CollapsingFinalBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/CollapsingFinalBlockInputStream.h rename to src/DataStreams/CollapsingFinalBlockInputStream.h diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp b/src/DataStreams/CollapsingSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp rename to src/DataStreams/CollapsingSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.h b/src/DataStreams/CollapsingSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/CollapsingSortedBlockInputStream.h rename to src/DataStreams/CollapsingSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/ColumnGathererStream.cpp b/src/DataStreams/ColumnGathererStream.cpp similarity index 100% rename from dbms/src/DataStreams/ColumnGathererStream.cpp rename to src/DataStreams/ColumnGathererStream.cpp diff --git a/dbms/src/DataStreams/ColumnGathererStream.h b/src/DataStreams/ColumnGathererStream.h similarity index 100% rename from dbms/src/DataStreams/ColumnGathererStream.h rename to src/DataStreams/ColumnGathererStream.h diff --git a/dbms/src/DataStreams/ConcatBlockInputStream.h b/src/DataStreams/ConcatBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ConcatBlockInputStream.h rename to src/DataStreams/ConcatBlockInputStream.h diff --git a/dbms/src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h b/src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h rename to src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h diff --git a/dbms/src/DataStreams/ConvertingBlockInputStream.cpp b/src/DataStreams/ConvertingBlockInputStream.cpp similarity index 91% rename from dbms/src/DataStreams/ConvertingBlockInputStream.cpp rename to src/DataStreams/ConvertingBlockInputStream.cpp index 
89864847a92..368ee7083b1 100644 --- a/dbms/src/DataStreams/ConvertingBlockInputStream.cpp +++ b/src/DataStreams/ConvertingBlockInputStream.cpp @@ -17,11 +17,11 @@ namespace ErrorCodes } -static ColumnPtr castColumnWithDiagnostic(const ColumnWithTypeAndName & src_elem, const ColumnWithTypeAndName & res_elem, const Context & context) +static ColumnPtr castColumnWithDiagnostic(const ColumnWithTypeAndName & src_elem, const ColumnWithTypeAndName & res_elem) { try { - return castColumn(src_elem, res_elem.type, context); + return castColumn(src_elem, res_elem.type); } catch (Exception & e) { @@ -32,11 +32,10 @@ static ColumnPtr castColumnWithDiagnostic(const ColumnWithTypeAndName & src_elem ConvertingBlockInputStream::ConvertingBlockInputStream( - const Context & context_, const BlockInputStreamPtr & input, const Block & result_header, MatchColumnsMode mode) - : context(context_), header(result_header), conversion(header.columns()) + : header(result_header), conversion(header.columns()) { children.emplace_back(input); @@ -85,7 +84,7 @@ ConvertingBlockInputStream::ConvertingBlockInputStream( /// Check conversion by dry run CAST function. - castColumnWithDiagnostic(src_elem, res_elem, context); + castColumnWithDiagnostic(src_elem, res_elem); } } @@ -107,7 +106,7 @@ Block ConvertingBlockInputStream::readImpl() const auto & src_elem = src.getByPosition(conversion[res_pos]); auto & res_elem = res.getByPosition(res_pos); - ColumnPtr converted = castColumnWithDiagnostic(src_elem, res_elem, context); + ColumnPtr converted = castColumnWithDiagnostic(src_elem, res_elem); if (isColumnConst(*src_elem.column) && !isColumnConst(*res_elem.column)) converted = converted->convertToFullColumnIfConst(); diff --git a/dbms/src/DataStreams/ConvertingBlockInputStream.h b/src/DataStreams/ConvertingBlockInputStream.h similarity index 95% rename from dbms/src/DataStreams/ConvertingBlockInputStream.h rename to src/DataStreams/ConvertingBlockInputStream.h index 553d9221dd6..b0324618408 100644 --- a/dbms/src/DataStreams/ConvertingBlockInputStream.h +++ b/src/DataStreams/ConvertingBlockInputStream.h @@ -32,7 +32,6 @@ public: }; ConvertingBlockInputStream( - const Context & context, const BlockInputStreamPtr & input, const Block & result_header, MatchColumnsMode mode); @@ -43,7 +42,6 @@ public: private: Block readImpl() override; - const Context & context; Block header; /// How to construct result block. Position in source block, where to get each column. 
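[Editorial note] The ConvertingBlockInputStream hunks above drop the Context argument: castColumn(src_elem, res_elem.type) no longer needs one, so the stream stops storing a Context reference, while the surviving castColumnWithDiagnostic wrapper keeps its catch-enrich-rethrow shape. A minimal self-contained illustration of that diagnostic-wrapping pattern, with std:: types standing in for the DB ones (castToInt64 and castWithDiagnostic are illustrative names, not from the patch):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Stand-in for castColumn(src_elem, res_elem.type): may throw on bad input.
static long long castToInt64(const std::string & value)
{
    return std::stoll(value); // throws std::invalid_argument on non-numeric text
}

// The wrapper pattern from the hunk: catch, enrich with the column name, rethrow.
static long long castWithDiagnostic(const std::string & value, const std::string & column_name)
{
    try
    {
        return castToInt64(value);
    }
    catch (const std::exception & e)
    {
        throw std::runtime_error(std::string(e.what())
            + " while converting column '" + column_name + "'");
    }
}

int main()
{
    std::cout << castWithDiagnostic("42", "id") << '\n'; // prints 42
    try { castWithDiagnostic("oops", "id"); }
    catch (const std::exception & e) { std::cout << e.what() << '\n'; }
}
```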
diff --git a/dbms/src/DataStreams/CountingBlockOutputStream.cpp b/src/DataStreams/CountingBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CountingBlockOutputStream.cpp rename to src/DataStreams/CountingBlockOutputStream.cpp diff --git a/dbms/src/DataStreams/CountingBlockOutputStream.h b/src/DataStreams/CountingBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/CountingBlockOutputStream.h rename to src/DataStreams/CountingBlockOutputStream.h diff --git a/dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp b/src/DataStreams/CreatingSetsBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp rename to src/DataStreams/CreatingSetsBlockInputStream.cpp diff --git a/dbms/src/DataStreams/CreatingSetsBlockInputStream.h b/src/DataStreams/CreatingSetsBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/CreatingSetsBlockInputStream.h rename to src/DataStreams/CreatingSetsBlockInputStream.h diff --git a/dbms/src/DataStreams/CubeBlockInputStream.cpp b/src/DataStreams/CubeBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CubeBlockInputStream.cpp rename to src/DataStreams/CubeBlockInputStream.cpp diff --git a/dbms/src/DataStreams/CubeBlockInputStream.h b/src/DataStreams/CubeBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/CubeBlockInputStream.h rename to src/DataStreams/CubeBlockInputStream.h diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.cpp b/src/DataStreams/DistinctBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/DistinctBlockInputStream.cpp rename to src/DataStreams/DistinctBlockInputStream.cpp diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.h b/src/DataStreams/DistinctBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/DistinctBlockInputStream.h rename to src/DataStreams/DistinctBlockInputStream.h diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp b/src/DataStreams/DistinctSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp rename to src/DataStreams/DistinctSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.h b/src/DataStreams/DistinctSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/DistinctSortedBlockInputStream.h rename to src/DataStreams/DistinctSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/ExecutionSpeedLimits.cpp b/src/DataStreams/ExecutionSpeedLimits.cpp similarity index 100% rename from dbms/src/DataStreams/ExecutionSpeedLimits.cpp rename to src/DataStreams/ExecutionSpeedLimits.cpp diff --git a/dbms/src/DataStreams/ExecutionSpeedLimits.h b/src/DataStreams/ExecutionSpeedLimits.h similarity index 100% rename from dbms/src/DataStreams/ExecutionSpeedLimits.h rename to src/DataStreams/ExecutionSpeedLimits.h diff --git a/dbms/src/DataStreams/ExpressionBlockInputStream.cpp b/src/DataStreams/ExpressionBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/ExpressionBlockInputStream.cpp rename to src/DataStreams/ExpressionBlockInputStream.cpp diff --git a/dbms/src/DataStreams/ExpressionBlockInputStream.h b/src/DataStreams/ExpressionBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ExpressionBlockInputStream.h rename to src/DataStreams/ExpressionBlockInputStream.h diff --git a/dbms/src/DataStreams/FillingBlockInputStream.cpp 
b/src/DataStreams/FillingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/FillingBlockInputStream.cpp
rename to src/DataStreams/FillingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/FillingBlockInputStream.h b/src/DataStreams/FillingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/FillingBlockInputStream.h
rename to src/DataStreams/FillingBlockInputStream.h
diff --git a/dbms/src/DataStreams/FilterBlockInputStream.cpp b/src/DataStreams/FilterBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/FilterBlockInputStream.cpp
rename to src/DataStreams/FilterBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/FilterBlockInputStream.h b/src/DataStreams/FilterBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/FilterBlockInputStream.h
rename to src/DataStreams/FilterBlockInputStream.h
diff --git a/dbms/src/DataStreams/FilterColumnsBlockInputStream.cpp b/src/DataStreams/FilterColumnsBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/FilterColumnsBlockInputStream.cpp
rename to src/DataStreams/FilterColumnsBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/FilterColumnsBlockInputStream.h b/src/DataStreams/FilterColumnsBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/FilterColumnsBlockInputStream.h
rename to src/DataStreams/FilterColumnsBlockInputStream.h
diff --git a/dbms/src/DataStreams/FinishSortingBlockInputStream.cpp b/src/DataStreams/FinishSortingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/FinishSortingBlockInputStream.cpp
rename to src/DataStreams/FinishSortingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/FinishSortingBlockInputStream.h b/src/DataStreams/FinishSortingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/FinishSortingBlockInputStream.h
rename to src/DataStreams/FinishSortingBlockInputStream.h
diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp b/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp
rename to src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h b/src/DataStreams/GraphiteRollupSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h
rename to src/DataStreams/GraphiteRollupSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/IBlockInputStream.cpp b/src/DataStreams/IBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/IBlockInputStream.cpp
rename to src/DataStreams/IBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/IBlockInputStream.h b/src/DataStreams/IBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/IBlockInputStream.h
rename to src/DataStreams/IBlockInputStream.h
diff --git a/dbms/src/DataStreams/IBlockOutputStream.h b/src/DataStreams/IBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/IBlockOutputStream.h
rename to src/DataStreams/IBlockOutputStream.h
diff --git a/dbms/src/DataStreams/IBlockStream_fwd.h b/src/DataStreams/IBlockStream_fwd.h
similarity index 100%
rename from dbms/src/DataStreams/IBlockStream_fwd.h
rename to src/DataStreams/IBlockStream_fwd.h
diff --git a/dbms/src/DataStreams/InputStreamFromASTInsertQuery.cpp b/src/DataStreams/InputStreamFromASTInsertQuery.cpp
similarity index 100%
rename from dbms/src/DataStreams/InputStreamFromASTInsertQuery.cpp
rename to src/DataStreams/InputStreamFromASTInsertQuery.cpp
diff --git a/dbms/src/DataStreams/InputStreamFromASTInsertQuery.h b/src/DataStreams/InputStreamFromASTInsertQuery.h
similarity index 100%
rename from dbms/src/DataStreams/InputStreamFromASTInsertQuery.h
rename to src/DataStreams/InputStreamFromASTInsertQuery.h
diff --git a/dbms/src/DataStreams/InternalTextLogsRowOutputStream.cpp b/src/DataStreams/InternalTextLogsRowOutputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/InternalTextLogsRowOutputStream.cpp
rename to src/DataStreams/InternalTextLogsRowOutputStream.cpp
diff --git a/dbms/src/DataStreams/InternalTextLogsRowOutputStream.h b/src/DataStreams/InternalTextLogsRowOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/InternalTextLogsRowOutputStream.h
rename to src/DataStreams/InternalTextLogsRowOutputStream.h
diff --git a/dbms/src/DataStreams/LazyBlockInputStream.h b/src/DataStreams/LazyBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/LazyBlockInputStream.h
rename to src/DataStreams/LazyBlockInputStream.h
diff --git a/dbms/src/DataStreams/LimitBlockInputStream.cpp b/src/DataStreams/LimitBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/LimitBlockInputStream.cpp
rename to src/DataStreams/LimitBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/LimitBlockInputStream.h b/src/DataStreams/LimitBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/LimitBlockInputStream.h
rename to src/DataStreams/LimitBlockInputStream.h
diff --git a/dbms/src/DataStreams/LimitByBlockInputStream.cpp b/src/DataStreams/LimitByBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/LimitByBlockInputStream.cpp
rename to src/DataStreams/LimitByBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/LimitByBlockInputStream.h b/src/DataStreams/LimitByBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/LimitByBlockInputStream.h
rename to src/DataStreams/LimitByBlockInputStream.h
diff --git a/dbms/src/DataStreams/MarkInCompressedFile.h b/src/DataStreams/MarkInCompressedFile.h
similarity index 100%
rename from dbms/src/DataStreams/MarkInCompressedFile.h
rename to src/DataStreams/MarkInCompressedFile.h
diff --git a/dbms/src/DataStreams/MaterializingBlockInputStream.cpp b/src/DataStreams/MaterializingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/MaterializingBlockInputStream.cpp
rename to src/DataStreams/MaterializingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/MaterializingBlockInputStream.h b/src/DataStreams/MaterializingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/MaterializingBlockInputStream.h
rename to src/DataStreams/MaterializingBlockInputStream.h
diff --git a/dbms/src/DataStreams/MaterializingBlockOutputStream.h b/src/DataStreams/MaterializingBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/MaterializingBlockOutputStream.h
rename to src/DataStreams/MaterializingBlockOutputStream.h
diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp b/src/DataStreams/MergeSortingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/MergeSortingBlockInputStream.cpp
rename to src/DataStreams/MergeSortingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.h b/src/DataStreams/MergeSortingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/MergeSortingBlockInputStream.h
rename to src/DataStreams/MergeSortingBlockInputStream.h
diff --git a/dbms/src/DataStreams/MergingAggregatedBlockInputStream.cpp b/src/DataStreams/MergingAggregatedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/MergingAggregatedBlockInputStream.cpp
rename to src/DataStreams/MergingAggregatedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/MergingAggregatedBlockInputStream.h b/src/DataStreams/MergingAggregatedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/MergingAggregatedBlockInputStream.h
rename to src/DataStreams/MergingAggregatedBlockInputStream.h
diff --git a/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp
rename to src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h
rename to src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h
diff --git a/dbms/src/DataStreams/MergingSortedBlockInputStream.cpp b/src/DataStreams/MergingSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/MergingSortedBlockInputStream.cpp
rename to src/DataStreams/MergingSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/MergingSortedBlockInputStream.h b/src/DataStreams/MergingSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/MergingSortedBlockInputStream.h
rename to src/DataStreams/MergingSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/NativeBlockInputStream.cpp b/src/DataStreams/NativeBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/NativeBlockInputStream.cpp
rename to src/DataStreams/NativeBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/NativeBlockInputStream.h b/src/DataStreams/NativeBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/NativeBlockInputStream.h
rename to src/DataStreams/NativeBlockInputStream.h
diff --git a/dbms/src/DataStreams/NativeBlockOutputStream.cpp b/src/DataStreams/NativeBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/NativeBlockOutputStream.cpp
rename to src/DataStreams/NativeBlockOutputStream.cpp
diff --git a/dbms/src/DataStreams/NativeBlockOutputStream.h b/src/DataStreams/NativeBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/NativeBlockOutputStream.h
rename to src/DataStreams/NativeBlockOutputStream.h
diff --git a/dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h b/src/DataStreams/NullAndDoCopyBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h
rename to src/DataStreams/NullAndDoCopyBlockInputStream.h
diff --git a/dbms/src/DataStreams/NullBlockInputStream.h b/src/DataStreams/NullBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/NullBlockInputStream.h
rename to src/DataStreams/NullBlockInputStream.h
diff --git a/dbms/src/DataStreams/NullBlockOutputStream.h b/src/DataStreams/NullBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/NullBlockOutputStream.h
rename to src/DataStreams/NullBlockOutputStream.h
diff --git a/dbms/src/DataStreams/OneBlockInputStream.h b/src/DataStreams/OneBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/OneBlockInputStream.h
rename to src/DataStreams/OneBlockInputStream.h
diff --git a/dbms/src/DataStreams/OwningBlockInputStream.h b/src/DataStreams/OwningBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/OwningBlockInputStream.h
rename to src/DataStreams/OwningBlockInputStream.h
diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
rename to src/DataStreams/ParallelAggregatingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h b/src/DataStreams/ParallelAggregatingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h
rename to src/DataStreams/ParallelAggregatingBlockInputStream.h
diff --git a/dbms/src/DataStreams/ParallelInputsProcessor.h b/src/DataStreams/ParallelInputsProcessor.h
similarity index 100%
rename from dbms/src/DataStreams/ParallelInputsProcessor.h
rename to src/DataStreams/ParallelInputsProcessor.h
diff --git a/dbms/src/DataStreams/ParallelParsingBlockInputStream.cpp b/src/DataStreams/ParallelParsingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/ParallelParsingBlockInputStream.cpp
rename to src/DataStreams/ParallelParsingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/ParallelParsingBlockInputStream.h b/src/DataStreams/ParallelParsingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/ParallelParsingBlockInputStream.h
rename to src/DataStreams/ParallelParsingBlockInputStream.h
diff --git a/dbms/src/DataStreams/PartialSortingBlockInputStream.cpp b/src/DataStreams/PartialSortingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/PartialSortingBlockInputStream.cpp
rename to src/DataStreams/PartialSortingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/PartialSortingBlockInputStream.h b/src/DataStreams/PartialSortingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/PartialSortingBlockInputStream.h
rename to src/DataStreams/PartialSortingBlockInputStream.h
diff --git a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/src/DataStreams/PushingToViewsBlockOutputStream.cpp
similarity index 95%
rename from dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp
rename to src/DataStreams/PushingToViewsBlockOutputStream.cpp
index 5752fbaff96..6bab4aba1f3 100644
--- a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp
+++ b/src/DataStreams/PushingToViewsBlockOutputStream.cpp
@@ -25,7 +25,8 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
      * Although now any insertion into the table is done via PushingToViewsBlockOutputStream,
      * but it's clear that here is not the best place for this functionality.
      */
-    addTableLock(storage->lockStructureForShare(context.getInitialQueryId()));
+    addTableLock(
+        storage->lockStructureForShare(true, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout));

     /// If the "root" table deduplicates blocks, there is no need to make deduplication for children
     /// Moreover, deduplication for AggregatingMergeTree children could produce false positives due to low size of inserting blocks
@@ -54,7 +55,9 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
         if (auto * materialized_view = dynamic_cast<StorageMaterializedView *>(dependent_table.get()))
         {
-            addTableLock(materialized_view->lockStructureForShare(context.getInitialQueryId()));
+            addTableLock(
+                materialized_view->lockStructureForShare(
+                    true, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout));

             StoragePtr inner_table = materialized_view->getTargetTable();
             auto inner_table_id = inner_table->getStorageID();
@@ -247,7 +250,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n
         /// and two-level aggregation is triggered).
         in = std::make_shared<SquashingBlockInputStream>(
             in, context.getSettingsRef().min_insert_block_size_rows, context.getSettingsRef().min_insert_block_size_bytes);
-        in = std::make_shared<ConvertingBlockInputStream>(context, in, view.out->getHeader(), ConvertingBlockInputStream::MatchColumnsMode::Name);
+        in = std::make_shared<ConvertingBlockInputStream>(in, view.out->getHeader(), ConvertingBlockInputStream::MatchColumnsMode::Name);
     }
     else
         in = std::make_shared<OneBlockInputStream>(block);
diff --git a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.h b/src/DataStreams/PushingToViewsBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/PushingToViewsBlockOutputStream.h
rename to src/DataStreams/PushingToViewsBlockOutputStream.h
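The hunks above are one of several call sites migrated to the new lockStructureForShare signature: callers now pass whether they intend to add data, the query id, and an explicit acquire timeout taken from settings. A minimal sketch of the new pattern; only the signature itself comes from the diff, while `storage` and `context` stand for any IStorage pointer and query Context:

    /// Acquire a shared structure lock, failing after lock_acquire_timeout
    /// instead of blocking indefinitely behind a concurrent ALTER.
    auto table_lock = storage->lockStructureForShare(
        true,                                            /// will add new data
        context.getInitialQueryId(),
        context.getSettingsRef().lock_acquire_timeout);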
diff --git a/dbms/src/DataStreams/RemoteBlockInputStream.cpp b/src/DataStreams/RemoteBlockInputStream.cpp
similarity index 97%
rename from dbms/src/DataStreams/RemoteBlockInputStream.cpp
rename to src/DataStreams/RemoteBlockInputStream.cpp
index 9d9f629d463..fc1578d1749 100644
--- a/dbms/src/DataStreams/RemoteBlockInputStream.cpp
+++ b/src/DataStreams/RemoteBlockInputStream.cpp
@@ -177,7 +177,7 @@ void RemoteBlockInputStream::sendExternalTables()
 /** If we receive a block with slightly different column types, or with excessive columns,
   * we will adapt it to expected structure.
   */
-static Block adaptBlockStructure(const Block & block, const Block & header, const Context & context)
+static Block adaptBlockStructure(const Block & block, const Block & header)
 {
     /// Special case when reader doesn't care about result structure. Deprecated and used only in Benchmark, PerformanceTest.
     if (!header)
@@ -204,7 +204,7 @@ static Block adaptBlockStructure(const Block & block, const Block & header, const Context & context)
                 auto col = block.getByName(elem.name);
                 col.column = block.getByName(elem.name).column->cut(0, 1);
-                column = castColumn(col, elem.type, context);
+                column = castColumn(col, elem.type);

                 if (!isColumnConst(*column))
                     column = ColumnConst::create(column, block.rows());
@@ -216,7 +216,7 @@ static Block adaptBlockStructure(const Block & block, const Block & header, const Context & context)
                 column = elem.column->cloneResized(block.rows());
             }
             else
-                column = castColumn(block.getByName(elem.name), elem.type, context);
+                column = castColumn(block.getByName(elem.name), elem.type);

         res.insert({column, elem.type, elem.name});
     }
@@ -246,7 +246,7 @@ Block RemoteBlockInputStream::readImpl()
         case Protocol::Server::Data:
             /// If the block is not empty and is not a header block
             if (packet.block && (packet.block.rows() > 0))
-                return adaptBlockStructure(packet.block, header, context);
+                return adaptBlockStructure(packet.block, header);
             break;  /// If the block is empty - we will receive other packets before EndOfStream.

         case Protocol::Server::Exception:
@@ -359,12 +359,17 @@ void RemoteBlockInputStream::sendQuery()

 void RemoteBlockInputStream::tryCancel(const char * reason)
 {
-    bool old_val = false;
-    if (!was_cancelled.compare_exchange_strong(old_val, true, std::memory_order_seq_cst, std::memory_order_relaxed))
-        return;
+    {
+        std::lock_guard guard(was_cancelled_mutex);
+
+        if (was_cancelled)
+            return;
+
+        was_cancelled = true;
+        multiplexed_connections->sendCancel();
+    }

     LOG_TRACE(log, "(" << multiplexed_connections->dumpAddresses() << ") " << reason);
-    multiplexed_connections->sendCancel();
 }

 bool RemoteBlockInputStream::isQueryPending() const
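In the hunks above, castColumn and adaptBlockStructure lose their Context parameter: the conversion is now fully determined by the source column and the target type. A sketch of the new call shape, with the surrounding loop elided (`block` is the received Block and `elem` the column the expected header declares):

    ColumnWithTypeAndName col = block.getByName(elem.name);
    ColumnPtr column = castColumn(col, elem.type);   /// no Context parameter any more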
diff --git a/dbms/src/DataStreams/RemoteBlockInputStream.h b/src/DataStreams/RemoteBlockInputStream.h
similarity index 98%
rename from dbms/src/DataStreams/RemoteBlockInputStream.h
rename to src/DataStreams/RemoteBlockInputStream.h
index 783811f2521..66b1ebbb6c3 100644
--- a/dbms/src/DataStreams/RemoteBlockInputStream.h
+++ b/src/DataStreams/RemoteBlockInputStream.h
@@ -135,7 +135,8 @@ private:
       * - data size is already satisfactory (when using LIMIT, for example)
       * - an exception was thrown from client side
       */
-    std::atomic<bool> was_cancelled { false };
+    bool was_cancelled { false };
+    std::mutex was_cancelled_mutex;

     /** An exception from replica was received. No need in receiving more packets or
       * requesting to cancel query execution
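Together, the two RemoteBlockInputStream hunks replace a lock-free compare-exchange on an atomic flag with a plain bool guarded by a mutex, so that sendCancel is issued inside the same critical section that flips the flag and two racing callers can no longer both reach the network call. The pattern, distilled into a self-contained sketch (not the class itself):

    #include <functional>
    #include <mutex>

    std::mutex cancel_mutex;
    bool cancelled = false;   /// guarded by cancel_mutex

    void cancelOnce(const std::function<void()> & send_cancel)
    {
        std::lock_guard<std::mutex> lock(cancel_mutex);
        if (cancelled)
            return;           /// somebody already sent the cancel packet
        cancelled = true;
        send_cancel();        /// performed under the lock, hence exactly once
    }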
diff --git a/dbms/src/DataStreams/RemoteBlockOutputStream.cpp b/src/DataStreams/RemoteBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/RemoteBlockOutputStream.cpp
rename to src/DataStreams/RemoteBlockOutputStream.cpp
diff --git a/dbms/src/DataStreams/RemoteBlockOutputStream.h b/src/DataStreams/RemoteBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/RemoteBlockOutputStream.h
rename to src/DataStreams/RemoteBlockOutputStream.h
diff --git a/dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp b/src/DataStreams/ReplacingSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp
rename to src/DataStreams/ReplacingSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/ReplacingSortedBlockInputStream.h b/src/DataStreams/ReplacingSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/ReplacingSortedBlockInputStream.h
rename to src/DataStreams/ReplacingSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/ReverseBlockInputStream.cpp b/src/DataStreams/ReverseBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/ReverseBlockInputStream.cpp
rename to src/DataStreams/ReverseBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/ReverseBlockInputStream.h b/src/DataStreams/ReverseBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/ReverseBlockInputStream.h
rename to src/DataStreams/ReverseBlockInputStream.h
diff --git a/dbms/src/DataStreams/RollupBlockInputStream.cpp b/src/DataStreams/RollupBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/RollupBlockInputStream.cpp
rename to src/DataStreams/RollupBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/RollupBlockInputStream.h b/src/DataStreams/RollupBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/RollupBlockInputStream.h
rename to src/DataStreams/RollupBlockInputStream.h
diff --git a/dbms/src/DataStreams/SizeLimits.cpp b/src/DataStreams/SizeLimits.cpp
similarity index 100%
rename from dbms/src/DataStreams/SizeLimits.cpp
rename to src/DataStreams/SizeLimits.cpp
diff --git a/dbms/src/DataStreams/SizeLimits.h b/src/DataStreams/SizeLimits.h
similarity index 100%
rename from dbms/src/DataStreams/SizeLimits.h
rename to src/DataStreams/SizeLimits.h
diff --git a/dbms/src/DataStreams/SquashingBlockInputStream.cpp b/src/DataStreams/SquashingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/SquashingBlockInputStream.cpp
rename to src/DataStreams/SquashingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/SquashingBlockInputStream.h b/src/DataStreams/SquashingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/SquashingBlockInputStream.h
rename to src/DataStreams/SquashingBlockInputStream.h
diff --git a/dbms/src/DataStreams/SquashingBlockOutputStream.cpp b/src/DataStreams/SquashingBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/SquashingBlockOutputStream.cpp
rename to src/DataStreams/SquashingBlockOutputStream.cpp
diff --git a/dbms/src/DataStreams/SquashingBlockOutputStream.h b/src/DataStreams/SquashingBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/SquashingBlockOutputStream.h
rename to src/DataStreams/SquashingBlockOutputStream.h
diff --git a/dbms/src/DataStreams/SquashingTransform.cpp b/src/DataStreams/SquashingTransform.cpp
similarity index 100%
rename from dbms/src/DataStreams/SquashingTransform.cpp
rename to src/DataStreams/SquashingTransform.cpp
diff --git a/dbms/src/DataStreams/SquashingTransform.h b/src/DataStreams/SquashingTransform.h
similarity index 100%
rename from dbms/src/DataStreams/SquashingTransform.h
rename to src/DataStreams/SquashingTransform.h
diff --git a/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp b/src/DataStreams/SummingSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/SummingSortedBlockInputStream.cpp
rename to src/DataStreams/SummingSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/SummingSortedBlockInputStream.h b/src/DataStreams/SummingSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/SummingSortedBlockInputStream.h
rename to src/DataStreams/SummingSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/TTLBlockInputStream.cpp b/src/DataStreams/TTLBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/TTLBlockInputStream.cpp
rename to src/DataStreams/TTLBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/TTLBlockInputStream.h b/src/DataStreams/TTLBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/TTLBlockInputStream.h
rename to src/DataStreams/TTLBlockInputStream.h
diff --git a/dbms/src/DataStreams/TemporaryFileStream.h b/src/DataStreams/TemporaryFileStream.h
similarity index 100%
rename from dbms/src/DataStreams/TemporaryFileStream.h
rename to src/DataStreams/TemporaryFileStream.h
diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp b/src/DataStreams/TotalsHavingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp
rename to src/DataStreams/TotalsHavingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.h b/src/DataStreams/TotalsHavingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/TotalsHavingBlockInputStream.h
rename to src/DataStreams/TotalsHavingBlockInputStream.h
diff --git a/dbms/src/DataStreams/UnionBlockInputStream.h b/src/DataStreams/UnionBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/UnionBlockInputStream.h
rename to src/DataStreams/UnionBlockInputStream.h
diff --git a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp b/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp
rename to src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h b/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h
rename to src/DataStreams/VersionedCollapsingSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/copyData.cpp b/src/DataStreams/copyData.cpp
similarity index 100%
rename from dbms/src/DataStreams/copyData.cpp
rename to src/DataStreams/copyData.cpp
diff --git a/dbms/src/DataStreams/copyData.h b/src/DataStreams/copyData.h
similarity index 100%
rename from dbms/src/DataStreams/copyData.h
rename to src/DataStreams/copyData.h
diff --git a/dbms/src/DataStreams/finalizeBlock.cpp b/src/DataStreams/finalizeBlock.cpp
similarity index 100%
rename from dbms/src/DataStreams/finalizeBlock.cpp
rename to src/DataStreams/finalizeBlock.cpp
diff --git a/dbms/src/DataStreams/finalizeBlock.h b/src/DataStreams/finalizeBlock.h
similarity index 100%
rename from dbms/src/DataStreams/finalizeBlock.h
rename to src/DataStreams/finalizeBlock.h
diff --git a/dbms/src/DataStreams/materializeBlock.cpp b/src/DataStreams/materializeBlock.cpp
similarity index 100%
rename from dbms/src/DataStreams/materializeBlock.cpp
rename to src/DataStreams/materializeBlock.cpp
diff --git a/dbms/src/DataStreams/materializeBlock.h b/src/DataStreams/materializeBlock.h
similarity index 100%
rename from dbms/src/DataStreams/materializeBlock.h
rename to src/DataStreams/materializeBlock.h
diff --git a/dbms/src/DataStreams/narrowBlockInputStreams.cpp b/src/DataStreams/narrowBlockInputStreams.cpp
similarity index 100%
rename from dbms/src/DataStreams/narrowBlockInputStreams.cpp
rename to src/DataStreams/narrowBlockInputStreams.cpp
diff --git a/dbms/src/DataStreams/narrowBlockInputStreams.h b/src/DataStreams/narrowBlockInputStreams.h
similarity index 100%
rename from dbms/src/DataStreams/narrowBlockInputStreams.h
rename to src/DataStreams/narrowBlockInputStreams.h
diff --git a/dbms/src/DataStreams/processConstants.cpp b/src/DataStreams/processConstants.cpp
similarity index 100%
rename from dbms/src/DataStreams/processConstants.cpp
rename to src/DataStreams/processConstants.cpp
diff --git a/dbms/src/DataStreams/processConstants.h b/src/DataStreams/processConstants.h
similarity index 100%
rename from dbms/src/DataStreams/processConstants.h
rename to src/DataStreams/processConstants.h
diff --git a/dbms/src/DataStreams/tests/CMakeLists.txt b/src/DataStreams/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/DataStreams/tests/CMakeLists.txt
rename to src/DataStreams/tests/CMakeLists.txt
diff --git a/dbms/src/DataStreams/tests/collapsing_sorted_stream.cpp b/src/DataStreams/tests/collapsing_sorted_stream.cpp
similarity index 100%
rename from dbms/src/DataStreams/tests/collapsing_sorted_stream.cpp
rename to src/DataStreams/tests/collapsing_sorted_stream.cpp
diff --git a/dbms/src/DataStreams/tests/expression_stream.cpp b/src/DataStreams/tests/expression_stream.cpp
similarity index 99%
rename from dbms/src/DataStreams/tests/expression_stream.cpp
rename to src/DataStreams/tests/expression_stream.cpp
index bd4117f5aab..fbfde018ed6 100644
--- a/dbms/src/DataStreams/tests/expression_stream.cpp
+++ b/src/DataStreams/tests/expression_stream.cpp
@@ -33,7 +33,7 @@ try
     std::string input = "SELECT number, number / 3, number * number";

     ParserSelectQuery parser;
-    ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0);
+    ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);

     Context context = Context::createGlobal();
     context.makeGlobalContext();
diff --git a/dbms/src/DataStreams/tests/filter_stream.cpp b/src/DataStreams/tests/filter_stream.cpp
similarity index 99%
rename from dbms/src/DataStreams/tests/filter_stream.cpp
rename to src/DataStreams/tests/filter_stream.cpp
index 5e324251440..8356b90957c 100644
--- a/dbms/src/DataStreams/tests/filter_stream.cpp
+++ b/src/DataStreams/tests/filter_stream.cpp
@@ -35,7 +35,7 @@ try
     std::string input = "SELECT number, number % 3 == 1";

     ParserSelectQuery parser;
-    ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0);
+    ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);

     formatAST(*ast, std::cerr);
     std::cerr << std::endl;
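Both test programs above gain a trailing argument to parseQuery: after the maximum query size comes the maximum parser recursion depth, with 0 disabling the check. The new call shape, as used in the diffs (the parameter-name comments are illustrative, not from the source):

    ParserSelectQuery parser;
    std::string input = "SELECT number, number / 3";
    ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(),
                            /* description = */ "",
                            /* max_query_size = */ 0,
                            /* max_parser_depth = */ 0);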
diff --git a/dbms/src/DataStreams/tests/finish_sorting_stream.cpp b/src/DataStreams/tests/finish_sorting_stream.cpp
similarity index 100%
rename from dbms/src/DataStreams/tests/finish_sorting_stream.cpp
rename to src/DataStreams/tests/finish_sorting_stream.cpp
diff --git a/dbms/src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp b/src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp
similarity index 100%
rename from dbms/src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp
rename to src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp
diff --git a/dbms/src/DataStreams/tests/gtest_check_sorted_stream.cpp b/src/DataStreams/tests/gtest_check_sorted_stream.cpp
similarity index 100%
rename from dbms/src/DataStreams/tests/gtest_check_sorted_stream.cpp
rename to src/DataStreams/tests/gtest_check_sorted_stream.cpp
diff --git a/dbms/src/DataStreams/tests/union_stream2.cpp b/src/DataStreams/tests/union_stream2.cpp
similarity index 100%
rename from dbms/src/DataStreams/tests/union_stream2.cpp
rename to src/DataStreams/tests/union_stream2.cpp
diff --git a/dbms/src/DataTypes/CMakeLists.txt b/src/DataTypes/CMakeLists.txt
similarity index 100%
rename from dbms/src/DataTypes/CMakeLists.txt
rename to src/DataTypes/CMakeLists.txt
diff --git a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeAggregateFunction.cpp
rename to src/DataTypes/DataTypeAggregateFunction.cpp
diff --git a/dbms/src/DataTypes/DataTypeAggregateFunction.h b/src/DataTypes/DataTypeAggregateFunction.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeAggregateFunction.h
rename to src/DataTypes/DataTypeAggregateFunction.h
diff --git a/dbms/src/DataTypes/DataTypeArray.cpp b/src/DataTypes/DataTypeArray.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeArray.cpp
rename to src/DataTypes/DataTypeArray.cpp
diff --git a/dbms/src/DataTypes/DataTypeArray.h b/src/DataTypes/DataTypeArray.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeArray.h
rename to src/DataTypes/DataTypeArray.h
diff --git a/dbms/src/DataTypes/DataTypeCustom.h b/src/DataTypes/DataTypeCustom.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeCustom.h
rename to src/DataTypes/DataTypeCustom.h
diff --git a/dbms/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp b/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp
rename to src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp
diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp b/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp
rename to src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp
diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.h b/src/DataTypes/DataTypeCustomSimpleAggregateFunction.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.h
rename to src/DataTypes/DataTypeCustomSimpleAggregateFunction.h
diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp b/src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp
rename to src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp
diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.h b/src/DataTypes/DataTypeCustomSimpleTextSerialization.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.h
rename to src/DataTypes/DataTypeCustomSimpleTextSerialization.h
diff --git a/dbms/src/DataTypes/DataTypeDate.cpp b/src/DataTypes/DataTypeDate.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDate.cpp
rename to src/DataTypes/DataTypeDate.cpp
diff --git a/dbms/src/DataTypes/DataTypeDate.h b/src/DataTypes/DataTypeDate.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDate.h
rename to src/DataTypes/DataTypeDate.h
diff --git a/dbms/src/DataTypes/DataTypeDateTime.cpp b/src/DataTypes/DataTypeDateTime.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDateTime.cpp
rename to src/DataTypes/DataTypeDateTime.cpp
diff --git a/dbms/src/DataTypes/DataTypeDateTime.h b/src/DataTypes/DataTypeDateTime.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDateTime.h
rename to src/DataTypes/DataTypeDateTime.h
diff --git a/dbms/src/DataTypes/DataTypeDateTime64.cpp b/src/DataTypes/DataTypeDateTime64.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDateTime64.cpp
rename to src/DataTypes/DataTypeDateTime64.cpp
diff --git a/dbms/src/DataTypes/DataTypeDateTime64.h b/src/DataTypes/DataTypeDateTime64.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDateTime64.h
rename to src/DataTypes/DataTypeDateTime64.h
diff --git a/dbms/src/DataTypes/DataTypeDecimalBase.cpp b/src/DataTypes/DataTypeDecimalBase.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDecimalBase.cpp
rename to src/DataTypes/DataTypeDecimalBase.cpp
diff --git a/dbms/src/DataTypes/DataTypeDecimalBase.h b/src/DataTypes/DataTypeDecimalBase.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeDecimalBase.h
rename to src/DataTypes/DataTypeDecimalBase.h
diff --git a/dbms/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeEnum.cpp
rename to src/DataTypes/DataTypeEnum.cpp
diff --git a/dbms/src/DataTypes/DataTypeEnum.h b/src/DataTypes/DataTypeEnum.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeEnum.h
rename to src/DataTypes/DataTypeEnum.h
diff --git a/dbms/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp
similarity index 98%
rename from dbms/src/DataTypes/DataTypeFactory.cpp
rename to src/DataTypes/DataTypeFactory.cpp
index 20f7681ec1b..f81adfe347c 100644
--- a/dbms/src/DataTypes/DataTypeFactory.cpp
+++ b/src/DataTypes/DataTypeFactory.cpp
@@ -9,6 +9,8 @@
 #include
 #include
 #include
+#include <Core/Defines.h>
+

 namespace DB
 {
@@ -26,7 +28,7 @@ namespace ErrorCodes
 DataTypePtr DataTypeFactory::get(const String & full_name) const
 {
     ParserIdentifierWithOptionalParameters parser;
-    ASTPtr ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", 0);
+    ASTPtr ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
     return get(ast);
 }
diff --git a/dbms/src/DataTypes/DataTypeFactory.h b/src/DataTypes/DataTypeFactory.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeFactory.h
rename to src/DataTypes/DataTypeFactory.h
diff --git a/dbms/src/DataTypes/DataTypeFixedString.cpp b/src/DataTypes/DataTypeFixedString.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeFixedString.cpp
rename to src/DataTypes/DataTypeFixedString.cpp
diff --git a/dbms/src/DataTypes/DataTypeFixedString.h b/src/DataTypes/DataTypeFixedString.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeFixedString.h
rename to src/DataTypes/DataTypeFixedString.h
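The DataTypeFactory change above bounds type-name parsing with DBMS_DEFAULT_MAX_PARSER_DEPTH, so a pathologically nested name fails with a clean parse error instead of overflowing the stack. An illustrative use of the factory (the contrived deeply nested name is my example, not from the diff):

    /// Fine: moderate nesting.
    DataTypePtr type = DataTypeFactory::instance().get("Array(Nullable(UInt64))");
    /// A name with thousands of nested Array(...) levels now throws a
    /// too-deep-recursion parse error rather than crashing the server.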
diff --git a/dbms/src/DataTypes/DataTypeFunction.cpp b/src/DataTypes/DataTypeFunction.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeFunction.cpp
rename to src/DataTypes/DataTypeFunction.cpp
diff --git a/dbms/src/DataTypes/DataTypeFunction.h b/src/DataTypes/DataTypeFunction.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeFunction.h
rename to src/DataTypes/DataTypeFunction.h
diff --git a/dbms/src/DataTypes/DataTypeInterval.cpp b/src/DataTypes/DataTypeInterval.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeInterval.cpp
rename to src/DataTypes/DataTypeInterval.cpp
diff --git a/dbms/src/DataTypes/DataTypeInterval.h b/src/DataTypes/DataTypeInterval.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeInterval.h
rename to src/DataTypes/DataTypeInterval.h
diff --git a/dbms/src/DataTypes/DataTypeLowCardinality.cpp b/src/DataTypes/DataTypeLowCardinality.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeLowCardinality.cpp
rename to src/DataTypes/DataTypeLowCardinality.cpp
diff --git a/dbms/src/DataTypes/DataTypeLowCardinality.h b/src/DataTypes/DataTypeLowCardinality.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeLowCardinality.h
rename to src/DataTypes/DataTypeLowCardinality.h
diff --git a/dbms/src/DataTypes/DataTypeLowCardinalityHelpers.cpp b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeLowCardinalityHelpers.cpp
rename to src/DataTypes/DataTypeLowCardinalityHelpers.cpp
diff --git a/dbms/src/DataTypes/DataTypeNothing.cpp b/src/DataTypes/DataTypeNothing.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeNothing.cpp
rename to src/DataTypes/DataTypeNothing.cpp
diff --git a/dbms/src/DataTypes/DataTypeNothing.h b/src/DataTypes/DataTypeNothing.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeNothing.h
rename to src/DataTypes/DataTypeNothing.h
diff --git a/dbms/src/DataTypes/DataTypeNullable.cpp b/src/DataTypes/DataTypeNullable.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeNullable.cpp
rename to src/DataTypes/DataTypeNullable.cpp
diff --git a/dbms/src/DataTypes/DataTypeNullable.h b/src/DataTypes/DataTypeNullable.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeNullable.h
rename to src/DataTypes/DataTypeNullable.h
diff --git a/dbms/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeNumberBase.cpp
rename to src/DataTypes/DataTypeNumberBase.cpp
diff --git a/dbms/src/DataTypes/DataTypeNumberBase.h b/src/DataTypes/DataTypeNumberBase.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeNumberBase.h
rename to src/DataTypes/DataTypeNumberBase.h
diff --git a/dbms/src/DataTypes/DataTypeSet.h b/src/DataTypes/DataTypeSet.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeSet.h
rename to src/DataTypes/DataTypeSet.h
diff --git a/dbms/src/DataTypes/DataTypeString.cpp b/src/DataTypes/DataTypeString.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeString.cpp
rename to src/DataTypes/DataTypeString.cpp
diff --git a/dbms/src/DataTypes/DataTypeString.h b/src/DataTypes/DataTypeString.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeString.h
rename to src/DataTypes/DataTypeString.h
diff --git a/dbms/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeTuple.cpp
rename to src/DataTypes/DataTypeTuple.cpp
diff --git a/dbms/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeTuple.h
rename to src/DataTypes/DataTypeTuple.h
diff --git a/dbms/src/DataTypes/DataTypeUUID.cpp b/src/DataTypes/DataTypeUUID.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypeUUID.cpp
rename to src/DataTypes/DataTypeUUID.cpp
diff --git a/dbms/src/DataTypes/DataTypeUUID.h b/src/DataTypes/DataTypeUUID.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeUUID.h
rename to src/DataTypes/DataTypeUUID.h
diff --git a/dbms/src/DataTypes/DataTypeWithSimpleSerialization.h b/src/DataTypes/DataTypeWithSimpleSerialization.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypeWithSimpleSerialization.h
rename to src/DataTypes/DataTypeWithSimpleSerialization.h
diff --git a/dbms/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypesDecimal.cpp
rename to src/DataTypes/DataTypesDecimal.cpp
diff --git a/dbms/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypesDecimal.h
rename to src/DataTypes/DataTypesDecimal.h
diff --git a/dbms/src/DataTypes/DataTypesNumber.cpp b/src/DataTypes/DataTypesNumber.cpp
similarity index 100%
rename from dbms/src/DataTypes/DataTypesNumber.cpp
rename to src/DataTypes/DataTypesNumber.cpp
diff --git a/dbms/src/DataTypes/DataTypesNumber.h b/src/DataTypes/DataTypesNumber.h
similarity index 100%
rename from dbms/src/DataTypes/DataTypesNumber.h
rename to src/DataTypes/DataTypesNumber.h
diff --git a/dbms/src/DataTypes/FieldToDataType.cpp b/src/DataTypes/FieldToDataType.cpp
similarity index 100%
rename from dbms/src/DataTypes/FieldToDataType.cpp
rename to src/DataTypes/FieldToDataType.cpp
diff --git a/dbms/src/DataTypes/FieldToDataType.h b/src/DataTypes/FieldToDataType.h
similarity index 100%
rename from dbms/src/DataTypes/FieldToDataType.h
rename to src/DataTypes/FieldToDataType.h
diff --git a/dbms/src/DataTypes/IDataType.cpp b/src/DataTypes/IDataType.cpp
similarity index 100%
rename from dbms/src/DataTypes/IDataType.cpp
rename to src/DataTypes/IDataType.cpp
diff --git a/dbms/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h
similarity index 100%
rename from dbms/src/DataTypes/IDataType.h
rename to src/DataTypes/IDataType.h
diff --git a/dbms/src/DataTypes/IDataTypeDummy.h b/src/DataTypes/IDataTypeDummy.h
similarity index 100%
rename from dbms/src/DataTypes/IDataTypeDummy.h
rename to src/DataTypes/IDataTypeDummy.h
diff --git a/dbms/src/DataTypes/Native.h b/src/DataTypes/Native.h
similarity index 100%
rename from dbms/src/DataTypes/Native.h
rename to src/DataTypes/Native.h
diff --git a/dbms/src/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp
similarity index 100%
rename from dbms/src/DataTypes/NestedUtils.cpp
rename to src/DataTypes/NestedUtils.cpp
diff --git a/dbms/src/DataTypes/NestedUtils.h b/src/DataTypes/NestedUtils.h
similarity index 100%
rename from dbms/src/DataTypes/NestedUtils.h
rename to src/DataTypes/NestedUtils.h
diff --git a/dbms/src/DataTypes/NumberTraits.h b/src/DataTypes/NumberTraits.h
similarity index 100%
rename from dbms/src/DataTypes/NumberTraits.h
rename to src/DataTypes/NumberTraits.h
diff --git a/dbms/src/DataTypes/convertMySQLDataType.cpp b/src/DataTypes/convertMySQLDataType.cpp
similarity index 100%
rename from dbms/src/DataTypes/convertMySQLDataType.cpp
rename to src/DataTypes/convertMySQLDataType.cpp
diff --git a/dbms/src/DataTypes/convertMySQLDataType.h b/src/DataTypes/convertMySQLDataType.h
similarity index 100%
rename from dbms/src/DataTypes/convertMySQLDataType.h
rename to src/DataTypes/convertMySQLDataType.h
diff --git a/dbms/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp
similarity index 100%
rename from dbms/src/DataTypes/getLeastSupertype.cpp
rename to src/DataTypes/getLeastSupertype.cpp
diff --git a/dbms/src/DataTypes/getLeastSupertype.h b/src/DataTypes/getLeastSupertype.h
similarity index 100%
rename from dbms/src/DataTypes/getLeastSupertype.h
rename to src/DataTypes/getLeastSupertype.h
diff --git a/dbms/src/DataTypes/getMostSubtype.cpp b/src/DataTypes/getMostSubtype.cpp
similarity index 100%
rename from dbms/src/DataTypes/getMostSubtype.cpp
rename to src/DataTypes/getMostSubtype.cpp
diff --git a/dbms/src/DataTypes/getMostSubtype.h b/src/DataTypes/getMostSubtype.h
similarity index 100%
rename from dbms/src/DataTypes/getMostSubtype.h
rename to src/DataTypes/getMostSubtype.h
diff --git a/dbms/src/DataTypes/tests/CMakeLists.txt b/src/DataTypes/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/DataTypes/tests/CMakeLists.txt
rename to src/DataTypes/tests/CMakeLists.txt
diff --git a/dbms/src/DataTypes/tests/data_type_string.cpp b/src/DataTypes/tests/data_type_string.cpp
similarity index 100%
rename from dbms/src/DataTypes/tests/data_type_string.cpp
rename to src/DataTypes/tests/data_type_string.cpp
diff --git a/dbms/src/DataTypes/tests/data_types_number_fixed.cpp b/src/DataTypes/tests/data_types_number_fixed.cpp
similarity index 100%
rename from dbms/src/DataTypes/tests/data_types_number_fixed.cpp
rename to src/DataTypes/tests/data_types_number_fixed.cpp
diff --git a/dbms/src/DataTypes/tests/gtest_data_type_get_common_type.cpp b/src/DataTypes/tests/gtest_data_type_get_common_type.cpp
similarity index 100%
rename from dbms/src/DataTypes/tests/gtest_data_type_get_common_type.cpp
rename to src/DataTypes/tests/gtest_data_type_get_common_type.cpp
diff --git a/dbms/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp
similarity index 98%
rename from dbms/src/Databases/DatabaseDictionary.cpp
rename to src/Databases/DatabaseDictionary.cpp
index 006eb1656a2..9e7788bf846 100644
--- a/dbms/src/Databases/DatabaseDictionary.cpp
+++ b/src/Databases/DatabaseDictionary.cpp
@@ -64,7 +64,7 @@ StoragePtr DatabaseDictionary::tryGetTable(
     const Context & context,
     const String & table_name) const
 {
-    auto dict_ptr = context.getExternalDictionariesLoader().tryGetDictionary(table_name);
+    auto dict_ptr = context.getExternalDictionariesLoader().tryGetDictionary(table_name, true /*load*/);
     if (dict_ptr)
     {
         const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
@@ -94,7 +94,7 @@ ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const Context & context,
     const auto & dictionaries = context.getExternalDictionariesLoader();
     auto dictionary = throw_on_error ? dictionaries.getDictionary(table_name)
-                                     : dictionaries.tryGetDictionary(table_name);
+                                     : dictionaries.tryGetDictionary(table_name, true /*load*/);

     if (!dictionary)
         return {};
diff --git a/dbms/src/Databases/DatabaseDictionary.h b/src/Databases/DatabaseDictionary.h
similarity index 100%
rename from dbms/src/Databases/DatabaseDictionary.h
rename to src/Databases/DatabaseDictionary.h
diff --git a/dbms/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp
similarity index 79%
rename from dbms/src/Databases/DatabaseFactory.cpp
rename to src/Databases/DatabaseFactory.cpp
index 40e5682565d..f1cea04dc29 100644
--- a/dbms/src/Databases/DatabaseFactory.cpp
+++ b/src/Databases/DatabaseFactory.cpp
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include <Interpreters/evaluateConstantExpression.h>
 #include
 #include
 #include
@@ -15,6 +16,7 @@

 #if USE_MYSQL
 #include
+#include
 #endif

@@ -49,6 +51,15 @@ DatabasePtr DatabaseFactory::get(
     }
 }

+template <typename ValueType>
+static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &engine_name)
+{
+    if (!ast || !ast->as<ASTLiteral>())
+        throw Exception("Database engine " + engine_name + " requested literal argument.", ErrorCodes::BAD_ARGUMENTS);
+
+    return ast->as<ASTLiteral>()->value.safeGet<ValueType>();
+}
+
 DatabasePtr DatabaseFactory::getImpl(
     const String & database_name, const String & metadata_path, const ASTStorage * engine_define, Context & context)
 {
@@ -79,11 +90,14 @@ DatabasePtr DatabaseFactory::getImpl(
             throw Exception("MySQL Database require mysql_hostname, mysql_database_name, mysql_username, mysql_password arguments.",
                 ErrorCodes::BAD_ARGUMENTS);

-        const auto & arguments = engine->arguments->children;
-        const auto & host_name_and_port = arguments[0]->as<ASTLiteral>()->value.safeGet<String>();
-        const auto & database_name_in_mysql = arguments[1]->as<ASTLiteral>()->value.safeGet<String>();
-        const auto & mysql_user_name = arguments[2]->as<ASTLiteral>()->value.safeGet<String>();
-        const auto & mysql_user_password = arguments[3]->as<ASTLiteral>()->value.safeGet<String>();
+
+        ASTs & arguments = engine->arguments->children;
+        arguments[1] = evaluateConstantExpressionOrIdentifierAsLiteral(arguments[1], context);
+
+        const auto & host_name_and_port = safeGetLiteralValue<String>(arguments[0], "MySQL");
+        const auto & database_name_in_mysql = safeGetLiteralValue<String>(arguments[1], "MySQL");
+        const auto & mysql_user_name = safeGetLiteralValue<String>(arguments[2], "MySQL");
+        const auto & mysql_user_password = safeGetLiteralValue<String>(arguments[3], "MySQL");

         try
         {
@@ -114,7 +128,7 @@ DatabasePtr DatabaseFactory::getImpl(

         const auto & arguments = engine->arguments->children;

-        const auto cache_expiration_time_seconds = arguments[0]->as<ASTLiteral>()->value.safeGet<UInt64>();
+        const auto cache_expiration_time_seconds = safeGetLiteralValue<UInt64>(arguments[0], "Lazy");

         return std::make_shared<DatabaseLazy>(database_name, metadata_path, cache_expiration_time_seconds, context);
     }
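The new safeGetLiteralValue helper centralizes the check that a database engine argument is a literal of the expected type, turning a would-be null dereference of as<ASTLiteral>() into a clean BAD_ARGUMENTS error that names the engine. Usage, mirroring the hunk above (`engine_args` stands for the ENGINE = ... argument list):

    /// A non-literal argument (e.g. a bare identifier) now raises BAD_ARGUMENTS
    /// with the engine name in the message instead of crashing.
    const String host = safeGetLiteralValue<String>(engine_args[0], "MySQL");
    const UInt64 ttl = safeGetLiteralValue<UInt64>(engine_args[0], "Lazy");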
diff --git a/dbms/src/Databases/DatabaseFactory.h b/src/Databases/DatabaseFactory.h
similarity index 100%
rename from dbms/src/Databases/DatabaseFactory.h
rename to src/Databases/DatabaseFactory.h
diff --git a/dbms/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp
similarity index 100%
rename from dbms/src/Databases/DatabaseLazy.cpp
rename to src/Databases/DatabaseLazy.cpp
diff --git a/dbms/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h
similarity index 100%
rename from dbms/src/Databases/DatabaseLazy.h
rename to src/Databases/DatabaseLazy.h
diff --git a/dbms/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp
similarity index 100%
rename from dbms/src/Databases/DatabaseMemory.cpp
rename to src/Databases/DatabaseMemory.cpp
diff --git a/dbms/src/Databases/DatabaseMemory.h b/src/Databases/DatabaseMemory.h
similarity index 100%
rename from dbms/src/Databases/DatabaseMemory.h
rename to src/Databases/DatabaseMemory.h
diff --git a/dbms/src/Databases/DatabaseMySQL.cpp b/src/Databases/DatabaseMySQL.cpp
similarity index 99%
rename from dbms/src/Databases/DatabaseMySQL.cpp
rename to src/Databases/DatabaseMySQL.cpp
index ad40cff9e6b..1cbbd4b06d9 100644
--- a/dbms/src/Databases/DatabaseMySQL.cpp
+++ b/src/Databases/DatabaseMySQL.cpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -40,6 +41,7 @@ namespace ErrorCodes

 constexpr static const auto suffix = ".remove_flag";
 static constexpr const std::chrono::seconds cleaner_sleep_time{30};
+static const SettingSeconds lock_acquire_timeout{10};

 static String toQueryStringWithQuote(const std::vector<String> & quote_list)
 {
@@ -358,7 +360,7 @@ void DatabaseMySQL::cleanOutdatedTables()
             ++iterator;
         else
         {
-            const auto table_lock = (*iterator)->lockAlterIntention();
+            const auto table_lock = (*iterator)->lockAlterIntention(RWLockImpl::NO_QUERY, lock_acquire_timeout);

             (*iterator)->shutdown();
             (*iterator)->is_dropped = true;
diff --git a/dbms/src/Databases/DatabaseMySQL.h b/src/Databases/DatabaseMySQL.h
similarity index 100%
rename from dbms/src/Databases/DatabaseMySQL.h
rename to src/Databases/DatabaseMySQL.h
diff --git a/dbms/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp
similarity index 100%
rename from dbms/src/Databases/DatabaseOnDisk.cpp
rename to src/Databases/DatabaseOnDisk.cpp
diff --git a/dbms/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h
similarity index 100%
rename from dbms/src/Databases/DatabaseOnDisk.h
rename to src/Databases/DatabaseOnDisk.h
diff --git a/dbms/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp
similarity index 98%
rename from dbms/src/Databases/DatabaseOrdinary.cpp
rename to src/Databases/DatabaseOrdinary.cpp
index a1f5ea1ae6f..11c4a4400cd 100644
--- a/dbms/src/Databases/DatabaseOrdinary.cpp
+++ b/src/Databases/DatabaseOrdinary.cpp
@@ -234,7 +234,8 @@ void DatabaseOrdinary::alterTable(
     }

     ParserCreateQuery parser;
-    ASTPtr ast = parseQuery(parser, statement.data(), statement.data() + statement.size(), "in file " + table_metadata_path, 0);
+    ASTPtr ast = parseQuery(parser, statement.data(), statement.data() + statement.size(), "in file " + table_metadata_path,
+                            0, context.getSettingsRef().max_parser_depth);

     auto & ast_create_query = ast->as<ASTCreateQuery &>();
diff --git a/dbms/src/Databases/DatabaseOrdinary.h b/src/Databases/DatabaseOrdinary.h
similarity index 100%
rename from dbms/src/Databases/DatabaseOrdinary.h
rename to src/Databases/DatabaseOrdinary.h
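The DatabaseMySQL change above gives the background cleaner a bounded lock wait: lockAlterIntention now takes the id of the acquiring query (NO_QUERY for background work) plus a timeout, matching the lock_acquire_timeout discipline introduced elsewhere in this patch. Sketched call, with names taken from the hunk and `table` standing for any StoragePtr being cleaned up:

    static const SettingSeconds lock_acquire_timeout{10};
    /// Background thread: no query id, wait at most 10 seconds for the lock.
    auto table_lock = table->lockAlterIntention(RWLockImpl::NO_QUERY, lock_acquire_timeout);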
diff --git a/dbms/src/Databases/DatabaseWithDictionaries.cpp b/src/Databases/DatabaseWithDictionaries.cpp
similarity index 89%
rename from dbms/src/Databases/DatabaseWithDictionaries.cpp
rename to src/Databases/DatabaseWithDictionaries.cpp
index e849962aae3..6673fdf8075 100644
--- a/dbms/src/Databases/DatabaseWithDictionaries.cpp
+++ b/src/Databases/DatabaseWithDictionaries.cpp
@@ -26,6 +26,8 @@ namespace ErrorCodes
     extern const int TABLE_ALREADY_EXISTS;
     extern const int UNKNOWN_TABLE;
     extern const int DICTIONARY_ALREADY_EXISTS;
+    extern const int FILE_DOESNT_EXIST;
+    extern const int CANNOT_GET_CREATE_TABLE_QUERY;
 }

@@ -165,7 +167,7 @@ void DatabaseWithDictionaries::removeDictionary(const Context & context, const String & dictionary_name)
     }
 }

-StoragePtr DatabaseWithDictionaries::tryGetTable(const Context & context, const String & table_name) const
+StoragePtr DatabaseWithDictionaries::tryGetTableImpl(const Context & context, const String & table_name, bool load) const
 {
     if (auto table_ptr = DatabaseWithOwnTablesBase::tryGetTable(context, table_name))
         return table_ptr;
@@ -173,10 +175,34 @@ StoragePtr DatabaseWithDictionaries::tryGetTable(const Context & context, const String & table_name) const
     if (isDictionaryExist(context, table_name))
         /// We don't need lock database here, because database doesn't store dictionary itself
         /// just metadata
-        return getDictionaryStorage(context, table_name);
+        return getDictionaryStorage(context, table_name, load);

     return {};
 }

+StoragePtr DatabaseWithDictionaries::tryGetTable(const Context & context, const String & table_name) const
+{
+    return tryGetTableImpl(context, table_name, true /*load*/);
+}
+
+ASTPtr DatabaseWithDictionaries::getCreateTableQueryImpl(const Context & context, const String & table_name, bool throw_on_error) const
+{
+    ASTPtr ast;
+    bool has_table = tryGetTableImpl(context, table_name, false /*load*/) != nullptr;
+    auto table_metadata_path = getObjectMetadataPath(table_name);
+    try
+    {
+        ast = getCreateQueryFromMetadata(context, table_metadata_path, throw_on_error);
+    }
+    catch (const Exception & e)
+    {
+        if (!has_table && e.code() == ErrorCodes::FILE_DOESNT_EXIST && throw_on_error)
+            throw Exception{"Table " + backQuote(table_name) + " doesn't exist",
+                            ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY};
+        else if (throw_on_error)
+            throw;
+    }
+    return ast;
+}

 DatabaseTablesIteratorPtr DatabaseWithDictionaries::getTablesWithDictionaryTablesIterator(
     const Context & context, const FilterByNameFunction & filter_by_dictionary_name)
@@ -195,7 +221,7 @@ DatabaseTablesIteratorPtr DatabaseWithDictionaries::getTablesWithDictionaryTablesIterator(
     while (dictionaries_it && dictionaries_it->isValid())
     {
         auto table_name = dictionaries_it->name();
-        auto table_ptr = getDictionaryStorage(context, table_name);
+        auto table_ptr = getDictionaryStorage(context, table_name, false /*load*/);
         if (table_ptr)
             result.emplace(table_name, table_ptr);
         dictionaries_it->next();
@@ -223,11 +249,11 @@ bool DatabaseWithDictionaries::isDictionaryExist(const Context & /*context*/, const String & dictionary_name) const
     return dictionaries.find(dictionary_name) != dictionaries.end();
 }

-StoragePtr DatabaseWithDictionaries::getDictionaryStorage(const Context & context, const String & table_name) const
+StoragePtr DatabaseWithDictionaries::getDictionaryStorage(const Context & context, const String & table_name, bool load) const
 {
     auto dict_name = database_name + "." + table_name;
     const auto & external_loader = context.getExternalDictionariesLoader();
-    auto dict_ptr = external_loader.tryGetDictionary(dict_name);
+    auto dict_ptr = external_loader.tryGetDictionary(dict_name, load);
     if (dict_ptr)
     {
         const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
diff --git a/dbms/src/Databases/DatabaseWithDictionaries.h b/src/Databases/DatabaseWithDictionaries.h
similarity index 87%
rename from dbms/src/Databases/DatabaseWithDictionaries.h
rename to src/Databases/DatabaseWithDictionaries.h
index e47ab6206c5..50e4dca671f 100644
--- a/dbms/src/Databases/DatabaseWithDictionaries.h
+++ b/src/Databases/DatabaseWithDictionaries.h
@@ -20,6 +20,8 @@ public:
     StoragePtr tryGetTable(const Context & context, const String & table_name) const override;

+    ASTPtr getCreateTableQueryImpl(const Context & context, const String & table_name, bool throw_on_error) const override;
+
     DatabaseTablesIteratorPtr getTablesWithDictionaryTablesIterator(const Context & context, const FilterByNameFunction & filter_by_dictionary_name) override;

     DatabaseDictionariesIteratorPtr getDictionariesIterator(const Context & context, const FilterByNameFunction & filter_by_dictionary_name) override;
@@ -37,7 +39,7 @@ protected:
     void attachToExternalDictionariesLoader(Context & context);
     void detachFromExternalDictionariesLoader();

-    StoragePtr getDictionaryStorage(const Context & context, const String & table_name) const;
+    StoragePtr getDictionaryStorage(const Context & context, const String & table_name, bool load) const;

     ASTPtr getCreateDictionaryQueryImpl(const Context & context,
                                         const String & dictionary_name,
@@ -45,6 +47,8 @@ private:
     ext::scope_guard database_as_config_repo_for_external_loader;
+
+    StoragePtr tryGetTableImpl(const Context & context, const String & table_name, bool load) const;
 };

 }
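The load flag threaded through tryGetTableImpl and getDictionaryStorage above lets metadata-only operations such as SHOW CREATE TABLE inspect a dictionary-backed table without forcing the dictionary to load, while a real read still loads it. In outline, paraphrasing the hunks rather than introducing new API:

    /// SELECT path: materialize the dictionary if necessary.
    tryGetTableImpl(context, table_name, true /*load*/);
    /// SHOW CREATE TABLE path: existence check only, never triggers a load;
    /// the CREATE query itself is read back from the metadata file, and a
    /// missing file is reported as CANNOT_GET_CREATE_TABLE_QUERY.
    tryGetTableImpl(context, table_name, false /*load*/);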
+ table_name + " as table, use DETACH DICTIONARY query.", ErrorCodes::UNKNOWN_TABLE); auto it = tables.find(table_name); if (it == tables.end()) diff --git a/dbms/src/Databases/DatabasesCommon.h b/src/Databases/DatabasesCommon.h similarity index 100% rename from dbms/src/Databases/DatabasesCommon.h rename to src/Databases/DatabasesCommon.h diff --git a/dbms/src/Databases/IDatabase.h b/src/Databases/IDatabase.h similarity index 100% rename from dbms/src/Databases/IDatabase.h rename to src/Databases/IDatabase.h diff --git a/dbms/src/Dictionaries/CMakeLists.txt b/src/Dictionaries/CMakeLists.txt similarity index 100% rename from dbms/src/Dictionaries/CMakeLists.txt rename to src/Dictionaries/CMakeLists.txt diff --git a/dbms/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary.cpp rename to src/Dictionaries/CacheDictionary.cpp diff --git a/dbms/src/Dictionaries/CacheDictionary.h b/src/Dictionaries/CacheDictionary.h similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary.h rename to src/Dictionaries/CacheDictionary.h diff --git a/dbms/src/Dictionaries/CacheDictionary.inc.h b/src/Dictionaries/CacheDictionary.inc.h similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary.inc.h rename to src/Dictionaries/CacheDictionary.inc.h diff --git a/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in b/src/Dictionaries/CacheDictionary_generate1.cpp.in similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in rename to src/Dictionaries/CacheDictionary_generate1.cpp.in diff --git a/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in b/src/Dictionaries/CacheDictionary_generate2.cpp.in similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in rename to src/Dictionaries/CacheDictionary_generate2.cpp.in diff --git a/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in b/src/Dictionaries/CacheDictionary_generate3.cpp.in similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in rename to src/Dictionaries/CacheDictionary_generate3.cpp.in diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp similarity index 92% rename from dbms/src/Dictionaries/ClickHouseDictionarySource.cpp rename to src/Dictionaries/ClickHouseDictionarySource.cpp index 0894a655724..ad08754e4e7 100644 --- a/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -131,6 +132,7 @@ BlockInputStreamPtr ClickHouseDictionarySource::loadAll() { BlockIO res = executeQuery(load_all_query, context, true); /// FIXME res.in may implicitly use some objects owned be res, but them will be destructed after return + res.in = std::make_shared(res.in, sample_block, ConvertingBlockInputStream::MatchColumnsMode::Position); return res.in; } return std::make_shared(pool, load_all_query, sample_block, context); @@ -140,7 +142,11 @@ BlockInputStreamPtr ClickHouseDictionarySource::loadUpdatedAll() { std::string load_update_query = getUpdateFieldAndDate(); if (is_local) - return executeQuery(load_update_query, context, true).in; + { + auto res = executeQuery(load_update_query, context, true); + res.in = std::make_shared(res.in, sample_block, ConvertingBlockInputStream::MatchColumnsMode::Position); + return res.in; + } return std::make_shared(pool, 
load_update_query, sample_block, context); } @@ -183,7 +189,12 @@ std::string ClickHouseDictionarySource::toString() const BlockInputStreamPtr ClickHouseDictionarySource::createStreamForSelectiveLoad(const std::string & query) { if (is_local) - return executeQuery(query, context, true).in; + { + auto res = executeQuery(query, context, true); + res.in = std::make_shared( + res.in, sample_block, ConvertingBlockInputStream::MatchColumnsMode::Position); + return res.in; + } return std::make_shared(pool, query, sample_block, context); } diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/ClickHouseDictionarySource.h rename to src/Dictionaries/ClickHouseDictionarySource.h diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp b/src/Dictionaries/ComplexKeyCacheDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp rename to src/Dictionaries/ComplexKeyCacheDictionary.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h b/src/Dictionaries/ComplexKeyCacheDictionary.h similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary.h rename to src/Dictionaries/ComplexKeyCacheDictionary.h diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp b/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp rename to src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in b/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in rename to src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in b/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in rename to src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in b/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in rename to src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp b/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp rename to src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp b/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp rename to src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp b/src/Dictionaries/ComplexKeyHashedDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp rename to src/Dictionaries/ComplexKeyHashedDictionary.cpp diff --git 
a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h b/src/Dictionaries/ComplexKeyHashedDictionary.h similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyHashedDictionary.h rename to src/Dictionaries/ComplexKeyHashedDictionary.h diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStream.h b/src/Dictionaries/DictionaryBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryBlockInputStream.h rename to src/Dictionaries/DictionaryBlockInputStream.h diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp b/src/Dictionaries/DictionaryBlockInputStreamBase.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp rename to src/Dictionaries/DictionaryBlockInputStreamBase.cpp diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h b/src/Dictionaries/DictionaryBlockInputStreamBase.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h rename to src/Dictionaries/DictionaryBlockInputStreamBase.h diff --git a/dbms/src/Dictionaries/DictionaryFactory.cpp b/src/Dictionaries/DictionaryFactory.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionaryFactory.cpp rename to src/Dictionaries/DictionaryFactory.cpp diff --git a/dbms/src/Dictionaries/DictionaryFactory.h b/src/Dictionaries/DictionaryFactory.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryFactory.h rename to src/Dictionaries/DictionaryFactory.h diff --git a/dbms/src/Dictionaries/DictionarySourceFactory.cpp b/src/Dictionaries/DictionarySourceFactory.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceFactory.cpp rename to src/Dictionaries/DictionarySourceFactory.cpp diff --git a/dbms/src/Dictionaries/DictionarySourceFactory.h b/src/Dictionaries/DictionarySourceFactory.h similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceFactory.h rename to src/Dictionaries/DictionarySourceFactory.h diff --git a/dbms/src/Dictionaries/DictionarySourceHelpers.cpp b/src/Dictionaries/DictionarySourceHelpers.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceHelpers.cpp rename to src/Dictionaries/DictionarySourceHelpers.cpp diff --git a/dbms/src/Dictionaries/DictionarySourceHelpers.h b/src/Dictionaries/DictionarySourceHelpers.h similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceHelpers.h rename to src/Dictionaries/DictionarySourceHelpers.h diff --git a/dbms/src/Dictionaries/DictionaryStructure.cpp b/src/Dictionaries/DictionaryStructure.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionaryStructure.cpp rename to src/Dictionaries/DictionaryStructure.cpp diff --git a/dbms/src/Dictionaries/DictionaryStructure.h b/src/Dictionaries/DictionaryStructure.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryStructure.h rename to src/Dictionaries/DictionaryStructure.h diff --git a/dbms/src/Dictionaries/Embedded/CMakeLists.txt b/src/Dictionaries/Embedded/CMakeLists.txt similarity index 100% rename from dbms/src/Dictionaries/Embedded/CMakeLists.txt rename to src/Dictionaries/Embedded/CMakeLists.txt diff --git a/dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.cpp b/src/Dictionaries/Embedded/GeoDictionariesLoader.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.cpp rename to src/Dictionaries/Embedded/GeoDictionariesLoader.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.h b/src/Dictionaries/Embedded/GeoDictionariesLoader.h 
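Note on the ClickHouseDictionarySource hunks above: every locally executed dictionary query is now wrapped in a ConvertingBlockInputStream with MatchColumnsMode::Position, so the columns produced by the query are cast and renamed to match the dictionary's expected sample_block by position. A minimal self-contained sketch of that position-based conversion idea follows; Block, Column, and convertByPosition here are simplified stand-ins, not ClickHouse's real interfaces.

// Illustrative sketch only: position-based header conversion with stand-in types.
#include <cstddef>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

struct Column
{
    std::string name;
    std::string type;
    std::vector<std::string> data; // values kept as strings for brevity
};

using Block = std::vector<Column>;
using CastFn = std::function<Column(const Column &, const std::string &)>;

// Reorder/rename/cast the columns of `block` so they match `header` by position,
// mirroring what ConvertingBlockInputStream::MatchColumnsMode::Position does.
Block convertByPosition(const Block & block, const Block & header, const CastFn & cast)
{
    if (block.size() != header.size())
        throw std::runtime_error("Block structure mismatch: different number of columns");

    Block result;
    for (std::size_t i = 0; i < header.size(); ++i)
    {
        Column converted = (block[i].type == header[i].type)
            ? block[i]
            : cast(block[i], header[i].type); // insert a cast where the types differ
        converted.name = header[i].name;      // always take the name from the header
        result.push_back(std::move(converted));
    }
    return result;
}

The point of doing this on the local path is that executeQuery() may return columns in a different order or with different names than the dictionary structure declares; the remote path already went through a sample-block-aware stream, so only the local results needed the extra conversion.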
similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.h rename to src/Dictionaries/Embedded/GeoDictionariesLoader.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/Entries.h b/src/Dictionaries/Embedded/GeodataProviders/Entries.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/Entries.h rename to src/Dictionaries/Embedded/GeodataProviders/Entries.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp rename to src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h rename to src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp rename to src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h rename to src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h rename to src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h rename to src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp rename to src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h rename to src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp rename to src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h similarity index 100% rename from 
dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h rename to src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/Types.h b/src/Dictionaries/Embedded/GeodataProviders/Types.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/Types.h rename to src/Dictionaries/Embedded/GeodataProviders/Types.h diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchies.cpp b/src/Dictionaries/Embedded/RegionsHierarchies.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchies.cpp rename to src/Dictionaries/Embedded/RegionsHierarchies.cpp diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchies.h b/src/Dictionaries/Embedded/RegionsHierarchies.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchies.h rename to src/Dictionaries/Embedded/RegionsHierarchies.h diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/src/Dictionaries/Embedded/RegionsHierarchy.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp rename to src/Dictionaries/Embedded/RegionsHierarchy.cpp diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.h b/src/Dictionaries/Embedded/RegionsHierarchy.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchy.h rename to src/Dictionaries/Embedded/RegionsHierarchy.h diff --git a/dbms/src/Dictionaries/Embedded/RegionsNames.cpp b/src/Dictionaries/Embedded/RegionsNames.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsNames.cpp rename to src/Dictionaries/Embedded/RegionsNames.cpp diff --git a/dbms/src/Dictionaries/Embedded/RegionsNames.h b/src/Dictionaries/Embedded/RegionsNames.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsNames.h rename to src/Dictionaries/Embedded/RegionsNames.h diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.cpp b/src/Dictionaries/ExecutableDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/ExecutableDictionarySource.cpp rename to src/Dictionaries/ExecutableDictionarySource.cpp diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.h b/src/Dictionaries/ExecutableDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/ExecutableDictionarySource.h rename to src/Dictionaries/ExecutableDictionarySource.h diff --git a/dbms/src/Dictionaries/ExternalQueryBuilder.cpp b/src/Dictionaries/ExternalQueryBuilder.cpp similarity index 100% rename from dbms/src/Dictionaries/ExternalQueryBuilder.cpp rename to src/Dictionaries/ExternalQueryBuilder.cpp diff --git a/dbms/src/Dictionaries/ExternalQueryBuilder.h b/src/Dictionaries/ExternalQueryBuilder.h similarity index 100% rename from dbms/src/Dictionaries/ExternalQueryBuilder.h rename to src/Dictionaries/ExternalQueryBuilder.h diff --git a/dbms/src/Dictionaries/FileDictionarySource.cpp b/src/Dictionaries/FileDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/FileDictionarySource.cpp rename to src/Dictionaries/FileDictionarySource.cpp diff --git a/dbms/src/Dictionaries/FileDictionarySource.h b/src/Dictionaries/FileDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/FileDictionarySource.h rename to src/Dictionaries/FileDictionarySource.h diff --git a/dbms/src/Dictionaries/FlatDictionary.cpp b/src/Dictionaries/FlatDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/FlatDictionary.cpp rename to 
src/Dictionaries/FlatDictionary.cpp diff --git a/dbms/src/Dictionaries/FlatDictionary.h b/src/Dictionaries/FlatDictionary.h similarity index 100% rename from dbms/src/Dictionaries/FlatDictionary.h rename to src/Dictionaries/FlatDictionary.h diff --git a/dbms/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/HTTPDictionarySource.cpp rename to src/Dictionaries/HTTPDictionarySource.cpp diff --git a/dbms/src/Dictionaries/HTTPDictionarySource.h b/src/Dictionaries/HTTPDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/HTTPDictionarySource.h rename to src/Dictionaries/HTTPDictionarySource.h diff --git a/dbms/src/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/HashedDictionary.cpp rename to src/Dictionaries/HashedDictionary.cpp diff --git a/dbms/src/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h similarity index 100% rename from dbms/src/Dictionaries/HashedDictionary.h rename to src/Dictionaries/HashedDictionary.h diff --git a/dbms/src/Dictionaries/IDictionary.h b/src/Dictionaries/IDictionary.h similarity index 100% rename from dbms/src/Dictionaries/IDictionary.h rename to src/Dictionaries/IDictionary.h diff --git a/dbms/src/Dictionaries/IDictionarySource.h b/src/Dictionaries/IDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/IDictionarySource.h rename to src/Dictionaries/IDictionarySource.h diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/LibraryDictionarySource.cpp rename to src/Dictionaries/LibraryDictionarySource.cpp diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.h b/src/Dictionaries/LibraryDictionarySource.h similarity index 95% rename from dbms/src/Dictionaries/LibraryDictionarySource.h rename to src/Dictionaries/LibraryDictionarySource.h index e42a7ba1dc8..4d73b3f97d4 100644 --- a/dbms/src/Dictionaries/LibraryDictionarySource.h +++ b/src/Dictionaries/LibraryDictionarySource.h @@ -28,7 +28,7 @@ class CStringsHolder; /// Allows loading dictionaries from dynamic libraries (.so) /// Experimental version -/// Example: dbms/tests/external_dictionaries/dictionary_library/dictionary_library.cpp +/// Example: tests/external_dictionaries/dictionary_library/dictionary_library.cpp class LibraryDictionarySource final : public IDictionarySource { public: diff --git a/dbms/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/src/Dictionaries/LibraryDictionarySourceExternal.cpp similarity index 100% rename from dbms/src/Dictionaries/LibraryDictionarySourceExternal.cpp rename to src/Dictionaries/LibraryDictionarySourceExternal.cpp diff --git a/dbms/src/Dictionaries/LibraryDictionarySourceExternal.h b/src/Dictionaries/LibraryDictionarySourceExternal.h similarity index 100% rename from dbms/src/Dictionaries/LibraryDictionarySourceExternal.h rename to src/Dictionaries/LibraryDictionarySourceExternal.h diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp b/src/Dictionaries/MongoDBBlockInputStream.cpp similarity index 100% rename from dbms/src/Dictionaries/MongoDBBlockInputStream.cpp rename to src/Dictionaries/MongoDBBlockInputStream.cpp diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.h b/src/Dictionaries/MongoDBBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/MongoDBBlockInputStream.h rename to 
src/Dictionaries/MongoDBBlockInputStream.h diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp similarity index 99% rename from dbms/src/Dictionaries/MongoDBDictionarySource.cpp rename to src/Dictionaries/MongoDBDictionarySource.cpp index 0484315aec0..7247d8a4613 100644 --- a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/src/Dictionaries/MongoDBDictionarySource.cpp @@ -48,7 +48,7 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory) // only after poco // naming conflict: // Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value); -// dbms/src/IO/WriteHelpers.h:146 #define writeCString(s, buf) +// src/IO/WriteHelpers.h:146 #define writeCString(s, buf) # include # include # include diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.h b/src/Dictionaries/MongoDBDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/MongoDBDictionarySource.h rename to src/Dictionaries/MongoDBDictionarySource.h diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/MySQLDictionarySource.cpp rename to src/Dictionaries/MySQLDictionarySource.cpp diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.h b/src/Dictionaries/MySQLDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/MySQLDictionarySource.h rename to src/Dictionaries/MySQLDictionarySource.h diff --git a/dbms/src/Dictionaries/PolygonDictionary.cpp b/src/Dictionaries/PolygonDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/PolygonDictionary.cpp rename to src/Dictionaries/PolygonDictionary.cpp diff --git a/dbms/src/Dictionaries/PolygonDictionary.h b/src/Dictionaries/PolygonDictionary.h similarity index 100% rename from dbms/src/Dictionaries/PolygonDictionary.h rename to src/Dictionaries/PolygonDictionary.h diff --git a/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h b/src/Dictionaries/RangeDictionaryBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h rename to src/Dictionaries/RangeDictionaryBlockInputStream.h diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.cpp b/src/Dictionaries/RangeHashedDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/RangeHashedDictionary.cpp rename to src/Dictionaries/RangeHashedDictionary.cpp diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h similarity index 100% rename from dbms/src/Dictionaries/RangeHashedDictionary.h rename to src/Dictionaries/RangeHashedDictionary.h diff --git a/dbms/src/Dictionaries/RedisBlockInputStream.cpp b/src/Dictionaries/RedisBlockInputStream.cpp similarity index 100% rename from dbms/src/Dictionaries/RedisBlockInputStream.cpp rename to src/Dictionaries/RedisBlockInputStream.cpp diff --git a/dbms/src/Dictionaries/RedisBlockInputStream.h b/src/Dictionaries/RedisBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/RedisBlockInputStream.h rename to src/Dictionaries/RedisBlockInputStream.h diff --git a/dbms/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/RedisDictionarySource.cpp rename to src/Dictionaries/RedisDictionarySource.cpp diff --git a/dbms/src/Dictionaries/RedisDictionarySource.h b/src/Dictionaries/RedisDictionarySource.h similarity index 100% rename from 
dbms/src/Dictionaries/RedisDictionarySource.h rename to src/Dictionaries/RedisDictionarySource.h diff --git a/dbms/src/Dictionaries/TrieDictionary.cpp b/src/Dictionaries/TrieDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/TrieDictionary.cpp rename to src/Dictionaries/TrieDictionary.cpp diff --git a/dbms/src/Dictionaries/TrieDictionary.h b/src/Dictionaries/TrieDictionary.h similarity index 100% rename from dbms/src/Dictionaries/TrieDictionary.h rename to src/Dictionaries/TrieDictionary.h diff --git a/dbms/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/XDBCDictionarySource.cpp rename to src/Dictionaries/XDBCDictionarySource.cpp diff --git a/dbms/src/Dictionaries/XDBCDictionarySource.h b/src/Dictionaries/XDBCDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/XDBCDictionarySource.h rename to src/Dictionaries/XDBCDictionarySource.h diff --git a/dbms/src/Dictionaries/getDictionaryConfigurationFromAST.cpp b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp similarity index 100% rename from dbms/src/Dictionaries/getDictionaryConfigurationFromAST.cpp rename to src/Dictionaries/getDictionaryConfigurationFromAST.cpp diff --git a/dbms/src/Dictionaries/getDictionaryConfigurationFromAST.h b/src/Dictionaries/getDictionaryConfigurationFromAST.h similarity index 100% rename from dbms/src/Dictionaries/getDictionaryConfigurationFromAST.h rename to src/Dictionaries/getDictionaryConfigurationFromAST.h diff --git a/dbms/src/Dictionaries/readInvalidateQuery.cpp b/src/Dictionaries/readInvalidateQuery.cpp similarity index 100% rename from dbms/src/Dictionaries/readInvalidateQuery.cpp rename to src/Dictionaries/readInvalidateQuery.cpp diff --git a/dbms/src/Dictionaries/readInvalidateQuery.h b/src/Dictionaries/readInvalidateQuery.h similarity index 100% rename from dbms/src/Dictionaries/readInvalidateQuery.h rename to src/Dictionaries/readInvalidateQuery.h diff --git a/dbms/src/Dictionaries/registerDictionaries.cpp b/src/Dictionaries/registerDictionaries.cpp similarity index 100% rename from dbms/src/Dictionaries/registerDictionaries.cpp rename to src/Dictionaries/registerDictionaries.cpp diff --git a/dbms/src/Dictionaries/registerDictionaries.h b/src/Dictionaries/registerDictionaries.h similarity index 100% rename from dbms/src/Dictionaries/registerDictionaries.h rename to src/Dictionaries/registerDictionaries.h diff --git a/dbms/src/Dictionaries/tests/CMakeLists.txt b/src/Dictionaries/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Dictionaries/tests/CMakeLists.txt rename to src/Dictionaries/tests/CMakeLists.txt diff --git a/dbms/src/Dictionaries/tests/gtest_dictionary_configuration.cpp b/src/Dictionaries/tests/gtest_dictionary_configuration.cpp similarity index 98% rename from dbms/src/Dictionaries/tests/gtest_dictionary_configuration.cpp rename to src/Dictionaries/tests/gtest_dictionary_configuration.cpp index 9c1ad9467b0..ae9c5385b8d 100644 --- a/dbms/src/Dictionaries/tests/gtest_dictionary_configuration.cpp +++ b/src/Dictionaries/tests/gtest_dictionary_configuration.cpp @@ -52,7 +52,7 @@ TEST(ConvertDictionaryAST, SimpleDictConfiguration) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as<ASTCreateQuery>();
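Note on the parseQuery changes in these test hunks: every call gains a second trailing 0. Judging by the parser work of that period, this is most likely the newly added maximum-recursion-depth argument (with 0 meaning "no limit"); treat that reading as an assumption. The general technique is worth illustrating: a recursive-descent parser can overflow the stack on deeply nested input such as "((((...1...))))" unless it enforces a depth cap. The grammar and names below are invented for illustration, not ClickHouse's parser API.

// Hedged sketch: depth-limited recursive descent over expr := '1' | '(' expr ')'.
#include <cstddef>
#include <stdexcept>
#include <string_view>

// Returns the number of characters consumed; throws instead of crashing on deep nesting.
std::size_t parseExpr(std::string_view s, std::size_t depth, std::size_t max_depth)
{
    if (max_depth != 0 && depth > max_depth)
        throw std::runtime_error("Maximum parse depth exceeded");
    if (!s.empty() && s.front() == '1')
        return 1;
    if (!s.empty() && s.front() == '(')
    {
        std::size_t inner = parseExpr(s.substr(1), depth + 1, max_depth); // recurse one level deeper
        if (s.size() < inner + 2 || s[inner + 1] != ')')
            throw std::runtime_error("Expected ')'");
        return inner + 2;
    }
    throw std::runtime_error("Expected '1' or '('");
}

For example, parseExpr("((1))", 0, 100) returns 5, while the same call with max_depth = 1 throws; passing max_depth = 0 disables the check, which matches the 0 passed in the tests above.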
DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create); @@ -120,7 +120,7 @@ TEST(ConvertDictionaryAST, TrickyAttributes) " SOURCE(CLICKHOUSE(HOST 'localhost'))"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as<ASTCreateQuery>(); DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create); @@ -165,7 +165,7 @@ TEST(ConvertDictionaryAST, ComplexKeyAndLayoutWithParams) " LIFETIME(MIN 1 MAX 10)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as<ASTCreateQuery>(); DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create); @@ -216,7 +216,7 @@ TEST(ConvertDictionaryAST, ComplexSource) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as<ASTCreateQuery>(); DictionaryConfigurationPtr config = getDictionaryConfigurationFromAST(*create); /// source diff --git a/dbms/src/Dictionaries/writeParenthesisedString.cpp b/src/Dictionaries/writeParenthesisedString.cpp similarity index 100% rename from dbms/src/Dictionaries/writeParenthesisedString.cpp rename to src/Dictionaries/writeParenthesisedString.cpp diff --git a/dbms/src/Dictionaries/writeParenthesisedString.h b/src/Dictionaries/writeParenthesisedString.h similarity index 100% rename from dbms/src/Dictionaries/writeParenthesisedString.h rename to src/Dictionaries/writeParenthesisedString.h diff --git a/dbms/src/Disks/CMakeLists.txt b/src/Disks/CMakeLists.txt similarity index 100% rename from dbms/src/Disks/CMakeLists.txt rename to src/Disks/CMakeLists.txt diff --git a/dbms/src/Disks/DiskFactory.cpp b/src/Disks/DiskFactory.cpp similarity index 100% rename from dbms/src/Disks/DiskFactory.cpp rename to src/Disks/DiskFactory.cpp diff --git a/dbms/src/Disks/DiskFactory.h b/src/Disks/DiskFactory.h similarity index 100% rename from dbms/src/Disks/DiskFactory.h rename to src/Disks/DiskFactory.h diff --git a/dbms/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp similarity index 100% rename from dbms/src/Disks/DiskLocal.cpp rename to src/Disks/DiskLocal.cpp diff --git a/dbms/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h similarity index 100% rename from dbms/src/Disks/DiskLocal.h rename to src/Disks/DiskLocal.h diff --git a/dbms/src/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp similarity index 100% rename from dbms/src/Disks/DiskMemory.cpp rename to src/Disks/DiskMemory.cpp diff --git a/dbms/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h similarity index 100% rename from dbms/src/Disks/DiskMemory.h rename to src/Disks/DiskMemory.h diff --git a/dbms/src/Disks/DiskS3.cpp b/src/Disks/DiskS3.cpp similarity index 100% rename from dbms/src/Disks/DiskS3.cpp rename to src/Disks/DiskS3.cpp diff --git a/dbms/src/Disks/DiskS3.h b/src/Disks/DiskS3.h similarity index 100% rename from dbms/src/Disks/DiskS3.h rename to src/Disks/DiskS3.h diff --git a/dbms/src/Disks/DiskSpaceMonitor.cpp b/src/Disks/DiskSpaceMonitor.cpp similarity index 100% rename from dbms/src/Disks/DiskSpaceMonitor.cpp rename to src/Disks/DiskSpaceMonitor.cpp diff --git
a/dbms/src/Disks/DiskSpaceMonitor.h b/src/Disks/DiskSpaceMonitor.h similarity index 100% rename from dbms/src/Disks/DiskSpaceMonitor.h rename to src/Disks/DiskSpaceMonitor.h diff --git a/dbms/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp similarity index 100% rename from dbms/src/Disks/IDisk.cpp rename to src/Disks/IDisk.cpp diff --git a/dbms/src/Disks/IDisk.h b/src/Disks/IDisk.h similarity index 100% rename from dbms/src/Disks/IDisk.h rename to src/Disks/IDisk.h diff --git a/dbms/src/Disks/registerDisks.cpp b/src/Disks/registerDisks.cpp similarity index 100% rename from dbms/src/Disks/registerDisks.cpp rename to src/Disks/registerDisks.cpp diff --git a/dbms/src/Disks/registerDisks.h b/src/Disks/registerDisks.h similarity index 100% rename from dbms/src/Disks/registerDisks.h rename to src/Disks/registerDisks.h diff --git a/dbms/src/Disks/tests/CMakeLists.txt b/src/Disks/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Disks/tests/CMakeLists.txt rename to src/Disks/tests/CMakeLists.txt diff --git a/dbms/src/Disks/tests/gtest_disk.cpp b/src/Disks/tests/gtest_disk.cpp similarity index 100% rename from dbms/src/Disks/tests/gtest_disk.cpp rename to src/Disks/tests/gtest_disk.cpp diff --git a/dbms/src/Disks/tests/gtest_disk.h b/src/Disks/tests/gtest_disk.h similarity index 100% rename from dbms/src/Disks/tests/gtest_disk.h rename to src/Disks/tests/gtest_disk.h diff --git a/dbms/src/Disks/tests/gtest_path_functions.cpp b/src/Disks/tests/gtest_path_functions.cpp similarity index 100% rename from dbms/src/Disks/tests/gtest_path_functions.cpp rename to src/Disks/tests/gtest_path_functions.cpp diff --git a/dbms/src/Formats/CMakeLists.txt b/src/Formats/CMakeLists.txt similarity index 100% rename from dbms/src/Formats/CMakeLists.txt rename to src/Formats/CMakeLists.txt diff --git a/dbms/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp similarity index 99% rename from dbms/src/Formats/FormatFactory.cpp rename to src/Formats/FormatFactory.cpp index c2b890ec631..7d741004766 100644 --- a/dbms/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -356,6 +356,8 @@ FormatFactory::FormatFactory() registerInputFormatProcessorTemplate(*this); registerOutputFormatProcessorTemplate(*this); registerInputFormatProcessorRegexp(*this); + registerInputFormatProcessorMsgPack(*this); + registerOutputFormatProcessorMsgPack(*this); registerFileSegmentationEngineTabSeparated(*this); registerFileSegmentationEngineCSV(*this); diff --git a/dbms/src/Formats/FormatFactory.h b/src/Formats/FormatFactory.h similarity index 97% rename from dbms/src/Formats/FormatFactory.h rename to src/Formats/FormatFactory.h index 7c515dbce90..9199ed89890 100644 --- a/dbms/src/Formats/FormatFactory.h +++ b/src/Formats/FormatFactory.h @@ -171,7 +171,9 @@ void registerOutputFormatProcessorProtobuf(FormatFactory & factory); void registerInputFormatProcessorAvro(FormatFactory & factory); void registerOutputFormatProcessorAvro(FormatFactory & factory); void registerInputFormatProcessorTemplate(FormatFactory & factory); -void registerOutputFormatProcessorTemplate(FormatFactory &factory); +void registerOutputFormatProcessorTemplate(FormatFactory & factory); +void registerInputFormatProcessorMsgPack(FormatFactory & factory); +void registerOutputFormatProcessorMsgPack(FormatFactory & factory); /// File Segmentation Engines for parallel reading diff --git a/dbms/src/Formats/FormatSchemaInfo.cpp b/src/Formats/FormatSchemaInfo.cpp similarity index 100% rename from dbms/src/Formats/FormatSchemaInfo.cpp rename to 
src/Formats/FormatSchemaInfo.cpp diff --git a/dbms/src/Formats/FormatSchemaInfo.h b/src/Formats/FormatSchemaInfo.h similarity index 100% rename from dbms/src/Formats/FormatSchemaInfo.h rename to src/Formats/FormatSchemaInfo.h diff --git a/dbms/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h similarity index 100% rename from dbms/src/Formats/FormatSettings.h rename to src/Formats/FormatSettings.h diff --git a/dbms/src/Formats/IRowInputStream.cpp b/src/Formats/IRowInputStream.cpp similarity index 100% rename from dbms/src/Formats/IRowInputStream.cpp rename to src/Formats/IRowInputStream.cpp diff --git a/dbms/src/Formats/IRowInputStream.h b/src/Formats/IRowInputStream.h similarity index 100% rename from dbms/src/Formats/IRowInputStream.h rename to src/Formats/IRowInputStream.h diff --git a/dbms/src/Formats/IRowOutputStream.cpp b/src/Formats/IRowOutputStream.cpp similarity index 100% rename from dbms/src/Formats/IRowOutputStream.cpp rename to src/Formats/IRowOutputStream.cpp diff --git a/dbms/src/Formats/IRowOutputStream.h b/src/Formats/IRowOutputStream.h similarity index 100% rename from dbms/src/Formats/IRowOutputStream.h rename to src/Formats/IRowOutputStream.h diff --git a/dbms/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp similarity index 100% rename from dbms/src/Formats/MySQLBlockInputStream.cpp rename to src/Formats/MySQLBlockInputStream.cpp diff --git a/dbms/src/Formats/MySQLBlockInputStream.h b/src/Formats/MySQLBlockInputStream.h similarity index 100% rename from dbms/src/Formats/MySQLBlockInputStream.h rename to src/Formats/MySQLBlockInputStream.h diff --git a/dbms/src/Formats/NativeFormat.cpp b/src/Formats/NativeFormat.cpp similarity index 100% rename from dbms/src/Formats/NativeFormat.cpp rename to src/Formats/NativeFormat.cpp diff --git a/dbms/src/Formats/NullFormat.cpp b/src/Formats/NullFormat.cpp similarity index 100% rename from dbms/src/Formats/NullFormat.cpp rename to src/Formats/NullFormat.cpp diff --git a/dbms/src/Formats/ParsedTemplateFormatString.cpp b/src/Formats/ParsedTemplateFormatString.cpp similarity index 100% rename from dbms/src/Formats/ParsedTemplateFormatString.cpp rename to src/Formats/ParsedTemplateFormatString.cpp diff --git a/dbms/src/Formats/ParsedTemplateFormatString.h b/src/Formats/ParsedTemplateFormatString.h similarity index 100% rename from dbms/src/Formats/ParsedTemplateFormatString.h rename to src/Formats/ParsedTemplateFormatString.h diff --git a/dbms/src/Formats/ProtobufColumnMatcher.cpp b/src/Formats/ProtobufColumnMatcher.cpp similarity index 100% rename from dbms/src/Formats/ProtobufColumnMatcher.cpp rename to src/Formats/ProtobufColumnMatcher.cpp diff --git a/dbms/src/Formats/ProtobufColumnMatcher.h b/src/Formats/ProtobufColumnMatcher.h similarity index 100% rename from dbms/src/Formats/ProtobufColumnMatcher.h rename to src/Formats/ProtobufColumnMatcher.h diff --git a/dbms/src/Formats/ProtobufReader.cpp b/src/Formats/ProtobufReader.cpp similarity index 100% rename from dbms/src/Formats/ProtobufReader.cpp rename to src/Formats/ProtobufReader.cpp diff --git a/dbms/src/Formats/ProtobufReader.h b/src/Formats/ProtobufReader.h similarity index 100% rename from dbms/src/Formats/ProtobufReader.h rename to src/Formats/ProtobufReader.h diff --git a/dbms/src/Formats/ProtobufSchemas.cpp b/src/Formats/ProtobufSchemas.cpp similarity index 100% rename from dbms/src/Formats/ProtobufSchemas.cpp rename to src/Formats/ProtobufSchemas.cpp diff --git a/dbms/src/Formats/ProtobufSchemas.h b/src/Formats/ProtobufSchemas.h 
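Note on the FormatFactory hunks above: MsgPack support is wired in through the same registration pattern every other format uses, a pair of free functions (registerInputFormatProcessorMsgPack, registerOutputFormatProcessorMsgPack) that the factory constructor calls once. A minimal self-contained sketch of that pattern follows; FormatRegistry, IInputFormat, and registerInputFormatMsgPack are illustrative stand-ins, not ClickHouse's real FormatFactory API.

// Hedged sketch of name-keyed creator registration, as used by format factories.
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct IInputFormat { virtual ~IInputFormat() = default; };

class FormatRegistry
{
public:
    using Creator = std::function<std::unique_ptr<IInputFormat>()>;

    void registerInputFormat(const std::string & name, Creator creator)
    {
        // Reject duplicate registrations instead of silently overwriting.
        if (!creators.emplace(name, std::move(creator)).second)
            throw std::runtime_error("Format " + name + " is already registered");
    }

    std::unique_ptr<IInputFormat> create(const std::string & name) const
    {
        auto it = creators.find(name);
        if (it == creators.end())
            throw std::runtime_error("Unknown format " + name);
        return it->second(); // invoke the stored creator
    }

private:
    std::map<std::string, Creator> creators;
};

struct MsgPackInputFormat : IInputFormat {};

// Analogous to registerInputFormatProcessorMsgPack(FormatFactory &) in the diff:
// each format contributes one function that plugs its creator into the registry.
void registerInputFormatMsgPack(FormatRegistry & registry)
{
    registry.registerInputFormat("MsgPack", [] { return std::make_unique<MsgPackInputFormat>(); });
}

The design keeps the factory header free of per-format dependencies: the constructor only needs the register-function declarations, which is exactly what the FormatFactory.h hunk adds for MsgPack.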
similarity index 100% rename from dbms/src/Formats/ProtobufSchemas.h rename to src/Formats/ProtobufSchemas.h diff --git a/dbms/src/Formats/ProtobufWriter.cpp b/src/Formats/ProtobufWriter.cpp similarity index 100% rename from dbms/src/Formats/ProtobufWriter.cpp rename to src/Formats/ProtobufWriter.cpp diff --git a/dbms/src/Formats/ProtobufWriter.h b/src/Formats/ProtobufWriter.h similarity index 100% rename from dbms/src/Formats/ProtobufWriter.h rename to src/Formats/ProtobufWriter.h diff --git a/dbms/src/Formats/config_formats.h.in b/src/Formats/config_formats.h.in similarity index 100% rename from dbms/src/Formats/config_formats.h.in rename to src/Formats/config_formats.h.in diff --git a/dbms/src/Formats/tests/CMakeLists.txt b/src/Formats/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Formats/tests/CMakeLists.txt rename to src/Formats/tests/CMakeLists.txt diff --git a/dbms/src/Formats/tests/tab_separated_streams.cpp b/src/Formats/tests/tab_separated_streams.cpp similarity index 100% rename from dbms/src/Formats/tests/tab_separated_streams.cpp rename to src/Formats/tests/tab_separated_streams.cpp diff --git a/dbms/src/Formats/verbosePrintString.cpp b/src/Formats/verbosePrintString.cpp similarity index 100% rename from dbms/src/Formats/verbosePrintString.cpp rename to src/Formats/verbosePrintString.cpp diff --git a/dbms/src/Formats/verbosePrintString.h b/src/Formats/verbosePrintString.h similarity index 100% rename from dbms/src/Formats/verbosePrintString.h rename to src/Formats/verbosePrintString.h diff --git a/dbms/src/Functions/CMakeLists.txt b/src/Functions/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/CMakeLists.txt rename to src/Functions/CMakeLists.txt diff --git a/dbms/src/Functions/CRC.cpp b/src/Functions/CRC.cpp similarity index 100% rename from dbms/src/Functions/CRC.cpp rename to src/Functions/CRC.cpp diff --git a/dbms/src/Functions/CustomWeekTransforms.h b/src/Functions/CustomWeekTransforms.h similarity index 100% rename from dbms/src/Functions/CustomWeekTransforms.h rename to src/Functions/CustomWeekTransforms.h diff --git a/dbms/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h similarity index 100% rename from dbms/src/Functions/DateTimeTransforms.h rename to src/Functions/DateTimeTransforms.h diff --git a/dbms/src/Functions/DivisionUtils.h b/src/Functions/DivisionUtils.h similarity index 100% rename from dbms/src/Functions/DivisionUtils.h rename to src/Functions/DivisionUtils.h diff --git a/dbms/src/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h similarity index 100% rename from dbms/src/Functions/DummyJSONParser.h rename to src/Functions/DummyJSONParser.h diff --git a/dbms/src/Functions/EmptyImpl.h b/src/Functions/EmptyImpl.h similarity index 100% rename from dbms/src/Functions/EmptyImpl.h rename to src/Functions/EmptyImpl.h diff --git a/dbms/src/Functions/FunctionBase64Conversion.h b/src/Functions/FunctionBase64Conversion.h similarity index 100% rename from dbms/src/Functions/FunctionBase64Conversion.h rename to src/Functions/FunctionBase64Conversion.h diff --git a/dbms/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h similarity index 100% rename from dbms/src/Functions/FunctionBinaryArithmetic.h rename to src/Functions/FunctionBinaryArithmetic.h diff --git a/dbms/src/Functions/FunctionBitTestMany.h b/src/Functions/FunctionBitTestMany.h similarity index 100% rename from dbms/src/Functions/FunctionBitTestMany.h rename to src/Functions/FunctionBitTestMany.h diff 
--git a/dbms/src/Functions/FunctionCustomWeekToSomething.h b/src/Functions/FunctionCustomWeekToSomething.h similarity index 100% rename from dbms/src/Functions/FunctionCustomWeekToSomething.h rename to src/Functions/FunctionCustomWeekToSomething.h diff --git a/dbms/src/Functions/FunctionDateOrDateTimeAddInterval.h b/src/Functions/FunctionDateOrDateTimeAddInterval.h similarity index 100% rename from dbms/src/Functions/FunctionDateOrDateTimeAddInterval.h rename to src/Functions/FunctionDateOrDateTimeAddInterval.h diff --git a/dbms/src/Functions/FunctionDateOrDateTimeToSomething.h b/src/Functions/FunctionDateOrDateTimeToSomething.h similarity index 100% rename from dbms/src/Functions/FunctionDateOrDateTimeToSomething.h rename to src/Functions/FunctionDateOrDateTimeToSomething.h diff --git a/dbms/src/Functions/FunctionFQDN.cpp b/src/Functions/FunctionFQDN.cpp similarity index 100% rename from dbms/src/Functions/FunctionFQDN.cpp rename to src/Functions/FunctionFQDN.cpp diff --git a/dbms/src/Functions/FunctionFactory.cpp b/src/Functions/FunctionFactory.cpp similarity index 100% rename from dbms/src/Functions/FunctionFactory.cpp rename to src/Functions/FunctionFactory.cpp diff --git a/dbms/src/Functions/FunctionFactory.h b/src/Functions/FunctionFactory.h similarity index 100% rename from dbms/src/Functions/FunctionFactory.h rename to src/Functions/FunctionFactory.h diff --git a/dbms/src/Functions/FunctionHelpers.cpp b/src/Functions/FunctionHelpers.cpp similarity index 100% rename from dbms/src/Functions/FunctionHelpers.cpp rename to src/Functions/FunctionHelpers.cpp diff --git a/dbms/src/Functions/FunctionHelpers.h b/src/Functions/FunctionHelpers.h similarity index 97% rename from dbms/src/Functions/FunctionHelpers.h rename to src/Functions/FunctionHelpers.h index 00957935448..34aa0add6e1 100644 --- a/dbms/src/Functions/FunctionHelpers.h +++ b/src/Functions/FunctionHelpers.h @@ -24,6 +24,12 @@ const Type * checkAndGetDataType(const IDataType * data_type) return typeid_cast<const Type *>(data_type); } +template <typename... Ts> +bool checkDataTypes(const IDataType * data_type) +{ + return (...
|| typeid_cast<const Ts *>(data_type)); +} + template <typename Type> const ColumnConst * checkAndGetColumnConst(const IColumn * column) { diff --git a/dbms/src/Functions/FunctionIfBase.h b/src/Functions/FunctionIfBase.h similarity index 100% rename from dbms/src/Functions/FunctionIfBase.h rename to src/Functions/FunctionIfBase.h diff --git a/src/Functions/FunctionJoinGet.cpp b/src/Functions/FunctionJoinGet.cpp new file mode 100644 index 00000000000..97f2f82a38d --- /dev/null +++ b/src/Functions/FunctionJoinGet.cpp @@ -0,0 +1,123 @@ +#include + +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + +static auto getJoin(const ColumnsWithTypeAndName & arguments, const Context & context) +{ + if (arguments.size() != 3) + throw Exception{"Function joinGet takes 3 arguments", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; + + String join_name; + if (auto name_col = checkAndGetColumnConst<ColumnString>(arguments[0].column.get())) + { + join_name = name_col->getValue<String>(); + } + else + throw Exception{"Illegal type " + arguments[0].type->getName() + " of first argument of function joinGet, expected a const string.", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + + size_t dot = join_name.find('.'); + String database_name; + if (dot == String::npos) + { + database_name = context.getCurrentDatabase(); + dot = 0; + } + else + { + database_name = join_name.substr(0, dot); + ++dot; + } + String table_name = join_name.substr(dot); + auto table = DatabaseCatalog::instance().getTable({database_name, table_name}); + auto storage_join = std::dynamic_pointer_cast<StorageJoin>(table); + if (!storage_join) + throw Exception{"Table " + join_name + " should have engine StorageJoin", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + + String attr_name; + if (auto name_col = checkAndGetColumnConst<ColumnString>(arguments[1].column.get())) + { + attr_name = name_col->getValue<String>(); + } + else + throw Exception{"Illegal type " + arguments[1].type->getName() + + " of second argument of function joinGet, expected a const string.", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + return std::make_pair(storage_join, attr_name); +} + +template <bool or_null> +FunctionBaseImplPtr JoinGetOverloadResolver<or_null>::build(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const +{ + auto [storage_join, attr_name] = getJoin(arguments, context); + auto join = storage_join->getJoin(); + DataTypes data_types(arguments.size()); + + auto table_lock = storage_join->lockStructureForShare( + false, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout); + for (size_t i = 0; i < arguments.size(); ++i) + data_types[i] = arguments[i].type; + + auto return_type = join->joinGetReturnType(attr_name, or_null); + return std::make_unique<FunctionJoinGet<or_null>>(table_lock, storage_join, join, attr_name, data_types, return_type); +} + +template <bool or_null> +DataTypePtr JoinGetOverloadResolver<or_null>::getReturnType(const ColumnsWithTypeAndName & arguments) const +{ + auto [storage_join, attr_name] = getJoin(arguments, context); + auto join = storage_join->getJoin(); + return join->joinGetReturnType(attr_name, or_null); +} + + +template <bool or_null> +void ExecutableFunctionJoinGet<or_null>::execute(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) +{ + auto ctn = block.getByPosition(arguments[2]); + if (isColumnConst(*ctn.column)) + ctn.column = ctn.column->cloneResized(1); + ctn.name = ""; // make sure the key name never collides with the join columns + Block key_block = {ctn}; +
join->joinGet(key_block, attr_name, or_null); + auto & result_ctn = key_block.getByPosition(1); + if (isColumnConst(*ctn.column)) + result_ctn.column = ColumnConst::create(result_ctn.column, input_rows_count); + block.getByPosition(result) = result_ctn; +} + +template <bool or_null> +ExecutableFunctionImplPtr FunctionJoinGet<or_null>::prepare(const Block &, const ColumnNumbers &, size_t) const +{ + return std::make_unique<ExecutableFunctionJoinGet<or_null>>(join, attr_name); +} + +void registerFunctionJoinGet(FunctionFactory & factory) +{ + // joinGet + factory.registerFunction<JoinGetOverloadResolver<false>>(); + // joinGetOrNull + factory.registerFunction<JoinGetOverloadResolver<true>>(); +} + +template class ExecutableFunctionJoinGet<false>; +template class ExecutableFunctionJoinGet<true>; +template class FunctionJoinGet<false>; +template class FunctionJoinGet<true>; +template class JoinGetOverloadResolver<false>; +template class JoinGetOverloadResolver<true>; +} diff --git a/dbms/src/Functions/FunctionJoinGet.h b/src/Functions/FunctionJoinGet.h similarity index 89% rename from dbms/src/Functions/FunctionJoinGet.h rename to src/Functions/FunctionJoinGet.h index 8bc1f0d1fcb..f233ccd8a4f 100644 --- a/dbms/src/Functions/FunctionJoinGet.h +++ b/src/Functions/FunctionJoinGet.h @@ -6,16 +6,17 @@ namespace DB { class Context; -class Join; -using HashJoinPtr = std::shared_ptr<Join>; +class HashJoin; +using HashJoinPtr = std::shared_ptr<HashJoin>; +template <bool or_null> class ExecutableFunctionJoinGet final : public IExecutableFunctionImpl { public: ExecutableFunctionJoinGet(HashJoinPtr join_, String attr_name_) : join(std::move(join_)), attr_name(std::move(attr_name_)) {} - static constexpr auto name = "joinGet"; + static constexpr auto name = or_null ? "joinGetOrNull" : "joinGet"; bool useDefaultImplementationForNulls() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } @@ -30,10 +31,11 @@ private: const String attr_name; }; +template <bool or_null> class FunctionJoinGet final : public IFunctionBaseImpl { public: - static constexpr auto name = "joinGet"; + static constexpr auto name = or_null ? "joinGetOrNull" : "joinGet"; FunctionJoinGet(TableStructureReadLockHolder table_lock_, StoragePtr storage_join_, HashJoinPtr join_, String attr_name_, @@ -63,10 +65,11 @@ private: DataTypePtr return_type; }; +template <bool or_null> class JoinGetOverloadResolver final : public IFunctionOverloadResolverImpl { public: - static constexpr auto name = "joinGet"; + static constexpr auto name = or_null ?
"joinGetOrNull" : "joinGet"; static FunctionOverloadResolverImplPtr create(const Context & context) { return std::make_unique(context); } explicit JoinGetOverloadResolver(const Context & context_) : context(context_) {} diff --git a/dbms/src/Functions/FunctionMathBinaryFloat64.h b/src/Functions/FunctionMathBinaryFloat64.h similarity index 100% rename from dbms/src/Functions/FunctionMathBinaryFloat64.h rename to src/Functions/FunctionMathBinaryFloat64.h diff --git a/dbms/src/Functions/FunctionMathConstFloat64.h b/src/Functions/FunctionMathConstFloat64.h similarity index 100% rename from dbms/src/Functions/FunctionMathConstFloat64.h rename to src/Functions/FunctionMathConstFloat64.h diff --git a/dbms/src/Functions/FunctionMathUnary.h b/src/Functions/FunctionMathUnary.h similarity index 100% rename from dbms/src/Functions/FunctionMathUnary.h rename to src/Functions/FunctionMathUnary.h diff --git a/dbms/src/Functions/FunctionNumericPredicate.h b/src/Functions/FunctionNumericPredicate.h similarity index 100% rename from dbms/src/Functions/FunctionNumericPredicate.h rename to src/Functions/FunctionNumericPredicate.h diff --git a/dbms/src/Functions/FunctionStartsEndsWith.h b/src/Functions/FunctionStartsEndsWith.h similarity index 100% rename from dbms/src/Functions/FunctionStartsEndsWith.h rename to src/Functions/FunctionStartsEndsWith.h diff --git a/dbms/src/Functions/FunctionStringOrArrayToT.h b/src/Functions/FunctionStringOrArrayToT.h similarity index 100% rename from dbms/src/Functions/FunctionStringOrArrayToT.h rename to src/Functions/FunctionStringOrArrayToT.h diff --git a/dbms/src/Functions/FunctionStringToString.h b/src/Functions/FunctionStringToString.h similarity index 100% rename from dbms/src/Functions/FunctionStringToString.h rename to src/Functions/FunctionStringToString.h diff --git a/dbms/src/Functions/FunctionUnaryArithmetic.h b/src/Functions/FunctionUnaryArithmetic.h similarity index 100% rename from dbms/src/Functions/FunctionUnaryArithmetic.h rename to src/Functions/FunctionUnaryArithmetic.h diff --git a/dbms/src/Functions/FunctionsBitmap.cpp b/src/Functions/FunctionsBitmap.cpp similarity index 100% rename from dbms/src/Functions/FunctionsBitmap.cpp rename to src/Functions/FunctionsBitmap.cpp diff --git a/dbms/src/Functions/FunctionsBitmap.h b/src/Functions/FunctionsBitmap.h similarity index 98% rename from dbms/src/Functions/FunctionsBitmap.h rename to src/Functions/FunctionsBitmap.h index 3eec1d5a354..bf84bfbe47e 100644 --- a/dbms/src/Functions/FunctionsBitmap.h +++ b/src/Functions/FunctionsBitmap.h @@ -31,13 +31,13 @@ namespace ErrorCodes * bitmapBuild: integer[] -> bitmap * * Convert bitmap to integer array: - * bitmapToArray: bitmap -> integer[] + * bitmapToArray: bitmap -> integer[] * * Retrun the smallest value in the set: - * bitmapMin: bitmap -> integer + * bitmapMin: bitmap -> integer * * Retrun the greatest value in the set: - * bitmapMax: bitmap -> integer + * bitmapMax: bitmap -> integer * * Return subset in specified range (not include the range_end): * bitmapSubsetInRange: bitmap,integer,integer -> bitmap @@ -49,28 +49,28 @@ namespace ErrorCodes * bitmapTransform: bitmap,integer[],integer[] -> bitmap * * Two bitmap and calculation: - * bitmapAnd: bitmap,bitmap -> bitmap + * bitmapAnd: bitmap,bitmap -> bitmap * * Two bitmap or calculation: - * bitmapOr: bitmap,bitmap -> bitmap + * bitmapOr: bitmap,bitmap -> bitmap * * Two bitmap xor calculation: - * bitmapXor: bitmap,bitmap -> bitmap + * bitmapXor: bitmap,bitmap -> bitmap * * Two bitmap andnot calculation: - * 
bitmapAndnot: bitmap,bitmap -> bitmap + * bitmapAndnot: bitmap,bitmap -> bitmap * * Retrun bitmap cardinality: - * bitmapCardinality: bitmap -> integer + * bitmapCardinality: bitmap -> integer * * Two bitmap and calculation, return cardinality: - * bitmapAndCardinality: bitmap,bitmap -> integer + * bitmapAndCardinality: bitmap,bitmap -> integer * * Two bitmap or calculation, return cardinality: - * bitmapOrCardinality: bitmap,bitmap -> integer + * bitmapOrCardinality: bitmap,bitmap -> integer * * Two bitmap xor calculation, return cardinality: - * bitmapXorCardinality: bitmap,bitmap -> integer + * bitmapXorCardinality: bitmap,bitmap -> integer * * Two bitmap andnot calculation, return cardinality: * bitmapAndnotCardinality: bitmap,bitmap -> integer diff --git a/dbms/src/Functions/FunctionsCoding.cpp b/src/Functions/FunctionsCoding.cpp similarity index 100% rename from dbms/src/Functions/FunctionsCoding.cpp rename to src/Functions/FunctionsCoding.cpp diff --git a/dbms/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h similarity index 100% rename from dbms/src/Functions/FunctionsCoding.h rename to src/Functions/FunctionsCoding.h diff --git a/dbms/src/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h similarity index 99% rename from dbms/src/Functions/FunctionsComparison.h rename to src/Functions/FunctionsComparison.h index 824f64a6b45..97a996bbe7e 100644 --- a/dbms/src/Functions/FunctionsComparison.h +++ b/src/Functions/FunctionsComparison.h @@ -1105,8 +1105,8 @@ private: { DataTypePtr common_type = getLeastSupertype({c0.type, c1.type}); - ColumnPtr c0_converted = castColumn(c0, common_type, context); - ColumnPtr c1_converted = castColumn(c1, common_type, context); + ColumnPtr c0_converted = castColumn(c0, common_type); + ColumnPtr c1_converted = castColumn(c1, common_type); executeGenericIdenticalTypes(block, result, c0_converted.get(), c1_converted.get()); } diff --git a/dbms/src/Functions/FunctionsConsistentHashing.h b/src/Functions/FunctionsConsistentHashing.h similarity index 100% rename from dbms/src/Functions/FunctionsConsistentHashing.h rename to src/Functions/FunctionsConsistentHashing.h diff --git a/dbms/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp similarity index 100% rename from dbms/src/Functions/FunctionsConversion.cpp rename to src/Functions/FunctionsConversion.cpp diff --git a/dbms/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h similarity index 99% rename from dbms/src/Functions/FunctionsConversion.h rename to src/Functions/FunctionsConversion.h index 100737b43c7..b493aef4cac 100644 --- a/dbms/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -1496,10 +1496,12 @@ struct ToStringMonotonicity IFunction::Monotonicity positive(true, true); IFunction::Monotonicity not_monotonic; - /// `toString` function is monotonous if the argument is Date or DateTime, or non-negative numbers with the same number of symbols. + auto type_ptr = &type; + if (auto * low_cardinality_type = checkAndGetDataType(type_ptr)) + type_ptr = low_cardinality_type->getDictionaryType().get(); - if (checkAndGetDataType(&type) - || typeid_cast(&type)) + /// `toString` function is monotonous if the argument is Date or DateTime or String, or non-negative numbers with the same number of symbols. 
+ if (checkDataTypes(type_ptr)) return positive; if (left.isNull() || right.isNull()) @@ -2392,10 +2394,17 @@ protected: DataTypePtr getReturnType(const ColumnsWithTypeAndName & arguments) const override { - const auto type_col = checkAndGetColumnConst(arguments.back().column.get()); + const auto & column = arguments.back().column; + if (!column) + throw Exception("Second argument to " + getName() + " must be a constant string describing type." + " Instead there is non-constant column of type " + arguments.back().type->getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + const auto type_col = checkAndGetColumnConst(column.get()); if (!type_col) - throw Exception("Second argument to " + getName() + " must be a constant string describing type", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception("Second argument to " + getName() + " must be a constant string describing type." + " Instead there is a column with the following structure: " + column->dumpStructure(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); return DataTypeFactory::instance().get(type_col->getValue()); } diff --git a/dbms/src/Functions/FunctionsEmbeddedDictionaries.cpp b/src/Functions/FunctionsEmbeddedDictionaries.cpp similarity index 100% rename from dbms/src/Functions/FunctionsEmbeddedDictionaries.cpp rename to src/Functions/FunctionsEmbeddedDictionaries.cpp diff --git a/dbms/src/Functions/FunctionsEmbeddedDictionaries.h b/src/Functions/FunctionsEmbeddedDictionaries.h similarity index 100% rename from dbms/src/Functions/FunctionsEmbeddedDictionaries.h rename to src/Functions/FunctionsEmbeddedDictionaries.h diff --git a/dbms/src/Functions/FunctionsExternalDictionaries.cpp b/src/Functions/FunctionsExternalDictionaries.cpp similarity index 100% rename from dbms/src/Functions/FunctionsExternalDictionaries.cpp rename to src/Functions/FunctionsExternalDictionaries.cpp diff --git a/dbms/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h similarity index 99% rename from dbms/src/Functions/FunctionsExternalDictionaries.h rename to src/Functions/FunctionsExternalDictionaries.h index e1c89dd7d53..fc3c2c583a9 100644 --- a/dbms/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -128,7 +128,7 @@ private: auto dict = dictionaries_loader.getDictionary(dict_name_col->getValue()); const auto dict_ptr = dict.get(); - context.checkAccess(AccessType::dictHas, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); + context.checkAccess(AccessType::dictGet, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); if (!executeDispatchSimple(block, arguments, result, dict_ptr) && !executeDispatchSimple(block, arguments, result, dict_ptr) && @@ -1652,7 +1652,7 @@ private: auto dict = dictionaries_loader.getDictionary(dict_name_col->getValue()); const auto dict_ptr = dict.get(); - context.checkAccess(AccessType::dictGetHierarchy, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); + context.checkAccess(AccessType::dictGet, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); if (!executeDispatch(block, arguments, result, dict_ptr) && !executeDispatch(block, arguments, result, dict_ptr) && @@ -1816,7 +1816,7 @@ private: auto dict = dictionaries_loader.getDictionary(dict_name_col->getValue()); const auto dict_ptr = dict.get(); - context.checkAccess(AccessType::dictIsIn, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); + context.checkAccess(AccessType::dictGet, dict_ptr->getDatabaseOrNoDatabaseTag(), 
dict_ptr->getName()); if (!executeDispatch(block, arguments, result, dict_ptr) && !executeDispatch(block, arguments, result, dict_ptr) diff --git a/dbms/src/Functions/FunctionsExternalModels.cpp b/src/Functions/FunctionsExternalModels.cpp similarity index 100% rename from dbms/src/Functions/FunctionsExternalModels.cpp rename to src/Functions/FunctionsExternalModels.cpp diff --git a/dbms/src/Functions/FunctionsExternalModels.h b/src/Functions/FunctionsExternalModels.h similarity index 100% rename from dbms/src/Functions/FunctionsExternalModels.h rename to src/Functions/FunctionsExternalModels.h diff --git a/dbms/src/Functions/FunctionsFormatting.cpp b/src/Functions/FunctionsFormatting.cpp similarity index 100% rename from dbms/src/Functions/FunctionsFormatting.cpp rename to src/Functions/FunctionsFormatting.cpp diff --git a/dbms/src/Functions/FunctionsFormatting.h b/src/Functions/FunctionsFormatting.h similarity index 100% rename from dbms/src/Functions/FunctionsFormatting.h rename to src/Functions/FunctionsFormatting.h diff --git a/dbms/src/Functions/FunctionsHashing.cpp b/src/Functions/FunctionsHashing.cpp similarity index 100% rename from dbms/src/Functions/FunctionsHashing.cpp rename to src/Functions/FunctionsHashing.cpp diff --git a/dbms/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h similarity index 100% rename from dbms/src/Functions/FunctionsHashing.h rename to src/Functions/FunctionsHashing.h diff --git a/dbms/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp similarity index 100% rename from dbms/src/Functions/FunctionsJSON.cpp rename to src/Functions/FunctionsJSON.cpp diff --git a/dbms/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h similarity index 100% rename from dbms/src/Functions/FunctionsJSON.h rename to src/Functions/FunctionsJSON.h diff --git a/dbms/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp similarity index 100% rename from dbms/src/Functions/FunctionsLogical.cpp rename to src/Functions/FunctionsLogical.cpp diff --git a/dbms/src/Functions/FunctionsLogical.h b/src/Functions/FunctionsLogical.h similarity index 100% rename from dbms/src/Functions/FunctionsLogical.h rename to src/Functions/FunctionsLogical.h diff --git a/dbms/src/Functions/FunctionsMiscellaneous.h b/src/Functions/FunctionsMiscellaneous.h similarity index 100% rename from dbms/src/Functions/FunctionsMiscellaneous.h rename to src/Functions/FunctionsMiscellaneous.h diff --git a/dbms/src/Functions/FunctionsMultiStringPosition.h b/src/Functions/FunctionsMultiStringPosition.h similarity index 100% rename from dbms/src/Functions/FunctionsMultiStringPosition.h rename to src/Functions/FunctionsMultiStringPosition.h diff --git a/dbms/src/Functions/FunctionsMultiStringSearch.h b/src/Functions/FunctionsMultiStringSearch.h similarity index 100% rename from dbms/src/Functions/FunctionsMultiStringSearch.h rename to src/Functions/FunctionsMultiStringSearch.h diff --git a/dbms/src/Functions/FunctionsRandom.cpp b/src/Functions/FunctionsRandom.cpp similarity index 100% rename from dbms/src/Functions/FunctionsRandom.cpp rename to src/Functions/FunctionsRandom.cpp diff --git a/dbms/src/Functions/FunctionsRandom.h b/src/Functions/FunctionsRandom.h similarity index 100% rename from dbms/src/Functions/FunctionsRandom.h rename to src/Functions/FunctionsRandom.h diff --git a/dbms/src/Functions/FunctionsRound.cpp b/src/Functions/FunctionsRound.cpp similarity index 100% rename from dbms/src/Functions/FunctionsRound.cpp rename to 
src/Functions/FunctionsRound.cpp diff --git a/dbms/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h similarity index 98% rename from dbms/src/Functions/FunctionsRound.h rename to src/Functions/FunctionsRound.h index 6d1afe5480c..99fa1d9441a 100644 --- a/dbms/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -596,8 +596,7 @@ class FunctionRoundDown : public IFunction { public: static constexpr auto name = "roundDown"; - static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionRoundDown(const Context & context_) : context(context_) {} + static FunctionPtr create(const Context &) { return std::make_shared(); } public: String getName() const override { return name; } @@ -645,10 +644,10 @@ public: auto out = column_result.get(); if (!in_type->equals(*return_type)) - in_column = castColumn(block.getByPosition(arguments[0]), return_type, context); + in_column = castColumn(block.getByPosition(arguments[0]), return_type); if (!array_type->equals(*return_type)) - array_column = castColumn(block.getByPosition(arguments[1]), std::make_shared(return_type), context); + array_column = castColumn(block.getByPosition(arguments[1]), std::make_shared(return_type)); const auto in = in_column.get(); auto boundaries = typeid_cast(*array_column).getValue(); @@ -764,9 +763,6 @@ private: } } } - -private: - const Context & context; }; diff --git a/dbms/src/Functions/FunctionsStringArray.cpp b/src/Functions/FunctionsStringArray.cpp similarity index 100% rename from dbms/src/Functions/FunctionsStringArray.cpp rename to src/Functions/FunctionsStringArray.cpp diff --git a/dbms/src/Functions/FunctionsStringArray.h b/src/Functions/FunctionsStringArray.h similarity index 100% rename from dbms/src/Functions/FunctionsStringArray.h rename to src/Functions/FunctionsStringArray.h diff --git a/dbms/src/Functions/FunctionsStringRegex.cpp b/src/Functions/FunctionsStringRegex.cpp similarity index 100% rename from dbms/src/Functions/FunctionsStringRegex.cpp rename to src/Functions/FunctionsStringRegex.cpp diff --git a/dbms/src/Functions/FunctionsStringRegex.h b/src/Functions/FunctionsStringRegex.h similarity index 100% rename from dbms/src/Functions/FunctionsStringRegex.h rename to src/Functions/FunctionsStringRegex.h diff --git a/dbms/src/Functions/FunctionsStringSearch.h b/src/Functions/FunctionsStringSearch.h similarity index 100% rename from dbms/src/Functions/FunctionsStringSearch.h rename to src/Functions/FunctionsStringSearch.h diff --git a/dbms/src/Functions/FunctionsStringSearchToString.h b/src/Functions/FunctionsStringSearchToString.h similarity index 100% rename from dbms/src/Functions/FunctionsStringSearchToString.h rename to src/Functions/FunctionsStringSearchToString.h diff --git a/dbms/src/Functions/FunctionsStringSimilarity.cpp b/src/Functions/FunctionsStringSimilarity.cpp similarity index 100% rename from dbms/src/Functions/FunctionsStringSimilarity.cpp rename to src/Functions/FunctionsStringSimilarity.cpp diff --git a/dbms/src/Functions/FunctionsStringSimilarity.h b/src/Functions/FunctionsStringSimilarity.h similarity index 100% rename from dbms/src/Functions/FunctionsStringSimilarity.h rename to src/Functions/FunctionsStringSimilarity.h diff --git a/dbms/src/Functions/FunctionsVisitParam.h b/src/Functions/FunctionsVisitParam.h similarity index 100% rename from dbms/src/Functions/FunctionsVisitParam.h rename to src/Functions/FunctionsVisitParam.h diff --git a/dbms/src/Functions/GatherUtils/Algorithms.h 
b/src/Functions/GatherUtils/Algorithms.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Algorithms.h rename to src/Functions/GatherUtils/Algorithms.h diff --git a/dbms/src/Functions/GatherUtils/ArraySinkVisitor.h b/src/Functions/GatherUtils/ArraySinkVisitor.h similarity index 100% rename from dbms/src/Functions/GatherUtils/ArraySinkVisitor.h rename to src/Functions/GatherUtils/ArraySinkVisitor.h diff --git a/dbms/src/Functions/GatherUtils/ArraySourceVisitor.h b/src/Functions/GatherUtils/ArraySourceVisitor.h similarity index 100% rename from dbms/src/Functions/GatherUtils/ArraySourceVisitor.h rename to src/Functions/GatherUtils/ArraySourceVisitor.h diff --git a/dbms/src/Functions/GatherUtils/CMakeLists.txt b/src/Functions/GatherUtils/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/GatherUtils/CMakeLists.txt rename to src/Functions/GatherUtils/CMakeLists.txt diff --git a/dbms/src/Functions/GatherUtils/GatherUtils.h b/src/Functions/GatherUtils/GatherUtils.h similarity index 100% rename from dbms/src/Functions/GatherUtils/GatherUtils.h rename to src/Functions/GatherUtils/GatherUtils.h diff --git a/dbms/src/Functions/GatherUtils/IArraySink.h b/src/Functions/GatherUtils/IArraySink.h similarity index 100% rename from dbms/src/Functions/GatherUtils/IArraySink.h rename to src/Functions/GatherUtils/IArraySink.h diff --git a/dbms/src/Functions/GatherUtils/IArraySource.h b/src/Functions/GatherUtils/IArraySource.h similarity index 100% rename from dbms/src/Functions/GatherUtils/IArraySource.h rename to src/Functions/GatherUtils/IArraySource.h diff --git a/dbms/src/Functions/GatherUtils/IValueSource.h b/src/Functions/GatherUtils/IValueSource.h similarity index 100% rename from dbms/src/Functions/GatherUtils/IValueSource.h rename to src/Functions/GatherUtils/IValueSource.h diff --git a/dbms/src/Functions/GatherUtils/Selectors.h b/src/Functions/GatherUtils/Selectors.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Selectors.h rename to src/Functions/GatherUtils/Selectors.h diff --git a/dbms/src/Functions/GatherUtils/Sinks.h b/src/Functions/GatherUtils/Sinks.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Sinks.h rename to src/Functions/GatherUtils/Sinks.h diff --git a/dbms/src/Functions/GatherUtils/Slices.h b/src/Functions/GatherUtils/Slices.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Slices.h rename to src/Functions/GatherUtils/Slices.h diff --git a/dbms/src/Functions/GatherUtils/Sources.h b/src/Functions/GatherUtils/Sources.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Sources.h rename to src/Functions/GatherUtils/Sources.h diff --git a/dbms/src/Functions/GatherUtils/ValueSourceVisitor.h b/src/Functions/GatherUtils/ValueSourceVisitor.h similarity index 100% rename from dbms/src/Functions/GatherUtils/ValueSourceVisitor.h rename to src/Functions/GatherUtils/ValueSourceVisitor.h diff --git a/dbms/src/Functions/GatherUtils/concat.cpp b/src/Functions/GatherUtils/concat.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/concat.cpp rename to src/Functions/GatherUtils/concat.cpp diff --git a/dbms/src/Functions/GatherUtils/createArraySink.cpp b/src/Functions/GatherUtils/createArraySink.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/createArraySink.cpp rename to src/Functions/GatherUtils/createArraySink.cpp diff --git a/dbms/src/Functions/GatherUtils/createArraySource.cpp b/src/Functions/GatherUtils/createArraySource.cpp similarity index 100% rename from 
dbms/src/Functions/GatherUtils/createArraySource.cpp rename to src/Functions/GatherUtils/createArraySource.cpp diff --git a/dbms/src/Functions/GatherUtils/createValueSource.cpp b/src/Functions/GatherUtils/createValueSource.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/createValueSource.cpp rename to src/Functions/GatherUtils/createValueSource.cpp diff --git a/dbms/src/Functions/GatherUtils/has.cpp b/src/Functions/GatherUtils/has.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/has.cpp rename to src/Functions/GatherUtils/has.cpp diff --git a/dbms/src/Functions/GatherUtils/push.cpp b/src/Functions/GatherUtils/push.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/push.cpp rename to src/Functions/GatherUtils/push.cpp diff --git a/dbms/src/Functions/GatherUtils/resizeConstantSize.cpp b/src/Functions/GatherUtils/resizeConstantSize.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/resizeConstantSize.cpp rename to src/Functions/GatherUtils/resizeConstantSize.cpp diff --git a/dbms/src/Functions/GatherUtils/resizeDynamicSize.cpp b/src/Functions/GatherUtils/resizeDynamicSize.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/resizeDynamicSize.cpp rename to src/Functions/GatherUtils/resizeDynamicSize.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp b/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp rename to src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp rename to src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp rename to src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp rename to src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp b/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp rename to src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp rename to src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp diff --git a/dbms/src/Functions/GeoHash.cpp b/src/Functions/GeoHash.cpp similarity index 100% rename from dbms/src/Functions/GeoHash.cpp rename to src/Functions/GeoHash.cpp diff --git a/dbms/src/Functions/GeoHash.h b/src/Functions/GeoHash.h similarity index 100% rename from dbms/src/Functions/GeoHash.h 
rename to src/Functions/GeoHash.h diff --git a/dbms/src/Functions/HasTokenImpl.h b/src/Functions/HasTokenImpl.h similarity index 100% rename from dbms/src/Functions/HasTokenImpl.h rename to src/Functions/HasTokenImpl.h diff --git a/dbms/src/Functions/IFunction.cpp b/src/Functions/IFunction.cpp similarity index 100% rename from dbms/src/Functions/IFunction.cpp rename to src/Functions/IFunction.cpp diff --git a/dbms/src/Functions/IFunction.h b/src/Functions/IFunction.h similarity index 100% rename from dbms/src/Functions/IFunction.h rename to src/Functions/IFunction.h diff --git a/dbms/src/Functions/IFunctionAdaptors.h b/src/Functions/IFunctionAdaptors.h similarity index 100% rename from dbms/src/Functions/IFunctionAdaptors.h rename to src/Functions/IFunctionAdaptors.h diff --git a/dbms/src/Functions/IFunctionImpl.h b/src/Functions/IFunctionImpl.h similarity index 100% rename from dbms/src/Functions/IFunctionImpl.h rename to src/Functions/IFunctionImpl.h diff --git a/dbms/src/Functions/LowerUpperImpl.h b/src/Functions/LowerUpperImpl.h similarity index 100% rename from dbms/src/Functions/LowerUpperImpl.h rename to src/Functions/LowerUpperImpl.h diff --git a/dbms/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h similarity index 100% rename from dbms/src/Functions/LowerUpperUTF8Impl.h rename to src/Functions/LowerUpperUTF8Impl.h diff --git a/dbms/src/Functions/MultiSearchAllPositionsImpl.h b/src/Functions/MultiSearchAllPositionsImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchAllPositionsImpl.h rename to src/Functions/MultiSearchAllPositionsImpl.h diff --git a/dbms/src/Functions/MultiSearchFirstIndexImpl.h b/src/Functions/MultiSearchFirstIndexImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchFirstIndexImpl.h rename to src/Functions/MultiSearchFirstIndexImpl.h diff --git a/dbms/src/Functions/MultiSearchFirstPositionImpl.h b/src/Functions/MultiSearchFirstPositionImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchFirstPositionImpl.h rename to src/Functions/MultiSearchFirstPositionImpl.h diff --git a/dbms/src/Functions/MultiSearchImpl.h b/src/Functions/MultiSearchImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchImpl.h rename to src/Functions/MultiSearchImpl.h diff --git a/dbms/src/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h similarity index 100% rename from dbms/src/Functions/PolygonUtils.h rename to src/Functions/PolygonUtils.h diff --git a/dbms/src/Functions/PositionImpl.h b/src/Functions/PositionImpl.h similarity index 100% rename from dbms/src/Functions/PositionImpl.h rename to src/Functions/PositionImpl.h diff --git a/dbms/src/Functions/RapidJSONParser.h b/src/Functions/RapidJSONParser.h similarity index 100% rename from dbms/src/Functions/RapidJSONParser.h rename to src/Functions/RapidJSONParser.h diff --git a/dbms/src/Functions/Regexps.h b/src/Functions/Regexps.h similarity index 100% rename from dbms/src/Functions/Regexps.h rename to src/Functions/Regexps.h diff --git a/dbms/src/Functions/SimdJSONParser.h b/src/Functions/SimdJSONParser.h similarity index 100% rename from dbms/src/Functions/SimdJSONParser.h rename to src/Functions/SimdJSONParser.h diff --git a/dbms/src/Functions/URL/CMakeLists.txt b/src/Functions/URL/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/URL/CMakeLists.txt rename to src/Functions/URL/CMakeLists.txt diff --git a/dbms/src/Functions/URL/FunctionsURL.h b/src/Functions/URL/FunctionsURL.h similarity index 100% rename from 
dbms/src/Functions/URL/FunctionsURL.h rename to src/Functions/URL/FunctionsURL.h diff --git a/dbms/src/Functions/URL/URLHierarchy.cpp b/src/Functions/URL/URLHierarchy.cpp similarity index 100% rename from dbms/src/Functions/URL/URLHierarchy.cpp rename to src/Functions/URL/URLHierarchy.cpp diff --git a/dbms/src/Functions/URL/URLPathHierarchy.cpp b/src/Functions/URL/URLPathHierarchy.cpp similarity index 100% rename from dbms/src/Functions/URL/URLPathHierarchy.cpp rename to src/Functions/URL/URLPathHierarchy.cpp diff --git a/dbms/src/Functions/URL/basename.cpp b/src/Functions/URL/basename.cpp similarity index 100% rename from dbms/src/Functions/URL/basename.cpp rename to src/Functions/URL/basename.cpp diff --git a/dbms/src/Functions/URL/config_functions_url.h.in b/src/Functions/URL/config_functions_url.h.in similarity index 100% rename from dbms/src/Functions/URL/config_functions_url.h.in rename to src/Functions/URL/config_functions_url.h.in diff --git a/dbms/src/Functions/URL/cutFragment.cpp b/src/Functions/URL/cutFragment.cpp similarity index 100% rename from dbms/src/Functions/URL/cutFragment.cpp rename to src/Functions/URL/cutFragment.cpp diff --git a/dbms/src/Functions/URL/cutQueryString.cpp b/src/Functions/URL/cutQueryString.cpp similarity index 100% rename from dbms/src/Functions/URL/cutQueryString.cpp rename to src/Functions/URL/cutQueryString.cpp diff --git a/dbms/src/Functions/URL/cutQueryStringAndFragment.cpp b/src/Functions/URL/cutQueryStringAndFragment.cpp similarity index 100% rename from dbms/src/Functions/URL/cutQueryStringAndFragment.cpp rename to src/Functions/URL/cutQueryStringAndFragment.cpp diff --git a/dbms/src/Functions/URL/cutToFirstSignificantSubdomain.cpp b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp similarity index 100% rename from dbms/src/Functions/URL/cutToFirstSignificantSubdomain.cpp rename to src/Functions/URL/cutToFirstSignificantSubdomain.cpp diff --git a/dbms/src/Functions/URL/cutURLParameter.cpp b/src/Functions/URL/cutURLParameter.cpp similarity index 100% rename from dbms/src/Functions/URL/cutURLParameter.cpp rename to src/Functions/URL/cutURLParameter.cpp diff --git a/dbms/src/Functions/URL/cutWWW.cpp b/src/Functions/URL/cutWWW.cpp similarity index 100% rename from dbms/src/Functions/URL/cutWWW.cpp rename to src/Functions/URL/cutWWW.cpp diff --git a/dbms/src/Functions/URL/decodeURLComponent.cpp b/src/Functions/URL/decodeURLComponent.cpp similarity index 100% rename from dbms/src/Functions/URL/decodeURLComponent.cpp rename to src/Functions/URL/decodeURLComponent.cpp diff --git a/dbms/src/Functions/URL/domain.cpp b/src/Functions/URL/domain.cpp similarity index 100% rename from dbms/src/Functions/URL/domain.cpp rename to src/Functions/URL/domain.cpp diff --git a/dbms/src/Functions/URL/domain.h b/src/Functions/URL/domain.h similarity index 100% rename from dbms/src/Functions/URL/domain.h rename to src/Functions/URL/domain.h diff --git a/dbms/src/Functions/URL/domainWithoutWWW.cpp b/src/Functions/URL/domainWithoutWWW.cpp similarity index 100% rename from dbms/src/Functions/URL/domainWithoutWWW.cpp rename to src/Functions/URL/domainWithoutWWW.cpp diff --git a/dbms/src/Functions/URL/extractURLParameter.cpp b/src/Functions/URL/extractURLParameter.cpp similarity index 100% rename from dbms/src/Functions/URL/extractURLParameter.cpp rename to src/Functions/URL/extractURLParameter.cpp diff --git a/dbms/src/Functions/URL/extractURLParameterNames.cpp b/src/Functions/URL/extractURLParameterNames.cpp similarity index 100% rename from 
dbms/src/Functions/URL/extractURLParameterNames.cpp
rename to src/Functions/URL/extractURLParameterNames.cpp
diff --git a/dbms/src/Functions/URL/extractURLParameters.cpp b/src/Functions/URL/extractURLParameters.cpp
similarity index 100%
rename from dbms/src/Functions/URL/extractURLParameters.cpp
rename to src/Functions/URL/extractURLParameters.cpp
diff --git a/dbms/src/Functions/URL/firstSignificantSubdomain.cpp b/src/Functions/URL/firstSignificantSubdomain.cpp
similarity index 100%
rename from dbms/src/Functions/URL/firstSignificantSubdomain.cpp
rename to src/Functions/URL/firstSignificantSubdomain.cpp
diff --git a/dbms/src/Functions/URL/firstSignificantSubdomain.h b/src/Functions/URL/firstSignificantSubdomain.h
similarity index 100%
rename from dbms/src/Functions/URL/firstSignificantSubdomain.h
rename to src/Functions/URL/firstSignificantSubdomain.h
diff --git a/dbms/src/Functions/URL/fragment.cpp b/src/Functions/URL/fragment.cpp
similarity index 100%
rename from dbms/src/Functions/URL/fragment.cpp
rename to src/Functions/URL/fragment.cpp
diff --git a/dbms/src/Functions/URL/fragment.h b/src/Functions/URL/fragment.h
similarity index 100%
rename from dbms/src/Functions/URL/fragment.h
rename to src/Functions/URL/fragment.h
diff --git a/src/Functions/URL/path.cpp b/src/Functions/URL/path.cpp
new file mode 100644
index 00000000000..2260604c1fc
--- /dev/null
+++ b/src/Functions/URL/path.cpp
@@ -0,0 +1,19 @@
+#include
+#include
+#include "FunctionsURL.h"
+#include "path.h"
+#include
+
+
+namespace DB
+{
+
+struct NamePath { static constexpr auto name = "path"; };
+using FunctionPath = FunctionStringToString>, NamePath>;
+
+void registerFunctionPath(FunctionFactory & factory)
+{
+    factory.registerFunction();
+}
+
+}
diff --git a/src/Functions/URL/path.h b/src/Functions/URL/path.h
new file mode 100644
index 00000000000..f2c5d31a0b0
--- /dev/null
+++ b/src/Functions/URL/path.h
@@ -0,0 +1,56 @@
+#pragma once
+
+#include
+#include
+
+
+namespace DB
+{
+
+template
+struct ExtractPath
+{
+    static size_t getReserveLengthForElement() { return 25; }
+
+    static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size)
+    {
+        res_data = data;
+        res_size = 0;
+
+        Pos pos = data;
+        Pos end = pos + size;
+
+        /// We support URLs with and without a scheme:
+        /// 1. http://host/path
+        /// 2. host/path
+        /// We search for the first slash; if it is followed by another slash, we skip both and search for the next slash.
+
+        pos = find_first_symbols<'/'>(pos, end);
+        if (end == pos)
+            return;
+
+        /// Note that strings are zero-terminated.
+        bool has_subsequent_slash = pos[1] == '/';
+        if (has_subsequent_slash)
+        {
+            /// Search for the next slash.
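+            /// E.g. in "http://host/path" the first slash found is the one in "//";
+            /// both slashes are skipped, and the next slash found is the one that starts "/path".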
+ pos = find_first_symbols<'/'>(pos + 2, end); + if (end == pos) + return; + } + + res_data = pos; + + if constexpr (with_query_string) + { + res_size = end - res_data; + } + else + { + Pos query_string_or_fragment = find_first_symbols<'?', '#'>(pos, end); + res_size = query_string_or_fragment - res_data; + } + } +}; + +} diff --git a/src/Functions/URL/pathFull.cpp b/src/Functions/URL/pathFull.cpp new file mode 100644 index 00000000000..661fb298c04 --- /dev/null +++ b/src/Functions/URL/pathFull.cpp @@ -0,0 +1,18 @@ +#include +#include +#include "FunctionsURL.h" +#include "path.h" +#include + +namespace DB +{ + +struct NamePathFull { static constexpr auto name = "pathFull"; }; +using FunctionPathFull = FunctionStringToString>, NamePathFull>; + +void registerFunctionPathFull(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/dbms/src/Functions/URL/protocol.cpp b/src/Functions/URL/protocol.cpp similarity index 100% rename from dbms/src/Functions/URL/protocol.cpp rename to src/Functions/URL/protocol.cpp diff --git a/dbms/src/Functions/URL/protocol.h b/src/Functions/URL/protocol.h similarity index 100% rename from dbms/src/Functions/URL/protocol.h rename to src/Functions/URL/protocol.h diff --git a/dbms/src/Functions/URL/queryString.cpp b/src/Functions/URL/queryString.cpp similarity index 100% rename from dbms/src/Functions/URL/queryString.cpp rename to src/Functions/URL/queryString.cpp diff --git a/dbms/src/Functions/URL/queryString.h b/src/Functions/URL/queryString.h similarity index 100% rename from dbms/src/Functions/URL/queryString.h rename to src/Functions/URL/queryString.h diff --git a/dbms/src/Functions/URL/queryStringAndFragment.cpp b/src/Functions/URL/queryStringAndFragment.cpp similarity index 100% rename from dbms/src/Functions/URL/queryStringAndFragment.cpp rename to src/Functions/URL/queryStringAndFragment.cpp diff --git a/dbms/src/Functions/URL/queryStringAndFragment.h b/src/Functions/URL/queryStringAndFragment.h similarity index 100% rename from dbms/src/Functions/URL/queryStringAndFragment.h rename to src/Functions/URL/queryStringAndFragment.h diff --git a/dbms/src/Functions/URL/registerFunctionsURL.cpp b/src/Functions/URL/registerFunctionsURL.cpp similarity index 100% rename from dbms/src/Functions/URL/registerFunctionsURL.cpp rename to src/Functions/URL/registerFunctionsURL.cpp diff --git a/dbms/src/Functions/URL/tldLookup.generated.cpp b/src/Functions/URL/tldLookup.generated.cpp similarity index 100% rename from dbms/src/Functions/URL/tldLookup.generated.cpp rename to src/Functions/URL/tldLookup.generated.cpp diff --git a/dbms/src/Functions/URL/tldLookup.gperf b/src/Functions/URL/tldLookup.gperf similarity index 100% rename from dbms/src/Functions/URL/tldLookup.gperf rename to src/Functions/URL/tldLookup.gperf diff --git a/dbms/src/Functions/URL/tldLookup.h b/src/Functions/URL/tldLookup.h similarity index 100% rename from dbms/src/Functions/URL/tldLookup.h rename to src/Functions/URL/tldLookup.h diff --git a/dbms/src/Functions/URL/tldLookup.sh b/src/Functions/URL/tldLookup.sh similarity index 100% rename from dbms/src/Functions/URL/tldLookup.sh rename to src/Functions/URL/tldLookup.sh diff --git a/dbms/src/Functions/URL/topLevelDomain.cpp b/src/Functions/URL/topLevelDomain.cpp similarity index 100% rename from dbms/src/Functions/URL/topLevelDomain.cpp rename to src/Functions/URL/topLevelDomain.cpp diff --git a/dbms/src/Functions/abs.cpp b/src/Functions/abs.cpp similarity index 100% rename from dbms/src/Functions/abs.cpp rename to 
src/Functions/abs.cpp diff --git a/dbms/src/Functions/acos.cpp b/src/Functions/acos.cpp similarity index 100% rename from dbms/src/Functions/acos.cpp rename to src/Functions/acos.cpp diff --git a/dbms/src/Functions/addDays.cpp b/src/Functions/addDays.cpp similarity index 100% rename from dbms/src/Functions/addDays.cpp rename to src/Functions/addDays.cpp diff --git a/dbms/src/Functions/addHours.cpp b/src/Functions/addHours.cpp similarity index 100% rename from dbms/src/Functions/addHours.cpp rename to src/Functions/addHours.cpp diff --git a/dbms/src/Functions/addMinutes.cpp b/src/Functions/addMinutes.cpp similarity index 100% rename from dbms/src/Functions/addMinutes.cpp rename to src/Functions/addMinutes.cpp diff --git a/dbms/src/Functions/addMonths.cpp b/src/Functions/addMonths.cpp similarity index 100% rename from dbms/src/Functions/addMonths.cpp rename to src/Functions/addMonths.cpp diff --git a/dbms/src/Functions/addQuarters.cpp b/src/Functions/addQuarters.cpp similarity index 100% rename from dbms/src/Functions/addQuarters.cpp rename to src/Functions/addQuarters.cpp diff --git a/dbms/src/Functions/addSeconds.cpp b/src/Functions/addSeconds.cpp similarity index 100% rename from dbms/src/Functions/addSeconds.cpp rename to src/Functions/addSeconds.cpp diff --git a/dbms/src/Functions/addWeeks.cpp b/src/Functions/addWeeks.cpp similarity index 100% rename from dbms/src/Functions/addWeeks.cpp rename to src/Functions/addWeeks.cpp diff --git a/dbms/src/Functions/addYears.cpp b/src/Functions/addYears.cpp similarity index 100% rename from dbms/src/Functions/addYears.cpp rename to src/Functions/addYears.cpp diff --git a/dbms/src/Functions/addressToLine.cpp b/src/Functions/addressToLine.cpp similarity index 100% rename from dbms/src/Functions/addressToLine.cpp rename to src/Functions/addressToLine.cpp diff --git a/dbms/src/Functions/addressToSymbol.cpp b/src/Functions/addressToSymbol.cpp similarity index 100% rename from dbms/src/Functions/addressToSymbol.cpp rename to src/Functions/addressToSymbol.cpp diff --git a/dbms/src/Functions/appendTrailingCharIfAbsent.cpp b/src/Functions/appendTrailingCharIfAbsent.cpp similarity index 100% rename from dbms/src/Functions/appendTrailingCharIfAbsent.cpp rename to src/Functions/appendTrailingCharIfAbsent.cpp diff --git a/dbms/src/Functions/array/CMakeLists.txt b/src/Functions/array/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/array/CMakeLists.txt rename to src/Functions/array/CMakeLists.txt diff --git a/dbms/src/Functions/array/FunctionArrayMapped.h b/src/Functions/array/FunctionArrayMapped.h similarity index 100% rename from dbms/src/Functions/array/FunctionArrayMapped.h rename to src/Functions/array/FunctionArrayMapped.h diff --git a/dbms/src/Functions/array/array.cpp b/src/Functions/array/array.cpp similarity index 91% rename from dbms/src/Functions/array/array.cpp rename to src/Functions/array/array.cpp index aa4b945055a..5073dc061be 100644 --- a/dbms/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -14,14 +14,9 @@ class FunctionArray : public IFunction { public: static constexpr auto name = "array"; - static FunctionPtr create(const Context & context) - { - return std::make_shared(context); - } - - explicit FunctionArray(const Context & context_) - : context(context_) + static FunctionPtr create(const Context &) { + return std::make_shared(); } bool useDefaultImplementationForNulls() const override { return false; } @@ -67,7 +62,7 @@ public: ColumnPtr preprocessed_column = arg.column; if 
(!arg.type->equals(*elem_type)) - preprocessed_column = castColumn(arg, elem_type, context); + preprocessed_column = castColumn(arg, elem_type); preprocessed_column = preprocessed_column->convertToFullColumnIfConst(); @@ -104,9 +99,6 @@ private: } bool addField(DataTypePtr type_res, const Field & f, Array & arr) const; - -private: - const Context & context; }; diff --git a/dbms/src/Functions/array/arrayAUC.cpp b/src/Functions/array/arrayAUC.cpp similarity index 100% rename from dbms/src/Functions/array/arrayAUC.cpp rename to src/Functions/array/arrayAUC.cpp diff --git a/dbms/src/Functions/array/arrayAll.cpp b/src/Functions/array/arrayAll.cpp similarity index 100% rename from dbms/src/Functions/array/arrayAll.cpp rename to src/Functions/array/arrayAll.cpp diff --git a/dbms/src/Functions/array/arrayCompact.cpp b/src/Functions/array/arrayCompact.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCompact.cpp rename to src/Functions/array/arrayCompact.cpp diff --git a/dbms/src/Functions/array/arrayConcat.cpp b/src/Functions/array/arrayConcat.cpp similarity index 93% rename from dbms/src/Functions/array/arrayConcat.cpp rename to src/Functions/array/arrayConcat.cpp index f96584e3f54..4a404ddf324 100644 --- a/dbms/src/Functions/array/arrayConcat.cpp +++ b/src/Functions/array/arrayConcat.cpp @@ -26,8 +26,7 @@ class FunctionArrayConcat : public IFunction { public: static constexpr auto name = "arrayConcat"; - static FunctionPtr create(const Context & context) { return std::make_shared(context); } - explicit FunctionArrayConcat(const Context & context_) : context(context_) {} + static FunctionPtr create(const Context &) { return std::make_shared(); } String getName() const override { return name; } @@ -73,7 +72,7 @@ public: ColumnPtr preprocessed_column = arg.column; if (!arg.type->equals(*return_type)) - preprocessed_column = castColumn(arg, return_type, context); + preprocessed_column = castColumn(arg, return_type); preprocessed_columns[i] = std::move(preprocessed_column); } @@ -103,9 +102,6 @@ public: } bool useDefaultImplementationForConstants() const override { return true; } - -private: - const Context & context; }; diff --git a/dbms/src/Functions/array/arrayCount.cpp b/src/Functions/array/arrayCount.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCount.cpp rename to src/Functions/array/arrayCount.cpp diff --git a/dbms/src/Functions/array/arrayCumSum.cpp b/src/Functions/array/arrayCumSum.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCumSum.cpp rename to src/Functions/array/arrayCumSum.cpp diff --git a/dbms/src/Functions/array/arrayCumSumNonNegative.cpp b/src/Functions/array/arrayCumSumNonNegative.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCumSumNonNegative.cpp rename to src/Functions/array/arrayCumSumNonNegative.cpp diff --git a/dbms/src/Functions/array/arrayDifference.cpp b/src/Functions/array/arrayDifference.cpp similarity index 100% rename from dbms/src/Functions/array/arrayDifference.cpp rename to src/Functions/array/arrayDifference.cpp diff --git a/dbms/src/Functions/array/arrayDistinct.cpp b/src/Functions/array/arrayDistinct.cpp similarity index 100% rename from dbms/src/Functions/array/arrayDistinct.cpp rename to src/Functions/array/arrayDistinct.cpp diff --git a/dbms/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp similarity index 100% rename from dbms/src/Functions/array/arrayElement.cpp rename to src/Functions/array/arrayElement.cpp diff --git 
a/dbms/src/Functions/array/arrayEnumerate.cpp b/src/Functions/array/arrayEnumerate.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerate.cpp rename to src/Functions/array/arrayEnumerate.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateDense.cpp b/src/Functions/array/arrayEnumerateDense.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateDense.cpp rename to src/Functions/array/arrayEnumerateDense.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateDenseRanked.cpp b/src/Functions/array/arrayEnumerateDenseRanked.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateDenseRanked.cpp rename to src/Functions/array/arrayEnumerateDenseRanked.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateExtended.h b/src/Functions/array/arrayEnumerateExtended.h similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateExtended.h rename to src/Functions/array/arrayEnumerateExtended.h diff --git a/dbms/src/Functions/array/arrayEnumerateRanked.cpp b/src/Functions/array/arrayEnumerateRanked.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateRanked.cpp rename to src/Functions/array/arrayEnumerateRanked.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateRanked.h b/src/Functions/array/arrayEnumerateRanked.h similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateRanked.h rename to src/Functions/array/arrayEnumerateRanked.h diff --git a/dbms/src/Functions/array/arrayEnumerateUniq.cpp b/src/Functions/array/arrayEnumerateUniq.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateUniq.cpp rename to src/Functions/array/arrayEnumerateUniq.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateUniqRanked.cpp b/src/Functions/array/arrayEnumerateUniqRanked.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateUniqRanked.cpp rename to src/Functions/array/arrayEnumerateUniqRanked.cpp diff --git a/dbms/src/Functions/array/arrayExists.cpp b/src/Functions/array/arrayExists.cpp similarity index 100% rename from dbms/src/Functions/array/arrayExists.cpp rename to src/Functions/array/arrayExists.cpp diff --git a/dbms/src/Functions/array/arrayFill.cpp b/src/Functions/array/arrayFill.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFill.cpp rename to src/Functions/array/arrayFill.cpp diff --git a/dbms/src/Functions/array/arrayFilter.cpp b/src/Functions/array/arrayFilter.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFilter.cpp rename to src/Functions/array/arrayFilter.cpp diff --git a/dbms/src/Functions/array/arrayFirst.cpp b/src/Functions/array/arrayFirst.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFirst.cpp rename to src/Functions/array/arrayFirst.cpp diff --git a/dbms/src/Functions/array/arrayFirstIndex.cpp b/src/Functions/array/arrayFirstIndex.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFirstIndex.cpp rename to src/Functions/array/arrayFirstIndex.cpp diff --git a/dbms/src/Functions/array/arrayFlatten.cpp b/src/Functions/array/arrayFlatten.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFlatten.cpp rename to src/Functions/array/arrayFlatten.cpp diff --git a/dbms/src/Functions/array/arrayIndex.h b/src/Functions/array/arrayIndex.h similarity index 98% rename from dbms/src/Functions/array/arrayIndex.h rename to src/Functions/array/arrayIndex.h index 1b1ac172f24..fab1332cbda 100644 --- a/dbms/src/Functions/array/arrayIndex.h +++ 
b/src/Functions/array/arrayIndex.h @@ -19,7 +19,6 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; } @@ -225,21 +224,6 @@ public: } }; -/// Specialization that catches internal errors. -template -struct ArrayIndexNumImpl -{ - template - static void vector( - const PaddedPODArray &, const ColumnArray::Offsets &, - const ScalarOrVector &, - PaddedPODArray &, - const PaddedPODArray *, - const PaddedPODArray *) - { - throw Exception{"Logical error in implementation of a function that returns array index", ErrorCodes::LOGICAL_ERROR}; - } -}; /// Implementation for arrays of numbers when the 2nd function argument /// is a NULL value. @@ -623,8 +607,7 @@ private: || executeNumberNumber(block, arguments, result) || executeNumberNumber(block, arguments, result) || executeNumberNumber(block, arguments, result) - || executeNumberNumber(block, arguments, result) - || executeNumberNumber(block, arguments, result); + || executeNumberNumber(block, arguments, result); } template diff --git a/dbms/src/Functions/array/arrayIntersect.cpp b/src/Functions/array/arrayIntersect.cpp similarity index 98% rename from dbms/src/Functions/array/arrayIntersect.cpp rename to src/Functions/array/arrayIntersect.cpp index ffeb6e99222..8bcb3c799e7 100644 --- a/dbms/src/Functions/array/arrayIntersect.cpp +++ b/src/Functions/array/arrayIntersect.cpp @@ -88,8 +88,8 @@ private: ColumnsWithTypeAndName casted; }; - CastArgumentsResult castColumns(Block & block, const ColumnNumbers & arguments, - const DataTypePtr & return_type, const DataTypePtr & return_type_with_nulls) const; + static CastArgumentsResult castColumns(Block & block, const ColumnNumbers & arguments, + const DataTypePtr & return_type, const DataTypePtr & return_type_with_nulls); UnpackedArrays prepareArrays(const ColumnsWithTypeAndName & columns, ColumnsWithTypeAndName & initial_columns) const; template @@ -207,7 +207,7 @@ ColumnPtr FunctionArrayIntersect::castRemoveNullable(const ColumnPtr & column, c FunctionArrayIntersect::CastArgumentsResult FunctionArrayIntersect::castColumns( Block & block, const ColumnNumbers & arguments, const DataTypePtr & return_type, - const DataTypePtr & return_type_with_nulls) const + const DataTypePtr & return_type_with_nulls) { size_t num_args = arguments.size(); ColumnsWithTypeAndName initial_columns(num_args); @@ -245,7 +245,7 @@ FunctionArrayIntersect::CastArgumentsResult FunctionArrayIntersect::castColumns( { if (!arg.type->equals(*return_type)) { - column.column = castColumn(arg, return_type, context); + column.column = castColumn(arg, return_type); column.type = return_type; } } @@ -258,12 +258,12 @@ FunctionArrayIntersect::CastArgumentsResult FunctionArrayIntersect::castColumns( /// because cannot cast Nullable(T) to T. if (static_cast(*arg.type).getNestedType()->isNullable()) { - column.column = castColumn(arg, nullable_return_type, context); + column.column = castColumn(arg, nullable_return_type); column.type = nullable_return_type; } else { - column.column = castColumn(arg, return_type, context); + column.column = castColumn(arg, return_type); column.type = return_type; } } @@ -274,7 +274,7 @@ FunctionArrayIntersect::CastArgumentsResult FunctionArrayIntersect::castColumns( /// return_type_with_nulls is the most common subtype with possible nullable parts. 
if (!arg.type->equals(*return_type_with_nulls)) { - column.column = castColumn(arg, return_type_with_nulls, context); + column.column = castColumn(arg, return_type_with_nulls); column.type = return_type_with_nulls; } } diff --git a/dbms/src/Functions/array/arrayJoin.cpp b/src/Functions/array/arrayJoin.cpp similarity index 100% rename from dbms/src/Functions/array/arrayJoin.cpp rename to src/Functions/array/arrayJoin.cpp diff --git a/dbms/src/Functions/array/arrayMap.cpp b/src/Functions/array/arrayMap.cpp similarity index 100% rename from dbms/src/Functions/array/arrayMap.cpp rename to src/Functions/array/arrayMap.cpp diff --git a/dbms/src/Functions/array/arrayPop.h b/src/Functions/array/arrayPop.h similarity index 100% rename from dbms/src/Functions/array/arrayPop.h rename to src/Functions/array/arrayPop.h diff --git a/dbms/src/Functions/array/arrayPopBack.cpp b/src/Functions/array/arrayPopBack.cpp similarity index 100% rename from dbms/src/Functions/array/arrayPopBack.cpp rename to src/Functions/array/arrayPopBack.cpp diff --git a/dbms/src/Functions/array/arrayPopFront.cpp b/src/Functions/array/arrayPopFront.cpp similarity index 100% rename from dbms/src/Functions/array/arrayPopFront.cpp rename to src/Functions/array/arrayPopFront.cpp diff --git a/dbms/src/Functions/array/arrayPush.h b/src/Functions/array/arrayPush.h similarity index 93% rename from dbms/src/Functions/array/arrayPush.h rename to src/Functions/array/arrayPush.h index 1b20a9a1d74..61ed420b099 100644 --- a/dbms/src/Functions/array/arrayPush.h +++ b/src/Functions/array/arrayPush.h @@ -21,8 +21,8 @@ namespace ErrorCodes class FunctionArrayPush : public IFunction { public: - FunctionArrayPush(const Context & context_, bool push_front_, const char * name_) - : context(context_), push_front(push_front_), name(name_) {} + FunctionArrayPush(bool push_front_, const char * name_) + : push_front(push_front_), name(name_) {} String getName() const override { return name; } @@ -62,11 +62,11 @@ public: auto appended_column = block.getByPosition(arguments[1]).column; if (!block.getByPosition(arguments[0]).type->equals(*return_type)) - array_column = castColumn(block.getByPosition(arguments[0]), return_type, context); + array_column = castColumn(block.getByPosition(arguments[0]), return_type); const DataTypePtr & return_nested_type = typeid_cast(*return_type).getNestedType(); if (!block.getByPosition(arguments[1]).type->equals(*return_nested_type)) - appended_column = castColumn(block.getByPosition(arguments[1]), return_nested_type, context); + appended_column = castColumn(block.getByPosition(arguments[1]), return_nested_type); std::unique_ptr array_source; std::unique_ptr value_source; @@ -106,7 +106,6 @@ public: bool useDefaultImplementationForNulls() const override { return false; } private: - const Context & context; bool push_front; const char * name; }; diff --git a/src/Functions/array/arrayPushBack.cpp b/src/Functions/array/arrayPushBack.cpp new file mode 100644 index 00000000000..3c6d4113b88 --- /dev/null +++ b/src/Functions/array/arrayPushBack.cpp @@ -0,0 +1,21 @@ +#include "arrayPush.h" +#include + + +namespace DB +{ + +class FunctionArrayPushBack : public FunctionArrayPush +{ +public: + static constexpr auto name = "arrayPushBack"; + static FunctionPtr create(const Context &) { return std::make_shared(); } + FunctionArrayPushBack() : FunctionArrayPush(false, name) {} +}; + +void registerFunctionArrayPushBack(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/src/Functions/array/arrayPushFront.cpp 
b/src/Functions/array/arrayPushFront.cpp new file mode 100644 index 00000000000..49f47170f3c --- /dev/null +++ b/src/Functions/array/arrayPushFront.cpp @@ -0,0 +1,23 @@ +#include "arrayPush.h" +#include + + +namespace DB +{ + + +class FunctionArrayPushFront : public FunctionArrayPush +{ +public: + static constexpr auto name = "arrayPushFront"; + static FunctionPtr create(const Context &) { return std::make_shared(); } + FunctionArrayPushFront() : FunctionArrayPush(true, name) {} +}; + + +void registerFunctionArrayPushFront(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/dbms/src/Functions/array/arrayReduce.cpp b/src/Functions/array/arrayReduce.cpp similarity index 100% rename from dbms/src/Functions/array/arrayReduce.cpp rename to src/Functions/array/arrayReduce.cpp diff --git a/dbms/src/Functions/array/arrayReduceInRanges.cpp b/src/Functions/array/arrayReduceInRanges.cpp similarity index 100% rename from dbms/src/Functions/array/arrayReduceInRanges.cpp rename to src/Functions/array/arrayReduceInRanges.cpp diff --git a/dbms/src/Functions/array/arrayResize.cpp b/src/Functions/array/arrayResize.cpp similarity index 94% rename from dbms/src/Functions/array/arrayResize.cpp rename to src/Functions/array/arrayResize.cpp index 9e34e7ccd92..d43fb8da944 100644 --- a/dbms/src/Functions/array/arrayResize.cpp +++ b/src/Functions/array/arrayResize.cpp @@ -25,8 +25,7 @@ class FunctionArrayResize : public IFunction { public: static constexpr auto name = "arrayResize"; - static FunctionPtr create(const Context & context) { return std::make_shared(context); } - explicit FunctionArrayResize(const Context & context_) : context(context_) {} + static FunctionPtr create(const Context &) { return std::make_shared(); } String getName() const override { return name; } @@ -80,7 +79,7 @@ public: auto size_column = block.getByPosition(arguments[1]).column; if (!block.getByPosition(arguments[0]).type->equals(*return_type)) - array_column = castColumn(block.getByPosition(arguments[0]), return_type, context); + array_column = castColumn(block.getByPosition(arguments[0]), return_type); const DataTypePtr & return_nested_type = typeid_cast(*return_type).getNestedType(); size_t size = array_column->size(); @@ -90,7 +89,7 @@ public: { appended_column = block.getByPosition(arguments[2]).column; if (!block.getByPosition(arguments[2]).type->equals(*return_nested_type)) - appended_column = castColumn(block.getByPosition(arguments[2]), return_nested_type, context); + appended_column = castColumn(block.getByPosition(arguments[2]), return_nested_type); } else appended_column = return_nested_type->createColumnConstWithDefaultValue(size); @@ -133,9 +132,6 @@ public: bool useDefaultImplementationForConstants() const override { return true; } bool useDefaultImplementationForNulls() const override { return false; } - -private: - const Context & context; }; diff --git a/dbms/src/Functions/array/arrayReverse.cpp b/src/Functions/array/arrayReverse.cpp similarity index 100% rename from dbms/src/Functions/array/arrayReverse.cpp rename to src/Functions/array/arrayReverse.cpp diff --git a/dbms/src/Functions/array/arrayScalarProduct.h b/src/Functions/array/arrayScalarProduct.h similarity index 100% rename from dbms/src/Functions/array/arrayScalarProduct.h rename to src/Functions/array/arrayScalarProduct.h diff --git a/dbms/src/Functions/array/arraySlice.cpp b/src/Functions/array/arraySlice.cpp similarity index 100% rename from dbms/src/Functions/array/arraySlice.cpp rename to src/Functions/array/arraySlice.cpp 
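A recurring pattern in the hunks above (FunctionsComparison, arrayConcat, arrayPush, arrayResize): compute a common supertype for the arguments with getLeastSupertype, cast every argument column to it via castColumn (which, after this diff, no longer takes a Context), then run a single generic kernel on the identically typed results. Below is a minimal, self-contained C++ model of that pattern; the names Value and castToFloat64 are illustrative stand-ins, not ClickHouse's API.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <variant>
#include <vector>

/// Model of a column whose cells are either Int64 or Float64.
using Value = std::variant<int64_t, double>;

/// "Cast to the least supertype": for Int64 vs Float64 the common type is Float64.
std::vector<double> castToFloat64(const std::vector<Value> & column)
{
    std::vector<double> result;
    result.reserve(column.size());
    for (const auto & v : column)
        result.push_back(std::holds_alternative<int64_t>(v)
                             ? static_cast<double>(std::get<int64_t>(v))
                             : std::get<double>(v));
    return result;
}

int main()
{
    std::vector<Value> lhs{int64_t{1}, int64_t{2}};
    std::vector<Value> rhs{1.5, 2.0};

    /// After the casts, one kernel over identically typed columns suffices,
    /// as with executeGenericIdenticalTypes in the comparison hunk.
    auto l = castToFloat64(lhs);
    auto r = castToFloat64(rhs);
    for (std::size_t i = 0; i < l.size(); ++i)
        std::cout << (l[i] < r[i]) << '\n';  /// prints 1, then 0

    return 0;
}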
diff --git a/dbms/src/Functions/array/arraySort.cpp b/src/Functions/array/arraySort.cpp similarity index 100% rename from dbms/src/Functions/array/arraySort.cpp rename to src/Functions/array/arraySort.cpp diff --git a/dbms/src/Functions/array/arraySplit.cpp b/src/Functions/array/arraySplit.cpp similarity index 100% rename from dbms/src/Functions/array/arraySplit.cpp rename to src/Functions/array/arraySplit.cpp diff --git a/dbms/src/Functions/array/arraySum.cpp b/src/Functions/array/arraySum.cpp similarity index 100% rename from dbms/src/Functions/array/arraySum.cpp rename to src/Functions/array/arraySum.cpp diff --git a/dbms/src/Functions/array/arrayUniq.cpp b/src/Functions/array/arrayUniq.cpp similarity index 100% rename from dbms/src/Functions/array/arrayUniq.cpp rename to src/Functions/array/arrayUniq.cpp diff --git a/dbms/src/Functions/array/arrayWithConstant.cpp b/src/Functions/array/arrayWithConstant.cpp similarity index 100% rename from dbms/src/Functions/array/arrayWithConstant.cpp rename to src/Functions/array/arrayWithConstant.cpp diff --git a/dbms/src/Functions/array/arrayZip.cpp b/src/Functions/array/arrayZip.cpp similarity index 100% rename from dbms/src/Functions/array/arrayZip.cpp rename to src/Functions/array/arrayZip.cpp diff --git a/dbms/src/Functions/array/countEqual.cpp b/src/Functions/array/countEqual.cpp similarity index 100% rename from dbms/src/Functions/array/countEqual.cpp rename to src/Functions/array/countEqual.cpp diff --git a/dbms/src/Functions/array/emptyArray.cpp b/src/Functions/array/emptyArray.cpp similarity index 100% rename from dbms/src/Functions/array/emptyArray.cpp rename to src/Functions/array/emptyArray.cpp diff --git a/dbms/src/Functions/array/emptyArrayToSingle.cpp b/src/Functions/array/emptyArrayToSingle.cpp similarity index 100% rename from dbms/src/Functions/array/emptyArrayToSingle.cpp rename to src/Functions/array/emptyArrayToSingle.cpp diff --git a/dbms/src/Functions/array/has.cpp b/src/Functions/array/has.cpp similarity index 100% rename from dbms/src/Functions/array/has.cpp rename to src/Functions/array/has.cpp diff --git a/src/Functions/array/hasAll.cpp b/src/Functions/array/hasAll.cpp new file mode 100644 index 00000000000..b325a74f015 --- /dev/null +++ b/src/Functions/array/hasAll.cpp @@ -0,0 +1,21 @@ +#include "hasAllAny.h" +#include + + +namespace DB +{ + +class FunctionArrayHasAll : public FunctionArrayHasAllAny +{ +public: + static constexpr auto name = "hasAll"; + static FunctionPtr create(const Context &) { return std::make_shared(); } + FunctionArrayHasAll() : FunctionArrayHasAllAny(true, name) {} +}; + +void registerFunctionHasAll(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/dbms/src/Functions/array/hasAllAny.h b/src/Functions/array/hasAllAny.h similarity index 95% rename from dbms/src/Functions/array/hasAllAny.h rename to src/Functions/array/hasAllAny.h index fe6c026aecd..ea42e182c8c 100644 --- a/dbms/src/Functions/array/hasAllAny.h +++ b/src/Functions/array/hasAllAny.h @@ -27,8 +27,8 @@ namespace ErrorCodes class FunctionArrayHasAllAny : public IFunction { public: - FunctionArrayHasAllAny(const Context & context_, bool all_, const char * name_) - : context(context_), all(all_), name(name_) {} + FunctionArrayHasAllAny(bool all_, const char * name_) + : all(all_), name(name_) {} String getName() const override { return name; } @@ -81,7 +81,7 @@ public: /// Converts Array(Nothing) or Array(Nullable(Nothing) to common type. 
Example: hasAll([Null, 1], [Null]) -> 1 if (typeid_cast(removeNullable(nested_type).get())) - preprocessed_column = castColumn(argument, commonType(), context); + preprocessed_column = castColumn(argument, commonType()); preprocessed_columns[i] = std::move(preprocessed_column); } @@ -114,7 +114,6 @@ public: bool useDefaultImplementationForConstants() const override { return true; } private: - const Context & context; bool all; const char * name; }; diff --git a/src/Functions/array/hasAny.cpp b/src/Functions/array/hasAny.cpp new file mode 100644 index 00000000000..ace86ce10c4 --- /dev/null +++ b/src/Functions/array/hasAny.cpp @@ -0,0 +1,21 @@ +#include "hasAllAny.h" +#include + + +namespace DB +{ + +class FunctionArrayHasAny : public FunctionArrayHasAllAny +{ +public: + static constexpr auto name = "hasAny"; + static FunctionPtr create(const Context &) { return std::make_shared(); } + FunctionArrayHasAny() : FunctionArrayHasAllAny(false, name) {} +}; + +void registerFunctionHasAny(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/dbms/src/Functions/array/indexOf.cpp b/src/Functions/array/indexOf.cpp similarity index 100% rename from dbms/src/Functions/array/indexOf.cpp rename to src/Functions/array/indexOf.cpp diff --git a/dbms/src/Functions/array/length.cpp b/src/Functions/array/length.cpp similarity index 100% rename from dbms/src/Functions/array/length.cpp rename to src/Functions/array/length.cpp diff --git a/dbms/src/Functions/array/range.cpp b/src/Functions/array/range.cpp similarity index 98% rename from dbms/src/Functions/array/range.cpp rename to src/Functions/array/range.cpp index 283eb760fcf..3e74226e0d4 100644 --- a/dbms/src/Functions/array/range.cpp +++ b/src/Functions/array/range.cpp @@ -27,11 +27,9 @@ class FunctionRange : public IFunction public: static constexpr auto name = "range"; static constexpr size_t max_elements = 100'000'000; - static FunctionPtr create(const Context & context_) { return std::make_shared(context_); } - explicit FunctionRange(const Context & context_) : context(context_) {} + static FunctionPtr create(const Context &) { return std::make_shared(); } private: - const Context & context; String getName() const override { return name; } size_t getNumberOfArguments() const override { return 0; } @@ -347,9 +345,9 @@ private: for (size_t i = 0; i < arguments.size(); ++i) { if (i == 1) - columns_holder[i] = castColumn(block.getByPosition(arguments[i]), return_type, context)->convertToFullColumnIfConst(); + columns_holder[i] = castColumn(block.getByPosition(arguments[i]), return_type)->convertToFullColumnIfConst(); else - columns_holder[i] = castColumn(block.getByPosition(arguments[i]), return_type, context); + columns_holder[i] = castColumn(block.getByPosition(arguments[i]), return_type); columns[i] = columns_holder[i].get(); } diff --git a/dbms/src/Functions/array/registerFunctionsArray.cpp b/src/Functions/array/registerFunctionsArray.cpp similarity index 100% rename from dbms/src/Functions/array/registerFunctionsArray.cpp rename to src/Functions/array/registerFunctionsArray.cpp diff --git a/dbms/src/Functions/asin.cpp b/src/Functions/asin.cpp similarity index 100% rename from dbms/src/Functions/asin.cpp rename to src/Functions/asin.cpp diff --git a/dbms/src/Functions/assumeNotNull.cpp b/src/Functions/assumeNotNull.cpp similarity index 100% rename from dbms/src/Functions/assumeNotNull.cpp rename to src/Functions/assumeNotNull.cpp diff --git a/dbms/src/Functions/atan.cpp b/src/Functions/atan.cpp similarity index 100% rename from 
dbms/src/Functions/atan.cpp rename to src/Functions/atan.cpp diff --git a/dbms/src/Functions/bar.cpp b/src/Functions/bar.cpp similarity index 100% rename from dbms/src/Functions/bar.cpp rename to src/Functions/bar.cpp diff --git a/dbms/src/Functions/base64Decode.cpp b/src/Functions/base64Decode.cpp similarity index 100% rename from dbms/src/Functions/base64Decode.cpp rename to src/Functions/base64Decode.cpp diff --git a/dbms/src/Functions/base64Encode.cpp b/src/Functions/base64Encode.cpp similarity index 100% rename from dbms/src/Functions/base64Encode.cpp rename to src/Functions/base64Encode.cpp diff --git a/dbms/src/Functions/bitAnd.cpp b/src/Functions/bitAnd.cpp similarity index 100% rename from dbms/src/Functions/bitAnd.cpp rename to src/Functions/bitAnd.cpp diff --git a/dbms/src/Functions/bitBoolMaskAnd.cpp b/src/Functions/bitBoolMaskAnd.cpp similarity index 96% rename from dbms/src/Functions/bitBoolMaskAnd.cpp rename to src/Functions/bitBoolMaskAnd.cpp index c37a1ebc1d7..2c55e39506c 100644 --- a/dbms/src/Functions/bitBoolMaskAnd.cpp +++ b/src/Functions/bitBoolMaskAnd.cpp @@ -10,7 +10,7 @@ namespace DB extern const int BAD_CAST; } - /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h). + /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h). /// This function provides "AND" operation for BoolMasks. /// Returns: "can be true" = A."can be true" AND B."can be true" /// "can be false" = A."can be false" OR B."can be false" diff --git a/dbms/src/Functions/bitBoolMaskOr.cpp b/src/Functions/bitBoolMaskOr.cpp similarity index 96% rename from dbms/src/Functions/bitBoolMaskOr.cpp rename to src/Functions/bitBoolMaskOr.cpp index ec3d4e266f1..0b439165fca 100644 --- a/dbms/src/Functions/bitBoolMaskOr.cpp +++ b/src/Functions/bitBoolMaskOr.cpp @@ -10,7 +10,7 @@ namespace DB extern const int BAD_CAST; } - /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h). + /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h). /// This function provides "OR" operation for BoolMasks. 
/// Returns: "can be true" = A."can be true" OR B."can be true"
///          "can be false" = A."can be false" AND B."can be false"
diff --git a/dbms/src/Functions/bitCount.cpp b/src/Functions/bitCount.cpp
similarity index 100%
rename from dbms/src/Functions/bitCount.cpp
rename to src/Functions/bitCount.cpp
diff --git a/dbms/src/Functions/bitNot.cpp b/src/Functions/bitNot.cpp
similarity index 100%
rename from dbms/src/Functions/bitNot.cpp
rename to src/Functions/bitNot.cpp
diff --git a/dbms/src/Functions/bitOr.cpp b/src/Functions/bitOr.cpp
similarity index 100%
rename from dbms/src/Functions/bitOr.cpp
rename to src/Functions/bitOr.cpp
diff --git a/dbms/src/Functions/bitRotateLeft.cpp b/src/Functions/bitRotateLeft.cpp
similarity index 100%
rename from dbms/src/Functions/bitRotateLeft.cpp
rename to src/Functions/bitRotateLeft.cpp
diff --git a/dbms/src/Functions/bitRotateRight.cpp b/src/Functions/bitRotateRight.cpp
similarity index 100%
rename from dbms/src/Functions/bitRotateRight.cpp
rename to src/Functions/bitRotateRight.cpp
diff --git a/dbms/src/Functions/bitShiftLeft.cpp b/src/Functions/bitShiftLeft.cpp
similarity index 100%
rename from dbms/src/Functions/bitShiftLeft.cpp
rename to src/Functions/bitShiftLeft.cpp
diff --git a/dbms/src/Functions/bitShiftRight.cpp b/src/Functions/bitShiftRight.cpp
similarity index 100%
rename from dbms/src/Functions/bitShiftRight.cpp
rename to src/Functions/bitShiftRight.cpp
diff --git a/dbms/src/Functions/bitSwapLastTwo.cpp b/src/Functions/bitSwapLastTwo.cpp
similarity index 97%
rename from dbms/src/Functions/bitSwapLastTwo.cpp
rename to src/Functions/bitSwapLastTwo.cpp
index 11b52eca66f..d6fa9a39ec3 100644
--- a/dbms/src/Functions/bitSwapLastTwo.cpp
+++ b/src/Functions/bitSwapLastTwo.cpp
@@ -10,7 +10,7 @@ namespace DB
    extern const int BAD_CAST;
}
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h).
    /// This function provides "NOT" operation for BoolMasks by swapping last two bits ("can be true" <-> "can be false").
    template <typename A>
    struct BitSwapLastTwoImpl
diff --git a/dbms/src/Functions/bitTest.cpp b/src/Functions/bitTest.cpp
similarity index 100%
rename from dbms/src/Functions/bitTest.cpp
rename to src/Functions/bitTest.cpp
diff --git a/dbms/src/Functions/bitTestAll.cpp b/src/Functions/bitTestAll.cpp
similarity index 100%
rename from dbms/src/Functions/bitTestAll.cpp
rename to src/Functions/bitTestAll.cpp
diff --git a/dbms/src/Functions/bitTestAny.cpp b/src/Functions/bitTestAny.cpp
similarity index 100%
rename from dbms/src/Functions/bitTestAny.cpp
rename to src/Functions/bitTestAny.cpp
diff --git a/dbms/src/Functions/bitWrapperFunc.cpp b/src/Functions/bitWrapperFunc.cpp
similarity index 96%
rename from dbms/src/Functions/bitWrapperFunc.cpp
rename to src/Functions/bitWrapperFunc.cpp
index 2de8c0feb99..9f7276fbf98 100644
--- a/dbms/src/Functions/bitWrapperFunc.cpp
+++ b/src/Functions/bitWrapperFunc.cpp
@@ -9,7 +9,7 @@ namespace DB
    extern const int BAD_CAST;
}
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h).
    /// This function wraps bool atomic functions
    /// and transforms their boolean return value to the BoolMask ("can be false" and "can be true" bits).
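    /// A minimal sketch of the BoolMask combination rules these hunks describe,
    /// assuming the bit layout from the comments (bit 0 = "can be true",
    /// bit 1 = "can be false"); the helper name is illustrative only:
    ///     UInt8 boolMaskAnd(UInt8 a, UInt8 b)
    ///     {
    ///         return ((a & b) & 0b01)   /// "can be true"  = A."can be true" AND B."can be true"
    ///              | ((a | b) & 0b10);  /// "can be false" = A."can be false" OR B."can be false"
    ///     }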
    template <typename A>
diff --git a/dbms/src/Functions/bitXor.cpp b/src/Functions/bitXor.cpp
similarity index 100%
rename from dbms/src/Functions/bitXor.cpp
rename to src/Functions/bitXor.cpp
diff --git a/dbms/src/Functions/blockNumber.cpp b/src/Functions/blockNumber.cpp
similarity index 100%
rename from dbms/src/Functions/blockNumber.cpp
rename to src/Functions/blockNumber.cpp
diff --git a/dbms/src/Functions/blockSerializedSize.cpp b/src/Functions/blockSerializedSize.cpp
similarity index 100%
rename from dbms/src/Functions/blockSerializedSize.cpp
rename to src/Functions/blockSerializedSize.cpp
diff --git a/dbms/src/Functions/blockSize.cpp b/src/Functions/blockSize.cpp
similarity index 100%
rename from dbms/src/Functions/blockSize.cpp
rename to src/Functions/blockSize.cpp
diff --git a/dbms/src/Functions/caseWithExpression.cpp b/src/Functions/caseWithExpression.cpp
similarity index 100%
rename from dbms/src/Functions/caseWithExpression.cpp
rename to src/Functions/caseWithExpression.cpp
diff --git a/dbms/src/Functions/castTypeToEither.h b/src/Functions/castTypeToEither.h
similarity index 100%
rename from dbms/src/Functions/castTypeToEither.h
rename to src/Functions/castTypeToEither.h
diff --git a/dbms/src/Functions/cbrt.cpp b/src/Functions/cbrt.cpp
similarity index 100%
rename from dbms/src/Functions/cbrt.cpp
rename to src/Functions/cbrt.cpp
diff --git a/dbms/src/Functions/coalesce.cpp b/src/Functions/coalesce.cpp
similarity index 100%
rename from dbms/src/Functions/coalesce.cpp
rename to src/Functions/coalesce.cpp
diff --git a/dbms/src/Functions/concat.cpp b/src/Functions/concat.cpp
similarity index 100%
rename from dbms/src/Functions/concat.cpp
rename to src/Functions/concat.cpp
diff --git a/dbms/src/Functions/config_functions.h.in b/src/Functions/config_functions.h.in
similarity index 100%
rename from dbms/src/Functions/config_functions.h.in
rename to src/Functions/config_functions.h.in
diff --git a/dbms/src/Functions/convertCharset.cpp b/src/Functions/convertCharset.cpp
similarity index 100%
rename from dbms/src/Functions/convertCharset.cpp
rename to src/Functions/convertCharset.cpp
diff --git a/dbms/src/Functions/cos.cpp b/src/Functions/cos.cpp
similarity index 100%
rename from dbms/src/Functions/cos.cpp
rename to src/Functions/cos.cpp
diff --git a/dbms/src/Functions/currentDatabase.cpp b/src/Functions/currentDatabase.cpp
similarity index 100%
rename from dbms/src/Functions/currentDatabase.cpp
rename to src/Functions/currentDatabase.cpp
diff --git a/dbms/src/Functions/currentQuota.cpp b/src/Functions/currentQuota.cpp
similarity index 100%
rename from dbms/src/Functions/currentQuota.cpp
rename to src/Functions/currentQuota.cpp
diff --git a/dbms/src/Functions/currentRowPolicies.cpp b/src/Functions/currentRowPolicies.cpp
similarity index 100%
rename from dbms/src/Functions/currentRowPolicies.cpp
rename to src/Functions/currentRowPolicies.cpp
diff --git a/dbms/src/Functions/currentUser.cpp b/src/Functions/currentUser.cpp
similarity index 100%
rename from dbms/src/Functions/currentUser.cpp
rename to src/Functions/currentUser.cpp
diff --git a/dbms/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp
similarity index 100%
rename from dbms/src/Functions/dateDiff.cpp
rename to src/Functions/dateDiff.cpp
diff --git a/dbms/src/Functions/defaultValueOfArgumentType.cpp b/src/Functions/defaultValueOfArgumentType.cpp
similarity index 100%
rename from dbms/src/Functions/defaultValueOfArgumentType.cpp
rename to src/Functions/defaultValueOfArgumentType.cpp
diff --git a/dbms/src/Functions/demange.cpp
b/src/Functions/demange.cpp similarity index 100% rename from dbms/src/Functions/demange.cpp rename to src/Functions/demange.cpp diff --git a/dbms/src/Functions/divide.cpp b/src/Functions/divide.cpp similarity index 100% rename from dbms/src/Functions/divide.cpp rename to src/Functions/divide.cpp diff --git a/dbms/src/Functions/dumpColumnStructure.cpp b/src/Functions/dumpColumnStructure.cpp similarity index 100% rename from dbms/src/Functions/dumpColumnStructure.cpp rename to src/Functions/dumpColumnStructure.cpp diff --git a/dbms/src/Functions/e.cpp b/src/Functions/e.cpp similarity index 100% rename from dbms/src/Functions/e.cpp rename to src/Functions/e.cpp diff --git a/dbms/src/Functions/empty.cpp b/src/Functions/empty.cpp similarity index 100% rename from dbms/src/Functions/empty.cpp rename to src/Functions/empty.cpp diff --git a/dbms/src/Functions/endsWith.cpp b/src/Functions/endsWith.cpp similarity index 100% rename from dbms/src/Functions/endsWith.cpp rename to src/Functions/endsWith.cpp diff --git a/dbms/src/Functions/equals.cpp b/src/Functions/equals.cpp similarity index 100% rename from dbms/src/Functions/equals.cpp rename to src/Functions/equals.cpp diff --git a/dbms/src/Functions/erf.cpp b/src/Functions/erf.cpp similarity index 100% rename from dbms/src/Functions/erf.cpp rename to src/Functions/erf.cpp diff --git a/dbms/src/Functions/erfc.cpp b/src/Functions/erfc.cpp similarity index 100% rename from dbms/src/Functions/erfc.cpp rename to src/Functions/erfc.cpp diff --git a/dbms/src/Functions/evalMLMethod.cpp b/src/Functions/evalMLMethod.cpp similarity index 100% rename from dbms/src/Functions/evalMLMethod.cpp rename to src/Functions/evalMLMethod.cpp diff --git a/dbms/src/Functions/exp.cpp b/src/Functions/exp.cpp similarity index 100% rename from dbms/src/Functions/exp.cpp rename to src/Functions/exp.cpp diff --git a/dbms/src/Functions/exp10.cpp b/src/Functions/exp10.cpp similarity index 100% rename from dbms/src/Functions/exp10.cpp rename to src/Functions/exp10.cpp diff --git a/dbms/src/Functions/exp2.cpp b/src/Functions/exp2.cpp similarity index 100% rename from dbms/src/Functions/exp2.cpp rename to src/Functions/exp2.cpp diff --git a/dbms/src/Functions/extractTimeZoneFromFunctionArguments.cpp b/src/Functions/extractTimeZoneFromFunctionArguments.cpp similarity index 100% rename from dbms/src/Functions/extractTimeZoneFromFunctionArguments.cpp rename to src/Functions/extractTimeZoneFromFunctionArguments.cpp diff --git a/dbms/src/Functions/extractTimeZoneFromFunctionArguments.h b/src/Functions/extractTimeZoneFromFunctionArguments.h similarity index 100% rename from dbms/src/Functions/extractTimeZoneFromFunctionArguments.h rename to src/Functions/extractTimeZoneFromFunctionArguments.h diff --git a/dbms/src/Functions/filesystem.cpp b/src/Functions/filesystem.cpp similarity index 100% rename from dbms/src/Functions/filesystem.cpp rename to src/Functions/filesystem.cpp diff --git a/dbms/src/Functions/finalizeAggregation.cpp b/src/Functions/finalizeAggregation.cpp similarity index 100% rename from dbms/src/Functions/finalizeAggregation.cpp rename to src/Functions/finalizeAggregation.cpp diff --git a/dbms/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp similarity index 100% rename from dbms/src/Functions/formatDateTime.cpp rename to src/Functions/formatDateTime.cpp diff --git a/dbms/src/Functions/formatString.cpp b/src/Functions/formatString.cpp similarity index 100% rename from dbms/src/Functions/formatString.cpp rename to src/Functions/formatString.cpp diff --git 
a/dbms/src/Functions/formatString.h b/src/Functions/formatString.h
similarity index 100%
rename from dbms/src/Functions/formatString.h
rename to src/Functions/formatString.h
diff --git a/dbms/src/Functions/gcd.cpp b/src/Functions/gcd.cpp
similarity index 100%
rename from dbms/src/Functions/gcd.cpp
rename to src/Functions/gcd.cpp
diff --git a/dbms/src/Functions/generateUUIDv4.cpp b/src/Functions/generateUUIDv4.cpp
similarity index 100%
rename from dbms/src/Functions/generateUUIDv4.cpp
rename to src/Functions/generateUUIDv4.cpp
diff --git a/dbms/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp
similarity index 100%
rename from dbms/src/Functions/geoToH3.cpp
rename to src/Functions/geoToH3.cpp
diff --git a/dbms/src/Functions/geohashDecode.cpp b/src/Functions/geohashDecode.cpp
similarity index 100%
rename from dbms/src/Functions/geohashDecode.cpp
rename to src/Functions/geohashDecode.cpp
diff --git a/dbms/src/Functions/geohashEncode.cpp b/src/Functions/geohashEncode.cpp
similarity index 100%
rename from dbms/src/Functions/geohashEncode.cpp
rename to src/Functions/geohashEncode.cpp
diff --git a/dbms/src/Functions/geohashesInBox.cpp b/src/Functions/geohashesInBox.cpp
similarity index 100%
rename from dbms/src/Functions/geohashesInBox.cpp
rename to src/Functions/geohashesInBox.cpp
diff --git a/dbms/src/Functions/getMacro.cpp b/src/Functions/getMacro.cpp
similarity index 100%
rename from dbms/src/Functions/getMacro.cpp
rename to src/Functions/getMacro.cpp
diff --git a/dbms/src/Functions/getScalar.cpp b/src/Functions/getScalar.cpp
similarity index 100%
rename from dbms/src/Functions/getScalar.cpp
rename to src/Functions/getScalar.cpp
diff --git a/dbms/src/Functions/getSizeOfEnumType.cpp b/src/Functions/getSizeOfEnumType.cpp
similarity index 100%
rename from dbms/src/Functions/getSizeOfEnumType.cpp
rename to src/Functions/getSizeOfEnumType.cpp
diff --git a/dbms/src/Functions/greatCircleDistance.cpp b/src/Functions/greatCircleDistance.cpp
similarity index 100%
rename from dbms/src/Functions/greatCircleDistance.cpp
rename to src/Functions/greatCircleDistance.cpp
diff --git a/dbms/src/Functions/greater.cpp b/src/Functions/greater.cpp
similarity index 100%
rename from dbms/src/Functions/greater.cpp
rename to src/Functions/greater.cpp
diff --git a/dbms/src/Functions/greaterOrEquals.cpp b/src/Functions/greaterOrEquals.cpp
similarity index 100%
rename from dbms/src/Functions/greaterOrEquals.cpp
rename to src/Functions/greaterOrEquals.cpp
diff --git a/dbms/src/Functions/greatest.cpp b/src/Functions/greatest.cpp
similarity index 96%
rename from dbms/src/Functions/greatest.cpp
rename to src/Functions/greatest.cpp
index 6eb123708a4..9abf85e751b 100644
--- a/dbms/src/Functions/greatest.cpp
+++ b/src/Functions/greatest.cpp
@@ -57,7 +57,7 @@ using FunctionGreatest = FunctionBinaryArithmetic;
void registerFunctionGreatest(FunctionFactory & factory)
{
-    factory.registerFunction<FunctionGreatest>();
+    factory.registerFunction<FunctionGreatest>(FunctionFactory::CaseInsensitive);
}
}
diff --git a/dbms/src/Functions/h3EdgeAngle.cpp b/src/Functions/h3EdgeAngle.cpp
similarity index 100%
rename from dbms/src/Functions/h3EdgeAngle.cpp
rename to src/Functions/h3EdgeAngle.cpp
diff --git a/dbms/src/Functions/h3EdgeLengthM.cpp b/src/Functions/h3EdgeLengthM.cpp
similarity index 100%
rename from dbms/src/Functions/h3EdgeLengthM.cpp
rename to src/Functions/h3EdgeLengthM.cpp
diff --git a/dbms/src/Functions/h3GetBaseCell.cpp b/src/Functions/h3GetBaseCell.cpp
similarity index 100%
rename from dbms/src/Functions/h3GetBaseCell.cpp
rename to
src/Functions/h3GetBaseCell.cpp
diff --git a/dbms/src/Functions/h3GetResolution.cpp b/src/Functions/h3GetResolution.cpp
similarity index 100%
rename from dbms/src/Functions/h3GetResolution.cpp
rename to src/Functions/h3GetResolution.cpp
diff --git a/dbms/src/Functions/h3HexAreaM2.cpp b/src/Functions/h3HexAreaM2.cpp
similarity index 100%
rename from dbms/src/Functions/h3HexAreaM2.cpp
rename to src/Functions/h3HexAreaM2.cpp
diff --git a/dbms/src/Functions/h3IndexesAreNeighbors.cpp b/src/Functions/h3IndexesAreNeighbors.cpp
similarity index 100%
rename from dbms/src/Functions/h3IndexesAreNeighbors.cpp
rename to src/Functions/h3IndexesAreNeighbors.cpp
diff --git a/dbms/src/Functions/h3IsValid.cpp b/src/Functions/h3IsValid.cpp
similarity index 100%
rename from dbms/src/Functions/h3IsValid.cpp
rename to src/Functions/h3IsValid.cpp
diff --git a/dbms/src/Functions/h3ToChildren.cpp b/src/Functions/h3ToChildren.cpp
similarity index 100%
rename from dbms/src/Functions/h3ToChildren.cpp
rename to src/Functions/h3ToChildren.cpp
diff --git a/dbms/src/Functions/h3ToParent.cpp b/src/Functions/h3ToParent.cpp
similarity index 100%
rename from dbms/src/Functions/h3ToParent.cpp
rename to src/Functions/h3ToParent.cpp
diff --git a/dbms/src/Functions/h3ToString.cpp b/src/Functions/h3ToString.cpp
similarity index 100%
rename from dbms/src/Functions/h3ToString.cpp
rename to src/Functions/h3ToString.cpp
diff --git a/dbms/src/Functions/h3kRing.cpp b/src/Functions/h3kRing.cpp
similarity index 100%
rename from dbms/src/Functions/h3kRing.cpp
rename to src/Functions/h3kRing.cpp
diff --git a/dbms/src/Functions/hasColumnInTable.cpp b/src/Functions/hasColumnInTable.cpp
similarity index 100%
rename from dbms/src/Functions/hasColumnInTable.cpp
rename to src/Functions/hasColumnInTable.cpp
diff --git a/dbms/src/Functions/hasToken.cpp b/src/Functions/hasToken.cpp
similarity index 100%
rename from dbms/src/Functions/hasToken.cpp
rename to src/Functions/hasToken.cpp
diff --git a/dbms/src/Functions/hasTokenCaseInsensitive.cpp b/src/Functions/hasTokenCaseInsensitive.cpp
similarity index 100%
rename from dbms/src/Functions/hasTokenCaseInsensitive.cpp
rename to src/Functions/hasTokenCaseInsensitive.cpp
diff --git a/dbms/src/Functions/hostName.cpp b/src/Functions/hostName.cpp
similarity index 100%
rename from dbms/src/Functions/hostName.cpp
rename to src/Functions/hostName.cpp
diff --git a/dbms/src/Functions/identity.cpp b/src/Functions/identity.cpp
similarity index 100%
rename from dbms/src/Functions/identity.cpp
rename to src/Functions/identity.cpp
diff --git a/dbms/src/Functions/if.cpp b/src/Functions/if.cpp
similarity index 98%
rename from dbms/src/Functions/if.cpp
rename to src/Functions/if.cpp
index f16033b1f52..8974e0f3886 100644
--- a/dbms/src/Functions/if.cpp
+++ b/src/Functions/if.cpp
@@ -172,8 +172,7 @@ class FunctionIf : public FunctionIfBase
{
public:
    static constexpr auto name = "if";
-    static FunctionPtr create(const Context & context) { return std::make_shared<FunctionIf>(context); }
-    explicit FunctionIf(const Context & context_) : context(context_) {}
+    static FunctionPtr create(const Context &) { return std::make_shared<FunctionIf>(); }
private:
    template
@@ -591,7 +590,7 @@ private:
        return true;
    }
-    void executeGeneric(const ColumnUInt8 * cond_col, Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count)
+    static void executeGeneric(const ColumnUInt8 * cond_col, Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count)
    {
        /// Convert both columns to the common type (if needed).
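The recurring change in these hunks is that castColumn() lost its trailing Context argument, which is what allows the affected functions to drop their stored `const Context & context` members and switch to Context-free create() factories. A minimal sketch of the new call shape, with an illustrative helper name (castBothToCommonType is not from the diff):

    #include <Core/ColumnWithTypeAndName.h>
    #include <DataTypes/getLeastSupertype.h>
    #include <Interpreters/castColumn.h>

    namespace DB
    {

    /// Illustrative helper: cast two columns to their least common supertype.
    /// Note the two-argument castColumn() — the Context parameter is gone.
    std::pair<ColumnPtr, ColumnPtr> castBothToCommonType(
        const ColumnWithTypeAndName & lhs, const ColumnWithTypeAndName & rhs)
    {
        DataTypePtr common_type = getLeastSupertype({lhs.type, rhs.type});
        return {castColumn(lhs, common_type),   /// was: castColumn(lhs, common_type, context)
                castColumn(rhs, common_type)};  /// was: castColumn(rhs, common_type, context)
    }

    }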
@@ -600,8 +599,8 @@ private:
        DataTypePtr common_type = getLeastSupertype({arg1.type, arg2.type});
-        ColumnPtr col_then = castColumn(arg1, common_type, context);
-        ColumnPtr col_else = castColumn(arg2, common_type, context);
+        ColumnPtr col_then = castColumn(arg1, common_type);
+        ColumnPtr col_else = castColumn(arg2, common_type);
        MutableColumnPtr result_column = common_type->createColumn();
        result_column->reserve(input_rows_count);
@@ -988,8 +987,6 @@ public:
            executeGeneric(cond_col, block, arguments, result, input_rows_count);
        }
    }
-
-    const Context & context;
};
void registerFunctionIf(FunctionFactory & factory)
diff --git a/dbms/src/Functions/ifNotFinite.cpp b/src/Functions/ifNotFinite.cpp
similarity index 100%
rename from dbms/src/Functions/ifNotFinite.cpp
rename to src/Functions/ifNotFinite.cpp
diff --git a/dbms/src/Functions/ifNull.cpp b/src/Functions/ifNull.cpp
similarity index 100%
rename from dbms/src/Functions/ifNull.cpp
rename to src/Functions/ifNull.cpp
diff --git a/dbms/src/Functions/ignore.cpp b/src/Functions/ignore.cpp
similarity index 100%
rename from dbms/src/Functions/ignore.cpp
rename to src/Functions/ignore.cpp
diff --git a/dbms/src/Functions/ignoreExceptNull.cpp b/src/Functions/ignoreExceptNull.cpp
similarity index 100%
rename from dbms/src/Functions/ignoreExceptNull.cpp
rename to src/Functions/ignoreExceptNull.cpp
diff --git a/src/Functions/in.cpp b/src/Functions/in.cpp
new file mode 100644
index 00000000000..a89535c675a
--- /dev/null
+++ b/src/Functions/in.cpp
@@ -0,0 +1,162 @@
+#include <Functions/IFunction.h>
+#include <Functions/FunctionFactory.h>
+#include <Functions/FunctionHelpers.h>
+#include <DataTypes/DataTypesNumber.h>
+#include <DataTypes/DataTypeTuple.h>
+#include <Columns/ColumnConst.h>
+#include <Columns/ColumnTuple.h>
+#include <Columns/ColumnSet.h>
+#include <Interpreters/Set.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+}
+
+/** in(x, set) - function for evaluating the IN
+  * notIn(x, set) - and NOT IN.
+  */
+
+template <bool negative, bool global, bool null_is_skipped>
+struct FunctionInName;
+
+template <>
+struct FunctionInName<false, false, true>
+{
+    static constexpr auto name = "in";
+};
+
+template <>
+struct FunctionInName<false, true, true>
+{
+    static constexpr auto name = "globalIn";
+};
+
+template <>
+struct FunctionInName<true, false, true>
+{
+    static constexpr auto name = "notIn";
+};
+
+template <>
+struct FunctionInName<true, true, true>
+{
+    static constexpr auto name = "globalNotIn";
+};
+
+template <>
+struct FunctionInName<false, false, false>
+{
+    static constexpr auto name = "nullIn";
+};
+
+template <>
+struct FunctionInName<false, true, false>
+{
+    static constexpr auto name = "globalNullIn";
+};
+
+template <>
+struct FunctionInName<true, false, false>
+{
+    static constexpr auto name = "notNullIn";
+};
+
+template <>
+struct FunctionInName<true, true, false>
+{
+    static constexpr auto name = "globalNotNullIn";
+};
+
+template <bool negative, bool global, bool null_is_skipped>
+class FunctionIn : public IFunction
+{
+public:
+    static constexpr auto name = FunctionInName<negative, global, null_is_skipped>::name;
+    static FunctionPtr create(const Context &)
+    {
+        return std::make_shared<FunctionIn>();
+    }
+
+    String getName() const override
+    {
+        return name;
+    }
+
+    size_t getNumberOfArguments() const override
+    {
+        return 2;
+    }
+
+    DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
+    {
+        return std::make_shared<DataTypeUInt8>();
+    }
+
+    bool useDefaultImplementationForConstants() const override { return true; }
+
+    bool useDefaultImplementationForNulls() const override { return null_is_skipped; }
+
+    void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t /*input_rows_count*/) override
+    {
+        /// NOTE: after updating this code, check that FunctionIgnoreExceptNull returns the same type of column.
+
+        /// Second argument must be ColumnSet.
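        /// (Aside: the lines below first unwrap the set argument — the analyzer
        /// supplies the prepared Set as a ColumnSet, usually wrapped in a constant,
        /// so the constant form is tried before the plain form — and then delegate
        /// the membership test itself to Set::execute(block_of_key_columns, negative)
        /// at the end of this function.)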
+        ColumnPtr column_set_ptr = block.getByPosition(arguments[1]).column;
+        const ColumnSet * column_set = checkAndGetColumnConstData<ColumnSet>(column_set_ptr.get());
+        if (!column_set)
+            column_set = checkAndGetColumn<ColumnSet>(column_set_ptr.get());
+        if (!column_set)
+            throw Exception("Second argument for function '" + getName() + "' must be Set; found " + column_set_ptr->getName(),
+                ErrorCodes::ILLEGAL_COLUMN);
+
+        Block block_of_key_columns;
+
+        /// First argument may be a tuple or a single column.
+        const ColumnWithTypeAndName & left_arg = block.getByPosition(arguments[0]);
+        const ColumnTuple * tuple = typeid_cast<const ColumnTuple *>(left_arg.column.get());
+        const ColumnConst * const_tuple = checkAndGetColumnConst<ColumnTuple>(left_arg.column.get());
+        const DataTypeTuple * type_tuple = typeid_cast<const DataTypeTuple *>(left_arg.type.get());
+
+        ColumnPtr materialized_tuple;
+        if (const_tuple)
+        {
+            materialized_tuple = const_tuple->convertToFullColumn();
+            tuple = typeid_cast<const ColumnTuple *>(materialized_tuple.get());
+        }
+
+        auto set = column_set->getData();
+        auto set_types = set->getDataTypes();
+        if (tuple && (set_types.size() != 1 || !set_types[0]->equals(*type_tuple)))
+        {
+            const auto & tuple_columns = tuple->getColumns();
+            const DataTypes & tuple_types = type_tuple->getElements();
+            size_t tuple_size = tuple_columns.size();
+            for (size_t i = 0; i < tuple_size; ++i)
+                block_of_key_columns.insert({ tuple_columns[i], tuple_types[i], "" });
+        }
+        else
+            block_of_key_columns.insert(left_arg);
+
+        block.getByPosition(result).column = set->execute(block_of_key_columns, negative);
+    }
+};
+
+
+void registerFunctionsIn(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionIn<false, false, true>>();
+    factory.registerFunction<FunctionIn<false, true, true>>();
+    factory.registerFunction<FunctionIn<true, false, true>>();
+    factory.registerFunction<FunctionIn<true, true, true>>();
+    factory.registerFunction<FunctionIn<false, false, false>>();
+    factory.registerFunction<FunctionIn<false, true, false>>();
+    factory.registerFunction<FunctionIn<true, false, false>>();
+    factory.registerFunction<FunctionIn<true, true, false>>();
+}
+
+}
diff --git a/dbms/src/Functions/intDiv.cpp b/src/Functions/intDiv.cpp
similarity index 97%
rename from dbms/src/Functions/intDiv.cpp
rename to src/Functions/intDiv.cpp
index 0b6734c0136..062a374c00f 100644
--- a/dbms/src/Functions/intDiv.cpp
+++ b/src/Functions/intDiv.cpp
@@ -1,8 +1,9 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionBinaryArithmetic.h>
-#ifdef __SSE2__
-    #define LIBDIVIDE_USE_SSE2 1
+#if defined(__SSE2__)
+# define LIBDIVIDE_SSE2 1
+# define LIBDIVIDE_VECTOR_TYPE
#endif
#include <libdivide.h>
@@ -45,7 +46,7 @@ struct DivideIntegralByConstantImpl
        const A * a_end = a_pos + size;
-#ifdef __SSE2__
+#if defined(__SSE2__)
        static constexpr size_t values_per_sse_register = 16 / sizeof(A);
        const A * a_end_sse = a_pos + size / values_per_sse_register * values_per_sse_register;
diff --git a/dbms/src/Functions/intDivOrZero.cpp b/src/Functions/intDivOrZero.cpp
similarity index 100%
rename from dbms/src/Functions/intDivOrZero.cpp
rename to src/Functions/intDivOrZero.cpp
diff --git a/dbms/src/Functions/intExp10.cpp b/src/Functions/intExp10.cpp
similarity index 100%
rename from dbms/src/Functions/intExp10.cpp
rename to src/Functions/intExp10.cpp
diff --git a/dbms/src/Functions/intExp2.cpp b/src/Functions/intExp2.cpp
similarity index 100%
rename from dbms/src/Functions/intExp2.cpp
rename to src/Functions/intExp2.cpp
diff --git a/src/Functions/isConstant.cpp b/src/Functions/isConstant.cpp
new file mode 100644
index 00000000000..5416fbd2d3e
--- /dev/null
+++ b/src/Functions/isConstant.cpp
@@ -0,0 +1,52 @@
+#include <Functions/IFunction.h>
+#include <Functions/FunctionFactory.h>
+#include <Columns/ColumnsNumber.h>
+#include <DataTypes/DataTypesNumber.h>
+
+
+namespace DB
+{
+
+/// Returns 1 if and only if the argument is constant expression.
+/// This function exists for development, debugging and demonstration purposes.
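+/// An illustrative example of the behaviour documented above:
+/// SELECT isConstant(1 + 2) returns 1, because the expression folds to a
+/// constant column, while SELECT isConstant(number) FROM system.numbers
+/// LIMIT 1 returns 0.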
+class FunctionIsConstant : public IFunction
+{
+public:
+    static constexpr auto name = "isConstant";
+    static FunctionPtr create(const Context &)
+    {
+        return std::make_shared<FunctionIsConstant>();
+    }
+
+    String getName() const override
+    {
+        return name;
+    }
+
+    bool useDefaultImplementationForNulls() const override { return false; }
+
+    size_t getNumberOfArguments() const override
+    {
+        return 1;
+    }
+
+    DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
+    {
+        return std::make_shared<DataTypeUInt8>();
+    }
+
+    void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override
+    {
+        const auto & elem = block.getByPosition(arguments[0]);
+        block.getByPosition(result).column = ColumnUInt8::create(input_rows_count, isColumnConst(*elem.column));
+    }
+};
+
+
+void registerFunctionIsConstant(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionIsConstant>();
+}
+
+}
+
diff --git a/dbms/src/Functions/isFinite.cpp b/src/Functions/isFinite.cpp
similarity index 100%
rename from dbms/src/Functions/isFinite.cpp
rename to src/Functions/isFinite.cpp
diff --git a/dbms/src/Functions/isInfinite.cpp b/src/Functions/isInfinite.cpp
similarity index 100%
rename from dbms/src/Functions/isInfinite.cpp
rename to src/Functions/isInfinite.cpp
diff --git a/dbms/src/Functions/isNaN.cpp b/src/Functions/isNaN.cpp
similarity index 100%
rename from dbms/src/Functions/isNaN.cpp
rename to src/Functions/isNaN.cpp
diff --git a/dbms/src/Functions/isNotNull.cpp b/src/Functions/isNotNull.cpp
similarity index 100%
rename from dbms/src/Functions/isNotNull.cpp
rename to src/Functions/isNotNull.cpp
diff --git a/dbms/src/Functions/isNull.cpp b/src/Functions/isNull.cpp
similarity index 100%
rename from dbms/src/Functions/isNull.cpp
rename to src/Functions/isNull.cpp
diff --git a/dbms/src/Functions/isValidUTF8.cpp b/src/Functions/isValidUTF8.cpp
similarity index 100%
rename from dbms/src/Functions/isValidUTF8.cpp
rename to src/Functions/isValidUTF8.cpp
diff --git a/dbms/src/Functions/jumpConsistentHash.cpp b/src/Functions/jumpConsistentHash.cpp
similarity index 100%
rename from dbms/src/Functions/jumpConsistentHash.cpp
rename to src/Functions/jumpConsistentHash.cpp
diff --git a/dbms/src/Functions/lcm.cpp b/src/Functions/lcm.cpp
similarity index 100%
rename from dbms/src/Functions/lcm.cpp
rename to src/Functions/lcm.cpp
diff --git a/dbms/src/Functions/least.cpp b/src/Functions/least.cpp
similarity index 96%
rename from dbms/src/Functions/least.cpp
rename to src/Functions/least.cpp
index 47af759c956..f2e7c1f15d2 100644
--- a/dbms/src/Functions/least.cpp
+++ b/src/Functions/least.cpp
@@ -57,7 +57,7 @@ using FunctionLeast = FunctionBinaryArithmetic;
void registerFunctionLeast(FunctionFactory & factory)
{
-    factory.registerFunction<FunctionLeast>();
+    factory.registerFunction<FunctionLeast>(FunctionFactory::CaseInsensitive);
}
}
diff --git a/dbms/src/Functions/lengthUTF8.cpp b/src/Functions/lengthUTF8.cpp
similarity index 100%
rename from dbms/src/Functions/lengthUTF8.cpp
rename to src/Functions/lengthUTF8.cpp
diff --git a/dbms/src/Functions/less.cpp b/src/Functions/less.cpp
similarity index 100%
rename from dbms/src/Functions/less.cpp
rename to src/Functions/less.cpp
diff --git a/dbms/src/Functions/lessOrEquals.cpp b/src/Functions/lessOrEquals.cpp
similarity index 100%
rename from dbms/src/Functions/lessOrEquals.cpp
rename to src/Functions/lessOrEquals.cpp
diff --git a/dbms/src/Functions/lgamma.cpp b/src/Functions/lgamma.cpp
similarity index 100%
rename from dbms/src/Functions/lgamma.cpp
rename to src/Functions/lgamma.cpp
diff --git
a/dbms/src/Functions/likePatternToRegexp.h b/src/Functions/likePatternToRegexp.h
similarity index 100%
rename from dbms/src/Functions/likePatternToRegexp.h
rename to src/Functions/likePatternToRegexp.h
diff --git a/dbms/src/Functions/log.cpp b/src/Functions/log.cpp
similarity index 100%
rename from dbms/src/Functions/log.cpp
rename to src/Functions/log.cpp
diff --git a/dbms/src/Functions/log10.cpp b/src/Functions/log10.cpp
similarity index 100%
rename from dbms/src/Functions/log10.cpp
rename to src/Functions/log10.cpp
diff --git a/dbms/src/Functions/log2.cpp b/src/Functions/log2.cpp
similarity index 100%
rename from dbms/src/Functions/log2.cpp
rename to src/Functions/log2.cpp
diff --git a/dbms/src/Functions/lowCardinalityIndices.cpp b/src/Functions/lowCardinalityIndices.cpp
similarity index 100%
rename from dbms/src/Functions/lowCardinalityIndices.cpp
rename to src/Functions/lowCardinalityIndices.cpp
diff --git a/dbms/src/Functions/lowCardinalityKeys.cpp b/src/Functions/lowCardinalityKeys.cpp
similarity index 100%
rename from dbms/src/Functions/lowCardinalityKeys.cpp
rename to src/Functions/lowCardinalityKeys.cpp
diff --git a/dbms/src/Functions/lower.cpp b/src/Functions/lower.cpp
similarity index 100%
rename from dbms/src/Functions/lower.cpp
rename to src/Functions/lower.cpp
diff --git a/dbms/src/Functions/lowerUTF8.cpp b/src/Functions/lowerUTF8.cpp
similarity index 100%
rename from dbms/src/Functions/lowerUTF8.cpp
rename to src/Functions/lowerUTF8.cpp
diff --git a/dbms/src/Functions/materialize.cpp b/src/Functions/materialize.cpp
similarity index 100%
rename from dbms/src/Functions/materialize.cpp
rename to src/Functions/materialize.cpp
diff --git a/dbms/src/Functions/minus.cpp b/src/Functions/minus.cpp
similarity index 100%
rename from dbms/src/Functions/minus.cpp
rename to src/Functions/minus.cpp
diff --git a/dbms/src/Functions/modulo.cpp b/src/Functions/modulo.cpp
similarity index 98%
rename from dbms/src/Functions/modulo.cpp
rename to src/Functions/modulo.cpp
index 9e4409ca91b..631b7d12263 100644
--- a/dbms/src/Functions/modulo.cpp
+++ b/src/Functions/modulo.cpp
@@ -1,8 +1,8 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionBinaryArithmetic.h>
-#ifdef __SSE2__
-    #define LIBDIVIDE_USE_SSE2 1
+#if defined(__SSE2__)
+# define LIBDIVIDE_SSE2 1
#endif
#include <libdivide.h>
diff --git a/dbms/src/Functions/moduloOrZero.cpp b/src/Functions/moduloOrZero.cpp
similarity index 100%
rename from dbms/src/Functions/moduloOrZero.cpp
rename to src/Functions/moduloOrZero.cpp
diff --git a/dbms/src/Functions/multiIf.cpp b/src/Functions/multiIf.cpp
similarity index 96%
rename from dbms/src/Functions/multiIf.cpp
rename to src/Functions/multiIf.cpp
index 2340f7826c7..021f4f419cb 100644
--- a/dbms/src/Functions/multiIf.cpp
+++ b/src/Functions/multiIf.cpp
@@ -33,8 +33,7 @@ class FunctionMultiIf final : public FunctionIfBase
{
public:
    static constexpr auto name = "multiIf";
-    static FunctionPtr create(const Context & context) { return std::make_shared<FunctionMultiIf>(context); }
-    explicit FunctionMultiIf(const Context & context_) : context(context_) {}
+    static FunctionPtr create(const Context &) { return std::make_shared<FunctionMultiIf>(); }
public:
    String getName() const override { return name; }
@@ -176,7 +175,7 @@ public:
        else
        {
            /// Cast all columns to result type.
- converted_columns_holder.emplace_back(castColumn(source_col, return_type, context)); + converted_columns_holder.emplace_back(castColumn(source_col, return_type)); instruction.source = converted_columns_holder.back().get(); } @@ -225,9 +224,6 @@ public: block.getByPosition(result).column = std::move(res); } - -private: - const Context & context; }; void registerFunctionMultiIf(FunctionFactory & factory) diff --git a/dbms/src/Functions/multiSearchAllPositions.cpp b/src/Functions/multiSearchAllPositions.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAllPositions.cpp rename to src/Functions/multiSearchAllPositions.cpp diff --git a/dbms/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp b/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp rename to src/Functions/multiSearchAllPositionsCaseInsensitive.cpp diff --git a/dbms/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp rename to src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/multiSearchAllPositionsUTF8.cpp b/src/Functions/multiSearchAllPositionsUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAllPositionsUTF8.cpp rename to src/Functions/multiSearchAllPositionsUTF8.cpp diff --git a/dbms/src/Functions/multiSearchAny.cpp b/src/Functions/multiSearchAny.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAny.cpp rename to src/Functions/multiSearchAny.cpp diff --git a/dbms/src/Functions/multiSearchAnyCaseInsensitive.cpp b/src/Functions/multiSearchAnyCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAnyCaseInsensitive.cpp rename to src/Functions/multiSearchAnyCaseInsensitive.cpp diff --git a/dbms/src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp rename to src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/multiSearchAnyUTF8.cpp b/src/Functions/multiSearchAnyUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAnyUTF8.cpp rename to src/Functions/multiSearchAnyUTF8.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndex.cpp b/src/Functions/multiSearchFirstIndex.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndex.cpp rename to src/Functions/multiSearchFirstIndex.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndexCaseInsensitive.cpp b/src/Functions/multiSearchFirstIndexCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndexCaseInsensitive.cpp rename to src/Functions/multiSearchFirstIndexCaseInsensitive.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp rename to src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndexUTF8.cpp b/src/Functions/multiSearchFirstIndexUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndexUTF8.cpp rename to src/Functions/multiSearchFirstIndexUTF8.cpp diff --git 
a/dbms/src/Functions/multiSearchFirstPosition.cpp b/src/Functions/multiSearchFirstPosition.cpp
similarity index 100%
rename from dbms/src/Functions/multiSearchFirstPosition.cpp
rename to src/Functions/multiSearchFirstPosition.cpp
diff --git a/dbms/src/Functions/multiSearchFirstPositionCaseInsensitive.cpp b/src/Functions/multiSearchFirstPositionCaseInsensitive.cpp
similarity index 100%
rename from dbms/src/Functions/multiSearchFirstPositionCaseInsensitive.cpp
rename to src/Functions/multiSearchFirstPositionCaseInsensitive.cpp
diff --git a/dbms/src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp
similarity index 100%
rename from dbms/src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp
rename to src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp
diff --git a/dbms/src/Functions/multiSearchFirstPositionUTF8.cpp b/src/Functions/multiSearchFirstPositionUTF8.cpp
similarity index 100%
rename from dbms/src/Functions/multiSearchFirstPositionUTF8.cpp
rename to src/Functions/multiSearchFirstPositionUTF8.cpp
diff --git a/dbms/src/Functions/multiply.cpp b/src/Functions/multiply.cpp
similarity index 100%
rename from dbms/src/Functions/multiply.cpp
rename to src/Functions/multiply.cpp
diff --git a/dbms/src/Functions/negate.cpp b/src/Functions/negate.cpp
similarity index 100%
rename from dbms/src/Functions/negate.cpp
rename to src/Functions/negate.cpp
diff --git a/dbms/src/Functions/neighbor.cpp b/src/Functions/neighbor.cpp
similarity index 96%
rename from dbms/src/Functions/neighbor.cpp
rename to src/Functions/neighbor.cpp
index dc83aeb0d31..7c26693f7e8 100644
--- a/dbms/src/Functions/neighbor.cpp
+++ b/src/Functions/neighbor.cpp
@@ -27,9 +27,7 @@ class FunctionNeighbor : public IFunction
{
public:
    static constexpr auto name = "neighbor";
-    static FunctionPtr create(const Context & context) { return std::make_shared<FunctionNeighbor>(context); }
-
-    explicit FunctionNeighbor(const Context & context_) : context(context_) {}
+    static FunctionPtr create(const Context &) { return std::make_shared<FunctionNeighbor>(); }
    /// Get the name of the function.
String getName() const override { return name; } @@ -83,14 +81,14 @@ public: const ColumnWithTypeAndName & offset_elem = block.getByPosition(arguments[1]); bool has_defaults = arguments.size() == 3; - ColumnPtr source_column_casted = castColumn(source_elem, result_type, context); + ColumnPtr source_column_casted = castColumn(source_elem, result_type); ColumnPtr offset_column = offset_elem.column; ColumnPtr default_column_casted; if (has_defaults) { const ColumnWithTypeAndName & default_elem = block.getByPosition(arguments[2]); - default_column_casted = castColumn(default_elem, result_type, context); + default_column_casted = castColumn(default_elem, result_type); } bool source_is_constant = isColumnConst(*source_column_casted); @@ -181,9 +179,6 @@ public: block.getByPosition(result).column = std::move(result_column); } } - -private: - const Context & context; }; void registerFunctionNeighbor(FunctionFactory & factory) diff --git a/dbms/src/Functions/notEmpty.cpp b/src/Functions/notEmpty.cpp similarity index 100% rename from dbms/src/Functions/notEmpty.cpp rename to src/Functions/notEmpty.cpp diff --git a/dbms/src/Functions/notEquals.cpp b/src/Functions/notEquals.cpp similarity index 100% rename from dbms/src/Functions/notEquals.cpp rename to src/Functions/notEquals.cpp diff --git a/dbms/src/Functions/now.cpp b/src/Functions/now.cpp similarity index 100% rename from dbms/src/Functions/now.cpp rename to src/Functions/now.cpp diff --git a/dbms/src/Functions/now64.cpp b/src/Functions/now64.cpp similarity index 100% rename from dbms/src/Functions/now64.cpp rename to src/Functions/now64.cpp diff --git a/dbms/src/Functions/nullIf.cpp b/src/Functions/nullIf.cpp similarity index 100% rename from dbms/src/Functions/nullIf.cpp rename to src/Functions/nullIf.cpp diff --git a/dbms/src/Functions/pi.cpp b/src/Functions/pi.cpp similarity index 100% rename from dbms/src/Functions/pi.cpp rename to src/Functions/pi.cpp diff --git a/dbms/src/Functions/plus.cpp b/src/Functions/plus.cpp similarity index 100% rename from dbms/src/Functions/plus.cpp rename to src/Functions/plus.cpp diff --git a/dbms/src/Functions/pointInEllipses.cpp b/src/Functions/pointInEllipses.cpp similarity index 100% rename from dbms/src/Functions/pointInEllipses.cpp rename to src/Functions/pointInEllipses.cpp diff --git a/dbms/src/Functions/pointInPolygon.cpp b/src/Functions/pointInPolygon.cpp similarity index 100% rename from dbms/src/Functions/pointInPolygon.cpp rename to src/Functions/pointInPolygon.cpp diff --git a/dbms/src/Functions/position.cpp b/src/Functions/position.cpp similarity index 100% rename from dbms/src/Functions/position.cpp rename to src/Functions/position.cpp diff --git a/dbms/src/Functions/positionCaseInsensitive.cpp b/src/Functions/positionCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/positionCaseInsensitive.cpp rename to src/Functions/positionCaseInsensitive.cpp diff --git a/dbms/src/Functions/positionCaseInsensitiveUTF8.cpp b/src/Functions/positionCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/positionCaseInsensitiveUTF8.cpp rename to src/Functions/positionCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/positionUTF8.cpp b/src/Functions/positionUTF8.cpp similarity index 100% rename from dbms/src/Functions/positionUTF8.cpp rename to src/Functions/positionUTF8.cpp diff --git a/dbms/src/Functions/pow.cpp b/src/Functions/pow.cpp similarity index 100% rename from dbms/src/Functions/pow.cpp rename to src/Functions/pow.cpp diff --git 
a/dbms/src/Functions/rand.cpp b/src/Functions/rand.cpp similarity index 100% rename from dbms/src/Functions/rand.cpp rename to src/Functions/rand.cpp diff --git a/dbms/src/Functions/rand64.cpp b/src/Functions/rand64.cpp similarity index 100% rename from dbms/src/Functions/rand64.cpp rename to src/Functions/rand64.cpp diff --git a/dbms/src/Functions/randConstant.cpp b/src/Functions/randConstant.cpp similarity index 100% rename from dbms/src/Functions/randConstant.cpp rename to src/Functions/randConstant.cpp diff --git a/dbms/src/Functions/randomPrintableASCII.cpp b/src/Functions/randomPrintableASCII.cpp similarity index 100% rename from dbms/src/Functions/randomPrintableASCII.cpp rename to src/Functions/randomPrintableASCII.cpp diff --git a/dbms/src/Functions/regexpQuoteMeta.cpp b/src/Functions/regexpQuoteMeta.cpp similarity index 100% rename from dbms/src/Functions/regexpQuoteMeta.cpp rename to src/Functions/regexpQuoteMeta.cpp diff --git a/dbms/src/Functions/registerFunctions.cpp b/src/Functions/registerFunctions.cpp similarity index 100% rename from dbms/src/Functions/registerFunctions.cpp rename to src/Functions/registerFunctions.cpp diff --git a/dbms/src/Functions/registerFunctions.h b/src/Functions/registerFunctions.h similarity index 100% rename from dbms/src/Functions/registerFunctions.h rename to src/Functions/registerFunctions.h diff --git a/dbms/src/Functions/registerFunctionsArithmetic.cpp b/src/Functions/registerFunctionsArithmetic.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsArithmetic.cpp rename to src/Functions/registerFunctionsArithmetic.cpp diff --git a/dbms/src/Functions/registerFunctionsComparison.cpp b/src/Functions/registerFunctionsComparison.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsComparison.cpp rename to src/Functions/registerFunctionsComparison.cpp diff --git a/dbms/src/Functions/registerFunctionsConditional.cpp b/src/Functions/registerFunctionsConditional.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsConditional.cpp rename to src/Functions/registerFunctionsConditional.cpp diff --git a/dbms/src/Functions/registerFunctionsConsistentHashing.cpp b/src/Functions/registerFunctionsConsistentHashing.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsConsistentHashing.cpp rename to src/Functions/registerFunctionsConsistentHashing.cpp diff --git a/dbms/src/Functions/registerFunctionsDateTime.cpp b/src/Functions/registerFunctionsDateTime.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsDateTime.cpp rename to src/Functions/registerFunctionsDateTime.cpp diff --git a/dbms/src/Functions/registerFunctionsGeo.cpp b/src/Functions/registerFunctionsGeo.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsGeo.cpp rename to src/Functions/registerFunctionsGeo.cpp diff --git a/dbms/src/Functions/registerFunctionsHigherOrder.cpp b/src/Functions/registerFunctionsHigherOrder.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsHigherOrder.cpp rename to src/Functions/registerFunctionsHigherOrder.cpp diff --git a/dbms/src/Functions/registerFunctionsIntrospection.cpp b/src/Functions/registerFunctionsIntrospection.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsIntrospection.cpp rename to src/Functions/registerFunctionsIntrospection.cpp diff --git a/dbms/src/Functions/registerFunctionsMath.cpp b/src/Functions/registerFunctionsMath.cpp similarity index 100% rename from 
dbms/src/Functions/registerFunctionsMath.cpp rename to src/Functions/registerFunctionsMath.cpp diff --git a/dbms/src/Functions/registerFunctionsMiscellaneous.cpp b/src/Functions/registerFunctionsMiscellaneous.cpp similarity index 98% rename from dbms/src/Functions/registerFunctionsMiscellaneous.cpp rename to src/Functions/registerFunctionsMiscellaneous.cpp index 44e26542c7d..30cab4cc53a 100644 --- a/dbms/src/Functions/registerFunctionsMiscellaneous.cpp +++ b/src/Functions/registerFunctionsMiscellaneous.cpp @@ -56,6 +56,7 @@ void registerFunctionBasename(FunctionFactory &); void registerFunctionTransform(FunctionFactory &); void registerFunctionGetMacro(FunctionFactory &); void registerFunctionGetScalar(FunctionFactory &); +void registerFunctionIsConstant(FunctionFactory &); #if USE_ICU void registerFunctionConvertCharset(FunctionFactory &); @@ -114,6 +115,7 @@ void registerFunctionsMiscellaneous(FunctionFactory & factory) registerFunctionTransform(factory); registerFunctionGetMacro(factory); registerFunctionGetScalar(factory); + registerFunctionIsConstant(factory); #if USE_ICU registerFunctionConvertCharset(factory); diff --git a/dbms/src/Functions/registerFunctionsNull.cpp b/src/Functions/registerFunctionsNull.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsNull.cpp rename to src/Functions/registerFunctionsNull.cpp diff --git a/dbms/src/Functions/registerFunctionsRandom.cpp b/src/Functions/registerFunctionsRandom.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsRandom.cpp rename to src/Functions/registerFunctionsRandom.cpp diff --git a/dbms/src/Functions/registerFunctionsReinterpret.cpp b/src/Functions/registerFunctionsReinterpret.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsReinterpret.cpp rename to src/Functions/registerFunctionsReinterpret.cpp diff --git a/dbms/src/Functions/registerFunctionsString.cpp b/src/Functions/registerFunctionsString.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsString.cpp rename to src/Functions/registerFunctionsString.cpp diff --git a/dbms/src/Functions/registerFunctionsStringSearch.cpp b/src/Functions/registerFunctionsStringSearch.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsStringSearch.cpp rename to src/Functions/registerFunctionsStringSearch.cpp diff --git a/dbms/src/Functions/registerFunctionsTuple.cpp b/src/Functions/registerFunctionsTuple.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsTuple.cpp rename to src/Functions/registerFunctionsTuple.cpp diff --git a/dbms/src/Functions/registerFunctionsVisitParam.cpp b/src/Functions/registerFunctionsVisitParam.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsVisitParam.cpp rename to src/Functions/registerFunctionsVisitParam.cpp diff --git a/dbms/src/Functions/reinterpretAsFixedString.cpp b/src/Functions/reinterpretAsFixedString.cpp similarity index 100% rename from dbms/src/Functions/reinterpretAsFixedString.cpp rename to src/Functions/reinterpretAsFixedString.cpp diff --git a/dbms/src/Functions/reinterpretAsString.cpp b/src/Functions/reinterpretAsString.cpp similarity index 100% rename from dbms/src/Functions/reinterpretAsString.cpp rename to src/Functions/reinterpretAsString.cpp diff --git a/dbms/src/Functions/reinterpretStringAs.cpp b/src/Functions/reinterpretStringAs.cpp similarity index 100% rename from dbms/src/Functions/reinterpretStringAs.cpp rename to src/Functions/reinterpretStringAs.cpp diff --git 
a/dbms/src/Functions/repeat.cpp b/src/Functions/repeat.cpp similarity index 100% rename from dbms/src/Functions/repeat.cpp rename to src/Functions/repeat.cpp diff --git a/dbms/src/Functions/replicate.cpp b/src/Functions/replicate.cpp similarity index 100% rename from dbms/src/Functions/replicate.cpp rename to src/Functions/replicate.cpp diff --git a/dbms/src/Functions/reverse.cpp b/src/Functions/reverse.cpp similarity index 100% rename from dbms/src/Functions/reverse.cpp rename to src/Functions/reverse.cpp diff --git a/dbms/src/Functions/reverseUTF8.cpp b/src/Functions/reverseUTF8.cpp similarity index 100% rename from dbms/src/Functions/reverseUTF8.cpp rename to src/Functions/reverseUTF8.cpp diff --git a/dbms/src/Functions/roundAge.cpp b/src/Functions/roundAge.cpp similarity index 100% rename from dbms/src/Functions/roundAge.cpp rename to src/Functions/roundAge.cpp diff --git a/dbms/src/Functions/roundDuration.cpp b/src/Functions/roundDuration.cpp similarity index 100% rename from dbms/src/Functions/roundDuration.cpp rename to src/Functions/roundDuration.cpp diff --git a/dbms/src/Functions/roundToExp2.cpp b/src/Functions/roundToExp2.cpp similarity index 100% rename from dbms/src/Functions/roundToExp2.cpp rename to src/Functions/roundToExp2.cpp diff --git a/dbms/src/Functions/rowNumberInAllBlocks.cpp b/src/Functions/rowNumberInAllBlocks.cpp similarity index 100% rename from dbms/src/Functions/rowNumberInAllBlocks.cpp rename to src/Functions/rowNumberInAllBlocks.cpp diff --git a/dbms/src/Functions/rowNumberInBlock.cpp b/src/Functions/rowNumberInBlock.cpp similarity index 100% rename from dbms/src/Functions/rowNumberInBlock.cpp rename to src/Functions/rowNumberInBlock.cpp diff --git a/dbms/src/Functions/runningAccumulate.cpp b/src/Functions/runningAccumulate.cpp similarity index 100% rename from dbms/src/Functions/runningAccumulate.cpp rename to src/Functions/runningAccumulate.cpp diff --git a/dbms/src/Functions/runningDifference.cpp b/src/Functions/runningDifference.cpp similarity index 100% rename from dbms/src/Functions/runningDifference.cpp rename to src/Functions/runningDifference.cpp diff --git a/dbms/src/Functions/runningDifference.h b/src/Functions/runningDifference.h similarity index 100% rename from dbms/src/Functions/runningDifference.h rename to src/Functions/runningDifference.h diff --git a/dbms/src/Functions/runningDifferenceStartingWithFirstValue.cpp b/src/Functions/runningDifferenceStartingWithFirstValue.cpp similarity index 100% rename from dbms/src/Functions/runningDifferenceStartingWithFirstValue.cpp rename to src/Functions/runningDifferenceStartingWithFirstValue.cpp diff --git a/dbms/src/Functions/sigmoid.cpp b/src/Functions/sigmoid.cpp similarity index 100% rename from dbms/src/Functions/sigmoid.cpp rename to src/Functions/sigmoid.cpp diff --git a/dbms/src/Functions/sin.cpp b/src/Functions/sin.cpp similarity index 100% rename from dbms/src/Functions/sin.cpp rename to src/Functions/sin.cpp diff --git a/dbms/src/Functions/sleep.cpp b/src/Functions/sleep.cpp similarity index 100% rename from dbms/src/Functions/sleep.cpp rename to src/Functions/sleep.cpp diff --git a/dbms/src/Functions/sleep.h b/src/Functions/sleep.h similarity index 100% rename from dbms/src/Functions/sleep.h rename to src/Functions/sleep.h diff --git a/dbms/src/Functions/sleepEachRow.cpp b/src/Functions/sleepEachRow.cpp similarity index 100% rename from dbms/src/Functions/sleepEachRow.cpp rename to src/Functions/sleepEachRow.cpp diff --git a/dbms/src/Functions/sqrt.cpp b/src/Functions/sqrt.cpp similarity 
index 100% rename from dbms/src/Functions/sqrt.cpp rename to src/Functions/sqrt.cpp diff --git a/dbms/src/Functions/startsWith.cpp b/src/Functions/startsWith.cpp similarity index 100% rename from dbms/src/Functions/startsWith.cpp rename to src/Functions/startsWith.cpp diff --git a/dbms/src/Functions/stringToH3.cpp b/src/Functions/stringToH3.cpp similarity index 100% rename from dbms/src/Functions/stringToH3.cpp rename to src/Functions/stringToH3.cpp diff --git a/dbms/src/Functions/substring.cpp b/src/Functions/substring.cpp similarity index 100% rename from dbms/src/Functions/substring.cpp rename to src/Functions/substring.cpp diff --git a/dbms/src/Functions/subtractDays.cpp b/src/Functions/subtractDays.cpp similarity index 100% rename from dbms/src/Functions/subtractDays.cpp rename to src/Functions/subtractDays.cpp diff --git a/dbms/src/Functions/subtractHours.cpp b/src/Functions/subtractHours.cpp similarity index 100% rename from dbms/src/Functions/subtractHours.cpp rename to src/Functions/subtractHours.cpp diff --git a/dbms/src/Functions/subtractMinutes.cpp b/src/Functions/subtractMinutes.cpp similarity index 100% rename from dbms/src/Functions/subtractMinutes.cpp rename to src/Functions/subtractMinutes.cpp diff --git a/dbms/src/Functions/subtractMonths.cpp b/src/Functions/subtractMonths.cpp similarity index 100% rename from dbms/src/Functions/subtractMonths.cpp rename to src/Functions/subtractMonths.cpp diff --git a/dbms/src/Functions/subtractQuarters.cpp b/src/Functions/subtractQuarters.cpp similarity index 100% rename from dbms/src/Functions/subtractQuarters.cpp rename to src/Functions/subtractQuarters.cpp diff --git a/dbms/src/Functions/subtractSeconds.cpp b/src/Functions/subtractSeconds.cpp similarity index 100% rename from dbms/src/Functions/subtractSeconds.cpp rename to src/Functions/subtractSeconds.cpp diff --git a/dbms/src/Functions/subtractWeeks.cpp b/src/Functions/subtractWeeks.cpp similarity index 100% rename from dbms/src/Functions/subtractWeeks.cpp rename to src/Functions/subtractWeeks.cpp diff --git a/dbms/src/Functions/subtractYears.cpp b/src/Functions/subtractYears.cpp similarity index 100% rename from dbms/src/Functions/subtractYears.cpp rename to src/Functions/subtractYears.cpp diff --git a/dbms/src/Functions/sumburConsistentHash.cpp b/src/Functions/sumburConsistentHash.cpp similarity index 100% rename from dbms/src/Functions/sumburConsistentHash.cpp rename to src/Functions/sumburConsistentHash.cpp diff --git a/dbms/src/Functions/tan.cpp b/src/Functions/tan.cpp similarity index 100% rename from dbms/src/Functions/tan.cpp rename to src/Functions/tan.cpp diff --git a/dbms/src/Functions/tanh.cpp b/src/Functions/tanh.cpp similarity index 100% rename from dbms/src/Functions/tanh.cpp rename to src/Functions/tanh.cpp diff --git a/dbms/src/Functions/tests/CMakeLists.txt b/src/Functions/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/tests/CMakeLists.txt rename to src/Functions/tests/CMakeLists.txt diff --git a/dbms/src/Functions/tests/number_traits.cpp b/src/Functions/tests/number_traits.cpp similarity index 100% rename from dbms/src/Functions/tests/number_traits.cpp rename to src/Functions/tests/number_traits.cpp diff --git a/dbms/src/Functions/tgamma.cpp b/src/Functions/tgamma.cpp similarity index 100% rename from dbms/src/Functions/tgamma.cpp rename to src/Functions/tgamma.cpp diff --git a/dbms/src/Functions/throwIf.cpp b/src/Functions/throwIf.cpp similarity index 100% rename from dbms/src/Functions/throwIf.cpp rename to src/Functions/throwIf.cpp 
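The greatest.cpp and least.cpp hunks above also change how those functions are registered: passing FunctionFactory::CaseInsensitive makes the SQL name match in any case, so GREATEST(a, b) and LEAST(a, b) now resolve just like their lowercase forms. A minimal sketch of the registration pattern (reusing FunctionLeast from the diff):

    #include <Functions/FunctionFactory.h>

    void registerFunctionLeast(DB::FunctionFactory & factory)
    {
        /// The second argument defaults to case-sensitive lookup;
        /// CaseInsensitive additionally matches "LEAST", "Least", etc.
        factory.registerFunction<DB::FunctionLeast>(DB::FunctionFactory::CaseInsensitive);
    }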
diff --git a/dbms/src/Functions/timeSlot.cpp b/src/Functions/timeSlot.cpp similarity index 100% rename from dbms/src/Functions/timeSlot.cpp rename to src/Functions/timeSlot.cpp diff --git a/dbms/src/Functions/timeSlots.cpp b/src/Functions/timeSlots.cpp similarity index 100% rename from dbms/src/Functions/timeSlots.cpp rename to src/Functions/timeSlots.cpp diff --git a/dbms/src/Functions/timezone.cpp b/src/Functions/timezone.cpp similarity index 100% rename from dbms/src/Functions/timezone.cpp rename to src/Functions/timezone.cpp diff --git a/dbms/src/Functions/toColumnTypeName.cpp b/src/Functions/toColumnTypeName.cpp similarity index 100% rename from dbms/src/Functions/toColumnTypeName.cpp rename to src/Functions/toColumnTypeName.cpp diff --git a/dbms/src/Functions/toCustomWeek.cpp b/src/Functions/toCustomWeek.cpp similarity index 100% rename from dbms/src/Functions/toCustomWeek.cpp rename to src/Functions/toCustomWeek.cpp diff --git a/dbms/src/Functions/toDayOfMonth.cpp b/src/Functions/toDayOfMonth.cpp similarity index 100% rename from dbms/src/Functions/toDayOfMonth.cpp rename to src/Functions/toDayOfMonth.cpp diff --git a/dbms/src/Functions/toDayOfWeek.cpp b/src/Functions/toDayOfWeek.cpp similarity index 100% rename from dbms/src/Functions/toDayOfWeek.cpp rename to src/Functions/toDayOfWeek.cpp diff --git a/dbms/src/Functions/toDayOfYear.cpp b/src/Functions/toDayOfYear.cpp similarity index 100% rename from dbms/src/Functions/toDayOfYear.cpp rename to src/Functions/toDayOfYear.cpp diff --git a/dbms/src/Functions/toHour.cpp b/src/Functions/toHour.cpp similarity index 100% rename from dbms/src/Functions/toHour.cpp rename to src/Functions/toHour.cpp diff --git a/dbms/src/Functions/toISOWeek.cpp b/src/Functions/toISOWeek.cpp similarity index 100% rename from dbms/src/Functions/toISOWeek.cpp rename to src/Functions/toISOWeek.cpp diff --git a/dbms/src/Functions/toISOYear.cpp b/src/Functions/toISOYear.cpp similarity index 100% rename from dbms/src/Functions/toISOYear.cpp rename to src/Functions/toISOYear.cpp diff --git a/dbms/src/Functions/toLowCardinality.cpp b/src/Functions/toLowCardinality.cpp similarity index 100% rename from dbms/src/Functions/toLowCardinality.cpp rename to src/Functions/toLowCardinality.cpp diff --git a/dbms/src/Functions/toMinute.cpp b/src/Functions/toMinute.cpp similarity index 100% rename from dbms/src/Functions/toMinute.cpp rename to src/Functions/toMinute.cpp diff --git a/dbms/src/Functions/toMonday.cpp b/src/Functions/toMonday.cpp similarity index 100% rename from dbms/src/Functions/toMonday.cpp rename to src/Functions/toMonday.cpp diff --git a/dbms/src/Functions/toMonth.cpp b/src/Functions/toMonth.cpp similarity index 100% rename from dbms/src/Functions/toMonth.cpp rename to src/Functions/toMonth.cpp diff --git a/dbms/src/Functions/toNullable.cpp b/src/Functions/toNullable.cpp similarity index 100% rename from dbms/src/Functions/toNullable.cpp rename to src/Functions/toNullable.cpp diff --git a/dbms/src/Functions/toQuarter.cpp b/src/Functions/toQuarter.cpp similarity index 100% rename from dbms/src/Functions/toQuarter.cpp rename to src/Functions/toQuarter.cpp diff --git a/dbms/src/Functions/toRelativeDayNum.cpp b/src/Functions/toRelativeDayNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeDayNum.cpp rename to src/Functions/toRelativeDayNum.cpp diff --git a/dbms/src/Functions/toRelativeHourNum.cpp b/src/Functions/toRelativeHourNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeHourNum.cpp rename to 
src/Functions/toRelativeHourNum.cpp diff --git a/dbms/src/Functions/toRelativeMinuteNum.cpp b/src/Functions/toRelativeMinuteNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeMinuteNum.cpp rename to src/Functions/toRelativeMinuteNum.cpp diff --git a/dbms/src/Functions/toRelativeMonthNum.cpp b/src/Functions/toRelativeMonthNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeMonthNum.cpp rename to src/Functions/toRelativeMonthNum.cpp diff --git a/dbms/src/Functions/toRelativeQuarterNum.cpp b/src/Functions/toRelativeQuarterNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeQuarterNum.cpp rename to src/Functions/toRelativeQuarterNum.cpp diff --git a/dbms/src/Functions/toRelativeSecondNum.cpp b/src/Functions/toRelativeSecondNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeSecondNum.cpp rename to src/Functions/toRelativeSecondNum.cpp diff --git a/dbms/src/Functions/toRelativeWeekNum.cpp b/src/Functions/toRelativeWeekNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeWeekNum.cpp rename to src/Functions/toRelativeWeekNum.cpp diff --git a/dbms/src/Functions/toRelativeYearNum.cpp b/src/Functions/toRelativeYearNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeYearNum.cpp rename to src/Functions/toRelativeYearNum.cpp diff --git a/dbms/src/Functions/toSecond.cpp b/src/Functions/toSecond.cpp similarity index 100% rename from dbms/src/Functions/toSecond.cpp rename to src/Functions/toSecond.cpp diff --git a/dbms/src/Functions/toStartOfDay.cpp b/src/Functions/toStartOfDay.cpp similarity index 100% rename from dbms/src/Functions/toStartOfDay.cpp rename to src/Functions/toStartOfDay.cpp diff --git a/dbms/src/Functions/toStartOfFifteenMinutes.cpp b/src/Functions/toStartOfFifteenMinutes.cpp similarity index 100% rename from dbms/src/Functions/toStartOfFifteenMinutes.cpp rename to src/Functions/toStartOfFifteenMinutes.cpp diff --git a/dbms/src/Functions/toStartOfFiveMinute.cpp b/src/Functions/toStartOfFiveMinute.cpp similarity index 100% rename from dbms/src/Functions/toStartOfFiveMinute.cpp rename to src/Functions/toStartOfFiveMinute.cpp diff --git a/dbms/src/Functions/toStartOfHour.cpp b/src/Functions/toStartOfHour.cpp similarity index 100% rename from dbms/src/Functions/toStartOfHour.cpp rename to src/Functions/toStartOfHour.cpp diff --git a/dbms/src/Functions/toStartOfISOYear.cpp b/src/Functions/toStartOfISOYear.cpp similarity index 100% rename from dbms/src/Functions/toStartOfISOYear.cpp rename to src/Functions/toStartOfISOYear.cpp diff --git a/dbms/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp similarity index 100% rename from dbms/src/Functions/toStartOfInterval.cpp rename to src/Functions/toStartOfInterval.cpp diff --git a/dbms/src/Functions/toStartOfMinute.cpp b/src/Functions/toStartOfMinute.cpp similarity index 100% rename from dbms/src/Functions/toStartOfMinute.cpp rename to src/Functions/toStartOfMinute.cpp diff --git a/dbms/src/Functions/toStartOfMonth.cpp b/src/Functions/toStartOfMonth.cpp similarity index 100% rename from dbms/src/Functions/toStartOfMonth.cpp rename to src/Functions/toStartOfMonth.cpp diff --git a/dbms/src/Functions/toStartOfQuarter.cpp b/src/Functions/toStartOfQuarter.cpp similarity index 100% rename from dbms/src/Functions/toStartOfQuarter.cpp rename to src/Functions/toStartOfQuarter.cpp diff --git a/dbms/src/Functions/toStartOfTenMinutes.cpp b/src/Functions/toStartOfTenMinutes.cpp similarity index 100% rename from 
dbms/src/Functions/toStartOfTenMinutes.cpp rename to src/Functions/toStartOfTenMinutes.cpp diff --git a/dbms/src/Functions/toStartOfYear.cpp b/src/Functions/toStartOfYear.cpp similarity index 100% rename from dbms/src/Functions/toStartOfYear.cpp rename to src/Functions/toStartOfYear.cpp diff --git a/dbms/src/Functions/toTime.cpp b/src/Functions/toTime.cpp similarity index 100% rename from dbms/src/Functions/toTime.cpp rename to src/Functions/toTime.cpp diff --git a/dbms/src/Functions/toTimeZone.cpp b/src/Functions/toTimeZone.cpp similarity index 100% rename from dbms/src/Functions/toTimeZone.cpp rename to src/Functions/toTimeZone.cpp diff --git a/dbms/src/Functions/toTypeName.cpp b/src/Functions/toTypeName.cpp similarity index 100% rename from dbms/src/Functions/toTypeName.cpp rename to src/Functions/toTypeName.cpp diff --git a/dbms/src/Functions/toValidUTF8.cpp b/src/Functions/toValidUTF8.cpp similarity index 100% rename from dbms/src/Functions/toValidUTF8.cpp rename to src/Functions/toValidUTF8.cpp diff --git a/dbms/src/Functions/toYYYYMM.cpp b/src/Functions/toYYYYMM.cpp similarity index 100% rename from dbms/src/Functions/toYYYYMM.cpp rename to src/Functions/toYYYYMM.cpp diff --git a/dbms/src/Functions/toYYYYMMDD.cpp b/src/Functions/toYYYYMMDD.cpp similarity index 100% rename from dbms/src/Functions/toYYYYMMDD.cpp rename to src/Functions/toYYYYMMDD.cpp diff --git a/dbms/src/Functions/toYYYYMMDDhhmmss.cpp b/src/Functions/toYYYYMMDDhhmmss.cpp similarity index 100% rename from dbms/src/Functions/toYYYYMMDDhhmmss.cpp rename to src/Functions/toYYYYMMDDhhmmss.cpp diff --git a/dbms/src/Functions/toYear.cpp b/src/Functions/toYear.cpp similarity index 100% rename from dbms/src/Functions/toYear.cpp rename to src/Functions/toYear.cpp diff --git a/dbms/src/Functions/today.cpp b/src/Functions/today.cpp similarity index 100% rename from dbms/src/Functions/today.cpp rename to src/Functions/today.cpp diff --git a/dbms/src/Functions/transform.cpp b/src/Functions/transform.cpp similarity index 100% rename from dbms/src/Functions/transform.cpp rename to src/Functions/transform.cpp diff --git a/dbms/src/Functions/trap.cpp b/src/Functions/trap.cpp similarity index 100% rename from dbms/src/Functions/trap.cpp rename to src/Functions/trap.cpp diff --git a/dbms/src/Functions/trim.cpp b/src/Functions/trim.cpp similarity index 100% rename from dbms/src/Functions/trim.cpp rename to src/Functions/trim.cpp diff --git a/dbms/src/Functions/tryBase64Decode.cpp b/src/Functions/tryBase64Decode.cpp similarity index 100% rename from dbms/src/Functions/tryBase64Decode.cpp rename to src/Functions/tryBase64Decode.cpp diff --git a/dbms/src/Functions/tuple.cpp b/src/Functions/tuple.cpp similarity index 100% rename from dbms/src/Functions/tuple.cpp rename to src/Functions/tuple.cpp diff --git a/dbms/src/Functions/tupleElement.cpp b/src/Functions/tupleElement.cpp similarity index 100% rename from dbms/src/Functions/tupleElement.cpp rename to src/Functions/tupleElement.cpp diff --git a/dbms/src/Functions/upper.cpp b/src/Functions/upper.cpp similarity index 100% rename from dbms/src/Functions/upper.cpp rename to src/Functions/upper.cpp diff --git a/dbms/src/Functions/upperUTF8.cpp b/src/Functions/upperUTF8.cpp similarity index 100% rename from dbms/src/Functions/upperUTF8.cpp rename to src/Functions/upperUTF8.cpp diff --git a/dbms/src/Functions/uptime.cpp b/src/Functions/uptime.cpp similarity index 100% rename from dbms/src/Functions/uptime.cpp rename to src/Functions/uptime.cpp diff --git a/dbms/src/Functions/version.cpp 
b/src/Functions/version.cpp similarity index 100% rename from dbms/src/Functions/version.cpp rename to src/Functions/version.cpp diff --git a/dbms/src/Functions/visibleWidth.cpp b/src/Functions/visibleWidth.cpp similarity index 100% rename from dbms/src/Functions/visibleWidth.cpp rename to src/Functions/visibleWidth.cpp diff --git a/dbms/src/Functions/visitParamExtractBool.cpp b/src/Functions/visitParamExtractBool.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractBool.cpp rename to src/Functions/visitParamExtractBool.cpp diff --git a/dbms/src/Functions/visitParamExtractFloat.cpp b/src/Functions/visitParamExtractFloat.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractFloat.cpp rename to src/Functions/visitParamExtractFloat.cpp diff --git a/dbms/src/Functions/visitParamExtractInt.cpp b/src/Functions/visitParamExtractInt.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractInt.cpp rename to src/Functions/visitParamExtractInt.cpp diff --git a/dbms/src/Functions/visitParamExtractRaw.cpp b/src/Functions/visitParamExtractRaw.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractRaw.cpp rename to src/Functions/visitParamExtractRaw.cpp diff --git a/dbms/src/Functions/visitParamExtractString.cpp b/src/Functions/visitParamExtractString.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractString.cpp rename to src/Functions/visitParamExtractString.cpp diff --git a/dbms/src/Functions/visitParamExtractUInt.cpp b/src/Functions/visitParamExtractUInt.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractUInt.cpp rename to src/Functions/visitParamExtractUInt.cpp diff --git a/dbms/src/Functions/visitParamHas.cpp b/src/Functions/visitParamHas.cpp similarity index 100% rename from dbms/src/Functions/visitParamHas.cpp rename to src/Functions/visitParamHas.cpp diff --git a/dbms/src/Functions/yandexConsistentHash.cpp b/src/Functions/yandexConsistentHash.cpp similarity index 100% rename from dbms/src/Functions/yandexConsistentHash.cpp rename to src/Functions/yandexConsistentHash.cpp diff --git a/dbms/src/Functions/yesterday.cpp b/src/Functions/yesterday.cpp similarity index 100% rename from dbms/src/Functions/yesterday.cpp rename to src/Functions/yesterday.cpp diff --git a/dbms/src/IO/AIO.cpp b/src/IO/AIO.cpp similarity index 85% rename from dbms/src/IO/AIO.cpp rename to src/IO/AIO.cpp index 33fb79fcf95..f0422d04434 100644 --- a/dbms/src/IO/AIO.cpp +++ b/src/IO/AIO.cpp @@ -1,12 +1,12 @@ -#if defined(__linux__) - -#include -#include -#include -#include - #include +#if defined(OS_LINUX) + +# include + +# include +# include + /** Small wrappers for asynchronous I/O. */ @@ -53,17 +53,10 @@ AIOContext::~AIOContext() io_destroy(ctx); } -#elif defined(__FreeBSD__) +#elif defined(OS_FREEBSD) -# include -# include -# include -# include -# include # include -# include - /** Small wrappers for asynchronous I/O. 
*/ @@ -123,7 +116,7 @@ int io_submit(int ctx, long nr, struct iocb * iocbpp[]) int io_getevents(int ctx, long, long max_nr, struct kevent * events, struct timespec * timeout) { - return kevent(ctx, NULL, 0, events, max_nr, timeout); + return kevent(ctx, nullptr, 0, events, max_nr, timeout); } diff --git a/src/IO/AIO.h b/src/IO/AIO.h new file mode 100644 index 00000000000..499d1f3bf60 --- /dev/null +++ b/src/IO/AIO.h @@ -0,0 +1,76 @@ +#pragma once + +#include + +#if defined(OS_LINUX) + +/// https://stackoverflow.com/questions/20759750/resolving-redefinition-of-timespec-in-time-h +# define timespec linux_timespec +# define timeval linux_timeval +# define itimerspec linux_itimerspec +# define sigset_t linux_sigset_t + +# include + +# undef timespec +# undef timeval +# undef itimerspec +# undef sigset_t + + +/** Small wrappers for asynchronous I/O. + */ + +int io_setup(unsigned nr, aio_context_t * ctxp); + +int io_destroy(aio_context_t ctx); + +/// last argument is an array of pointers technically speaking +int io_submit(aio_context_t ctx, long nr, struct iocb * iocbpp[]); + +int io_getevents(aio_context_t ctx, long min_nr, long max_nr, io_event * events, struct timespec * timeout); + + +struct AIOContext : private boost::noncopyable +{ + aio_context_t ctx; + + AIOContext(unsigned int nr_events = 128); + ~AIOContext(); +}; + +#elif defined(OS_FREEBSD) + +# include +# include +# include +# include + +typedef struct kevent io_event; +typedef int aio_context_t; + +struct iocb +{ + struct aiocb aio; + long aio_data; +}; + +int io_setup(void); + +int io_destroy(void); + +/// last argument is an array of pointers technically speaking +int io_submit(int ctx, long nr, struct iocb * iocbpp[]); + +int io_getevents(int ctx, long min_nr, long max_nr, struct kevent * events, struct timespec * timeout); + + +struct AIOContext : private boost::noncopyable +{ + int ctx; + + AIOContext(unsigned int nr_events = 128); + ~AIOContext(); +}; + +#endif diff --git a/dbms/src/IO/AIOContextPool.cpp b/src/IO/AIOContextPool.cpp similarity index 100% rename from dbms/src/IO/AIOContextPool.cpp rename to src/IO/AIOContextPool.cpp diff --git a/dbms/src/IO/AIOContextPool.h b/src/IO/AIOContextPool.h similarity index 100% rename from dbms/src/IO/AIOContextPool.h rename to src/IO/AIOContextPool.h diff --git a/dbms/src/IO/AsynchronousWriteBuffer.h b/src/IO/AsynchronousWriteBuffer.h similarity index 100% rename from dbms/src/IO/AsynchronousWriteBuffer.h rename to src/IO/AsynchronousWriteBuffer.h diff --git a/dbms/src/IO/BitHelpers.h b/src/IO/BitHelpers.h similarity index 100% rename from dbms/src/IO/BitHelpers.h rename to src/IO/BitHelpers.h diff --git a/dbms/src/IO/BrotliReadBuffer.cpp b/src/IO/BrotliReadBuffer.cpp similarity index 100% rename from dbms/src/IO/BrotliReadBuffer.cpp rename to src/IO/BrotliReadBuffer.cpp diff --git a/dbms/src/IO/BrotliReadBuffer.h b/src/IO/BrotliReadBuffer.h similarity index 100% rename from dbms/src/IO/BrotliReadBuffer.h rename to src/IO/BrotliReadBuffer.h diff --git a/dbms/src/IO/BrotliWriteBuffer.cpp b/src/IO/BrotliWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/BrotliWriteBuffer.cpp rename to src/IO/BrotliWriteBuffer.cpp diff --git a/dbms/src/IO/BrotliWriteBuffer.h b/src/IO/BrotliWriteBuffer.h similarity index 100% rename from dbms/src/IO/BrotliWriteBuffer.h rename to src/IO/BrotliWriteBuffer.h diff --git a/dbms/src/IO/BufferBase.h b/src/IO/BufferBase.h similarity index 100% rename from dbms/src/IO/BufferBase.h rename to src/IO/BufferBase.h diff --git 
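For orientation, here is a minimal sketch (not part of this patch) of how the AIOContext wrapper declared in the new src/IO/AIO.h above might be driven on Linux. The file path, buffer size, and 4096-byte alignment are illustrative assumptions; the iocb/io_event types come from the kernel AIO ABI header that AIO.h wraps.

    #include <IO/AIO.h>      /// the RAII wrapper added above: io_setup() in ctor, io_destroy() in dtor
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdint>
    #include <stdexcept>

    int main()
    {
        AIOContext ctx(128);                                     /// io_setup(128, &ctx.ctx)

        int fd = ::open("/tmp/data.bin", O_RDONLY | O_DIRECT);   /// hypothetical file
        if (fd < 0)
            throw std::runtime_error("cannot open file");

        alignas(4096) static char buf[4096];                     /// O_DIRECT wants aligned buffers

        iocb request{};
        request.aio_lio_opcode = IOCB_CMD_PREAD;
        request.aio_fildes = fd;
        request.aio_buf = reinterpret_cast<std::uintptr_t>(buf);
        request.aio_nbytes = sizeof(buf);
        request.aio_offset = 0;

        iocb * requests[] = {&request};                          /// "an array of pointers", as the header comment says
        if (io_submit(ctx.ctx, 1, requests) != 1)
            throw std::runtime_error("io_submit failed");

        io_event event{};
        if (io_getevents(ctx.ctx, 1, 1, &event, nullptr) != 1)   /// block until the read completes
            throw std::runtime_error("io_getevents failed");

        ::close(fd);
        return 0;
    }

The FreeBSD branch emulates the same four calls on top of POSIX AIO and kqueue, so code written against this interface should stay portable between the two systems.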
a/dbms/src/IO/BufferWithOwnMemory.h b/src/IO/BufferWithOwnMemory.h similarity index 100% rename from dbms/src/IO/BufferWithOwnMemory.h rename to src/IO/BufferWithOwnMemory.h diff --git a/dbms/src/IO/CMakeLists.txt b/src/IO/CMakeLists.txt similarity index 100% rename from dbms/src/IO/CMakeLists.txt rename to src/IO/CMakeLists.txt diff --git a/dbms/src/IO/CascadeWriteBuffer.cpp b/src/IO/CascadeWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/CascadeWriteBuffer.cpp rename to src/IO/CascadeWriteBuffer.cpp diff --git a/dbms/src/IO/CascadeWriteBuffer.h b/src/IO/CascadeWriteBuffer.h similarity index 100% rename from dbms/src/IO/CascadeWriteBuffer.h rename to src/IO/CascadeWriteBuffer.h diff --git a/dbms/src/IO/CompressionMethod.cpp b/src/IO/CompressionMethod.cpp similarity index 100% rename from dbms/src/IO/CompressionMethod.cpp rename to src/IO/CompressionMethod.cpp diff --git a/dbms/src/IO/CompressionMethod.h b/src/IO/CompressionMethod.h similarity index 100% rename from dbms/src/IO/CompressionMethod.h rename to src/IO/CompressionMethod.h diff --git a/dbms/src/IO/ConcatReadBuffer.h b/src/IO/ConcatReadBuffer.h similarity index 100% rename from dbms/src/IO/ConcatReadBuffer.h rename to src/IO/ConcatReadBuffer.h diff --git a/dbms/src/IO/ConnectionTimeouts.h b/src/IO/ConnectionTimeouts.h similarity index 100% rename from dbms/src/IO/ConnectionTimeouts.h rename to src/IO/ConnectionTimeouts.h diff --git a/dbms/src/IO/DoubleConverter.cpp b/src/IO/DoubleConverter.cpp similarity index 100% rename from dbms/src/IO/DoubleConverter.cpp rename to src/IO/DoubleConverter.cpp diff --git a/dbms/src/IO/DoubleConverter.h b/src/IO/DoubleConverter.h similarity index 100% rename from dbms/src/IO/DoubleConverter.h rename to src/IO/DoubleConverter.h diff --git a/dbms/src/IO/HDFSCommon.cpp b/src/IO/HDFSCommon.cpp similarity index 100% rename from dbms/src/IO/HDFSCommon.cpp rename to src/IO/HDFSCommon.cpp diff --git a/dbms/src/IO/HDFSCommon.h b/src/IO/HDFSCommon.h similarity index 100% rename from dbms/src/IO/HDFSCommon.h rename to src/IO/HDFSCommon.h diff --git a/dbms/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp similarity index 100% rename from dbms/src/IO/HTTPCommon.cpp rename to src/IO/HTTPCommon.cpp diff --git a/dbms/src/IO/HTTPCommon.h b/src/IO/HTTPCommon.h similarity index 100% rename from dbms/src/IO/HTTPCommon.h rename to src/IO/HTTPCommon.h diff --git a/dbms/src/IO/HashingReadBuffer.h b/src/IO/HashingReadBuffer.h similarity index 100% rename from dbms/src/IO/HashingReadBuffer.h rename to src/IO/HashingReadBuffer.h diff --git a/dbms/src/IO/HashingWriteBuffer.cpp b/src/IO/HashingWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/HashingWriteBuffer.cpp rename to src/IO/HashingWriteBuffer.cpp diff --git a/dbms/src/IO/HashingWriteBuffer.h b/src/IO/HashingWriteBuffer.h similarity index 100% rename from dbms/src/IO/HashingWriteBuffer.h rename to src/IO/HashingWriteBuffer.h diff --git a/dbms/src/IO/HexWriteBuffer.cpp b/src/IO/HexWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/HexWriteBuffer.cpp rename to src/IO/HexWriteBuffer.cpp diff --git a/dbms/src/IO/HexWriteBuffer.h b/src/IO/HexWriteBuffer.h similarity index 100% rename from dbms/src/IO/HexWriteBuffer.h rename to src/IO/HexWriteBuffer.h diff --git a/dbms/src/IO/IReadableWriteBuffer.h b/src/IO/IReadableWriteBuffer.h similarity index 100% rename from dbms/src/IO/IReadableWriteBuffer.h rename to src/IO/IReadableWriteBuffer.h diff --git a/dbms/src/IO/LimitReadBuffer.cpp b/src/IO/LimitReadBuffer.cpp similarity index 100% rename 
from dbms/src/IO/LimitReadBuffer.cpp rename to src/IO/LimitReadBuffer.cpp diff --git a/dbms/src/IO/LimitReadBuffer.h b/src/IO/LimitReadBuffer.h similarity index 100% rename from dbms/src/IO/LimitReadBuffer.h rename to src/IO/LimitReadBuffer.h diff --git a/dbms/src/IO/MMapReadBufferFromFile.cpp b/src/IO/MMapReadBufferFromFile.cpp similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFile.cpp rename to src/IO/MMapReadBufferFromFile.cpp diff --git a/dbms/src/IO/MMapReadBufferFromFile.h b/src/IO/MMapReadBufferFromFile.h similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFile.h rename to src/IO/MMapReadBufferFromFile.h diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp b/src/IO/MMapReadBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp rename to src/IO/MMapReadBufferFromFileDescriptor.cpp diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.h b/src/IO/MMapReadBufferFromFileDescriptor.h similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFileDescriptor.h rename to src/IO/MMapReadBufferFromFileDescriptor.h diff --git a/dbms/src/IO/MemoryReadWriteBuffer.cpp b/src/IO/MemoryReadWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/MemoryReadWriteBuffer.cpp rename to src/IO/MemoryReadWriteBuffer.cpp diff --git a/dbms/src/IO/MemoryReadWriteBuffer.h b/src/IO/MemoryReadWriteBuffer.h similarity index 100% rename from dbms/src/IO/MemoryReadWriteBuffer.h rename to src/IO/MemoryReadWriteBuffer.h diff --git a/dbms/src/IO/NullWriteBuffer.cpp b/src/IO/NullWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/NullWriteBuffer.cpp rename to src/IO/NullWriteBuffer.cpp diff --git a/dbms/src/IO/NullWriteBuffer.h b/src/IO/NullWriteBuffer.h similarity index 100% rename from dbms/src/IO/NullWriteBuffer.h rename to src/IO/NullWriteBuffer.h diff --git a/dbms/src/IO/Operators.h b/src/IO/Operators.h similarity index 100% rename from dbms/src/IO/Operators.h rename to src/IO/Operators.h diff --git a/dbms/src/IO/PeekableReadBuffer.cpp b/src/IO/PeekableReadBuffer.cpp similarity index 100% rename from dbms/src/IO/PeekableReadBuffer.cpp rename to src/IO/PeekableReadBuffer.cpp diff --git a/dbms/src/IO/PeekableReadBuffer.h b/src/IO/PeekableReadBuffer.h similarity index 100% rename from dbms/src/IO/PeekableReadBuffer.h rename to src/IO/PeekableReadBuffer.h diff --git a/dbms/src/IO/Progress.cpp b/src/IO/Progress.cpp similarity index 100% rename from dbms/src/IO/Progress.cpp rename to src/IO/Progress.cpp diff --git a/dbms/src/IO/Progress.h b/src/IO/Progress.h similarity index 100% rename from dbms/src/IO/Progress.h rename to src/IO/Progress.h diff --git a/dbms/src/IO/ReadBuffer.h b/src/IO/ReadBuffer.h similarity index 100% rename from dbms/src/IO/ReadBuffer.h rename to src/IO/ReadBuffer.h diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/src/IO/ReadBufferAIO.cpp similarity index 100% rename from dbms/src/IO/ReadBufferAIO.cpp rename to src/IO/ReadBufferAIO.cpp diff --git a/dbms/src/IO/ReadBufferAIO.h b/src/IO/ReadBufferAIO.h similarity index 100% rename from dbms/src/IO/ReadBufferAIO.h rename to src/IO/ReadBufferAIO.h diff --git a/dbms/src/IO/ReadBufferFromFile.cpp b/src/IO/ReadBufferFromFile.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromFile.cpp rename to src/IO/ReadBufferFromFile.cpp diff --git a/dbms/src/IO/ReadBufferFromFile.h b/src/IO/ReadBufferFromFile.h similarity index 100% rename from dbms/src/IO/ReadBufferFromFile.h rename to src/IO/ReadBufferFromFile.h diff --git 
a/dbms/src/IO/ReadBufferFromFileBase.cpp b/src/IO/ReadBufferFromFileBase.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromFileBase.cpp rename to src/IO/ReadBufferFromFileBase.cpp diff --git a/dbms/src/IO/ReadBufferFromFileBase.h b/src/IO/ReadBufferFromFileBase.h similarity index 100% rename from dbms/src/IO/ReadBufferFromFileBase.h rename to src/IO/ReadBufferFromFileBase.h diff --git a/dbms/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromFileDescriptor.cpp rename to src/IO/ReadBufferFromFileDescriptor.cpp diff --git a/dbms/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h similarity index 100% rename from dbms/src/IO/ReadBufferFromFileDescriptor.h rename to src/IO/ReadBufferFromFileDescriptor.h diff --git a/dbms/src/IO/ReadBufferFromHDFS.cpp b/src/IO/ReadBufferFromHDFS.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromHDFS.cpp rename to src/IO/ReadBufferFromHDFS.cpp diff --git a/dbms/src/IO/ReadBufferFromHDFS.h b/src/IO/ReadBufferFromHDFS.h similarity index 100% rename from dbms/src/IO/ReadBufferFromHDFS.h rename to src/IO/ReadBufferFromHDFS.h diff --git a/dbms/src/IO/ReadBufferFromIStream.cpp b/src/IO/ReadBufferFromIStream.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromIStream.cpp rename to src/IO/ReadBufferFromIStream.cpp diff --git a/dbms/src/IO/ReadBufferFromIStream.h b/src/IO/ReadBufferFromIStream.h similarity index 100% rename from dbms/src/IO/ReadBufferFromIStream.h rename to src/IO/ReadBufferFromIStream.h diff --git a/dbms/src/IO/ReadBufferFromMemory.cpp b/src/IO/ReadBufferFromMemory.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromMemory.cpp rename to src/IO/ReadBufferFromMemory.cpp diff --git a/dbms/src/IO/ReadBufferFromMemory.h b/src/IO/ReadBufferFromMemory.h similarity index 100% rename from dbms/src/IO/ReadBufferFromMemory.h rename to src/IO/ReadBufferFromMemory.h diff --git a/dbms/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromPocoSocket.cpp rename to src/IO/ReadBufferFromPocoSocket.cpp diff --git a/dbms/src/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h similarity index 100% rename from dbms/src/IO/ReadBufferFromPocoSocket.h rename to src/IO/ReadBufferFromPocoSocket.h diff --git a/dbms/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromS3.cpp rename to src/IO/ReadBufferFromS3.cpp diff --git a/dbms/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h similarity index 100% rename from dbms/src/IO/ReadBufferFromS3.h rename to src/IO/ReadBufferFromS3.h diff --git a/dbms/src/IO/ReadBufferFromString.h b/src/IO/ReadBufferFromString.h similarity index 100% rename from dbms/src/IO/ReadBufferFromString.h rename to src/IO/ReadBufferFromString.h diff --git a/dbms/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp similarity index 99% rename from dbms/src/IO/ReadHelpers.cpp rename to src/IO/ReadHelpers.cpp index 75682906242..dccb413af2c 100644 --- a/dbms/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -74,12 +74,12 @@ UInt128 stringToUUID(const String & str) void NO_INLINE throwAtAssertionFailed(const char * s, ReadBuffer & buf) { WriteBufferFromOwnString out; - out << "Cannot parse input: expected " << escape << s; + out << "Cannot parse input: expected " << quote << s; if (buf.eof()) out << " at end of stream."; else - out << " 
before: " << escape << String(buf.position(), std::min(SHOW_CHARS_ON_SYNTAX_ERROR, buf.buffer().end() - buf.position())); + out << " before: " << quote << String(buf.position(), std::min(SHOW_CHARS_ON_SYNTAX_ERROR, buf.buffer().end() - buf.position())); throw Exception(out.str(), ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED); } diff --git a/dbms/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h similarity index 100% rename from dbms/src/IO/ReadHelpers.h rename to src/IO/ReadHelpers.h diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp similarity index 100% rename from dbms/src/IO/ReadWriteBufferFromHTTP.cpp rename to src/IO/ReadWriteBufferFromHTTP.cpp diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h similarity index 100% rename from dbms/src/IO/ReadWriteBufferFromHTTP.h rename to src/IO/ReadWriteBufferFromHTTP.h diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp new file mode 100644 index 00000000000..3aca41a9c9a --- /dev/null +++ b/src/IO/S3Common.cpp @@ -0,0 +1,158 @@ +#include + +#if USE_AWS_S3 + +# include +# include + +# include +# include +# include +# include +# include +# include + + +namespace +{ +const std::pair & convertLogLevel(Aws::Utils::Logging::LogLevel log_level) +{ + static const std::unordered_map> mapping = { + {Aws::Utils::Logging::LogLevel::Off, {LogsLevel::none, Message::PRIO_FATAL}}, + {Aws::Utils::Logging::LogLevel::Fatal, {LogsLevel::error, Message::PRIO_FATAL}}, + {Aws::Utils::Logging::LogLevel::Error, {LogsLevel::error, Message::PRIO_ERROR}}, + {Aws::Utils::Logging::LogLevel::Warn, {LogsLevel::warning, Message::PRIO_WARNING}}, + {Aws::Utils::Logging::LogLevel::Info, {LogsLevel::information, Message::PRIO_INFORMATION}}, + {Aws::Utils::Logging::LogLevel::Debug, {LogsLevel::debug, Message::PRIO_DEBUG}}, + {Aws::Utils::Logging::LogLevel::Trace, {LogsLevel::trace, Message::PRIO_TRACE}}, + }; + return mapping.at(log_level); +} + +class AWSLogger final : public Aws::Utils::Logging::LogSystemInterface +{ +public: + ~AWSLogger() final = default; + + Aws::Utils::Logging::LogLevel GetLogLevel() const final { return Aws::Utils::Logging::LogLevel::Trace; } + + void Log(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * format_str, ...) 
final // NOLINT + { + auto & [level, prio] = convertLogLevel(log_level); + LOG_SIMPLE(log, std::string(tag) + ": " + format_str, level, prio); + } + + void LogStream(Aws::Utils::Logging::LogLevel log_level, const char * tag, const Aws::OStringStream & message_stream) final + { + auto & [level, prio] = convertLogLevel(log_level); + LOG_SIMPLE(log, std::string(tag) + ": " + message_stream.str(), level, prio); + } + + void Flush() final {} + +private: + Poco::Logger * log = &Poco::Logger::get("AWSClient"); +}; +} + +namespace DB +{ +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +namespace S3 +{ + ClientFactory::ClientFactory() + { + aws_options = Aws::SDKOptions {}; + Aws::InitAPI(aws_options); + Aws::Utils::Logging::InitializeAWSLogging(std::make_shared()); + } + + ClientFactory::~ClientFactory() + { + Aws::Utils::Logging::ShutdownAWSLogging(); + Aws::ShutdownAPI(aws_options); + } + + ClientFactory & ClientFactory::instance() + { + static ClientFactory ret; + return ret; + } + + std::shared_ptr ClientFactory::create( // NOLINT + const String & endpoint, + const String & access_key_id, + const String & secret_access_key) + { + Aws::Client::ClientConfiguration cfg; + if (!endpoint.empty()) + cfg.endpointOverride = endpoint; + + Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key); + + return std::make_shared( + credentials, // Aws credentials. + std::move(cfg), // Client configuration. + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, // Sign policy. + endpoint.empty() // Use virtual addressing only if endpoint is not specified. + ); + } + + + URI::URI(const Poco::URI & uri_) + { + /// Case when bucket name represented in domain name of S3 URL. + /// E.g. (https://bucket-name.s3.Region.amazonaws.com/key) + /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access + static const RE2 virtual_hosted_style_pattern(R"((.+\.)?s3[.\-][a-z0-9\-.]+)"); + /// Case when bucket name and key represented in path of S3 URL. + /// E.g. (https://s3.Region.amazonaws.com/bucket-name/key) + /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access + static const RE2 path_style_pattern("([^/]+)/(.*)"); + + uri = uri_; + + if (uri.getHost().empty()) + throw Exception("Host is empty in S3 URI: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + + endpoint = uri.getScheme() + "://" + uri.getAuthority(); + + if (re2::RE2::FullMatch(uri.getAuthority(), virtual_hosted_style_pattern, &bucket)) + { + if (!bucket.empty()) + bucket.pop_back(); /// Remove '.' character from the end of the bucket name. + + /// S3 specification requires at least 3 and at most 63 characters in bucket name. + /// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html + if (bucket.length() < 3 || bucket.length() > 63) + throw Exception( + "Bucket name length out of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + + /// Remove leading '/' from path to extract key. + key = uri.getPath().substr(1); + if (key.empty() || key == "/") + throw Exception("Key name is empty in S3 URI: " + key + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + } + else if (re2::RE2::PartialMatch(uri.getPath(), path_style_pattern, &bucket, &key)) + { + /// S3 specification requires at least 3 and at most 63 characters in bucket name. 
+ /// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html + if (bucket.length() < 3 || bucket.length() > 63) + throw Exception( + "Bucket name length out of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + + if (key.empty() || key == "/") + throw Exception("Key name is empty in S3 URI: " + key + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + } + else + throw Exception("Bucket or key name are invalid in S3 URI: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + } +} + +} + +#endif diff --git a/dbms/src/IO/S3Common.h b/src/IO/S3Common.h similarity index 100% rename from dbms/src/IO/S3Common.h rename to src/IO/S3Common.h diff --git a/dbms/src/IO/SeekableReadBuffer.h b/src/IO/SeekableReadBuffer.h similarity index 100% rename from dbms/src/IO/SeekableReadBuffer.h rename to src/IO/SeekableReadBuffer.h diff --git a/dbms/src/IO/UncompressedCache.h b/src/IO/UncompressedCache.h similarity index 100% rename from dbms/src/IO/UncompressedCache.h rename to src/IO/UncompressedCache.h diff --git a/dbms/src/IO/UseSSL.cpp b/src/IO/UseSSL.cpp similarity index 100% rename from dbms/src/IO/UseSSL.cpp rename to src/IO/UseSSL.cpp diff --git a/dbms/src/IO/UseSSL.h b/src/IO/UseSSL.h similarity index 100% rename from dbms/src/IO/UseSSL.h rename to src/IO/UseSSL.h diff --git a/dbms/src/IO/VarInt.h b/src/IO/VarInt.h similarity index 100% rename from dbms/src/IO/VarInt.h rename to src/IO/VarInt.h diff --git a/dbms/src/IO/WriteBuffer.h b/src/IO/WriteBuffer.h similarity index 100% rename from dbms/src/IO/WriteBuffer.h rename to src/IO/WriteBuffer.h diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/src/IO/WriteBufferAIO.cpp similarity index 100% rename from dbms/src/IO/WriteBufferAIO.cpp rename to src/IO/WriteBufferAIO.cpp diff --git a/dbms/src/IO/WriteBufferAIO.h b/src/IO/WriteBufferAIO.h similarity index 100% rename from dbms/src/IO/WriteBufferAIO.h rename to src/IO/WriteBufferAIO.h diff --git a/dbms/src/IO/WriteBufferFromArena.h b/src/IO/WriteBufferFromArena.h similarity index 100% rename from dbms/src/IO/WriteBufferFromArena.h rename to src/IO/WriteBufferFromArena.h diff --git a/dbms/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFile.cpp rename to src/IO/WriteBufferFromFile.cpp diff --git a/dbms/src/IO/WriteBufferFromFile.h b/src/IO/WriteBufferFromFile.h similarity index 100% rename from dbms/src/IO/WriteBufferFromFile.h rename to src/IO/WriteBufferFromFile.h diff --git a/dbms/src/IO/WriteBufferFromFileBase.cpp b/src/IO/WriteBufferFromFileBase.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFileBase.cpp rename to src/IO/WriteBufferFromFileBase.cpp diff --git a/dbms/src/IO/WriteBufferFromFileBase.h b/src/IO/WriteBufferFromFileBase.h similarity index 100% rename from dbms/src/IO/WriteBufferFromFileBase.h rename to src/IO/WriteBufferFromFileBase.h diff --git a/dbms/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFileDescriptor.cpp rename to src/IO/WriteBufferFromFileDescriptor.cpp diff --git a/dbms/src/IO/WriteBufferFromFileDescriptor.h b/src/IO/WriteBufferFromFileDescriptor.h similarity index 100% rename from dbms/src/IO/WriteBufferFromFileDescriptor.h rename to src/IO/WriteBufferFromFileDescriptor.h diff --git a/dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp 
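Taken together, the new S3Common.cpp exposes two entry points: URI parsing and client construction. A hedged sketch of how they compose (the bucket, key, endpoint, and credential strings below are placeholders, and the concrete client type is whatever ClientFactory::create instantiates from the AWS SDK):

    #include <IO/S3Common.h>
    #include <Poco/URI.h>

    void s3Example()
    {
        /// Path-style URL: bucket and key are taken from the path.
        DB::S3::URI uri(Poco::URI("https://storage.yandexcloud.net/mybucket/some/key"));
        /// uri.endpoint == "https://storage.yandexcloud.net"
        /// uri.bucket   == "mybucket", uri.key == "some/key"

        /// Virtual-hosted-style URL: the bucket is the host-name prefix before "s3.".
        DB::S3::URI vhost(Poco::URI("https://mybucket.s3.us-east-1.amazonaws.com/some/key"));
        /// vhost.bucket == "mybucket", vhost.key == "some/key"

        /// A client bound to the parsed endpoint; the key pair is a placeholder.
        auto client = DB::S3::ClientFactory::instance().create(
            uri.endpoint, "ACCESS_KEY_ID", "SECRET_ACCESS_KEY");
    }

The gtest cases added further below exercise exactly these two URL shapes, plus the malformed forms that must throw.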
b/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp rename to src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp diff --git a/dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h b/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h similarity index 100% rename from dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h rename to src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h diff --git a/dbms/src/IO/WriteBufferFromHDFS.cpp b/src/IO/WriteBufferFromHDFS.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromHDFS.cpp rename to src/IO/WriteBufferFromHDFS.cpp diff --git a/dbms/src/IO/WriteBufferFromHDFS.h b/src/IO/WriteBufferFromHDFS.h similarity index 100% rename from dbms/src/IO/WriteBufferFromHDFS.h rename to src/IO/WriteBufferFromHDFS.h diff --git a/dbms/src/IO/WriteBufferFromHTTP.cpp b/src/IO/WriteBufferFromHTTP.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromHTTP.cpp rename to src/IO/WriteBufferFromHTTP.cpp diff --git a/dbms/src/IO/WriteBufferFromHTTP.h b/src/IO/WriteBufferFromHTTP.h similarity index 100% rename from dbms/src/IO/WriteBufferFromHTTP.h rename to src/IO/WriteBufferFromHTTP.h diff --git a/dbms/src/IO/WriteBufferFromHTTPServerResponse.cpp b/src/IO/WriteBufferFromHTTPServerResponse.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromHTTPServerResponse.cpp rename to src/IO/WriteBufferFromHTTPServerResponse.cpp diff --git a/dbms/src/IO/WriteBufferFromHTTPServerResponse.h b/src/IO/WriteBufferFromHTTPServerResponse.h similarity index 100% rename from dbms/src/IO/WriteBufferFromHTTPServerResponse.h rename to src/IO/WriteBufferFromHTTPServerResponse.h diff --git a/dbms/src/IO/WriteBufferFromOStream.cpp b/src/IO/WriteBufferFromOStream.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromOStream.cpp rename to src/IO/WriteBufferFromOStream.cpp diff --git a/dbms/src/IO/WriteBufferFromOStream.h b/src/IO/WriteBufferFromOStream.h similarity index 100% rename from dbms/src/IO/WriteBufferFromOStream.h rename to src/IO/WriteBufferFromOStream.h diff --git a/dbms/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromPocoSocket.cpp rename to src/IO/WriteBufferFromPocoSocket.cpp diff --git a/dbms/src/IO/WriteBufferFromPocoSocket.h b/src/IO/WriteBufferFromPocoSocket.h similarity index 100% rename from dbms/src/IO/WriteBufferFromPocoSocket.h rename to src/IO/WriteBufferFromPocoSocket.h diff --git a/dbms/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromS3.cpp rename to src/IO/WriteBufferFromS3.cpp diff --git a/dbms/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h similarity index 100% rename from dbms/src/IO/WriteBufferFromS3.h rename to src/IO/WriteBufferFromS3.h diff --git a/dbms/src/IO/WriteBufferFromString.h b/src/IO/WriteBufferFromString.h similarity index 100% rename from dbms/src/IO/WriteBufferFromString.h rename to src/IO/WriteBufferFromString.h diff --git a/dbms/src/IO/WriteBufferFromTemporaryFile.cpp b/src/IO/WriteBufferFromTemporaryFile.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromTemporaryFile.cpp rename to src/IO/WriteBufferFromTemporaryFile.cpp diff --git a/dbms/src/IO/WriteBufferFromTemporaryFile.h b/src/IO/WriteBufferFromTemporaryFile.h similarity index 100% rename from dbms/src/IO/WriteBufferFromTemporaryFile.h rename to 
src/IO/WriteBufferFromTemporaryFile.h diff --git a/dbms/src/IO/WriteBufferFromVector.h b/src/IO/WriteBufferFromVector.h similarity index 100% rename from dbms/src/IO/WriteBufferFromVector.h rename to src/IO/WriteBufferFromVector.h diff --git a/dbms/src/IO/WriteBufferValidUTF8.cpp b/src/IO/WriteBufferValidUTF8.cpp similarity index 100% rename from dbms/src/IO/WriteBufferValidUTF8.cpp rename to src/IO/WriteBufferValidUTF8.cpp diff --git a/dbms/src/IO/WriteBufferValidUTF8.h b/src/IO/WriteBufferValidUTF8.h similarity index 100% rename from dbms/src/IO/WriteBufferValidUTF8.h rename to src/IO/WriteBufferValidUTF8.h diff --git a/dbms/src/IO/WriteHelpers.cpp b/src/IO/WriteHelpers.cpp similarity index 100% rename from dbms/src/IO/WriteHelpers.cpp rename to src/IO/WriteHelpers.cpp diff --git a/dbms/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h similarity index 100% rename from dbms/src/IO/WriteHelpers.h rename to src/IO/WriteHelpers.h diff --git a/dbms/src/IO/WriteIntText.h b/src/IO/WriteIntText.h similarity index 100% rename from dbms/src/IO/WriteIntText.h rename to src/IO/WriteIntText.h diff --git a/dbms/src/IO/ZlibDeflatingWriteBuffer.cpp b/src/IO/ZlibDeflatingWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/ZlibDeflatingWriteBuffer.cpp rename to src/IO/ZlibDeflatingWriteBuffer.cpp diff --git a/dbms/src/IO/ZlibDeflatingWriteBuffer.h b/src/IO/ZlibDeflatingWriteBuffer.h similarity index 100% rename from dbms/src/IO/ZlibDeflatingWriteBuffer.h rename to src/IO/ZlibDeflatingWriteBuffer.h diff --git a/dbms/src/IO/ZlibInflatingReadBuffer.cpp b/src/IO/ZlibInflatingReadBuffer.cpp similarity index 100% rename from dbms/src/IO/ZlibInflatingReadBuffer.cpp rename to src/IO/ZlibInflatingReadBuffer.cpp diff --git a/dbms/src/IO/ZlibInflatingReadBuffer.h b/src/IO/ZlibInflatingReadBuffer.h similarity index 100% rename from dbms/src/IO/ZlibInflatingReadBuffer.h rename to src/IO/ZlibInflatingReadBuffer.h diff --git a/dbms/src/IO/copyData.cpp b/src/IO/copyData.cpp similarity index 100% rename from dbms/src/IO/copyData.cpp rename to src/IO/copyData.cpp diff --git a/dbms/src/IO/copyData.h b/src/IO/copyData.h similarity index 100% rename from dbms/src/IO/copyData.h rename to src/IO/copyData.h diff --git a/dbms/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp similarity index 100% rename from dbms/src/IO/createReadBufferFromFileBase.cpp rename to src/IO/createReadBufferFromFileBase.cpp diff --git a/dbms/src/IO/createReadBufferFromFileBase.h b/src/IO/createReadBufferFromFileBase.h similarity index 100% rename from dbms/src/IO/createReadBufferFromFileBase.h rename to src/IO/createReadBufferFromFileBase.h diff --git a/dbms/src/IO/createWriteBufferFromFileBase.cpp b/src/IO/createWriteBufferFromFileBase.cpp similarity index 100% rename from dbms/src/IO/createWriteBufferFromFileBase.cpp rename to src/IO/createWriteBufferFromFileBase.cpp diff --git a/dbms/src/IO/createWriteBufferFromFileBase.h b/src/IO/createWriteBufferFromFileBase.h similarity index 100% rename from dbms/src/IO/createWriteBufferFromFileBase.h rename to src/IO/createWriteBufferFromFileBase.h diff --git a/dbms/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp similarity index 97% rename from dbms/src/IO/parseDateTimeBestEffort.cpp rename to src/IO/parseDateTimeBestEffort.cpp index 6e747b13b3f..7e40909226c 100644 --- a/dbms/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -69,7 +69,6 @@ template inline void readDecimalNumber(T & res, size_t num_digits, const char * 
src) { #define READ_DECIMAL_NUMBER(N) do { res *= common::exp10_i32(N); readDecimalNumber<N>(res, src); src += (N); num_digits -= (N); } while (false) - while (num_digits) { switch (num_digits) @@ -80,7 +79,7 @@ inline void readDecimalNumber(T & res, size_t num_digits, const char * src) default: READ_DECIMAL_NUMBER(4); break; } } -#undef DECIMAL_NUMBER_CASE +#undef READ_DECIMAL_NUMBER } struct DateTimeSubsecondPart @@ -90,7 +89,12 @@ struct DateTimeSubsecondPart }; template <typename ReturnType> -ReturnType parseDateTimeBestEffortImpl(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, DateTimeSubsecondPart * fractional = nullptr) +ReturnType parseDateTimeBestEffortImpl( + time_t & res, + ReadBuffer & in, + const DateLUTImpl & local_time_zone, + const DateLUTImpl & utc_time_zone, + DateTimeSubsecondPart * fractional) { auto on_error = [](const std::string & message [[maybe_unused]], int code [[maybe_unused]]) { @@ -367,7 +371,10 @@ ReturnType parseDateTimeBestEffortImpl(time_t & res, ReadBuffer & in, const Date { char c = *in.position(); - if (c == ' ' || c == 'T') + /// 'T' is a separator between date and time according to ISO 8601. + /// But don't skip it if we didn't read the date part yet, because 'T' is also a prefix for 'Tue' and 'Thu'. + + if (c == ' ' || (c == 'T' && year && !has_time)) { ++in.position(); } @@ -582,12 +589,12 @@ ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuf void parseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) { - parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone); + parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr); } bool tryParseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) { - return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone); + return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr); } void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) diff --git a/dbms/src/IO/parseDateTimeBestEffort.h b/src/IO/parseDateTimeBestEffort.h similarity index 100% rename from dbms/src/IO/parseDateTimeBestEffort.h rename to src/IO/parseDateTimeBestEffort.h diff --git a/dbms/src/IO/readDecimalText.h b/src/IO/readDecimalText.h similarity index 91% rename from dbms/src/IO/readDecimalText.h rename to src/IO/readDecimalText.h index 5b2a3f76481..6edc300eac8 100644 --- a/dbms/src/IO/readDecimalText.h +++ b/src/IO/readDecimalText.h @@ -10,7 +10,10 @@ namespace ErrorCodes extern const int ARGUMENT_OUT_OF_BOUND; } - +/// Try to read a Decimal into underlying type T from a ReadBuffer. Throws if 'digits_only' is set and there's an unexpected symbol in the input. +/// Returns an integer 'exponent' factor that x should be multiplied by to get the correct Decimal value: result = x * 10^exponent. +/// 'digits' is an in-out parameter: on input it is the maximum number of meaningful decimal digits allowed in the result; on output it holds the number of meaningful digits actually read. +/// Does not care about the decimal scale, only about the meaningful digits in the decimal text representation.
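A worked instance of that contract, inferred from the comment above (a sketch, not code from this patch; it assumes readDigits is callable as declared below):

    #include <IO/ReadBufferFromString.h>
    #include <IO/readDecimalText.h>

    void readDigitsExample()
    {
        std::string text = "1.23";
        DB::ReadBufferFromString in(text);

        int64_t x = 0;              /// digits accumulate here with the dot ignored: 123
        unsigned int digits = 18;   /// in: max meaningful digits allowed; out: 3
        int exponent = 0;           /// out: -2, since 1.23 == 123 * 10^-2
        DB::readDigits(in, x, digits, exponent);
    }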
template inline bool readDigits(ReadBuffer & buf, T & x, unsigned int & digits, int & exponent, bool digits_only = false) { diff --git a/dbms/src/IO/readFloatText.cpp b/src/IO/readFloatText.cpp similarity index 100% rename from dbms/src/IO/readFloatText.cpp rename to src/IO/readFloatText.cpp diff --git a/dbms/src/IO/readFloatText.h b/src/IO/readFloatText.h similarity index 95% rename from dbms/src/IO/readFloatText.h rename to src/IO/readFloatText.h index fc3ffc43a91..4e0825222a7 100644 --- a/dbms/src/IO/readFloatText.h +++ b/src/IO/readFloatText.h @@ -156,6 +156,9 @@ ReturnType readFloatTextPreciseImpl(T & x, ReadBuffer & buf) { switch (*buf.position()) { + case '+': + continue; + case '-': { negative = true; @@ -335,6 +338,7 @@ ReturnType readFloatTextFastImpl(T & x, ReadBuffer & in) ++in.position(); } + auto count_after_sign = in.count(); constexpr int significant_digits = std::numeric_limits::digits10; @@ -380,7 +384,7 @@ ReturnType readFloatTextFastImpl(T & x, ReadBuffer & in) if (in.eof()) { if constexpr (throw_exception) - throw Exception("Cannot read floating point value", ErrorCodes::CANNOT_PARSE_NUMBER); + throw Exception("Cannot read floating point value: nothing after exponent", ErrorCodes::CANNOT_PARSE_NUMBER); else return false; } @@ -418,11 +422,30 @@ ReturnType readFloatTextFastImpl(T & x, ReadBuffer & in) if (in.eof()) { if constexpr (throw_exception) - throw Exception("Cannot read floating point value", ErrorCodes::CANNOT_PARSE_NUMBER); + throw Exception("Cannot read floating point value: no digits read", ErrorCodes::CANNOT_PARSE_NUMBER); else return false; } + if (*in.position() == '+') + { + ++in.position(); + if (in.eof()) + { + if constexpr (throw_exception) + throw Exception("Cannot read floating point value: nothing after plus sign", ErrorCodes::CANNOT_PARSE_NUMBER); + else + return false; + } + else if (negative) + { + if constexpr (throw_exception) + throw Exception("Cannot read floating point value: plus after minus sign", ErrorCodes::CANNOT_PARSE_NUMBER); + else + return false; + } + } + if (*in.position() == 'i' || *in.position() == 'I') { if (assertOrParseInfinity(in)) diff --git a/dbms/src/IO/tests/CMakeLists.txt b/src/IO/tests/CMakeLists.txt similarity index 100% rename from dbms/src/IO/tests/CMakeLists.txt rename to src/IO/tests/CMakeLists.txt diff --git a/dbms/src/IO/tests/async_write.cpp b/src/IO/tests/async_write.cpp similarity index 100% rename from dbms/src/IO/tests/async_write.cpp rename to src/IO/tests/async_write.cpp diff --git a/dbms/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp b/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp similarity index 98% rename from dbms/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp rename to src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp index 3e5a1998380..c6208af2d5e 100644 --- a/dbms/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp +++ b/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp @@ -104,7 +104,7 @@ INSTANTIATE_TEST_SUITE_P(Basic, DateLUT::instance("Europe/Minsk") }, { - "When scale is 0, subsecond part is 0 despite beeing present in string.", + "When scale is 0, subsecond part is 0 despite being present in string.", "2019-09-16 19:20:17.123", 1568650817ULL, 0, diff --git a/dbms/src/IO/tests/gtest_DateTimeToString.cpp b/src/IO/tests/gtest_DateTimeToString.cpp similarity index 100% rename from dbms/src/IO/tests/gtest_DateTimeToString.cpp rename to src/IO/tests/gtest_DateTimeToString.cpp diff --git a/dbms/src/IO/tests/gtest_aio_seek_back_after_eof.cpp 
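The practical effect of the new '+' handling and the more specific error messages, as a small hedged sketch (tryReadFloatText is this header's existing non-throwing entry point; the literals are arbitrary):

    #include <IO/ReadBufferFromString.h>
    #include <IO/readFloatText.h>
    #include <cassert>

    void plusSignExamples()
    {
        double x = 0;

        std::string plain = "+1.5";            /// a leading plus sign is now consumed
        DB::ReadBufferFromString ok(plain);
        assert(DB::tryReadFloatText(x, ok) && x == 1.5);

        std::string mixed = "-+1.5";           /// "plus after minus sign" is rejected
        DB::ReadBufferFromString bad(mixed);
        assert(!DB::tryReadFloatText(x, bad));
    }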
b/src/IO/tests/gtest_aio_seek_back_after_eof.cpp similarity index 100% rename from dbms/src/IO/tests/gtest_aio_seek_back_after_eof.cpp rename to src/IO/tests/gtest_aio_seek_back_after_eof.cpp diff --git a/dbms/src/IO/tests/gtest_bit_io.cpp b/src/IO/tests/gtest_bit_io.cpp similarity index 100% rename from dbms/src/IO/tests/gtest_bit_io.cpp rename to src/IO/tests/gtest_bit_io.cpp diff --git a/dbms/src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp b/src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp similarity index 100% rename from dbms/src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp rename to src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp diff --git a/dbms/src/IO/tests/gtest_peekable_read_buffer.cpp b/src/IO/tests/gtest_peekable_read_buffer.cpp similarity index 100% rename from dbms/src/IO/tests/gtest_peekable_read_buffer.cpp rename to src/IO/tests/gtest_peekable_read_buffer.cpp diff --git a/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp new file mode 100644 index 00000000000..accb81a6306 --- /dev/null +++ b/src/IO/tests/gtest_s3_uri.cpp @@ -0,0 +1,55 @@ +#include +#include + +#if USE_AWS_S3 + +# include + +namespace +{ +using namespace DB; + +class S3UriTest : public testing::TestWithParam +{ +}; + +TEST(S3UriTest, validPatterns) +{ + { + S3::URI uri(Poco::URI("https://jokserfn.s3.yandexcloud.net/data")); + ASSERT_EQ("https://jokserfn.s3.yandexcloud.net", uri.endpoint); + ASSERT_EQ("jokserfn", uri.bucket); + ASSERT_EQ("data", uri.key); + } + { + S3::URI uri(Poco::URI("https://storage.yandexcloud.net/jokserfn/data")); + ASSERT_EQ("https://storage.yandexcloud.net", uri.endpoint); + ASSERT_EQ("jokserfn", uri.bucket); + ASSERT_EQ("data", uri.key); + } +} + +TEST_P(S3UriTest, invalidPatterns) +{ + ASSERT_ANY_THROW(S3::URI(Poco::URI(GetParam()))); +} + +INSTANTIATE_TEST_SUITE_P( + S3, + S3UriTest, + testing::Values( + "https:///", + "https://jokserfn.s3.yandexcloud.net/", + "https://.s3.yandexcloud.net/key", + "https://s3.yandexcloud.net/key", + "https://jokserfn.s3yandexcloud.net/key", + "https://s3.yandexcloud.net/key/", + "https://s3.yandexcloud.net//", + "https://yandexcloud.net/", + "https://yandexcloud.net//", + "https://yandexcloud.net/bucket/", + "https://yandexcloud.net//key")); + +} + +#endif diff --git a/dbms/src/IO/tests/hashing_buffer.h b/src/IO/tests/hashing_buffer.h similarity index 100% rename from dbms/src/IO/tests/hashing_buffer.h rename to src/IO/tests/hashing_buffer.h diff --git a/dbms/src/IO/tests/hashing_read_buffer.cpp b/src/IO/tests/hashing_read_buffer.cpp similarity index 100% rename from dbms/src/IO/tests/hashing_read_buffer.cpp rename to src/IO/tests/hashing_read_buffer.cpp diff --git a/dbms/src/IO/tests/hashing_write_buffer.cpp b/src/IO/tests/hashing_write_buffer.cpp similarity index 100% rename from dbms/src/IO/tests/hashing_write_buffer.cpp rename to src/IO/tests/hashing_write_buffer.cpp diff --git a/dbms/src/IO/tests/io_operators.cpp b/src/IO/tests/io_operators.cpp similarity index 100% rename from dbms/src/IO/tests/io_operators.cpp rename to src/IO/tests/io_operators.cpp diff --git a/dbms/src/IO/tests/limit_read_buffer.cpp b/src/IO/tests/limit_read_buffer.cpp similarity index 100% rename from dbms/src/IO/tests/limit_read_buffer.cpp rename to src/IO/tests/limit_read_buffer.cpp diff --git a/dbms/src/IO/tests/limit_read_buffer.reference b/src/IO/tests/limit_read_buffer.reference similarity index 100% rename from dbms/src/IO/tests/limit_read_buffer.reference rename to src/IO/tests/limit_read_buffer.reference diff --git 
a/dbms/src/IO/tests/limit_read_buffer.sh b/src/IO/tests/limit_read_buffer.sh similarity index 100% rename from dbms/src/IO/tests/limit_read_buffer.sh rename to src/IO/tests/limit_read_buffer.sh diff --git a/dbms/src/IO/tests/limit_read_buffer2.cpp b/src/IO/tests/limit_read_buffer2.cpp similarity index 100% rename from dbms/src/IO/tests/limit_read_buffer2.cpp rename to src/IO/tests/limit_read_buffer2.cpp diff --git a/dbms/src/IO/tests/mempbrk.cpp b/src/IO/tests/mempbrk.cpp similarity index 100% rename from dbms/src/IO/tests/mempbrk.cpp rename to src/IO/tests/mempbrk.cpp diff --git a/dbms/src/IO/tests/o_direct_and_dirty_pages.cpp b/src/IO/tests/o_direct_and_dirty_pages.cpp similarity index 100% rename from dbms/src/IO/tests/o_direct_and_dirty_pages.cpp rename to src/IO/tests/o_direct_and_dirty_pages.cpp diff --git a/dbms/src/IO/tests/parse_date_time_best_effort.cpp b/src/IO/tests/parse_date_time_best_effort.cpp similarity index 100% rename from dbms/src/IO/tests/parse_date_time_best_effort.cpp rename to src/IO/tests/parse_date_time_best_effort.cpp diff --git a/dbms/src/IO/tests/parse_int_perf.cpp b/src/IO/tests/parse_int_perf.cpp similarity index 100% rename from dbms/src/IO/tests/parse_int_perf.cpp rename to src/IO/tests/parse_int_perf.cpp diff --git a/dbms/src/IO/tests/parse_int_perf2.cpp b/src/IO/tests/parse_int_perf2.cpp similarity index 100% rename from dbms/src/IO/tests/parse_int_perf2.cpp rename to src/IO/tests/parse_int_perf2.cpp diff --git a/dbms/src/IO/tests/read_buffer.cpp b/src/IO/tests/read_buffer.cpp similarity index 100% rename from dbms/src/IO/tests/read_buffer.cpp rename to src/IO/tests/read_buffer.cpp diff --git a/dbms/src/IO/tests/read_buffer_aio.cpp b/src/IO/tests/read_buffer_aio.cpp similarity index 100% rename from dbms/src/IO/tests/read_buffer_aio.cpp rename to src/IO/tests/read_buffer_aio.cpp diff --git a/dbms/src/IO/tests/read_buffer_perf.cpp b/src/IO/tests/read_buffer_perf.cpp similarity index 100% rename from dbms/src/IO/tests/read_buffer_perf.cpp rename to src/IO/tests/read_buffer_perf.cpp diff --git a/dbms/src/IO/tests/read_escaped_string.cpp b/src/IO/tests/read_escaped_string.cpp similarity index 100% rename from dbms/src/IO/tests/read_escaped_string.cpp rename to src/IO/tests/read_escaped_string.cpp diff --git a/dbms/src/IO/tests/read_float_perf.cpp b/src/IO/tests/read_float_perf.cpp similarity index 100% rename from dbms/src/IO/tests/read_float_perf.cpp rename to src/IO/tests/read_float_perf.cpp diff --git a/dbms/src/IO/tests/read_write_int.cpp b/src/IO/tests/read_write_int.cpp similarity index 100% rename from dbms/src/IO/tests/read_write_int.cpp rename to src/IO/tests/read_write_int.cpp diff --git a/dbms/src/IO/tests/ryu_test.cpp b/src/IO/tests/ryu_test.cpp similarity index 100% rename from dbms/src/IO/tests/ryu_test.cpp rename to src/IO/tests/ryu_test.cpp diff --git a/dbms/src/IO/tests/valid_utf8.cpp b/src/IO/tests/valid_utf8.cpp similarity index 100% rename from dbms/src/IO/tests/valid_utf8.cpp rename to src/IO/tests/valid_utf8.cpp diff --git a/dbms/src/IO/tests/valid_utf8_perf.cpp b/src/IO/tests/valid_utf8_perf.cpp similarity index 100% rename from dbms/src/IO/tests/valid_utf8_perf.cpp rename to src/IO/tests/valid_utf8_perf.cpp diff --git a/dbms/src/IO/tests/var_uint.cpp b/src/IO/tests/var_uint.cpp similarity index 100% rename from dbms/src/IO/tests/var_uint.cpp rename to src/IO/tests/var_uint.cpp diff --git a/dbms/src/IO/tests/write_buffer.cpp b/src/IO/tests/write_buffer.cpp similarity index 100% rename from dbms/src/IO/tests/write_buffer.cpp rename to 
src/IO/tests/write_buffer.cpp diff --git a/dbms/src/IO/tests/write_buffer_aio.cpp b/src/IO/tests/write_buffer_aio.cpp similarity index 100% rename from dbms/src/IO/tests/write_buffer_aio.cpp rename to src/IO/tests/write_buffer_aio.cpp diff --git a/dbms/src/IO/tests/write_buffer_perf.cpp b/src/IO/tests/write_buffer_perf.cpp similarity index 100% rename from dbms/src/IO/tests/write_buffer_perf.cpp rename to src/IO/tests/write_buffer_perf.cpp diff --git a/dbms/src/IO/tests/write_int.cpp b/src/IO/tests/write_int.cpp similarity index 100% rename from dbms/src/IO/tests/write_int.cpp rename to src/IO/tests/write_int.cpp diff --git a/dbms/src/IO/tests/zlib_buffers.cpp b/src/IO/tests/zlib_buffers.cpp similarity index 100% rename from dbms/src/IO/tests/zlib_buffers.cpp rename to src/IO/tests/zlib_buffers.cpp diff --git a/dbms/src/IO/tests/zlib_ng_bug.cpp b/src/IO/tests/zlib_ng_bug.cpp similarity index 100% rename from dbms/src/IO/tests/zlib_ng_bug.cpp rename to src/IO/tests/zlib_ng_bug.cpp diff --git a/dbms/src/Interpreters/ActionLocksManager.cpp b/src/Interpreters/ActionLocksManager.cpp similarity index 100% rename from dbms/src/Interpreters/ActionLocksManager.cpp rename to src/Interpreters/ActionLocksManager.cpp diff --git a/dbms/src/Interpreters/ActionLocksManager.h b/src/Interpreters/ActionLocksManager.h similarity index 100% rename from dbms/src/Interpreters/ActionLocksManager.h rename to src/Interpreters/ActionLocksManager.h diff --git a/dbms/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp similarity index 98% rename from dbms/src/Interpreters/ActionsVisitor.cpp rename to src/Interpreters/ActionsVisitor.cpp index 4e008a81973..38656c47765 100644 --- a/dbms/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -64,7 +64,7 @@ static NamesAndTypesList::iterator findColumn(const String & name, NamesAndTypes } template <typename Collection> -static Block createBlockFromCollection(const Collection & collection, const DataTypes & types) +static Block createBlockFromCollection(const Collection & collection, const DataTypes & types, const Context & context) { size_t columns_num = types.size(); MutableColumns columns(columns_num); @@ -77,7 +77,7 @@ static Block createBlockFromCollection(const Collection & collection, const Data if (columns_num == 1) { auto field = convertFieldToType(value, *types[0]); - if (!field.isNull()) + if (!field.isNull() || context.getSettingsRef().transform_null_in) columns[0]->insert(std::move(field)); } else @@ -100,7 +100,7 @@ static Block createBlockFromCollection(const Collection & collection, const Data for (; i < tuple_size; ++i) { tuple_values[i] = convertFieldToType(tuple[i], *types[i]); - if (tuple_values[i].isNull()) + if (tuple_values[i].isNull() && !context.getSettingsRef().transform_null_in) break; } @@ -170,23 +170,23 @@ SetPtr makeExplicitSet( if (left_type_depth == right_type_depth) { Array array{right_arg_value}; - block = createBlockFromCollection(array, set_element_types); + block = createBlockFromCollection(array, set_element_types, context); } /// 1 in (1, 2); (1, 2) in ((1, 2), (3, 4)); etc.
else if (left_type_depth + 1 == right_type_depth) { auto type_index = right_arg_type->getTypeId(); if (type_index == TypeIndex::Tuple) - block = createBlockFromCollection(DB::get<const Tuple &>(right_arg_value), set_element_types); + block = createBlockFromCollection(DB::get<const Tuple &>(right_arg_value), set_element_types, context); else if (type_index == TypeIndex::Array) - block = createBlockFromCollection(DB::get<const Array &>(right_arg_value), set_element_types); + block = createBlockFromCollection(DB::get<const Array &>(right_arg_value), set_element_types, context); else throw_unsupported_type(right_arg_type); } else throw_unsupported_type(right_arg_type); - SetPtr set = std::make_shared<Set>(size_limits, create_ordered_set); + SetPtr set = std::make_shared<Set>(size_limits, create_ordered_set, context.getSettingsRef().transform_null_in); set->setHeader(block); set->insertFromBlock(block); @@ -654,7 +654,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su return subquery_for_set.set; } - SetPtr set = std::make_shared<Set>(data.set_size_limit, false); + SetPtr set = std::make_shared<Set>(data.set_size_limit, false, data.context.getSettingsRef().transform_null_in); /** The following happens for GLOBAL INs: * - in the addExternalStorage function, the IN (SELECT ...) subquery is replaced with IN _data1, diff --git a/dbms/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h similarity index 100% rename from dbms/src/Interpreters/ActionsVisitor.h rename to src/Interpreters/ActionsVisitor.h diff --git a/dbms/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h similarity index 100% rename from dbms/src/Interpreters/AddDefaultDatabaseVisitor.h rename to src/Interpreters/AddDefaultDatabaseVisitor.h diff --git a/dbms/src/Interpreters/AggregateDescription.h b/src/Interpreters/AggregateDescription.h similarity index 100% rename from dbms/src/Interpreters/AggregateDescription.h rename to src/Interpreters/AggregateDescription.h diff --git a/dbms/src/Interpreters/AggregationCommon.h b/src/Interpreters/AggregationCommon.h similarity index 100% rename from dbms/src/Interpreters/AggregationCommon.h rename to src/Interpreters/AggregationCommon.h diff --git a/dbms/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp similarity index 100% rename from dbms/src/Interpreters/Aggregator.cpp rename to src/Interpreters/Aggregator.cpp diff --git a/dbms/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h similarity index 100% rename from dbms/src/Interpreters/Aggregator.h rename to src/Interpreters/Aggregator.h diff --git a/dbms/src/Interpreters/Aliases.h b/src/Interpreters/Aliases.h similarity index 100% rename from dbms/src/Interpreters/Aliases.h rename to src/Interpreters/Aliases.h diff --git a/dbms/src/Interpreters/ArrayJoinAction.cpp b/src/Interpreters/ArrayJoinAction.cpp similarity index 100% rename from dbms/src/Interpreters/ArrayJoinAction.cpp rename to src/Interpreters/ArrayJoinAction.cpp diff --git a/dbms/src/Interpreters/ArrayJoinAction.h b/src/Interpreters/ArrayJoinAction.h similarity index 100% rename from dbms/src/Interpreters/ArrayJoinAction.h rename to src/Interpreters/ArrayJoinAction.h diff --git a/dbms/src/Interpreters/ArrayJoinedColumnsVisitor.h b/src/Interpreters/ArrayJoinedColumnsVisitor.h similarity index 100% rename from dbms/src/Interpreters/ArrayJoinedColumnsVisitor.h rename to src/Interpreters/ArrayJoinedColumnsVisitor.h diff --git a/dbms/src/Interpreters/AsteriskSemantic.h b/src/Interpreters/AsteriskSemantic.h similarity index 100% rename from
dbms/src/Interpreters/AsteriskSemantic.h rename to src/Interpreters/AsteriskSemantic.h diff --git a/dbms/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp similarity index 100% rename from dbms/src/Interpreters/AsynchronousMetrics.cpp rename to src/Interpreters/AsynchronousMetrics.cpp diff --git a/dbms/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h similarity index 100% rename from dbms/src/Interpreters/AsynchronousMetrics.h rename to src/Interpreters/AsynchronousMetrics.h diff --git a/dbms/src/Interpreters/BloomFilter.cpp b/src/Interpreters/BloomFilter.cpp similarity index 100% rename from dbms/src/Interpreters/BloomFilter.cpp rename to src/Interpreters/BloomFilter.cpp diff --git a/dbms/src/Interpreters/BloomFilter.h b/src/Interpreters/BloomFilter.h similarity index 100% rename from dbms/src/Interpreters/BloomFilter.h rename to src/Interpreters/BloomFilter.h diff --git a/dbms/src/Interpreters/BloomFilterHash.h b/src/Interpreters/BloomFilterHash.h similarity index 100% rename from dbms/src/Interpreters/BloomFilterHash.h rename to src/Interpreters/BloomFilterHash.h diff --git a/dbms/src/Interpreters/CMakeLists.txt b/src/Interpreters/CMakeLists.txt similarity index 100% rename from dbms/src/Interpreters/CMakeLists.txt rename to src/Interpreters/CMakeLists.txt diff --git a/dbms/src/Interpreters/CancellationCode.h b/src/Interpreters/CancellationCode.h similarity index 100% rename from dbms/src/Interpreters/CancellationCode.h rename to src/Interpreters/CancellationCode.h diff --git a/dbms/src/Interpreters/CatBoostModel.cpp b/src/Interpreters/CatBoostModel.cpp similarity index 100% rename from dbms/src/Interpreters/CatBoostModel.cpp rename to src/Interpreters/CatBoostModel.cpp diff --git a/dbms/src/Interpreters/CatBoostModel.h b/src/Interpreters/CatBoostModel.h similarity index 100% rename from dbms/src/Interpreters/CatBoostModel.h rename to src/Interpreters/CatBoostModel.h diff --git a/dbms/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp similarity index 100% rename from dbms/src/Interpreters/ClientInfo.cpp rename to src/Interpreters/ClientInfo.cpp diff --git a/dbms/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h similarity index 100% rename from dbms/src/Interpreters/ClientInfo.h rename to src/Interpreters/ClientInfo.h diff --git a/dbms/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp similarity index 100% rename from dbms/src/Interpreters/Cluster.cpp rename to src/Interpreters/Cluster.cpp diff --git a/dbms/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h similarity index 100% rename from dbms/src/Interpreters/Cluster.h rename to src/Interpreters/Cluster.h diff --git a/dbms/src/Interpreters/ClusterProxy/IStreamFactory.h b/src/Interpreters/ClusterProxy/IStreamFactory.h similarity index 100% rename from dbms/src/Interpreters/ClusterProxy/IStreamFactory.h rename to src/Interpreters/ClusterProxy/IStreamFactory.h diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp similarity index 94% rename from dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp rename to src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 39bbb1eb667..533e6895f26 100644 --- a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -11,7 +11,6 @@ #include #include -#include #include #include #include @@ -82,10 +81,19 @@ Pipe createLocalStream(const ASTPtr & 
query_ast, const Block & header, const Con /// This flag means that pipeline must be tree-shaped, /// so we can't enable processors for InterpreterSelectQuery here. auto stream = interpreter.execute().in; - Pipe pipe(std::make_shared(std::move(stream))); + auto source = std::make_shared(std::move(stream)); + + bool add_totals_and_extremes_port = processed_stage == QueryProcessingStage::Complete; + if (add_totals_and_extremes_port) + { + source->addTotalsPort(); + source->addExtremesPort(); + } + + Pipe pipe(std::move(source)); pipe.addSimpleTransform(std::make_shared( - pipe.getHeader(), header, ConvertingTransform::MatchColumnsMode::Name, context)); + pipe.getHeader(), header, ConvertingTransform::MatchColumnsMode::Name)); return pipe; } @@ -95,7 +103,7 @@ Pipe createLocalStream(const ASTPtr & query_ast, const Block & header, const Con pipeline.addSimpleTransform([&](const Block & source_header) { return std::make_shared( - source_header, header, ConvertingTransform::MatchColumnsMode::Name, context); + source_header, header, ConvertingTransform::MatchColumnsMode::Name); }); /** Materialization is needed, since from remote servers the constants come materialized. @@ -130,7 +138,7 @@ void SelectStreamFactory::createForShard( Pipes & res) { bool force_add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState; - bool add_totals_port = processed_stage == QueryProcessingStage::Complete; + bool add_totals_and_extremes_port = processed_stage == QueryProcessingStage::Complete; auto modified_query_ast = query_ast->clone(); if (has_virtual_shard_num_column) @@ -153,8 +161,11 @@ void SelectStreamFactory::createForShard( auto source = std::make_shared(std::move(stream), force_add_agg_info); - if (add_totals_port) + if (add_totals_and_extremes_port) + { source->addTotalsPort(); + source->addExtremesPort(); + } res.emplace_back(std::move(source)); }; @@ -303,8 +314,11 @@ void SelectStreamFactory::createForShard( auto lazy_stream = std::make_shared("LazyShardWithLocalReplica", header, lazily_create_stream); auto source = std::make_shared(std::move(lazy_stream), force_add_agg_info); - if (add_totals_port) + if (add_totals_and_extremes_port) + { source->addTotalsPort(); + source->addExtremesPort(); + } res.emplace_back(std::move(source)); } diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h similarity index 100% rename from dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h rename to src/Interpreters/ClusterProxy/SelectStreamFactory.h diff --git a/dbms/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp similarity index 100% rename from dbms/src/Interpreters/ClusterProxy/executeQuery.cpp rename to src/Interpreters/ClusterProxy/executeQuery.cpp diff --git a/dbms/src/Interpreters/ClusterProxy/executeQuery.h b/src/Interpreters/ClusterProxy/executeQuery.h similarity index 100% rename from dbms/src/Interpreters/ClusterProxy/executeQuery.h rename to src/Interpreters/ClusterProxy/executeQuery.h diff --git a/dbms/src/Interpreters/CollectJoinOnKeysVisitor.cpp b/src/Interpreters/CollectJoinOnKeysVisitor.cpp similarity index 99% rename from dbms/src/Interpreters/CollectJoinOnKeysVisitor.cpp rename to src/Interpreters/CollectJoinOnKeysVisitor.cpp index 4648366a1f4..1eddbfc5666 100644 --- a/dbms/src/Interpreters/CollectJoinOnKeysVisitor.cpp +++ b/src/Interpreters/CollectJoinOnKeysVisitor.cpp @@ -2,7 +2,7 @@ #include #include -#include +#include namespace DB { diff --git 
a/dbms/src/Interpreters/CollectJoinOnKeysVisitor.h b/src/Interpreters/CollectJoinOnKeysVisitor.h similarity index 97% rename from dbms/src/Interpreters/CollectJoinOnKeysVisitor.h rename to src/Interpreters/CollectJoinOnKeysVisitor.h index 68109e460e5..8a1836a97ac 100644 --- a/dbms/src/Interpreters/CollectJoinOnKeysVisitor.h +++ b/src/Interpreters/CollectJoinOnKeysVisitor.h @@ -11,7 +11,7 @@ namespace DB { class ASTIdentifier; -class AnalyzedJoin; +class TableJoin; namespace ASOF { @@ -25,7 +25,7 @@ public: struct Data { - AnalyzedJoin & analyzed_join; + TableJoin & analyzed_join; const TableWithColumnNames & left_table; const TableWithColumnNames & right_table; const Aliases & aliases; diff --git a/dbms/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp similarity index 99% rename from dbms/src/Interpreters/Context.cpp rename to src/Interpreters/Context.cpp index 4dc72948f8a..6e30792277f 100644 --- a/dbms/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -665,12 +665,10 @@ String Context::getUserName() const return access->getUserName(); } -UUID Context::getUserID() const +std::optional<UUID> Context::getUserID() const { auto lock = getLock(); - if (!user_id) - throw Exception("No current user", ErrorCodes::LOGICAL_ERROR); - return *user_id; + return user_id; } @@ -909,7 +907,6 @@ void Context::setSettings(const Settings & settings_) auto old_allow_introspection_functions = settings.allow_introspection_functions; settings = settings_; - active_default_settings = nullptr; if ((settings.readonly != old_readonly) || (settings.allow_ddl != old_allow_ddl) || (settings.allow_introspection_functions != old_allow_introspection_functions)) calculateAccessRights(); @@ -919,7 +916,6 @@ void Context::setSettings(const Settings & settings_) void Context::setSetting(const StringRef & name, const String & value) { auto lock = getLock(); - active_default_settings = nullptr; if (name == "profile") { setProfile(value); @@ -935,7 +931,6 @@ void Context::setSetting(const StringRef & name, const String & value) void Context::setSetting(const StringRef & name, const Field & value) { auto lock = getLock(); - active_default_settings = nullptr; if (name == "profile") { setProfile(value.safeGet<String>()); @@ -962,20 +957,6 @@ void Context::applySettingsChanges(const SettingsChanges & changes) } -void Context::resetSettingsToDefault() -{ - auto lock = getLock(); - auto default_settings = getAccess()->getDefaultSettings(); - if (default_settings && (default_settings == active_default_settings)) - return; - if (default_settings) - setSettings(*default_settings); - else - setSettings(Settings{}); - active_default_settings = default_settings; -} - - void Context::checkSettingsConstraints(const SettingChange & change) const { if (auto settings_constraints = getSettingsConstraints()) diff --git a/dbms/src/Interpreters/Context.h b/src/Interpreters/Context.h similarity index 99% rename from dbms/src/Interpreters/Context.h rename to src/Interpreters/Context.h index e5b33e43614..f68b08bf9f0 100644 --- a/dbms/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -151,7 +151,6 @@ private: bool use_default_roles = false; std::shared_ptr<const ContextAccess> access; std::shared_ptr<const EnabledRowPolicies> initial_row_policy; - std::shared_ptr<const Settings> active_default_settings; String current_database; Settings settings; /// Setting for query execution.
using ProgressCallback = std::function<void(const Progress & progress)>; @@ -233,7 +232,7 @@ public: UserPtr getUser() const; String getUserName() const; - UUID getUserID() const; + std::optional<UUID> getUserID() const; void setCurrentRoles(const std::vector<UUID> & current_roles_); void setCurrentRolesDefault(); @@ -346,9 +345,6 @@ public: void applySettingChange(const SettingChange & change); void applySettingsChanges(const SettingsChanges & changes); - /// Reset settings to the default values for the current user. - void resetSettingsToDefault(); - /// Checks the constraints. void checkSettingsConstraints(const SettingChange & change) const; void checkSettingsConstraints(const SettingsChanges & changes) const; diff --git a/dbms/src/Interpreters/CrossToInnerJoinVisitor.cpp b/src/Interpreters/CrossToInnerJoinVisitor.cpp similarity index 99% rename from dbms/src/Interpreters/CrossToInnerJoinVisitor.cpp rename to src/Interpreters/CrossToInnerJoinVisitor.cpp index 62bbd18c060..903f561b8bb 100644 --- a/dbms/src/Interpreters/CrossToInnerJoinVisitor.cpp +++ b/src/Interpreters/CrossToInnerJoinVisitor.cpp @@ -214,7 +214,7 @@ private: } }; -using CheckExpressionMatcher = ConstOneTypeMatcher; +using CheckExpressionMatcher = ConstOneTypeMatcher; using CheckExpressionVisitor = ConstInDepthNodeVisitor; diff --git a/dbms/src/Interpreters/CrossToInnerJoinVisitor.h b/src/Interpreters/CrossToInnerJoinVisitor.h similarity index 100% rename from dbms/src/Interpreters/CrossToInnerJoinVisitor.h rename to src/Interpreters/CrossToInnerJoinVisitor.h diff --git a/dbms/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp similarity index 99% rename from dbms/src/Interpreters/DDLWorker.cpp rename to src/Interpreters/DDLWorker.cpp index eaee356264d..e786849d121 100644 --- a/dbms/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -463,7 +463,7 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task) ParserQuery parser_query(end); String description; - task.query = parseQuery(parser_query, begin, end, description, 0); + task.query = parseQuery(parser_query, begin, end, description, 0, context.getSettingsRef().max_parser_depth); } // XXX: serious design flaw since `ASTQueryWithOnCluster` is not inherited from `IAST`!
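The parseQuery change in the DDLWorker hunk above is worth pausing on: every query pulled from the distributed DDL queue is re-parsed on each worker, and the recursion depth of that parse is now bounded by the user-visible max_parser_depth setting rather than left unlimited. The stand-alone sketch below illustrates only the general technique of depth-bounded recursive descent; parseNested and its signature are invented for this illustration and are not ClickHouse's IParser API.

    #include <cstddef>
    #include <stdexcept>
    #include <string_view>

    /// Illustrative sketch only: a recursive-descent routine that enforces an
    /// explicit depth limit, which is what a max_parser_depth-style setting
    /// implies. parseNested is a hypothetical helper, not ClickHouse code.
    static void parseNested(std::string_view s, size_t & pos, size_t depth, size_t max_depth)
    {
        if (max_depth && depth > max_depth)
            throw std::runtime_error("Maximum parse depth exceeded");

        while (pos < s.size())
        {
            char c = s[pos++];
            if (c == '(')
                parseNested(s, pos, depth + 1, max_depth);  /// descend one level per '('
            else if (c == ')')
                return;  /// close the current level
        }
    }

    int main()
    {
        std::string_view query = "SELECT ((((1))))";
        size_t pos = 0;
        parseNested(query, pos, 1, 1000);  /// nesting past the limit would throw
    }

Passing the limit down explicitly, as the patch does via context.getSettingsRef(), keeps the bound a per-query setting instead of a constant baked into the parser.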
@@ -1377,4 +1377,9 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & cont } +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context) +{ + return executeDDLQueryOnCluster(query_ptr_, context, {}); +} + } diff --git a/dbms/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h similarity index 98% rename from dbms/src/Interpreters/DDLWorker.h rename to src/Interpreters/DDLWorker.h index 32b7cd5f172..62eba97032e 100644 --- a/dbms/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -24,6 +24,7 @@ struct DDLTask; /// Pushes distributed DDL query to the queue BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, AccessRightsElements && query_required_access); +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context); class DDLWorker diff --git a/dbms/src/Interpreters/DNSCacheUpdater.cpp b/src/Interpreters/DNSCacheUpdater.cpp similarity index 100% rename from dbms/src/Interpreters/DNSCacheUpdater.cpp rename to src/Interpreters/DNSCacheUpdater.cpp diff --git a/dbms/src/Interpreters/DNSCacheUpdater.h b/src/Interpreters/DNSCacheUpdater.h similarity index 100% rename from dbms/src/Interpreters/DNSCacheUpdater.h rename to src/Interpreters/DNSCacheUpdater.h diff --git a/dbms/src/Interpreters/DatabaseAndTableWithAlias.cpp b/src/Interpreters/DatabaseAndTableWithAlias.cpp similarity index 100% rename from dbms/src/Interpreters/DatabaseAndTableWithAlias.cpp rename to src/Interpreters/DatabaseAndTableWithAlias.cpp diff --git a/dbms/src/Interpreters/DatabaseAndTableWithAlias.h b/src/Interpreters/DatabaseAndTableWithAlias.h similarity index 79% rename from dbms/src/Interpreters/DatabaseAndTableWithAlias.h rename to src/Interpreters/DatabaseAndTableWithAlias.h index 92d6d40b455..e28d76b12f3 100644 --- a/dbms/src/Interpreters/DatabaseAndTableWithAlias.h +++ b/src/Interpreters/DatabaseAndTableWithAlias.h @@ -49,61 +49,51 @@ struct TableWithColumnNames { DatabaseAndTableWithAlias table; Names columns; - Names hidden_columns; + Names hidden_columns; /// Not general columns like MATERIALIZED and ALIAS. They are omitted in * and t.* results. TableWithColumnNames(const DatabaseAndTableWithAlias & table_, const Names & columns_) : table(table_) , columns(columns_) - {} + { + columns_set.insert(columns.begin(), columns.end()); + } TableWithColumnNames(const DatabaseAndTableWithAlias table_, Names && columns_, Names && hidden_columns_) : table(table_) , columns(columns_) , hidden_columns(hidden_columns_) - {} - - bool hasColumn(const String & name) const { - if (columns_set.empty()) - { - columns_set.insert(columns.begin(), columns.end()); - columns_set.insert(hidden_columns.begin(), hidden_columns.end()); - } - - return columns_set.count(name); + columns_set.insert(columns.begin(), columns.end()); + columns_set.insert(hidden_columns.begin(), hidden_columns.end()); } + bool hasColumn(const String & name) const { return columns_set.count(name); } + private: - mutable NameSet columns_set; + NameSet columns_set; }; struct TableWithColumnNamesAndTypes { DatabaseAndTableWithAlias table; NamesAndTypesList columns; - NamesAndTypesList hidden_columns; + NamesAndTypesList hidden_columns; /// Not general columns like MATERIALIZED and ALIAS. They are omitted in * and t.* results. 
TableWithColumnNamesAndTypes(const DatabaseAndTableWithAlias & table_, const NamesAndTypesList & columns_) : table(table_) , columns(columns_) - {} - - bool hasColumn(const String & name) const { - if (names.empty()) - { - for (auto & col : columns) - names.insert(col.name); - for (auto & col : hidden_columns) - names.insert(col.name); - } - - return names.count(name); + for (auto & col : columns) + names.insert(col.name); } + bool hasColumn(const String & name) const { return names.count(name); } + void addHiddenColumns(const NamesAndTypesList & addition) { hidden_columns.insert(hidden_columns.end(), addition.begin(), addition.end()); + for (auto & col : addition) + names.insert(col.name); } TableWithColumnNames removeTypes() const @@ -122,7 +112,7 @@ struct TableWithColumnNamesAndTypes } private: - mutable NameSet names; + NameSet names; }; std::vector getDatabaseAndTables(const ASTSelectQuery & select_query, const String & current_database); diff --git a/dbms/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp similarity index 97% rename from dbms/src/Interpreters/DatabaseCatalog.cpp rename to src/Interpreters/DatabaseCatalog.cpp index 6c860029148..3a2adc15355 100644 --- a/dbms/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -244,14 +244,17 @@ DatabasePtr DatabaseCatalog::detachDatabase(const String & database_name, bool d if (database_name == TEMPORARY_DATABASE) throw Exception("Cannot detach database with temporary tables.", ErrorCodes::DATABASE_ACCESS_DENIED); - std::lock_guard lock{databases_mutex}; - assertDatabaseExistsUnlocked(database_name); - auto db = databases.find(database_name)->second; + std::shared_ptr db; + { + std::lock_guard lock{databases_mutex}; + assertDatabaseExistsUnlocked(database_name); + db = databases.find(database_name)->second; - if (check_empty && !db->empty(*global_context)) - throw Exception("New table appeared in database being dropped or detached. Try again.", ErrorCodes::DATABASE_NOT_EMPTY); + if (check_empty && !db->empty(*global_context)) + throw Exception("New table appeared in database being dropped or detached. 
Try again.", ErrorCodes::DATABASE_NOT_EMPTY); - databases.erase(database_name); + databases.erase(database_name); + } db->shutdown(); diff --git a/dbms/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h similarity index 100% rename from dbms/src/Interpreters/DatabaseCatalog.h rename to src/Interpreters/DatabaseCatalog.h diff --git a/dbms/src/Interpreters/EmbeddedDictionaries.cpp b/src/Interpreters/EmbeddedDictionaries.cpp similarity index 100% rename from dbms/src/Interpreters/EmbeddedDictionaries.cpp rename to src/Interpreters/EmbeddedDictionaries.cpp diff --git a/dbms/src/Interpreters/EmbeddedDictionaries.h b/src/Interpreters/EmbeddedDictionaries.h similarity index 100% rename from dbms/src/Interpreters/EmbeddedDictionaries.h rename to src/Interpreters/EmbeddedDictionaries.h diff --git a/dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp rename to src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp diff --git a/dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.h b/src/Interpreters/ExecuteScalarSubqueriesVisitor.h similarity index 100% rename from dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.h rename to src/Interpreters/ExecuteScalarSubqueriesVisitor.h diff --git a/dbms/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp similarity index 99% rename from dbms/src/Interpreters/ExpressionActions.cpp rename to src/Interpreters/ExpressionActions.cpp index a94638dd22c..435e493ffa9 100644 --- a/dbms/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include @@ -147,7 +147,7 @@ ExpressionAction ExpressionAction::arrayJoin(const NameSet & array_joined_column return a; } -ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr table_join, JoinPtr join) +ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr table_join, JoinPtr join) { ExpressionAction a; a.type = JOIN; @@ -1206,7 +1206,7 @@ bool ExpressionAction::operator==(const ExpressionAction & other) const && result_name == other.result_name && argument_names == other.argument_names && same_array_join - && AnalyzedJoin::sameJoin(table_join.get(), other.table_join.get()) + && TableJoin::sameJoin(table_join.get(), other.table_join.get()) && projection == other.projection && is_function_compiled == other.is_function_compiled; } diff --git a/dbms/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h similarity index 98% rename from dbms/src/Interpreters/ExpressionActions.h rename to src/Interpreters/ExpressionActions.h index f36e8b89a9f..0c3027dfbab 100644 --- a/dbms/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -22,7 +22,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -class AnalyzedJoin; +class TableJoin; class IJoin; using JoinPtr = std::shared_ptr; @@ -97,7 +97,7 @@ public: std::shared_ptr array_join; /// For JOIN - std::shared_ptr table_join; + std::shared_ptr table_join; JoinPtr join; /// For PROJECT. 
@@ -114,7 +114,7 @@ public: static ExpressionAction project(const Names & projected_columns_); static ExpressionAction addAliases(const NamesWithAliases & aliased_columns_); static ExpressionAction arrayJoin(const NameSet & array_joined_columns, bool array_join_is_left, const Context & context); - static ExpressionAction ordinaryJoin(std::shared_ptr<AnalyzedJoin> table_join, JoinPtr join); + static ExpressionAction ordinaryJoin(std::shared_ptr<TableJoin> table_join, JoinPtr join); /// Which columns necessary to perform this action. Names getNeededColumns() const; diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp similarity index 98% rename from dbms/src/Interpreters/ExpressionAnalyzer.cpp rename to src/Interpreters/ExpressionAnalyzer.cpp index d1be66df217..bb2553c76a4 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -27,9 +27,9 @@ #include #include #include -#include <Interpreters/AnalyzedJoin.h> +#include <Interpreters/TableJoin.h> #include -#include <Interpreters/Join.h> +#include <Interpreters/HashJoin.h> #include #include @@ -291,7 +291,7 @@ void SelectQueryExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr auto interpreter_subquery = interpretSubquery(subquery_or_table_name, context, {}, query_options); BlockIO res = interpreter_subquery->execute(); - SetPtr set = std::make_shared<Set>(settings.size_limits_for_set, true); + SetPtr set = std::make_shared<Set>(settings.size_limits_for_set, true, context.getSettingsRef().transform_null_in); set->setHeader(res.in->getHeader()); res.in->readPrefix(); @@ -502,7 +502,7 @@ bool SelectQueryExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, b return true; } -static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_element, std::shared_ptr<AnalyzedJoin> analyzed_join, +static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_element, std::shared_ptr<TableJoin> analyzed_join, const Context & context) { const auto & table_to_join = join_element.table_expression->as<ASTTableExpression &>(); @@ -524,19 +524,19 @@ static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElem return {}; } -static ExpressionActionsPtr createJoinedBlockActions(const Context & context, const AnalyzedJoin & analyzed_join) +static ExpressionActionsPtr createJoinedBlockActions(const Context & context, const TableJoin & analyzed_join) { ASTPtr expression_list = analyzed_join.rightKeysList(); auto syntax_result = SyntaxAnalyzer(context).analyze(expression_list, analyzed_join.columnsFromJoinedTable()); return ExpressionAnalyzer(expression_list, syntax_result, context).getActions(true, false); } -static std::shared_ptr<IJoin> makeJoin(std::shared_ptr<AnalyzedJoin> analyzed_join, const Block & sample_block) +static std::shared_ptr<IJoin> makeJoin(std::shared_ptr<TableJoin> analyzed_join, const Block & sample_block) { bool allow_merge_join = analyzed_join->allowMergeJoin(); if (analyzed_join->forceHashJoin() || (analyzed_join->preferMergeJoin() && !allow_merge_join)) - return std::make_shared<Join>(analyzed_join, sample_block); + return std::make_shared<HashJoin>(analyzed_join, sample_block); else if (analyzed_join->forceMergeJoin() || (analyzed_join->preferMergeJoin() && allow_merge_join)) return std::make_shared<MergeJoin>(analyzed_join, sample_block); return std::make_shared<JoinSwitcher>(analyzed_join, sample_block); @@ -963,13 +963,15 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( auto finalize_chain = [&](ExpressionActionsChain & chain) { + chain.finalize(); + if (!finalized) { - chain.finalize(); finalize(chain, context, where_step_num); - chain.clear(); + finalized = true; } - finalized = true; + + chain.clear(); }; { diff --git
a/dbms/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h similarity index 99% rename from dbms/src/Interpreters/ExpressionAnalyzer.h rename to src/Interpreters/ExpressionAnalyzer.h index 61a3c7dccba..4322a897378 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -123,7 +123,7 @@ protected: SyntaxAnalyzerResultPtr syntax; const ConstStoragePtr & storage() const { return syntax->storage; } /// The main table in FROM clause, if exists. - const AnalyzedJoin & analyzedJoin() const { return *syntax->analyzed_join; } + const TableJoin & analyzedJoin() const { return *syntax->analyzed_join; } const NamesAndTypesList & sourceColumns() const { return syntax->required_source_columns; } const std::vector & aggregates() const { return syntax->aggregates; } NamesAndTypesList sourceWithJoinedColumns() const; diff --git a/dbms/src/Interpreters/ExpressionJIT.cpp b/src/Interpreters/ExpressionJIT.cpp similarity index 100% rename from dbms/src/Interpreters/ExpressionJIT.cpp rename to src/Interpreters/ExpressionJIT.cpp diff --git a/dbms/src/Interpreters/ExpressionJIT.h b/src/Interpreters/ExpressionJIT.h similarity index 100% rename from dbms/src/Interpreters/ExpressionJIT.h rename to src/Interpreters/ExpressionJIT.h diff --git a/dbms/src/Interpreters/ExternalDictionariesLoader.cpp b/src/Interpreters/ExternalDictionariesLoader.cpp similarity index 100% rename from dbms/src/Interpreters/ExternalDictionariesLoader.cpp rename to src/Interpreters/ExternalDictionariesLoader.cpp diff --git a/dbms/src/Interpreters/ExternalDictionariesLoader.h b/src/Interpreters/ExternalDictionariesLoader.h similarity index 78% rename from dbms/src/Interpreters/ExternalDictionariesLoader.h rename to src/Interpreters/ExternalDictionariesLoader.h index 68913ffa166..4a54a9963e7 100644 --- a/dbms/src/Interpreters/ExternalDictionariesLoader.h +++ b/src/Interpreters/ExternalDictionariesLoader.h @@ -23,9 +23,12 @@ public: return std::static_pointer_cast(load(name)); } - DictPtr tryGetDictionary(const std::string & name) const + DictPtr tryGetDictionary(const std::string & name, bool load) const { - return std::static_pointer_cast(tryLoad(name)); + if (load) + return std::static_pointer_cast(tryLoad(name)); + else + return std::static_pointer_cast(getCurrentLoadResult(name).object); } static void resetAll(); diff --git a/dbms/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp similarity index 99% rename from dbms/src/Interpreters/ExternalLoader.cpp rename to src/Interpreters/ExternalLoader.cpp index 41358da4965..893d9aa61f9 100644 --- a/dbms/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -427,8 +427,6 @@ public: if (configs == new_configs) return; - LOG_TRACE(log, "Configuration of reloadable objects has changed"); - configs = new_configs; std::vector removed_names; @@ -437,7 +435,6 @@ public: auto new_config_it = new_configs->find(name); if (new_config_it == new_configs->end()) { - LOG_TRACE(log, "Reloadable object '" << name << "' is removed"); removed_names.emplace_back(name); } else @@ -448,8 +445,6 @@ public: if (!config_is_same) { /// Configuration has been changed. - LOG_TRACE(log, "Configuration has changed for reloadable " - "object '" << info.name << "'"); info.object_config = new_config; if (info.triedToLoad()) @@ -457,7 +452,7 @@ public: /// The object has been tried to load before, so it is currently in use or was in use /// and we should try to reload it with the new config. 
LOG_TRACE(log, "Will reload '" << name << "'" - " because its configuration has changed and" + " because its configuration has been changed and" " there were attempts to load it before"); startLoading(info, true); } @@ -473,7 +468,7 @@ public: Info & info = infos.emplace(name, Info{name, config}).first->second; if (always_load_everything) { - LOG_TRACE(log, "Will reload new object '" << name << "'" + LOG_TRACE(log, "Will load '" << name << "'" " because always_load_everything flag is set."); startLoading(info); } @@ -482,7 +477,15 @@ public: /// Remove from the map those objects which were removed from the configuration. for (const String & name : removed_names) - infos.erase(name); + { + if (auto it = infos.find(name); it != infos.end()) + { + const auto & info = it->second; + if (info.loaded() || info.isLoading()) + LOG_TRACE(log, "Unloading '" << name << "' because its configuration has been removed or detached"); + infos.erase(it); + } + } /// Maybe we have just added new objects which require to be loaded /// or maybe we have just removed object which were been loaded, diff --git a/dbms/src/Interpreters/ExternalLoader.h b/src/Interpreters/ExternalLoader.h similarity index 100% rename from dbms/src/Interpreters/ExternalLoader.h rename to src/Interpreters/ExternalLoader.h diff --git a/dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp b/src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp similarity index 100% rename from dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp rename to src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp diff --git a/dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.h b/src/Interpreters/ExternalLoaderDatabaseConfigRepository.h similarity index 100% rename from dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.h rename to src/Interpreters/ExternalLoaderDatabaseConfigRepository.h diff --git a/dbms/src/Interpreters/ExternalLoaderTempConfigRepository.cpp b/src/Interpreters/ExternalLoaderTempConfigRepository.cpp similarity index 100% rename from dbms/src/Interpreters/ExternalLoaderTempConfigRepository.cpp rename to src/Interpreters/ExternalLoaderTempConfigRepository.cpp diff --git a/dbms/src/Interpreters/ExternalLoaderTempConfigRepository.h b/src/Interpreters/ExternalLoaderTempConfigRepository.h similarity index 100% rename from dbms/src/Interpreters/ExternalLoaderTempConfigRepository.h rename to src/Interpreters/ExternalLoaderTempConfigRepository.h diff --git a/dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.cpp b/src/Interpreters/ExternalLoaderXMLConfigRepository.cpp similarity index 100% rename from dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.cpp rename to src/Interpreters/ExternalLoaderXMLConfigRepository.cpp diff --git a/dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.h b/src/Interpreters/ExternalLoaderXMLConfigRepository.h similarity index 100% rename from dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.h rename to src/Interpreters/ExternalLoaderXMLConfigRepository.h diff --git a/dbms/src/Interpreters/ExternalModelsLoader.cpp b/src/Interpreters/ExternalModelsLoader.cpp similarity index 100% rename from dbms/src/Interpreters/ExternalModelsLoader.cpp rename to src/Interpreters/ExternalModelsLoader.cpp diff --git a/dbms/src/Interpreters/ExternalModelsLoader.h b/src/Interpreters/ExternalModelsLoader.h similarity index 100% rename from dbms/src/Interpreters/ExternalModelsLoader.h rename to src/Interpreters/ExternalModelsLoader.h diff --git 
a/dbms/src/Interpreters/ExtractExpressionInfoVisitor.cpp b/src/Interpreters/ExtractExpressionInfoVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/ExtractExpressionInfoVisitor.cpp rename to src/Interpreters/ExtractExpressionInfoVisitor.cpp diff --git a/dbms/src/Interpreters/ExtractExpressionInfoVisitor.h b/src/Interpreters/ExtractExpressionInfoVisitor.h similarity index 100% rename from dbms/src/Interpreters/ExtractExpressionInfoVisitor.h rename to src/Interpreters/ExtractExpressionInfoVisitor.h diff --git a/dbms/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp similarity index 100% rename from dbms/src/Interpreters/FillingRow.cpp rename to src/Interpreters/FillingRow.cpp diff --git a/dbms/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h similarity index 100% rename from dbms/src/Interpreters/FillingRow.h rename to src/Interpreters/FillingRow.h diff --git a/dbms/src/Interpreters/GetAggregatesVisitor.h b/src/Interpreters/GetAggregatesVisitor.h similarity index 100% rename from dbms/src/Interpreters/GetAggregatesVisitor.h rename to src/Interpreters/GetAggregatesVisitor.h diff --git a/dbms/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h similarity index 94% rename from dbms/src/Interpreters/GlobalSubqueriesVisitor.h rename to src/Interpreters/GlobalSubqueriesVisitor.h index e577219629c..78d98805814 100644 --- a/dbms/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -166,7 +167,19 @@ private: { if (func.name == "globalIn" || func.name == "globalNotIn") { - data.addExternalStorage(func.arguments->children[1]); + ASTPtr & ast = func.arguments->children[1]; + + /// Literal can use regular IN + if (ast->as()) + { + if (func.name == "globalIn") + func.name = "in"; + else + func.name = "notIn"; + return; + } + + data.addExternalStorage(ast); data.has_global_subqueries = true; } } diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp new file mode 100644 index 00000000000..b8da03acb8b --- /dev/null +++ b/src/Interpreters/HashJoin.cpp @@ -0,0 +1,1553 @@ +#include + +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_TYPE_OF_FIELD; + extern const int NOT_IMPLEMENTED; + extern const int UNSUPPORTED_JOIN_KEYS; + extern const int LOGICAL_ERROR; + extern const int SET_SIZE_LIMIT_EXCEEDED; + extern const int TYPE_MISMATCH; +} + +namespace +{ + +struct NotProcessedCrossJoin : public ExtraBlock +{ + size_t left_position; + size_t right_block; +}; + +} + +static ColumnPtr filterWithBlanks(ColumnPtr src_column, const IColumn::Filter & filter, bool inverse_filter = false) +{ + ColumnPtr column = src_column->convertToFullColumnIfConst(); + MutableColumnPtr mut_column = column->cloneEmpty(); + mut_column->reserve(column->size()); + + if (inverse_filter) + { + for (size_t row = 0; row < filter.size(); ++row) + { + if (filter[row]) + mut_column->insertDefault(); + else + mut_column->insertFrom(*column, row); + } + } + else + { + for (size_t row = 0; row < filter.size(); ++row) + { + if (filter[row]) + mut_column->insertFrom(*column, row); + else + mut_column->insertDefault(); + } + } + + return mut_column; +} + +static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool 
nullable) +{ + if (nullable) + { + JoinCommon::convertColumnToNullable(column); + } + else + { + /// We have to replace values masked by NULLs with defaults. + if (column.column) + if (auto * nullable_column = checkAndGetColumn(*column.column)) + column.column = filterWithBlanks(column.column, nullable_column->getNullMapColumn().getData(), true); + + JoinCommon::removeColumnNullability(column); + } + + return std::move(column); +} + +static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool nullable, const ColumnUInt8 & negative_null_map) +{ + if (nullable) + { + JoinCommon::convertColumnToNullable(column); + if (column.type->isNullable() && !negative_null_map.empty()) + { + MutableColumnPtr mutable_column = (*std::move(column.column)).mutate(); + assert_cast(*mutable_column).applyNegatedNullMap(negative_null_map); + column.column = std::move(mutable_column); + } + } + else + JoinCommon::removeColumnNullability(column); + + return std::move(column); +} + +static void changeNullability(MutableColumnPtr & mutable_column) +{ + ColumnPtr column = std::move(mutable_column); + if (auto * nullable = checkAndGetColumn(*column)) + column = nullable->getNestedColumnPtr(); + else + column = makeNullable(column); + + mutable_column = (*std::move(column)).mutate(); +} + +static ColumnPtr emptyNotNullableClone(const ColumnPtr & column) +{ + if (column->isNullable()) + return checkAndGetColumn(*column)->getNestedColumnPtr()->cloneEmpty(); + return column->cloneEmpty(); +} + +static ColumnPtr changeLowCardinality(const ColumnPtr & column, const ColumnPtr & dst_sample) +{ + if (dst_sample->lowCardinality()) + { + MutableColumnPtr lc = dst_sample->cloneEmpty(); + typeid_cast(*lc).insertRangeFromFullColumn(*column, 0, column->size()); + return lc; + } + + return column->convertToFullColumnIfLowCardinality(); +} + +/// Change both column nullability and low cardinality +static void changeColumnRepresentation(const ColumnPtr & src_column, ColumnPtr & dst_column) +{ + bool nullable_src = src_column->isNullable(); + bool nullable_dst = dst_column->isNullable(); + + ColumnPtr dst_not_null = emptyNotNullableClone(dst_column); + bool lowcard_src = emptyNotNullableClone(src_column)->lowCardinality(); + bool lowcard_dst = dst_not_null->lowCardinality(); + bool change_lowcard = (!lowcard_src && lowcard_dst) || (lowcard_src && !lowcard_dst); + + if (nullable_src && !nullable_dst) + { + auto * nullable = checkAndGetColumn(*src_column); + if (change_lowcard) + dst_column = changeLowCardinality(nullable->getNestedColumnPtr(), dst_column); + else + dst_column = nullable->getNestedColumnPtr(); + } + else if (!nullable_src && nullable_dst) + { + if (change_lowcard) + dst_column = makeNullable(changeLowCardinality(src_column, dst_not_null)); + else + dst_column = makeNullable(src_column); + } + else /// same nullability + { + if (change_lowcard) + { + if (auto * nullable = checkAndGetColumn(*src_column)) + { + dst_column = makeNullable(changeLowCardinality(nullable->getNestedColumnPtr(), dst_not_null)); + assert_cast(*dst_column->assumeMutable()).applyNullMap(nullable->getNullMapColumn()); + } + else + dst_column = changeLowCardinality(src_column, dst_not_null); + } + else + dst_column = src_column; + } +} + + +HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_) + : table_join(table_join_) + , kind(table_join->kind()) + , strictness(table_join->strictness()) + , key_names_right(table_join->keyNamesRight()) + , 
nullable_right_side(table_join->forceNullableRight()) + , nullable_left_side(table_join->forceNullableLeft()) + , any_take_last_row(any_take_last_row_) + , asof_inequality(table_join->getAsofInequality()) + , data(std::make_shared()) + , log(&Logger::get("HashJoin")) +{ + setSampleBlock(right_sample_block); +} + + +HashJoin::Type HashJoin::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes) +{ + size_t keys_size = key_columns.size(); + + if (keys_size == 0) + return Type::CROSS; + + bool all_fixed = true; + size_t keys_bytes = 0; + key_sizes.resize(keys_size); + for (size_t j = 0; j < keys_size; ++j) + { + if (!key_columns[j]->isFixedAndContiguous()) + { + all_fixed = false; + break; + } + key_sizes[j] = key_columns[j]->sizeOfValueIfFixed(); + keys_bytes += key_sizes[j]; + } + + /// If there is one numeric key that fits in 64 bits + if (keys_size == 1 && key_columns[0]->isNumeric()) + { + size_t size_of_field = key_columns[0]->sizeOfValueIfFixed(); + if (size_of_field == 1) + return Type::key8; + if (size_of_field == 2) + return Type::key16; + if (size_of_field == 4) + return Type::key32; + if (size_of_field == 8) + return Type::key64; + if (size_of_field == 16) + return Type::keys128; + throw Exception("Logical error: numeric column has sizeOfField not in 1, 2, 4, 8, 16.", ErrorCodes::LOGICAL_ERROR); + } + + /// If the keys fit in N bits, we will use a hash table for N-bit-packed keys + if (all_fixed && keys_bytes <= 16) + return Type::keys128; + if (all_fixed && keys_bytes <= 32) + return Type::keys256; + + /// If there is single string key, use hash table of it's values. + if (keys_size == 1 + && (typeid_cast(key_columns[0]) + || (isColumnConst(*key_columns[0]) && typeid_cast(&assert_cast(key_columns[0])->getDataColumn())))) + return Type::key_string; + + if (keys_size == 1 && typeid_cast(key_columns[0])) + return Type::key_fixed_string; + + /// Otherwise, will use set of cryptographic hashes of unambiguously serialized values. 
+ return Type::hashed; +} + +static const IColumn * extractAsofColumn(const ColumnRawPtrs & key_columns) +{ + return key_columns.back(); +} + +template +static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes) +{ + if constexpr (is_asof_join) + { + auto key_column_copy = key_columns; + auto key_size_copy = key_sizes; + key_column_copy.pop_back(); + key_size_copy.pop_back(); + return KeyGetter(key_column_copy, key_size_copy, nullptr); + } + else + return KeyGetter(key_columns, key_sizes, nullptr); +} + +template +struct KeyGetterForTypeImpl; + +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodOneNumber; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodOneNumber; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodOneNumber; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodOneNumber; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodString; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodFixedString; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodKeysFixed; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodKeysFixed; +}; +template struct KeyGetterForTypeImpl +{ + using Type = ColumnsHashing::HashMethodHashed; +}; + +template +struct KeyGetterForType +{ + using Value = typename Data::value_type; + using Mapped_t = typename Data::mapped_type; + using Mapped = std::conditional_t, const Mapped_t, Mapped_t>; + using Type = typename KeyGetterForTypeImpl::Type; +}; + + +void HashJoin::init(Type type_) +{ + data->type = type_; + + if (kind == ASTTableJoin::Kind::Cross) + return; + joinDispatchInit(kind, strictness, data->maps); + joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { map.create(data->type); }); +} + +size_t HashJoin::getTotalRowCount() const +{ + size_t res = 0; + + if (data->type == Type::CROSS) + { + for (const auto & block : data->blocks) + res += block.rows(); + } + else + { + joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { res += map.getTotalRowCount(data->type); }); + } + + return res; +} + +size_t HashJoin::getTotalByteCount() const +{ + size_t res = 0; + + if (data->type == Type::CROSS) + { + for (const auto & block : data->blocks) + res += block.bytes(); + } + else + { + joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { res += map.getTotalByteCountImpl(data->type); }); + res += data->pool.size(); + } + + return res; +} + +void HashJoin::setSampleBlock(const Block & block) +{ + /// You have to restore this lock if you call the function outside of ctor. 
+ //std::unique_lock lock(rwlock); + + LOG_DEBUG(log, "setSampleBlock: " << block.dumpStructure()); + + if (!empty()) + return; + + JoinCommon::splitAdditionalColumns(block, key_names_right, right_table_keys, sample_block_with_columns_to_add); + + initRequiredRightKeys(); + + JoinCommon::removeLowCardinalityInplace(right_table_keys); + initRightBlockStructure(data->sample_block); + + ColumnRawPtrs key_columns = JoinCommon::extractKeysForJoin(right_table_keys, key_names_right); + + JoinCommon::createMissedColumns(sample_block_with_columns_to_add); + if (nullable_right_side) + JoinCommon::convertColumnsToNullable(sample_block_with_columns_to_add); + + if (strictness == ASTTableJoin::Strictness::Asof) + { + if (kind != ASTTableJoin::Kind::Left and kind != ASTTableJoin::Kind::Inner) + throw Exception("ASOF only supports LEFT and INNER as base joins", ErrorCodes::NOT_IMPLEMENTED); + + const IColumn * asof_column = key_columns.back(); + size_t asof_size; + + asof_type = AsofRowRefs::getTypeSize(asof_column, asof_size); + if (!asof_type) + { + std::string msg = "ASOF join not supported for type: "; + msg += asof_column->getFamilyName(); + throw Exception(msg, ErrorCodes::BAD_TYPE_OF_FIELD); + } + + key_columns.pop_back(); + + if (key_columns.empty()) + throw Exception("ASOF join cannot be done without a joining column", ErrorCodes::LOGICAL_ERROR); + + /// this is going to set up the appropriate hash table for the direct lookup part of the join + /// However, this does not depend on the size of the asof join key (as that goes into the BST) + /// Therefore, add it back in such that it can be extracted appropriately from the full stored + /// key_columns and key_sizes + init(chooseMethod(key_columns, key_sizes)); + key_sizes.push_back(asof_size); + } + else + { + /// Choose data structure to use for JOIN. + init(chooseMethod(key_columns, key_sizes)); + } +} + +namespace +{ + /// Inserting an element into a hash table of the form `key -> reference to a string`, which will then be used by JOIN. + template + struct Inserter + { + static ALWAYS_INLINE void insertOne(const HashJoin & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, + Arena & pool) + { + auto emplace_result = key_getter.emplaceKey(map, i, pool); + + if (emplace_result.isInserted() || join.anyTakeLastRow()) + new (&emplace_result.getMapped()) typename Map::mapped_type(stored_block, i); + } + + static ALWAYS_INLINE void insertAll(const HashJoin &, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool) + { + auto emplace_result = key_getter.emplaceKey(map, i, pool); + + if (emplace_result.isInserted()) + new (&emplace_result.getMapped()) typename Map::mapped_type(stored_block, i); + else + { + /// The first element of the list is stored in the value of the hash table, the rest in the pool. 
+ emplace_result.getMapped().insert({stored_block, i}, pool); + } + } + + static ALWAYS_INLINE void insertAsof(HashJoin & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool, + const IColumn * asof_column) + { + auto emplace_result = key_getter.emplaceKey(map, i, pool); + typename Map::mapped_type * time_series_map = &emplace_result.getMapped(); + + if (emplace_result.isInserted()) + time_series_map = new (time_series_map) typename Map::mapped_type(join.getAsofType()); + time_series_map->insert(join.getAsofType(), asof_column, stored_block, i); + } + }; + + + template + void NO_INLINE insertFromBlockImplTypeCase( + HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) + { + [[maybe_unused]] constexpr bool mapped_one = std::is_same_v || + std::is_same_v; + constexpr bool is_asof_join = STRICTNESS == ASTTableJoin::Strictness::Asof; + + const IColumn * asof_column [[maybe_unused]] = nullptr; + if constexpr (is_asof_join) + asof_column = extractAsofColumn(key_columns); + + auto key_getter = createKeyGetter(key_columns, key_sizes); + + for (size_t i = 0; i < rows; ++i) + { + if (has_null_map && (*null_map)[i]) + continue; + + if constexpr (is_asof_join) + Inserter::insertAsof(join, map, key_getter, stored_block, i, pool, asof_column); + else if constexpr (mapped_one) + Inserter::insertOne(join, map, key_getter, stored_block, i, pool); + else + Inserter::insertAll(join, map, key_getter, stored_block, i, pool); + } + } + + + template + void insertFromBlockImplType( + HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) + { + if (null_map) + insertFromBlockImplTypeCase(join, map, rows, key_columns, key_sizes, stored_block, null_map, pool); + else + insertFromBlockImplTypeCase(join, map, rows, key_columns, key_sizes, stored_block, null_map, pool); + } + + + template + void insertFromBlockImpl( + HashJoin & join, HashJoin::Type type, Maps & maps, size_t rows, const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) + { + switch (type) + { + case HashJoin::Type::EMPTY: break; + case HashJoin::Type::CROSS: break; /// Do nothing. We have already saved block, and it is enough. + + #define M(TYPE) \ + case HashJoin::Type::TYPE: \ + insertFromBlockImplType>::Type>(\ + join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, pool); \ + break; + APPLY_FOR_JOIN_VARIANTS(M) + #undef M + } + } +} + +void HashJoin::initRequiredRightKeys() +{ + const Names & left_keys = table_join->keyNamesLeft(); + const Names & right_keys = table_join->keyNamesRight(); + NameSet required_keys(table_join->requiredRightKeys().begin(), table_join->requiredRightKeys().end()); + + for (size_t i = 0; i < right_keys.size(); ++i) + { + const String & right_key_name = right_keys[i]; + + if (required_keys.count(right_key_name) && !required_right_keys.has(right_key_name)) + { + const auto & right_key = right_table_keys.getByName(right_key_name); + required_right_keys.insert(right_key); + required_right_keys_sources.push_back(left_keys[i]); + } + } +} + +void HashJoin::initRightBlockStructure(Block & saved_block_sample) +{ + /// We could remove key columns for LEFT | INNER HashJoin but we should keep them for JoinSwitcher (if any). 
+ bool save_key_columns = !table_join->forceHashJoin() || isRightOrFull(kind); + if (save_key_columns) + { + saved_block_sample = right_table_keys.cloneEmpty(); + } + else if (strictness == ASTTableJoin::Strictness::Asof) + { + /// Save ASOF key + saved_block_sample.insert(right_table_keys.safeGetByPosition(right_table_keys.columns() - 1)); + } + + /// Save non key columns + for (auto & column : sample_block_with_columns_to_add) + saved_block_sample.insert(column); + + if (nullable_right_side) + JoinCommon::convertColumnsToNullable(saved_block_sample, (isFull(kind) ? right_table_keys.columns() : 0)); +} + +Block HashJoin::structureRightBlock(const Block & block) const +{ + Block structured_block; + for (auto & sample_column : savedBlockSample().getColumnsWithTypeAndName()) + { + ColumnWithTypeAndName column = block.getByName(sample_column.name); + if (sample_column.column->isNullable()) + JoinCommon::convertColumnToNullable(column); + structured_block.insert(column); + } + + return structured_block; +} + +bool HashJoin::addJoinedBlock(const Block & source_block, bool check_limits) +{ + if (empty()) + throw Exception("Logical error: HashJoin was not initialized", ErrorCodes::LOGICAL_ERROR); + + /// There's no optimization for right side const columns. Remove constness if any. + Block block = materializeBlock(source_block); + size_t rows = block.rows(); + + ColumnRawPtrs key_columns = JoinCommon::materializeColumnsInplace(block, key_names_right); + + /// We will insert to the map only keys, where all components are not NULL. + ConstNullMapPtr null_map{}; + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + + /// If RIGHT or FULL save blocks with nulls for NonJoinedBlockInputStream + UInt8 save_nullmap = 0; + if (isRightOrFull(kind) && null_map) + { + for (size_t i = 0; !save_nullmap && i < null_map->size(); ++i) + save_nullmap |= (*null_map)[i]; + } + + Block structured_block = structureRightBlock(block); + size_t total_rows = 0; + size_t total_bytes = 0; + + { + std::unique_lock lock(data->rwlock); + + data->blocks.emplace_back(std::move(structured_block)); + Block * stored_block = &data->blocks.back(); + + if (rows) + data->empty = false; + + if (kind != ASTTableJoin::Kind::Cross) + { + joinDispatch(kind, strictness, data->maps, [&](auto, auto strictness_, auto & map) + { + insertFromBlockImpl(*this, data->type, map, rows, key_columns, key_sizes, stored_block, null_map, data->pool); + }); + } + + if (save_nullmap) + data->blocks_nullmaps.emplace_back(stored_block, null_map_holder); + + if (!check_limits) + return true; + + /// TODO: Do not calculate them every time + total_rows = getTotalRowCount(); + total_bytes = getTotalByteCount(); + } + + return table_join->sizeLimits().check(total_rows, total_bytes, "JOIN", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED); +} + + +namespace +{ + +class AddedColumns +{ +public: + using TypeAndNames = std::vector>; + + AddedColumns(const Block & sample_block_with_columns_to_add, + const Block & block_with_columns_to_add, + const Block & block, + const Block & saved_block_sample, + const ColumnsWithTypeAndName & extras, + const HashJoin & join_, + const ColumnRawPtrs & key_columns_, + const Sizes & key_sizes_) + : join(join_) + , key_columns(key_columns_) + , key_sizes(key_sizes_) + , rows_to_add(block.rows()) + , need_filter(false) + { + size_t num_columns_to_add = sample_block_with_columns_to_add.columns(); + + columns.reserve(num_columns_to_add); + type_name.reserve(num_columns_to_add); + 
right_indexes.reserve(num_columns_to_add); + + for (auto & src_column : block_with_columns_to_add) + { + /// Don't insert column if it's in left block + if (!block.has(src_column.name)) + addColumn(src_column); + } + + for (auto & extra : extras) + addColumn(extra); + + for (auto & tn : type_name) + right_indexes.push_back(saved_block_sample.getPositionByName(tn.second)); + } + + size_t size() const { return columns.size(); } + + ColumnWithTypeAndName moveColumn(size_t i) + { + return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].first, type_name[i].second); + } + + template + void appendFromBlock(const Block & block, size_t row_num) + { + if constexpr (has_defaults) + applyLazyDefaults(); + + for (size_t j = 0; j < right_indexes.size(); ++j) + columns[j]->insertFrom(*block.getByPosition(right_indexes[j]).column, row_num); + } + + void appendDefaultRow() + { + ++lazy_defaults_count; + } + + void applyLazyDefaults() + { + if (lazy_defaults_count) + { + for (size_t j = 0; j < right_indexes.size(); ++j) + columns[j]->insertManyDefaults(lazy_defaults_count); + lazy_defaults_count = 0; + } + } + + const HashJoin & join; + const ColumnRawPtrs & key_columns; + const Sizes & key_sizes; + size_t rows_to_add; + std::unique_ptr offsets_to_replicate; + bool need_filter; + +private: + TypeAndNames type_name; + MutableColumns columns; + std::vector right_indexes; + size_t lazy_defaults_count = 0; + + void addColumn(const ColumnWithTypeAndName & src_column) + { + columns.push_back(src_column.column->cloneEmpty()); + columns.back()->reserve(src_column.column->size()); + type_name.emplace_back(src_column.type, src_column.name); + } +}; + +template +void addFoundRowAll(const typename Map::mapped_type & mapped, AddedColumns & added, IColumn::Offset & current_offset) +{ + if constexpr (add_missing) + added.applyLazyDefaults(); + + for (auto it = mapped.begin(); it.ok(); ++it) + { + added.appendFromBlock(*it->block, it->row_num); + ++current_offset; + } +}; + +template +void addNotFoundRow(AddedColumns & added [[maybe_unused]], IColumn::Offset & current_offset [[maybe_unused]]) +{ + if constexpr (add_missing) + { + added.appendDefaultRow(); + if constexpr (need_offset) + ++current_offset; + } +} + +template +void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]) +{ + if constexpr (need_filter) + filter[pos] = 1; +} + + +/// Joins right table columns which indexes are present in right_indexes using specified map. +/// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). 
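appendDefaultRow()/applyLazyDefaults() above avoid appending default values one row at a time: non-matching rows are merely counted, and the whole run is materialized in one batched call just before the next matched row is appended. The same idea on a plain std::vector (`LazyDefaultColumn` is illustrative, not the real column interface):

```cpp
#include <cstddef>
#include <string>
#include <vector>

/// Count runs of default rows and materialize each run in a single call,
/// preserving row order relative to the real values.
struct LazyDefaultColumn
{
    std::vector<std::string> data;
    size_t pending_defaults = 0;

    void appendDefaultRow() { ++pending_defaults; }     /// O(1), no allocation

    void applyLazyDefaults()
    {
        if (pending_defaults)
        {
            data.resize(data.size() + pending_defaults);    /// one batched insert
            pending_defaults = 0;
        }
    }

    void appendValue(const std::string & v)
    {
        applyLazyDefaults();    /// flush the pending run first
        data.push_back(v);
    }
};
```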
+template +NO_INLINE IColumn::Filter joinRightColumns(const Map & map, AddedColumns & added_columns, const ConstNullMapPtr & null_map [[maybe_unused]]) +{ + constexpr bool is_any_join = STRICTNESS == ASTTableJoin::Strictness::Any; + constexpr bool is_all_join = STRICTNESS == ASTTableJoin::Strictness::All; + constexpr bool is_asof_join = STRICTNESS == ASTTableJoin::Strictness::Asof; + constexpr bool is_semi_join = STRICTNESS == ASTTableJoin::Strictness::Semi; + constexpr bool is_anti_join = STRICTNESS == ASTTableJoin::Strictness::Anti; + constexpr bool left = KIND == ASTTableJoin::Kind::Left; + constexpr bool right = KIND == ASTTableJoin::Kind::Right; + constexpr bool full = KIND == ASTTableJoin::Kind::Full; + + constexpr bool add_missing = (left || full) && !is_semi_join; + constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right); + + size_t rows = added_columns.rows_to_add; + IColumn::Filter filter; + if constexpr (need_filter) + filter = IColumn::Filter(rows, 0); + + Arena pool; + + if constexpr (need_replication) + added_columns.offsets_to_replicate = std::make_unique(rows); + + const IColumn * asof_column [[maybe_unused]] = nullptr; + if constexpr (is_asof_join) + asof_column = extractAsofColumn(added_columns.key_columns); + + auto key_getter = createKeyGetter(added_columns.key_columns, added_columns.key_sizes); + + IColumn::Offset current_offset = 0; + + for (size_t i = 0; i < rows; ++i) + { + if constexpr (has_null_map) + { + if ((*null_map)[i]) + { + addNotFoundRow(added_columns, current_offset); + + if constexpr (need_replication) + (*added_columns.offsets_to_replicate)[i] = current_offset; + continue; + } + } + + auto find_result = key_getter.findKey(map, i, pool); + + if (find_result.isFound()) + { + auto & mapped = find_result.getMapped(); + + if constexpr (is_asof_join) + { + const HashJoin & join = added_columns.join; + if (const RowRef * found = mapped.findAsof(join.getAsofType(), join.getAsofInequality(), asof_column, i)) + { + setUsed(filter, i); + mapped.setUsed(); + added_columns.appendFromBlock(*found->block, found->row_num); + } + else + addNotFoundRow(added_columns, current_offset); + } + else if constexpr (is_all_join) + { + setUsed(filter, i); + mapped.setUsed(); + addFoundRowAll(mapped, added_columns, current_offset); + } + else if constexpr ((is_any_join || is_semi_join) && right) + { + /// Use first appeared left key + it needs left columns replication + if (mapped.setUsedOnce()) + { + setUsed(filter, i); + addFoundRowAll(mapped, added_columns, current_offset); + } + } + else if constexpr (is_any_join && KIND == ASTTableJoin::Kind::Inner) + { + /// Use first appeared left key only + if (mapped.setUsedOnce()) + { + setUsed(filter, i); + added_columns.appendFromBlock(*mapped.block, mapped.row_num); + } + } + else if constexpr (is_any_join && full) + { + /// TODO + } + else if constexpr (is_anti_join) + { + if constexpr (right) + mapped.setUsed(); + } + else /// ANY LEFT, SEMI LEFT, old ANY (RightAny) + { + setUsed(filter, i); + mapped.setUsed(); + added_columns.appendFromBlock(*mapped.block, mapped.row_num); + } + } + else + { + if constexpr (is_anti_join && left) + setUsed(filter, i); + addNotFoundRow(added_columns, current_offset); + } + + if constexpr (need_replication) + (*added_columns.offsets_to_replicate)[i] = current_offset; + } + + added_columns.applyLazyDefaults(); + return filter; +} + +template +IColumn::Filter joinRightColumnsSwitchNullability(const Map & map, AddedColumns & added_columns, const ConstNullMapPtr 
& null_map) +{ + if (added_columns.need_filter) + { + if (null_map) + return joinRightColumns(map, added_columns, null_map); + else + return joinRightColumns(map, added_columns, nullptr); + } + else + { + if (null_map) + return joinRightColumns(map, added_columns, null_map); + else + return joinRightColumns(map, added_columns, nullptr); + } +} + +template +IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_columns, HashJoin::Type type, const ConstNullMapPtr & null_map) +{ + switch (type) + { + #define M(TYPE) \ + case HashJoin::Type::TYPE: \ + return joinRightColumnsSwitchNullability>::Type>(\ + *maps_.TYPE, added_columns, null_map);\ + break; + APPLY_FOR_JOIN_VARIANTS(M) + #undef M + + default: + throw Exception("Unsupported JOIN keys. Type: " + toString(static_cast(type)), ErrorCodes::UNSUPPORTED_JOIN_KEYS); + } +} + +} /// nameless + + +template +void HashJoin::joinBlockImpl( + Block & block, + const Names & key_names_left, + const Block & block_with_columns_to_add, + const Maps & maps_) const +{ + constexpr bool is_any_join = STRICTNESS == ASTTableJoin::Strictness::Any; + constexpr bool is_all_join = STRICTNESS == ASTTableJoin::Strictness::All; + constexpr bool is_asof_join = STRICTNESS == ASTTableJoin::Strictness::Asof; + constexpr bool is_semi_join = STRICTNESS == ASTTableJoin::Strictness::Semi; + constexpr bool is_anti_join = STRICTNESS == ASTTableJoin::Strictness::Anti; + + constexpr bool left = KIND == ASTTableJoin::Kind::Left; + constexpr bool right = KIND == ASTTableJoin::Kind::Right; + constexpr bool inner = KIND == ASTTableJoin::Kind::Inner; + constexpr bool full = KIND == ASTTableJoin::Kind::Full; + + constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right); + constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left)); + + /// Rare case, when keys are constant or low cardinality. To avoid code bloat, simply materialize them. + Columns materialized_keys = JoinCommon::materializeColumns(block, key_names_left); + ColumnRawPtrs key_columns = JoinCommon::getRawPointers(materialized_keys); + + /// Keys with NULL value in any column won't join to anything. + ConstNullMapPtr null_map{}; + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + + size_t existing_columns = block.columns(); + + /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. + * Because if they are constants, then in the "not joined" rows, they may have different values + * - default values, which can differ from the values of these constants. + */ + if constexpr (right || full) + { + materializeBlockInplace(block); + + if (nullable_left_side) + JoinCommon::convertColumnsToNullable(block); + } + + /** For LEFT/INNER JOIN, the saved blocks do not contain keys. + * For FULL/RIGHT JOIN, the saved blocks contain keys; + * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped. 
+ * For ASOF, the last column is used as the ASOF column + */ + ColumnsWithTypeAndName extras; + if constexpr (is_asof_join) + extras.push_back(right_table_keys.getByName(key_names_right.back())); + + AddedColumns added_columns(sample_block_with_columns_to_add, block_with_columns_to_add, block, savedBlockSample(), + extras, *this, key_columns, key_sizes); + bool has_required_right_keys = (required_right_keys.columns() != 0); + added_columns.need_filter = need_filter || has_required_right_keys; + + IColumn::Filter row_filter = switchJoinRightColumns(maps_, added_columns, data->type, null_map); + + for (size_t i = 0; i < added_columns.size(); ++i) + block.insert(added_columns.moveColumn(i)); + + std::vector right_keys_to_replicate [[maybe_unused]]; + + if constexpr (need_filter) + { + /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. + for (size_t i = 0; i < existing_columns; ++i) + block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(row_filter, -1); + + /// Add join key columns from right block if needed. + for (size_t i = 0; i < required_right_keys.columns(); ++i) + { + const auto & right_key = required_right_keys.getByPosition(i); + const auto & left_name = required_right_keys_sources[i]; + + const auto & col = block.getByName(left_name); + bool is_nullable = nullable_right_side || right_key.type->isNullable(); + block.insert(correctNullability({col.column, col.type, right_key.name}, is_nullable)); + } + } + else if (has_required_right_keys) + { + /// Some trash to represent IColumn::Filter as ColumnUInt8 needed for ColumnNullable::applyNullMap() + auto null_map_filter_ptr = ColumnUInt8::create(); + ColumnUInt8 & null_map_filter = assert_cast(*null_map_filter_ptr); + null_map_filter.getData().swap(row_filter); + const IColumn::Filter & filter = null_map_filter.getData(); + + /// Add join key columns from right block if needed. + for (size_t i = 0; i < required_right_keys.columns(); ++i) + { + const auto & right_key = required_right_keys.getByPosition(i); + const auto & left_name = required_right_keys_sources[i]; + + const auto & col = block.getByName(left_name); + bool is_nullable = nullable_right_side || right_key.type->isNullable(); + + ColumnPtr thin_column = filterWithBlanks(col.column, filter); + block.insert(correctNullability({thin_column, col.type, right_key.name}, is_nullable, null_map_filter)); + + if constexpr (need_replication) + right_keys_to_replicate.push_back(block.getPositionByName(right_key.name)); + } + } + + if constexpr (need_replication) + { + std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate; + + /// If ALL ... JOIN - we replicate all the columns except the new ones. 
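As the comment above says, the loop below expands every pre-existing column according to `offsets_to_replicate`, the cumulative count of result rows filled in during the probe. A sketch of that contract on plain vectors (hypothetical helper, assuming non-decreasing cumulative offsets):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

/// offsets[i] = total result rows after processing left row i (cumulative).
/// Row i is therefore repeated offsets[i] - offsets[i - 1] times (possibly zero).
template <typename T>
std::vector<T> replicate(const std::vector<T> & column, const std::vector<size_t> & offsets)
{
    assert(column.size() == offsets.size());
    std::vector<T> result;
    result.reserve(offsets.empty() ? 0 : offsets.back());

    size_t prev = 0;
    for (size_t i = 0; i < column.size(); ++i)
    {
        for (size_t copies = offsets[i] - prev; copies > 0; --copies)
            result.push_back(column[i]);
        prev = offsets[i];
    }
    return result;
}

/// Example: offsets {2, 2, 5} repeats row 0 twice, drops row 1, repeats row 2 three times.
```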
+ for (size_t i = 0; i < existing_columns; ++i) + block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); + + /// Replicate additional right keys + for (size_t pos : right_keys_to_replicate) + block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); + } +} + +void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const +{ + size_t max_joined_block_rows = table_join->maxJoinedBlockRows(); + size_t start_left_row = 0; + size_t start_right_block = 0; + if (not_processed) + { + auto & continuation = static_cast(*not_processed); + start_left_row = continuation.left_position; + start_right_block = continuation.right_block; + not_processed.reset(); + } + + size_t num_existing_columns = block.columns(); + size_t num_columns_to_add = sample_block_with_columns_to_add.columns(); + + ColumnRawPtrs src_left_columns; + MutableColumns dst_columns; + + { + src_left_columns.reserve(num_existing_columns); + dst_columns.reserve(num_existing_columns + num_columns_to_add); + + for (const ColumnWithTypeAndName & left_column : block) + { + src_left_columns.push_back(left_column.column.get()); + dst_columns.emplace_back(src_left_columns.back()->cloneEmpty()); + } + + for (const ColumnWithTypeAndName & right_column : sample_block_with_columns_to_add) + dst_columns.emplace_back(right_column.column->cloneEmpty()); + + for (auto & dst : dst_columns) + dst->reserve(max_joined_block_rows); + } + + size_t rows_left = block.rows(); + size_t rows_added = 0; + + for (size_t left_row = start_left_row; left_row < rows_left; ++left_row) + { + size_t block_number = 0; + for (const Block & block_right : data->blocks) + { + ++block_number; + if (block_number < start_right_block) + continue; + + size_t rows_right = block_right.rows(); + rows_added += rows_right; + + for (size_t col_num = 0; col_num < num_existing_columns; ++col_num) + dst_columns[col_num]->insertManyFrom(*src_left_columns[col_num], left_row, rows_right); + + for (size_t col_num = 0; col_num < num_columns_to_add; ++col_num) + { + const IColumn & column_right = *block_right.getByPosition(col_num).column; + dst_columns[num_existing_columns + col_num]->insertRangeFrom(column_right, 0, rows_right); + } + } + + start_right_block = 0; + + if (rows_added > max_joined_block_rows) + { + not_processed = std::make_shared( + NotProcessedCrossJoin{{block.cloneEmpty()}, left_row, block_number + 1}); + not_processed->block.swap(block); + break; + } + } + + for (const ColumnWithTypeAndName & src_column : sample_block_with_columns_to_add) + block.insert(src_column); + + block = block.cloneWithColumns(std::move(dst_columns)); +} + +static void checkTypeOfKey(const Block & block_left, const Block & block_right) +{ + auto & [c1, left_type_origin, left_name] = block_left.safeGetByPosition(0); + auto & [c2, right_type_origin, right_name] = block_right.safeGetByPosition(0); + auto left_type = removeNullable(left_type_origin); + auto right_type = removeNullable(right_type_origin); + + if (!left_type->equals(*right_type)) + throw Exception("Type mismatch of columns to joinGet by: " + + left_name + " " + left_type->getName() + " at left, " + + right_name + " " + right_type->getName() + " at right", + ErrorCodes::TYPE_MISMATCH); +} + + +DataTypePtr HashJoin::joinGetReturnType(const String & column_name, bool or_null) const +{ + std::shared_lock lock(data->rwlock); + + if (!sample_block_with_columns_to_add.has(column_name)) + throw Exception("StorageJoin doesn't 
contain column " + column_name, ErrorCodes::LOGICAL_ERROR); + auto elem = sample_block_with_columns_to_add.getByName(column_name); + if (or_null) + elem.type = makeNullable(elem.type); + return elem.type; +} + + +template +void HashJoin::joinGetImpl(Block & block, const Block & block_with_columns_to_add, const Maps & maps_) const +{ + joinBlockImpl( + block, {block.getByPosition(0).name}, block_with_columns_to_add, maps_); +} + + +// TODO: support composite key +// TODO: return multiple columns as named tuple +// TODO: return array of values when strictness == ASTTableJoin::Strictness::All +void HashJoin::joinGet(Block & block, const String & column_name, bool or_null) const +{ + std::shared_lock lock(data->rwlock); + + if (key_names_right.size() != 1) + throw Exception("joinGet only supports StorageJoin containing exactly one key", ErrorCodes::LOGICAL_ERROR); + + checkTypeOfKey(block, right_table_keys); + + auto elem = sample_block_with_columns_to_add.getByName(column_name); + if (or_null) + elem.type = makeNullable(elem.type); + elem.column = elem.type->createColumn(); + + if ((strictness == ASTTableJoin::Strictness::Any || strictness == ASTTableJoin::Strictness::RightAny) && + kind == ASTTableJoin::Kind::Left) + { + joinGetImpl(block, {elem}, std::get(data->maps)); + } + else + throw Exception("joinGet only supports StorageJoin of type Left Any", ErrorCodes::LOGICAL_ERROR); +} + + +void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) +{ + std::shared_lock lock(data->rwlock); + + const Names & key_names_left = table_join->keyNamesLeft(); + JoinCommon::checkTypesOfKeys(block, key_names_left, right_table_keys, key_names_right); + + if (joinDispatch(kind, strictness, data->maps, [&](auto kind_, auto strictness_, auto & map) + { + joinBlockImpl(block, key_names_left, sample_block_with_columns_to_add, map); + })) + { + /// Joined + } + else if (kind == ASTTableJoin::Kind::Cross) + joinBlockImplCross(block, not_processed); + else + throw Exception("Logical error: unknown combination of JOIN", ErrorCodes::LOGICAL_ERROR); +} + + +void HashJoin::joinTotals(Block & block) const +{ + JoinCommon::joinTotals(totals, sample_block_with_columns_to_add, key_names_right, block); +} + + +template +struct AdderNonJoined +{ + static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_right) + { + constexpr bool mapped_asof = std::is_same_v; + [[maybe_unused]] constexpr bool mapped_one = std::is_same_v || std::is_same_v; + + if constexpr (mapped_asof) + { + /// Do nothing + } + else if constexpr (mapped_one) + { + for (size_t j = 0; j < columns_right.size(); ++j) + { + const auto & mapped_column = mapped.block->getByPosition(j).column; + columns_right[j]->insertFrom(*mapped_column, mapped.row_num); + } + + ++rows_added; + } + else + { + for (auto it = mapped.begin(); it.ok(); ++it) + { + for (size_t j = 0; j < columns_right.size(); ++j) + { + const auto & mapped_column = it->block->getByPosition(j).column; + columns_right[j]->insertFrom(*mapped_column, it->row_num); + } + + ++rows_added; + } + } + } +}; + + +/// Stream from not joined earlier rows of the right table. 
+class NonJoinedBlockInputStream : public IBlockInputStream +{ +public: + NonJoinedBlockInputStream(const HashJoin & parent_, const Block & result_sample_block_, UInt64 max_block_size_) + : parent(parent_) + , max_block_size(max_block_size_) + , result_sample_block(materializeBlock(result_sample_block_)) + { + bool remap_keys = parent.table_join->hasUsing(); + std::unordered_map left_to_right_key_remap; + + for (size_t i = 0; i < parent.table_join->keyNamesLeft().size(); ++i) + { + const String & left_key_name = parent.table_join->keyNamesLeft()[i]; + const String & right_key_name = parent.table_join->keyNamesRight()[i]; + + size_t left_key_pos = result_sample_block.getPositionByName(left_key_name); + size_t right_key_pos = parent.savedBlockSample().getPositionByName(right_key_name); + + if (remap_keys && !parent.required_right_keys.has(right_key_name)) + left_to_right_key_remap[left_key_pos] = right_key_pos; + } + + /// result_sample_block: left_sample_block + left expressions, right not key columns, required right keys + size_t left_columns_count = result_sample_block.columns() - + parent.sample_block_with_columns_to_add.columns() - parent.required_right_keys.columns(); + + for (size_t left_pos = 0; left_pos < left_columns_count; ++left_pos) + { + /// We need right 'x' for 'RIGHT JOIN ... USING(x)'. + if (left_to_right_key_remap.count(left_pos)) + { + size_t right_key_pos = left_to_right_key_remap[left_pos]; + setRightIndex(right_key_pos, left_pos); + } + else + column_indices_left.emplace_back(left_pos); + } + + const auto & saved_block_sample = parent.savedBlockSample(); + for (size_t right_pos = 0; right_pos < saved_block_sample.columns(); ++right_pos) + { + const String & name = saved_block_sample.getByPosition(right_pos).name; + if (!result_sample_block.has(name)) + continue; + + size_t result_position = result_sample_block.getPositionByName(name); + + /// Don't remap left keys twice. We need only qualified right keys here + if (result_position < left_columns_count) + continue; + + setRightIndex(right_pos, result_position); + } + + if (column_indices_left.size() + column_indices_right.size() + same_result_keys.size() != result_sample_block.columns()) + throw Exception("Error in columns mapping in RIGHT|FULL JOIN. 
Left: " + toString(column_indices_left.size()) + + ", right: " + toString(column_indices_right.size()) + + ", same: " + toString(same_result_keys.size()) + + ", result: " + toString(result_sample_block.columns()), + ErrorCodes::LOGICAL_ERROR); + } + + String getName() const override { return "NonJoined"; } + + Block getHeader() const override { return result_sample_block; } + + +protected: + Block readImpl() override + { + if (parent.data->blocks.empty()) + return Block(); + return createBlock(); + } + +private: + const HashJoin & parent; + UInt64 max_block_size; + + Block result_sample_block; + /// Indices of columns in result_sample_block that should be generated + std::vector column_indices_left; + /// Indices of columns that come from the right-side table: right_pos -> result_pos + std::unordered_map column_indices_right; + /// + std::unordered_map same_result_keys; + /// Which right columns (saved in parent) need nullability change before placing them in result block + std::vector right_nullability_adds; + std::vector right_nullability_removes; + /// Which right columns (saved in parent) need LowCardinality change before placing them in result block + std::vector> right_lowcard_changes; + + std::any position; + std::optional nulls_position; + + void setRightIndex(size_t right_pos, size_t result_position) + { + if (!column_indices_right.count(right_pos)) + { + column_indices_right[right_pos] = result_position; + extractColumnChanges(right_pos, result_position); + } + else + same_result_keys[result_position] = column_indices_right[right_pos]; + } + + void extractColumnChanges(size_t right_pos, size_t result_pos) + { + const auto & src = parent.savedBlockSample().getByPosition(right_pos).column; + const auto & dst = result_sample_block.getByPosition(result_pos).column; + + if (!src->isNullable() && dst->isNullable()) + right_nullability_adds.push_back(right_pos); + + if (src->isNullable() && !dst->isNullable()) + right_nullability_removes.push_back(right_pos); + + ColumnPtr src_not_null = emptyNotNullableClone(src); + ColumnPtr dst_not_null = emptyNotNullableClone(dst); + + if (src_not_null->lowCardinality() != dst_not_null->lowCardinality()) + right_lowcard_changes.push_back({right_pos, dst_not_null}); + } + + Block createBlock() + { + MutableColumns columns_right = parent.savedBlockSample().cloneEmptyColumns(); + + size_t rows_added = 0; + + auto fill_callback = [&](auto, auto strictness, auto & map) + { + rows_added = fillColumnsFromMap(map, columns_right); + }; + + if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps, fill_callback)) + throw Exception("Logical error: unknown JOIN strictness (must be on of: ANY, ALL, ASOF)", ErrorCodes::LOGICAL_ERROR); + + fillNullsFromBlocks(columns_right, rows_added); + + if (!rows_added) + return {}; + + for (size_t pos : right_nullability_removes) + changeNullability(columns_right[pos]); + + for (auto & [pos, dst_sample] : right_lowcard_changes) + columns_right[pos] = changeLowCardinality(std::move(columns_right[pos]), dst_sample)->assumeMutable(); + + for (size_t pos : right_nullability_adds) + changeNullability(columns_right[pos]); + + Block res = result_sample_block.cloneEmpty(); + + /// @note it's possible to make ColumnConst here and materialize it later + for (size_t pos : column_indices_left) + res.getByPosition(pos).column = res.getByPosition(pos).column->cloneResized(rows_added); + + for (auto & pr : column_indices_right) + { + auto & right_column = columns_right[pr.first]; + auto & result_column = 
res.getByPosition(pr.second).column; +#ifndef NDEBUG + if (result_column->getName() != right_column->getName()) + throw Exception("Wrong columns assign in RIGHT|FULL JOIN: " + result_column->getName() + + " " + right_column->getName(), ErrorCodes::LOGICAL_ERROR); +#endif + result_column = std::move(right_column); + } + + for (auto & pr : same_result_keys) + { + auto & src_column = res.getByPosition(pr.second).column; + auto & dst_column = res.getByPosition(pr.first).column; + changeColumnRepresentation(src_column, dst_column); + } + + return res; + } + + template + size_t fillColumnsFromMap(const Maps & maps, MutableColumns & columns_keys_and_right) + { + switch (parent.data->type) + { + #define M(TYPE) \ + case HashJoin::Type::TYPE: \ + return fillColumns(*maps.TYPE, columns_keys_and_right); + APPLY_FOR_JOIN_VARIANTS(M) + #undef M + default: + throw Exception("Unsupported JOIN keys. Type: " + toString(static_cast(parent.data->type)), + ErrorCodes::UNSUPPORTED_JOIN_KEYS); + } + + __builtin_unreachable(); + } + + template + size_t fillColumns(const Map & map, MutableColumns & columns_keys_and_right) + { + using Mapped = typename Map::mapped_type; + using Iterator = typename Map::const_iterator; + + size_t rows_added = 0; + + if (!position.has_value()) + position = std::make_any(map.begin()); + + Iterator & it = std::any_cast(position); + auto end = map.end(); + + for (; it != end; ++it) + { + const Mapped & mapped = it->getMapped(); + + if (mapped.getUsed()) + continue; + + AdderNonJoined::add(mapped, rows_added, columns_keys_and_right); + + if (rows_added >= max_block_size) + { + ++it; + break; + } + } + + return rows_added; + } + + void fillNullsFromBlocks(MutableColumns & columns_keys_and_right, size_t & rows_added) + { + if (!nulls_position.has_value()) + nulls_position = parent.data->blocks_nullmaps.begin(); + + auto end = parent.data->blocks_nullmaps.end(); + + for (auto & it = *nulls_position; it != end && rows_added < max_block_size; ++it) + { + const Block * block = it->first; + const NullMap & nullmap = assert_cast(*it->second).getData(); + + for (size_t row = 0; row < nullmap.size(); ++row) + { + if (nullmap[row]) + { + for (size_t col = 0; col < columns_keys_and_right.size(); ++col) + columns_keys_and_right[col]->insertFrom(*block->getByPosition(col).column, row); + ++rows_added; + } + } + } + } +}; + + +BlockInputStreamPtr HashJoin::createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const +{ + if (table_join->strictness() == ASTTableJoin::Strictness::Asof || + table_join->strictness() == ASTTableJoin::Strictness::Semi) + return {}; + + if (isRightOrFull(table_join->kind())) + return std::make_shared(*this, result_sample_block, max_block_size); + return {}; +} + + +bool HashJoin::hasStreamWithNonJoinedRows() const +{ + if (table_join->strictness() == ASTTableJoin::Strictness::Asof || + table_join->strictness() == ASTTableJoin::Strictness::Semi) + return false; + + return isRightOrFull(table_join->kind()); +} + +} diff --git a/src/Interpreters/HashJoin.h b/src/Interpreters/HashJoin.h new file mode 100644 index 00000000000..b769cfc61c5 --- /dev/null +++ b/src/Interpreters/HashJoin.h @@ -0,0 +1,390 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + + +namespace DB +{ + +class TableJoin; + +namespace JoinStuff +{ + +/// Base class with optional flag attached that's needed to implement RIGHT and FULL JOINs. 
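fillColumns() above is resumable: the map iterator is stashed (in a std::any) between readImpl() calls, so each call emits at most max_block_size rows and the next one continues from the same position. A small sketch of that cursor pattern with ordinary standard containers (toy types, not the real map variants):

```cpp
#include <cstddef>
#include <map>
#include <optional>
#include <string>
#include <vector>

/// Produce rows in chunks, keeping the iterator alive between calls
/// so each call resumes where the previous block ended.
class NonJoinedCursor
{
public:
    NonJoinedCursor(const std::map<int, std::string> & map_, size_t max_block_size_)
        : map(map_), max_block_size(max_block_size_) {}

    /// Returns up to max_block_size rows; an empty vector means "finished".
    std::vector<std::string> next()
    {
        if (!position)
            position = map.begin();

        std::vector<std::string> rows;
        auto & it = *position;
        while (it != map.end() && rows.size() < max_block_size)
        {
            rows.push_back(it->second);
            ++it;
        }
        return rows;
    }

private:
    const std::map<int, std::string> & map;
    size_t max_block_size;
    std::optional<std::map<int, std::string>::const_iterator> position;
};
```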
+template <typename T, bool with_used>
+struct WithFlags;
+
+template <typename T>
+struct WithFlags<T, true> : T
+{
+    using Base = T;
+    using T::T;
+
+    mutable std::atomic<bool> used {};
+    void setUsed() const { used.store(true, std::memory_order_relaxed); }    /// Could be set simultaneously from different threads.
+    bool getUsed() const { return used; }
+
+    bool setUsedOnce() const
+    {
+        /// fast check to prevent heavy CAS with seq_cst order
+        if (used.load(std::memory_order_relaxed))
+            return false;
+
+        bool expected = false;
+        return used.compare_exchange_strong(expected, true);
+    }
+};
+
+template <typename T>
+struct WithFlags<T, false> : T
+{
+    using Base = T;
+    using T::T;
+
+    void setUsed() const {}
+    bool getUsed() const { return true; }
+    bool setUsedOnce() const { return true; }
+};
+
+using MappedOne = WithFlags<RowRef, false>;
+using MappedAll = WithFlags<RowRefList, false>;
+using MappedOneFlagged = WithFlags<RowRef, true>;
+using MappedAllFlagged = WithFlags<RowRefList, true>;
+using MappedAsof = WithFlags<AsofRowRefs, false>;
+
+}
+
+/** Data structure for implementation of JOIN.
+  * It is just a hash table: keys -> rows of joined ("right") table.
+  * Additionally, CROSS JOIN is supported: instead of a hash table, it uses just a set of blocks without keys.
+  *
+  * JOIN-s could be of these types:
+  * - ALL × LEFT/INNER/RIGHT/FULL
+  * - ANY × LEFT/INNER/RIGHT
+  * - SEMI/ANTI x LEFT/RIGHT
+  * - ASOF x LEFT/INNER
+  * - CROSS
+  *
+  * ALL means usual JOIN, when rows are multiplied by the number of matching rows from the "right" table.
+  * ANY uses one row per unique key from the right table. For LEFT JOIN it would be any row (with needed joined key) from the right table,
+  * for RIGHT JOIN it would be any row from the left table and for INNER one it would be any row from right and any row from left.
+  * SEMI JOIN filters the left table by keys that are present in the right table for LEFT JOIN, and filters the right table by keys from the left table
+  * for RIGHT JOIN. In other words, SEMI JOIN returns only rows whose joining keys are present in the other table.
+  * ANTI JOIN is the same as SEMI JOIN but returns rows with joining keys that are NOT present in the other table.
+  * SEMI/ANTI JOINs allow getting values from both tables. For the filtered table it takes any row with the same joining key. For ANTI JOIN it returns
+  * defaults for the other table's columns.
+  * ASOF JOIN is a non-equi join. For one key column it finds the nearest value to join according to the join inequality.
+  * It's expected that ANY|SEMI LEFT JOIN is more efficient than the ALL one.
+  *
+  * If INNER is specified - leave only rows that have matching rows from "right" table.
+  * If LEFT is specified - in case when there is no matching row in "right" table, fill it with default values instead.
+  * If RIGHT is specified - first process as INNER, but track which rows from the right table were joined,
+  *  and at the end, add rows from the right table that were not joined and substitute default values for columns of the left table.
+  * If FULL is specified - first process as LEFT, but track which rows from the right table were joined,
+  *  and at the end, add rows from the right table that were not joined and substitute default values for columns of the left table.
+  *
+  * Thus, LEFT and RIGHT JOINs are not symmetric in terms of implementation.
+  *
+  * All JOINs (except CROSS) are done by equality condition on keys (equijoin).
+  * Non-equality and other conditions are not supported.
+  *
+  * Implementation:
+  *
+  * 1. Build hash table in memory from "right" table.
+  * This hash table is in form of keys -> row in case of ANY or keys -> [rows...] in case of ALL.
+  * This is done in insertFromBlock method.
+  *
+  * 2.
Process "left" table and join corresponding rows from "right" table by lookups in the map. + * This is done in joinBlock methods. + * + * In case of ANY LEFT JOIN - form new columns with found values or default values. + * This is the most simple. Number of rows in left table does not change. + * + * In case of ANY INNER JOIN - form new columns with found values, + * and also build a filter - in what rows nothing was found. + * Then filter columns of "left" table. + * + * In case of ALL ... JOIN - form new columns with all found rows, + * and also fill 'offsets' array, describing how many times we need to replicate values of "left" table. + * Then replicate columns of "left" table. + * + * How Nullable keys are processed: + * + * NULLs never join to anything, even to each other. + * During building of map, we just skip keys with NULL value of any component. + * During joining, we simply treat rows with any NULLs in key as non joined. + * + * Default values for outer joins (LEFT, RIGHT, FULL): + * + * Behaviour is controlled by 'join_use_nulls' settings. + * If it is false, we substitute (global) default value for the data type, for non-joined rows + * (zero, empty string, etc. and NULL for Nullable data types). + * If it is true, we always generate Nullable column and substitute NULLs for non-joined rows, + * as in standard SQL. + */ +class HashJoin : public IJoin +{ +public: + HashJoin(std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_ = false); + + bool empty() { return data->type == Type::EMPTY; } + + /** Add block of data from right hand of JOIN to the map. + * Returns false, if some limit was exceeded and you should not insert more data. + */ + bool addJoinedBlock(const Block & block, bool check_limits) override; + + /** Join data from the map (that was previously built by calls to addJoinedBlock) to the block with data from "left" table. + * Could be called from different threads in parallel. + */ + void joinBlock(Block & block, ExtraBlockPtr & not_processed) override; + + /// Infer the return type for joinGet function + DataTypePtr joinGetReturnType(const String & column_name, bool or_null) const; + + /// Used by joinGet function that turns StorageJoin into a dictionary + void joinGet(Block & block, const String & column_name, bool or_null) const; + + /** Keep "totals" (separate part of dataset, see WITH TOTALS) to use later. + */ + void setTotals(const Block & block) override { totals = block; } + bool hasTotals() const override { return totals; } + + void joinTotals(Block & block) const override; + + /** For RIGHT and FULL JOINs. + * A stream that will contain default values from left table, joined with rows from right table, that was not joined before. + * Use only after all calls to joinBlock was done. + * left_sample_block is passed without account of 'use_nulls' setting (columns will be converted to Nullable inside). + */ + BlockInputStreamPtr createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const override; + bool hasStreamWithNonJoinedRows() const override; + + /// Number of keys in all built JOIN maps. + size_t getTotalRowCount() const final; + /// Sum size in bytes of all buffers, used for JOIN maps and for all memory pools. 
+ size_t getTotalByteCount() const final; + + bool alwaysReturnsEmptySet() const final { return isInnerOrRight(getKind()) && data->empty; } + + ASTTableJoin::Kind getKind() const { return kind; } + ASTTableJoin::Strictness getStrictness() const { return strictness; } + AsofRowRefs::Type getAsofType() const { return *asof_type; } + ASOF::Inequality getAsofInequality() const { return asof_inequality; } + bool anyTakeLastRow() const { return any_take_last_row; } + + /// Different types of keys for maps. + #define APPLY_FOR_JOIN_VARIANTS(M) \ + M(key8) \ + M(key16) \ + M(key32) \ + M(key64) \ + M(key_string) \ + M(key_fixed_string) \ + M(keys128) \ + M(keys256) \ + M(hashed) + + + /// Used for reading from StorageJoin and applying joinGet function + #define APPLY_FOR_JOIN_VARIANTS_LIMITED(M) \ + M(key8) \ + M(key16) \ + M(key32) \ + M(key64) \ + M(key_string) \ + M(key_fixed_string) + + enum class Type + { + EMPTY, + CROSS, + #define M(NAME) NAME, + APPLY_FOR_JOIN_VARIANTS(M) + #undef M + }; + + + /** Different data structures, that are used to perform JOIN. + */ + template + struct MapsTemplate + { + std::unique_ptr> key8; + std::unique_ptr> key16; + std::unique_ptr>> key32; + std::unique_ptr>> key64; + std::unique_ptr> key_string; + std::unique_ptr> key_fixed_string; + std::unique_ptr> keys128; + std::unique_ptr> keys256; + std::unique_ptr> hashed; + + void create(Type which) + { + switch (which) + { + case Type::EMPTY: break; + case Type::CROSS: break; + + #define M(NAME) \ + case Type::NAME: NAME = std::make_unique(); break; + APPLY_FOR_JOIN_VARIANTS(M) + #undef M + } + } + + size_t getTotalRowCount(Type which) const + { + switch (which) + { + case Type::EMPTY: return 0; + case Type::CROSS: return 0; + + #define M(NAME) \ + case Type::NAME: return NAME ? NAME->size() : 0; + APPLY_FOR_JOIN_VARIANTS(M) + #undef M + } + + __builtin_unreachable(); + } + + size_t getTotalByteCountImpl(Type which) const + { + switch (which) + { + case Type::EMPTY: return 0; + case Type::CROSS: return 0; + + #define M(NAME) \ + case Type::NAME: return NAME ? NAME->getBufferSizeInBytes() : 0; + APPLY_FOR_JOIN_VARIANTS(M) + #undef M + } + + __builtin_unreachable(); + } + }; + + using MapsOne = MapsTemplate; + using MapsAll = MapsTemplate; + using MapsOneFlagged = MapsTemplate; + using MapsAllFlagged = MapsTemplate; + using MapsAsof = MapsTemplate; + + using MapsVariant = std::variant; + using BlockNullmapList = std::deque>; + + struct RightTableData + { + /// Protect state for concurrent use in insertFromBlock and joinBlock. + /// @note that these methods could be called simultaneously only while use of StorageJoin. + mutable std::shared_mutex rwlock; + + Type type = Type::EMPTY; + bool empty = true; + + MapsVariant maps; + Block sample_block; /// Block as it would appear in the BlockList + BlocksList blocks; /// Blocks of "right" table. + BlockNullmapList blocks_nullmaps; /// Nullmaps for blocks of "right" table (if needed) + + /// Additional data - strings for string keys and continuation elements of single-linked lists of references to rows. + Arena pool; + }; + + void reuseJoinedData(const HashJoin & join) + { + data = join.data; + } + + std::shared_ptr getJoinedData() const + { + return data; + } + +private: + friend class NonJoinedBlockInputStream; + friend class JoinSource; + + std::shared_ptr table_join; + ASTTableJoin::Kind kind; + ASTTableJoin::Strictness strictness; + + /// Names of key columns in right-side table (in the order they appear in ON/USING clause). @note It could contain duplicates. 
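APPLY_FOR_JOIN_VARIANTS above is an X-macro: the variant list is written once and expanded into the enum, the per-variant members, and every dispatching switch, so adding a variant touches a single line. A compact self-contained illustration of the idiom (hypothetical variant names):

```cpp
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

#define APPLY_FOR_VARIANTS(M) \
    M(key32) \
    M(key64) \
    M(key_string)

struct Maps
{
    enum class Type
    {
    #define M(NAME) NAME,
        APPLY_FOR_VARIANTS(M)
    #undef M
    };

    /// One container per variant; only the one chosen at runtime is allocated.
    std::unique_ptr<std::unordered_map<uint32_t, size_t>> key32;
    std::unique_ptr<std::unordered_map<uint64_t, size_t>> key64;
    std::unique_ptr<std::unordered_map<std::string, size_t>> key_string;

    void create(Type which)
    {
        switch (which)
        {
        #define M(NAME) case Type::NAME: NAME = std::make_unique<decltype(NAME)::element_type>(); break;
            APPLY_FOR_VARIANTS(M)
        #undef M
        }
    }

    size_t size(Type which) const
    {
        switch (which)
        {
        #define M(NAME) case Type::NAME: return NAME ? NAME->size() : 0;
            APPLY_FOR_VARIANTS(M)
        #undef M
        }
        return 0;   /// unreachable for a valid Type
    }
};
```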
+ const Names & key_names_right; + + bool nullable_right_side; /// In case of LEFT and FULL joins, if use_nulls, convert right-side columns to Nullable. + bool nullable_left_side; /// In case of RIGHT and FULL joins, if use_nulls, convert left-side columns to Nullable. + bool any_take_last_row; /// Overwrite existing values when encountering the same key again + std::optional asof_type; + ASOF::Inequality asof_inequality; + + /// Right table data. StorageJoin shares it between many Join objects. + std::shared_ptr data; + Sizes key_sizes; + + /// Block with columns from the right-side table except key columns. + Block sample_block_with_columns_to_add; + /// Block with key columns in the same order they appear in the right-side table (duplicates appear once). + Block right_table_keys; + /// Block with key columns right-side table keys that are needed in result (would be attached after joined columns). + Block required_right_keys; + /// Left table column names that are sources for required_right_keys columns + std::vector required_right_keys_sources; + + Poco::Logger * log; + + Block totals; + + void init(Type type_); + + /** Set information about structure of right hand of JOIN (joined data). + */ + void setSampleBlock(const Block & block); + + const Block & savedBlockSample() const { return data->sample_block; } + + /// Modify (structure) right block to save it in block list + Block structureRightBlock(const Block & stored_block) const; + void initRightBlockStructure(Block & saved_block_sample); + void initRequiredRightKeys(); + + template + void joinBlockImpl( + Block & block, + const Names & key_names_left, + const Block & block_with_columns_to_add, + const Maps & maps) const; + + void joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const; + + template + void joinGetImpl(Block & block, const Block & block_with_columns_to_add, const Maps & maps_) const; + + static Type chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes); +}; + +} diff --git a/dbms/src/Interpreters/IExternalLoadable.cpp b/src/Interpreters/IExternalLoadable.cpp similarity index 100% rename from dbms/src/Interpreters/IExternalLoadable.cpp rename to src/Interpreters/IExternalLoadable.cpp diff --git a/dbms/src/Interpreters/IExternalLoadable.h b/src/Interpreters/IExternalLoadable.h similarity index 100% rename from dbms/src/Interpreters/IExternalLoadable.h rename to src/Interpreters/IExternalLoadable.h diff --git a/dbms/src/Interpreters/IExternalLoaderConfigRepository.h b/src/Interpreters/IExternalLoaderConfigRepository.h similarity index 100% rename from dbms/src/Interpreters/IExternalLoaderConfigRepository.h rename to src/Interpreters/IExternalLoaderConfigRepository.h diff --git a/dbms/src/Interpreters/IInterpreter.h b/src/Interpreters/IInterpreter.h similarity index 100% rename from dbms/src/Interpreters/IInterpreter.h rename to src/Interpreters/IInterpreter.h diff --git a/dbms/src/Interpreters/IJoin.h b/src/Interpreters/IJoin.h similarity index 100% rename from dbms/src/Interpreters/IJoin.h rename to src/Interpreters/IJoin.h diff --git a/dbms/src/Interpreters/IdentifierSemantic.cpp b/src/Interpreters/IdentifierSemantic.cpp similarity index 100% rename from dbms/src/Interpreters/IdentifierSemantic.cpp rename to src/Interpreters/IdentifierSemantic.cpp diff --git a/dbms/src/Interpreters/IdentifierSemantic.h b/src/Interpreters/IdentifierSemantic.h similarity index 100% rename from dbms/src/Interpreters/IdentifierSemantic.h rename to src/Interpreters/IdentifierSemantic.h diff --git 
a/src/Interpreters/InDepthNodeVisitor.h b/src/Interpreters/InDepthNodeVisitor.h new file mode 100644 index 00000000000..3e0a8e16185 --- /dev/null +++ b/src/Interpreters/InDepthNodeVisitor.h @@ -0,0 +1,82 @@ +#pragma once + +#include +#include +#include +#include + +namespace DB +{ + +/// Visits AST tree in depth, call functions for nodes according to Matcher type data. +/// You need to define Data, visit() and needChildVisit() in Matcher class. +template +class InDepthNodeVisitor +{ +public: + using Data = typename Matcher::Data; + + InDepthNodeVisitor(Data & data_, std::ostream * ostr_ = nullptr) + : data(data_), + visit_depth(0), + ostr(ostr_) + {} + + void visit(T & ast) + { + DumpASTNode dump(*ast, ostr, visit_depth, typeid(Matcher).name()); + + if constexpr (!_top_to_bottom) + visitChildren(ast); + + Matcher::visit(ast, data); + + if constexpr (_top_to_bottom) + visitChildren(ast); + } + +private: + Data & data; + size_t visit_depth; + std::ostream * ostr; + + void visitChildren(T & ast) + { + for (auto & child : ast->children) + if (Matcher::needChildVisit(ast, child)) + visit(child); + } +}; + +template +using ConstInDepthNodeVisitor = InDepthNodeVisitor; + +struct NeedChild +{ + using Condition = bool (*)(const ASTPtr & node, const ASTPtr & child); + + static bool all(const ASTPtr &, const ASTPtr &) { return true; } + static bool none(const ASTPtr &, const ASTPtr &) { return false; } +}; + +/// Simple matcher for one node type. Use need_child function for complex traversal logic. +template +class OneTypeMatcher +{ +public: + using Data = Data_; + using TypeToVisit = typename Data::TypeToVisit; + + static bool needChildVisit(const ASTPtr & node, const ASTPtr & child) { return need_child(node, child); } + + static void visit(T & ast, Data & data) + { + if (auto * t = typeid_cast(ast.get())) + data.visit(*t, ast); + } +}; + +template +using ConstOneTypeMatcher = OneTypeMatcher; + +} diff --git a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.cpp b/src/Interpreters/InJoinSubqueriesPreprocessor.cpp similarity index 100% rename from dbms/src/Interpreters/InJoinSubqueriesPreprocessor.cpp rename to src/Interpreters/InJoinSubqueriesPreprocessor.cpp diff --git a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h b/src/Interpreters/InJoinSubqueriesPreprocessor.h similarity index 100% rename from dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h rename to src/Interpreters/InJoinSubqueriesPreprocessor.h diff --git a/dbms/src/Interpreters/InternalTextLogsQueue.cpp b/src/Interpreters/InternalTextLogsQueue.cpp similarity index 100% rename from dbms/src/Interpreters/InternalTextLogsQueue.cpp rename to src/Interpreters/InternalTextLogsQueue.cpp diff --git a/dbms/src/Interpreters/InternalTextLogsQueue.h b/src/Interpreters/InternalTextLogsQueue.h similarity index 100% rename from dbms/src/Interpreters/InternalTextLogsQueue.h rename to src/Interpreters/InternalTextLogsQueue.h diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp new file mode 100644 index 00000000000..c8517defdb7 --- /dev/null +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -0,0 +1,297 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int SUPPORT_IS_DISABLED; + extern const int INCORRECT_QUERY; +} + + +InterpreterAlterQuery::InterpreterAlterQuery(const ASTPtr & 
query_ptr_, const Context & context_)
+    : query_ptr(query_ptr_), context(context_)
+{
+}
+
+BlockIO InterpreterAlterQuery::execute()
+{
+    const auto & alter = query_ptr->as<ASTAlterQuery &>();
+
+    if (!alter.cluster.empty())
+        return executeDDLQueryOnCluster(query_ptr, context, getRequiredAccess());
+
+    context.checkAccess(getRequiredAccess());
+    auto table_id = context.resolveStorageID(alter, Context::ResolveOrdinary);
+    StoragePtr table = DatabaseCatalog::instance().getTable(table_id);
+
+    /// Add default database to table identifiers that we can encounter in e.g. default expressions,
+    /// mutation expression, etc.
+    AddDefaultDatabaseVisitor visitor(table_id.getDatabaseName());
+    ASTPtr command_list_ptr = alter.command_list->ptr();
+    visitor.visit(command_list_ptr);
+
+    AlterCommands alter_commands;
+    PartitionCommands partition_commands;
+    MutationCommands mutation_commands;
+    LiveViewCommands live_view_commands;
+    for (ASTAlterCommand * command_ast : alter.command_list->commands)
+    {
+        if (auto alter_command = AlterCommand::parse(command_ast))
+            alter_commands.emplace_back(std::move(*alter_command));
+        else if (auto partition_command = PartitionCommand::parse(command_ast))
+        {
+            if (partition_command->type == PartitionCommand::DROP_DETACHED_PARTITION
+                && !context.getSettingsRef().allow_drop_detached)
+                throw DB::Exception("Cannot execute query: DROP DETACHED PART is disabled "
+                                    "(see allow_drop_detached setting)", ErrorCodes::SUPPORT_IS_DISABLED);
+            partition_commands.emplace_back(std::move(*partition_command));
+        }
+        else if (auto mut_command = MutationCommand::parse(command_ast))
+        {
+            if (mut_command->type == MutationCommand::MATERIALIZE_TTL && !table->hasAnyTTL())
+                throw Exception("Cannot MATERIALIZE TTL as there is no TTL set for table "
+                    + table->getStorageID().getNameForLogs(), ErrorCodes::INCORRECT_QUERY);
+
+            mutation_commands.emplace_back(std::move(*mut_command));
+        }
+        else if (auto live_view_command = LiveViewCommand::parse(command_ast))
+            live_view_commands.emplace_back(std::move(*live_view_command));
+        else
+            throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR);
+    }
+
+    if (!mutation_commands.empty())
+    {
+        auto table_lock_holder = table->lockStructureForShare(
+                false /* because mutation is executed asynchronously */,
+                context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout);
+        MutationsInterpreter(table, mutation_commands, context, false).validate(table_lock_holder);
+        table->mutate(mutation_commands, context);
+    }
+
+    if (!partition_commands.empty())
+    {
+        table->alterPartition(query_ptr, partition_commands, context);
+    }
+
+    if (!live_view_commands.empty())
+    {
+        live_view_commands.validate(*table);
+        for (const LiveViewCommand & command : live_view_commands)
+        {
+            auto live_view = std::dynamic_pointer_cast<StorageLiveView>(table);
+            switch (command.type)
+            {
+                case LiveViewCommand::REFRESH:
+                    live_view->refresh(context);
+                    break;
+            }
+        }
+    }
+
+    if (!alter_commands.empty())
+    {
+        auto table_lock_holder = table->lockAlterIntention(
+                context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout);
+        StorageInMemoryMetadata metadata = table->getInMemoryMetadata();
+        alter_commands.validate(metadata, context);
+        alter_commands.prepare(metadata);
+        table->checkAlterIsPossible(alter_commands, context.getSettingsRef());
+        table->alter(alter_commands, context, table_lock_holder);
+    }
+
+    return {};
+}
+
+
+AccessRightsElements InterpreterAlterQuery::getRequiredAccess() const
+{
+    AccessRightsElements required_access;
+    const auto & alter =
query_ptr->as(); + for (ASTAlterCommand * command : alter.command_list->commands) + boost::range::push_back(required_access, getRequiredAccessForCommand(*command, alter.database, alter.table)); + return required_access; +} + + +AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const ASTAlterCommand & command, const String & database, const String & table) +{ + AccessRightsElements required_access; + + auto column_name = [&]() -> String { return getIdentifierName(command.column); }; + auto column_name_from_col_decl = [&]() -> std::string_view { return command.col_decl->as().name; }; + auto column_names_from_update_assignments = [&]() -> std::vector + { + std::vector column_names; + for (const ASTPtr & assignment_ast : command.update_assignments->children) + column_names.emplace_back(assignment_ast->as().column_name); + return column_names; + }; + + switch (command.type) + { + case ASTAlterCommand::UPDATE: + { + required_access.emplace_back(AccessType::ALTER_UPDATE, database, table, column_names_from_update_assignments()); + break; + } + case ASTAlterCommand::DELETE: + { + required_access.emplace_back(AccessType::ALTER_DELETE, database, table); + break; + } + case ASTAlterCommand::ADD_COLUMN: + { + required_access.emplace_back(AccessType::ALTER_ADD_COLUMN, database, table, column_name_from_col_decl()); + break; + } + case ASTAlterCommand::DROP_COLUMN: + { + if (command.clear_column) + required_access.emplace_back(AccessType::ALTER_CLEAR_COLUMN, database, table, column_name()); + else + required_access.emplace_back(AccessType::ALTER_DROP_COLUMN, database, table, column_name()); + break; + } + case ASTAlterCommand::MODIFY_COLUMN: + { + required_access.emplace_back(AccessType::ALTER_MODIFY_COLUMN, database, table, column_name_from_col_decl()); + break; + } + case ASTAlterCommand::COMMENT_COLUMN: + { + required_access.emplace_back(AccessType::ALTER_COMMENT_COLUMN, database, table, column_name()); + break; + } + case ASTAlterCommand::MODIFY_ORDER_BY: + { + required_access.emplace_back(AccessType::ALTER_ORDER_BY, database, table); + break; + } + case ASTAlterCommand::ADD_INDEX: + { + required_access.emplace_back(AccessType::ALTER_ADD_INDEX, database, table); + break; + } + case ASTAlterCommand::DROP_INDEX: + { + if (command.clear_index) + required_access.emplace_back(AccessType::ALTER_CLEAR_INDEX, database, table); + else + required_access.emplace_back(AccessType::ALTER_DROP_INDEX, database, table); + break; + } + case ASTAlterCommand::MATERIALIZE_INDEX: + { + required_access.emplace_back(AccessType::ALTER_MATERIALIZE_INDEX, database, table); + break; + } + case ASTAlterCommand::ADD_CONSTRAINT: + { + required_access.emplace_back(AccessType::ALTER_ADD_CONSTRAINT, database, table); + break; + } + case ASTAlterCommand::DROP_CONSTRAINT: + { + required_access.emplace_back(AccessType::ALTER_DROP_CONSTRAINT, database, table); + break; + } + case ASTAlterCommand::MODIFY_TTL: + { + required_access.emplace_back(AccessType::ALTER_TTL, database, table); + break; + } + case ASTAlterCommand::MATERIALIZE_TTL: + { + required_access.emplace_back(AccessType::ALTER_MATERIALIZE_TTL, database, table); + break; + } + case ASTAlterCommand::MODIFY_SETTING: + { + required_access.emplace_back(AccessType::ALTER_SETTINGS, database, table); + break; + } + case ASTAlterCommand::ATTACH_PARTITION: + { + required_access.emplace_back(AccessType::INSERT, database, table); + break; + } + case ASTAlterCommand::DROP_PARTITION: [[fallthrough]]; + case ASTAlterCommand::DROP_DETACHED_PARTITION: + { + 
required_access.emplace_back(AccessType::ALTER_DELETE, database, table); + break; + } + case ASTAlterCommand::MOVE_PARTITION: + { + if ((command.move_destination_type == PartDestinationType::DISK) + || (command.move_destination_type == PartDestinationType::VOLUME)) + { + required_access.emplace_back(AccessType::ALTER_MOVE_PARTITION, database, table); + } + else if (command.move_destination_type == PartDestinationType::TABLE) + { + required_access.emplace_back(AccessType::SELECT | AccessType::ALTER_DELETE, database, table); + required_access.emplace_back(AccessType::INSERT, command.to_database, command.to_table); + } + break; + } + case ASTAlterCommand::REPLACE_PARTITION: + { + required_access.emplace_back(AccessType::SELECT, command.from_database, command.from_table); + required_access.emplace_back(AccessType::ALTER_DELETE | AccessType::INSERT, database, table); + break; + } + case ASTAlterCommand::FETCH_PARTITION: + { + required_access.emplace_back(AccessType::ALTER_FETCH_PARTITION, database, table); + break; + } + case ASTAlterCommand::FREEZE_PARTITION: [[fallthrough]]; + case ASTAlterCommand::FREEZE_ALL: + { + required_access.emplace_back(AccessType::ALTER_FREEZE_PARTITION, database, table); + break; + } + case ASTAlterCommand::MODIFY_QUERY: + { + required_access.emplace_back(AccessType::ALTER_VIEW_MODIFY_QUERY, database, table); + break; + } + case ASTAlterCommand::LIVE_VIEW_REFRESH: + { + required_access.emplace_back(AccessType::ALTER_VIEW_REFRESH, database, table); + break; + } + case ASTAlterCommand::RENAME_COLUMN: + { + required_access.emplace_back(AccessType::ALTER_RENAME_COLUMN, database, table, column_name()); + break; + } + case ASTAlterCommand::NO_TYPE: break; + } + + return required_access; +} + +} diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.h b/src/Interpreters/InterpreterAlterQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterAlterQuery.h rename to src/Interpreters/InterpreterAlterQuery.h diff --git a/dbms/src/Interpreters/InterpreterCheckQuery.cpp b/src/Interpreters/InterpreterCheckQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterCheckQuery.cpp rename to src/Interpreters/InterpreterCheckQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterCheckQuery.h b/src/Interpreters/InterpreterCheckQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterCheckQuery.h rename to src/Interpreters/InterpreterCheckQuery.h diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp similarity index 97% rename from dbms/src/Interpreters/InterpreterCreateQuery.cpp rename to src/Interpreters/InterpreterCreateQuery.cpp index b57604828e1..37e2c8c5945 100644 --- a/dbms/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -6,6 +6,8 @@ #include #include +#include + #include #include @@ -181,7 +183,7 @@ ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns) String type_name = column.type->getName(); auto pos = type_name.data(); const auto end = pos + type_name.size(); - column_declaration->type = parseQuery(storage_p, pos, end, "data type", 0); + column_declaration->type = parseQuery(storage_p, pos, end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); columns_list->children.emplace_back(column_declaration); } @@ -207,7 +209,7 @@ ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns) String type_name = column.type->getName(); auto type_name_pos = type_name.data(); const auto 
type_name_end = type_name_pos + type_name.size(); - column_declaration->type = parseQuery(storage_p, type_name_pos, type_name_end, "data type", 0); + column_declaration->type = parseQuery(storage_p, type_name_pos, type_name_end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); if (column.default_desc.expression) { @@ -227,7 +229,7 @@ ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns) auto codec_desc_pos = codec_desc.data(); const auto codec_desc_end = codec_desc_pos + codec_desc.size(); ParserIdentifierWithParameters codec_p; - column_declaration->codec = parseQuery(codec_p, codec_desc_pos, codec_desc_end, "column codec", 0); + column_declaration->codec = parseQuery(codec_p, codec_desc_pos, codec_desc_end, "column codec", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); } if (column.ttl) @@ -403,7 +405,8 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::setProperties(AS StoragePtr as_storage = DatabaseCatalog::instance().getTable({as_database_name, create.as_table}); /// as_storage->getColumns() and setEngine(...) must be called under structure lock of other_table for CREATE ... AS other_table. - as_storage_lock = as_storage->lockStructureForShare(context.getCurrentQueryId()); + as_storage_lock = as_storage->lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); properties.columns = as_storage->getColumns(); /// Secondary indices make sense only for MergeTree family of storage engines. @@ -765,7 +768,14 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const } if (!create.to_table.empty()) - required_access.emplace_back(AccessType::INSERT, create.to_database, create.to_table); + required_access.emplace_back(AccessType::SELECT | AccessType::INSERT, create.to_database, create.to_table); + + if (create.storage && create.storage->engine) + { + auto source_access_type = StorageFactory::instance().getSourceAccessType(create.storage->engine->name); + if (source_access_type != AccessType::NONE) + required_access.emplace_back(source_access_type); + } return required_access; } diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterCreateQuery.h rename to src/Interpreters/InterpreterCreateQuery.h diff --git a/dbms/src/Interpreters/InterpreterCreateQuotaQuery.cpp b/src/Interpreters/InterpreterCreateQuotaQuery.cpp similarity index 91% rename from dbms/src/Interpreters/InterpreterCreateQuotaQuery.cpp rename to src/Interpreters/InterpreterCreateQuotaQuery.cpp index 4b64615dd36..80987993c96 100644 --- a/dbms/src/Interpreters/InterpreterCreateQuotaQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuotaQuery.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -33,7 +34,7 @@ void updateQuotaFromQueryImpl(Quota & quota, const ASTCreateQuotaQuery & query, auto duration = query_limits.duration; auto it = boost::range::find_if(quota_all_limits, [&](const Quota::Limits & x) { return x.duration == duration; }); - if (query_limits.unset_tracking) + if (query_limits.drop) { if (it != quota_all_limits.end()) quota_all_limits.erase(it); @@ -58,6 +59,8 @@ void updateQuotaFromQueryImpl(Quota & quota, const ASTCreateQuotaQuery & query, { if (query_limits.max[resource_type]) quota_limits.max[resource_type] = *query_limits.max[resource_type]; + else + quota_limits.max[resource_type] = Quota::UNLIMITED; } } @@ -76,10 +79,16 @@ void updateQuotaFromQueryImpl(Quota & quota, 
const ASTCreateQuotaQuery & query, BlockIO InterpreterCreateQuotaQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); context.checkAccess(query.alter ? AccessType::ALTER_QUOTA : AccessType::CREATE_QUOTA); + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } + std::optional roles_from_query; if (query.roles) roles_from_query = ExtendedRoleSet{*query.roles, access_control, context.getUserID()}; diff --git a/dbms/src/Interpreters/InterpreterCreateQuotaQuery.h b/src/Interpreters/InterpreterCreateQuotaQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterCreateQuotaQuery.h rename to src/Interpreters/InterpreterCreateQuotaQuery.h diff --git a/dbms/src/Interpreters/InterpreterCreateRoleQuery.cpp b/src/Interpreters/InterpreterCreateRoleQuery.cpp similarity index 95% rename from dbms/src/Interpreters/InterpreterCreateRoleQuery.cpp rename to src/Interpreters/InterpreterCreateRoleQuery.cpp index f64462d443b..ed9135b2bb6 100644 --- a/dbms/src/Interpreters/InterpreterCreateRoleQuery.cpp +++ b/src/Interpreters/InterpreterCreateRoleQuery.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,9 @@ BlockIO InterpreterCreateRoleQuery::execute() else context.checkAccess(AccessType::CREATE_ROLE); + if (!query.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context); + std::optional settings_from_query; if (query.settings) settings_from_query = SettingsProfileElements{*query.settings, access_control}; diff --git a/dbms/src/Interpreters/InterpreterCreateRoleQuery.h b/src/Interpreters/InterpreterCreateRoleQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterCreateRoleQuery.h rename to src/Interpreters/InterpreterCreateRoleQuery.h diff --git a/dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp similarity index 90% rename from dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp rename to src/Interpreters/InterpreterCreateRowPolicyQuery.cpp index 9ea47aba7bb..c3de3876c46 100644 --- a/dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp +++ b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -63,9 +64,15 @@ namespace BlockIO InterpreterCreateRowPolicyQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); - context.checkAccess(query.alter ? AccessType::ALTER_POLICY : AccessType::CREATE_POLICY); + context.checkAccess(query.alter ? 
AccessType::ALTER_ROW_POLICY : AccessType::CREATE_ROW_POLICY); + + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } std::optional roles_from_query; if (query.roles) diff --git a/dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.h b/src/Interpreters/InterpreterCreateRowPolicyQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.h rename to src/Interpreters/InterpreterCreateRowPolicyQuery.h diff --git a/dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp b/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp similarity index 91% rename from dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp rename to src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp index 9d110a69516..cb0b5587bdc 100644 --- a/dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp +++ b/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp @@ -1,6 +1,8 @@ #include #include +#include #include +#include #include #include #include @@ -49,13 +51,19 @@ namespace BlockIO InterpreterCreateSettingsProfileQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); if (query.alter) context.checkAccess(AccessType::ALTER_SETTINGS_PROFILE); else context.checkAccess(AccessType::CREATE_SETTINGS_PROFILE); + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } + std::optional settings_from_query; if (query.settings) settings_from_query = SettingsProfileElements{*query.settings, access_control}; diff --git a/dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.h b/src/Interpreters/InterpreterCreateSettingsProfileQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.h rename to src/Interpreters/InterpreterCreateSettingsProfileQuery.h diff --git a/dbms/src/Interpreters/InterpreterCreateUserQuery.cpp b/src/Interpreters/InterpreterCreateUserQuery.cpp similarity index 94% rename from dbms/src/Interpreters/InterpreterCreateUserQuery.cpp rename to src/Interpreters/InterpreterCreateUserQuery.cpp index 5dba1fefc9c..78c7cc222ae 100644 --- a/dbms/src/Interpreters/InterpreterCreateUserQuery.cpp +++ b/src/Interpreters/InterpreterCreateUserQuery.cpp @@ -1,10 +1,11 @@ #include #include #include +#include #include +#include #include #include -#include #include #include @@ -67,7 +68,7 @@ namespace BlockIO InterpreterCreateUserQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); auto access = context.getAccess(); access->checkAccess(query.alter ? 
AccessType::ALTER_USER : AccessType::CREATE_USER); @@ -83,6 +84,9 @@ BlockIO InterpreterCreateUserQuery::execute() } } + if (!query.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context); + std::optional settings_from_query; if (query.settings) settings_from_query = SettingsProfileElements{*query.settings, access_control}; diff --git a/dbms/src/Interpreters/InterpreterCreateUserQuery.h b/src/Interpreters/InterpreterCreateUserQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterCreateUserQuery.h rename to src/Interpreters/InterpreterCreateUserQuery.h diff --git a/dbms/src/Interpreters/InterpreterDescribeQuery.cpp b/src/Interpreters/InterpreterDescribeQuery.cpp similarity index 96% rename from dbms/src/Interpreters/InterpreterDescribeQuery.cpp rename to src/Interpreters/InterpreterDescribeQuery.cpp index cf7bb0458e9..f9c769a523e 100644 --- a/dbms/src/Interpreters/InterpreterDescribeQuery.cpp +++ b/src/Interpreters/InterpreterDescribeQuery.cpp @@ -89,7 +89,8 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl() table = DatabaseCatalog::instance().getTable(table_id); } - auto table_lock = table->lockStructureForShare(context.getInitialQueryId()); + auto table_lock = table->lockStructureForShare( + false, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout); columns = table->getColumns(); } diff --git a/dbms/src/Interpreters/InterpreterDescribeQuery.h b/src/Interpreters/InterpreterDescribeQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterDescribeQuery.h rename to src/Interpreters/InterpreterDescribeQuery.h diff --git a/dbms/src/Interpreters/InterpreterDropAccessEntityQuery.cpp b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp similarity index 92% rename from dbms/src/Interpreters/InterpreterDropAccessEntityQuery.cpp rename to src/Interpreters/InterpreterDropAccessEntityQuery.cpp index 12f33250188..e67e0659796 100644 --- a/dbms/src/Interpreters/InterpreterDropAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -37,7 +38,7 @@ namespace case Kind::USER: return AccessType::DROP_USER; case Kind::ROLE: return AccessType::DROP_ROLE; case Kind::QUOTA: return AccessType::DROP_QUOTA; - case Kind::ROW_POLICY: return AccessType::DROP_POLICY; + case Kind::ROW_POLICY: return AccessType::DROP_ROW_POLICY; case Kind::SETTINGS_PROFILE: return AccessType::DROP_SETTINGS_PROFILE; } __builtin_unreachable(); @@ -52,6 +53,9 @@ BlockIO InterpreterDropAccessEntityQuery::execute() std::type_index type = getType(query.kind); context.checkAccess(getRequiredAccessType(query.kind)); + if (!query.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context); + if (query.kind == Kind::ROW_POLICY) { Strings full_names; diff --git a/dbms/src/Interpreters/InterpreterDropAccessEntityQuery.h b/src/Interpreters/InterpreterDropAccessEntityQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterDropAccessEntityQuery.h rename to src/Interpreters/InterpreterDropAccessEntityQuery.h diff --git a/dbms/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp similarity index 94% rename from dbms/src/Interpreters/InterpreterDropQuery.cpp rename to src/Interpreters/InterpreterDropQuery.cpp index 42d9528abd5..91783352842 100644 --- a/dbms/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -93,17 +93,17 @@ BlockIO 
InterpreterDropQuery::executeToTable( context.checkAccess(table->isView() ? AccessType::DROP_VIEW : AccessType::DROP_TABLE, table_id); table->shutdown(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Drop table from memory, don't touch data and metadata database->detachTable(table_name); } else if (kind == ASTDropQuery::Kind::Truncate) { - context.checkAccess(table->isView() ? AccessType::TRUNCATE_VIEW : AccessType::TRUNCATE_TABLE, table_id); + context.checkAccess(AccessType::TRUNCATE, table_id); table->checkTableCanBeDropped(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Drop table data, don't touch metadata table->truncate(query_ptr, context, table_lock); } @@ -115,7 +115,7 @@ BlockIO InterpreterDropQuery::executeToTable( table->shutdown(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); const std::string metadata_file_without_extension = database->getMetadataPath() + escapeForFileName(table_id.table_name); const auto prev_metadata_name = metadata_file_without_extension + ".sql"; @@ -216,7 +216,8 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(const String & table_name, if (kind == ASTDropQuery::Kind::Truncate) { /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = + table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Drop table data, don't touch metadata table->truncate(query_ptr, context, table_lock); } @@ -225,7 +226,8 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(const String & table_name, context_handle.removeExternalTable(table_name); table->shutdown(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = + table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Delete table data table->drop(table_lock); table->is_dropped = true; @@ -316,7 +318,7 @@ AccessRightsElements InterpreterDropQuery::getRequiredAccessForDDLOnCluster() co if (drop.kind == ASTDropQuery::Kind::Drop) required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table); else if (drop.kind == ASTDropQuery::Kind::Truncate) - required_access.emplace_back(AccessType::TRUNCATE_TABLE | AccessType::TRUNCATE_VIEW, drop.database, drop.table); + required_access.emplace_back(AccessType::TRUNCATE, drop.database, drop.table); else if (drop.kind == ASTDropQuery::Kind::Detach) required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table); } diff --git a/dbms/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterDropQuery.h rename to 
src/Interpreters/InterpreterDropQuery.h diff --git a/dbms/src/Interpreters/InterpreterExistsQuery.cpp b/src/Interpreters/InterpreterExistsQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterExistsQuery.cpp rename to src/Interpreters/InterpreterExistsQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterExistsQuery.h b/src/Interpreters/InterpreterExistsQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterExistsQuery.h rename to src/Interpreters/InterpreterExistsQuery.h diff --git a/dbms/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterExplainQuery.cpp rename to src/Interpreters/InterpreterExplainQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterExplainQuery.h b/src/Interpreters/InterpreterExplainQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterExplainQuery.h rename to src/Interpreters/InterpreterExplainQuery.h diff --git a/dbms/src/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterFactory.cpp rename to src/Interpreters/InterpreterFactory.cpp diff --git a/dbms/src/Interpreters/InterpreterFactory.h b/src/Interpreters/InterpreterFactory.h similarity index 100% rename from dbms/src/Interpreters/InterpreterFactory.h rename to src/Interpreters/InterpreterFactory.h diff --git a/dbms/src/Interpreters/InterpreterGrantQuery.cpp b/src/Interpreters/InterpreterGrantQuery.cpp similarity index 93% rename from dbms/src/Interpreters/InterpreterGrantQuery.cpp rename to src/Interpreters/InterpreterGrantQuery.cpp index 5d215ff3a93..a5f13dbbbfe 100644 --- a/dbms/src/Interpreters/InterpreterGrantQuery.cpp +++ b/src/Interpreters/InterpreterGrantQuery.cpp @@ -1,6 +1,8 @@ #include #include +#include #include +#include #include #include #include @@ -59,7 +61,7 @@ namespace BlockIO InterpreterGrantQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); auto access = context.getAccess(); access->checkGrantOption(query.access_rights_elements); @@ -72,6 +74,12 @@ BlockIO InterpreterGrantQuery::execute() access->checkAdminOption(role_from_query); } + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } + std::vector to_roles = ExtendedRoleSet{*query.to_roles, access_control, context.getUserID()}.getMatchingIDs(access_control); String current_database = context.getCurrentDatabase(); diff --git a/dbms/src/Interpreters/InterpreterGrantQuery.h b/src/Interpreters/InterpreterGrantQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterGrantQuery.h rename to src/Interpreters/InterpreterGrantQuery.h diff --git a/dbms/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp similarity index 97% rename from dbms/src/Interpreters/InterpreterInsertQuery.cpp rename to src/Interpreters/InterpreterInsertQuery.cpp index f12ac68cede..c47d6278248 100644 --- a/dbms/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -109,7 +109,8 @@ BlockIO InterpreterInsertQuery::execute() BlockIO res; StoragePtr table = getTable(query); - auto table_lock = table->lockStructureForShare(context.getInitialQueryId()); + auto table_lock = table->lockStructureForShare( + true, 
context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout); auto query_sample_block = getSampleBlock(query, table); if (!query.table_function) @@ -176,7 +177,7 @@ BlockIO InterpreterInsertQuery::execute() "Expected exactly one connection for shard " + toString(shard_info.shard_num), ErrorCodes::LOGICAL_ERROR); /// INSERT SELECT query returns empty block - auto in_stream = std::make_shared(*connections.front(), new_query_str, Block{}, context); + auto in_stream = std::make_shared(std::move(connections), new_query_str, Block{}, context); in_streams.push_back(in_stream); } out_streams.push_back(std::make_shared(Block())); @@ -251,7 +252,7 @@ BlockIO InterpreterInsertQuery::execute() for (auto & in_stream : in_streams) { in_stream = std::make_shared( - context, in_stream, out_streams.at(0)->getHeader(), ConvertingBlockInputStream::MatchColumnsMode::Position); + in_stream, out_streams.at(0)->getHeader(), ConvertingBlockInputStream::MatchColumnsMode::Position); } Block in_header = in_streams.at(0)->getHeader(); diff --git a/dbms/src/Interpreters/InterpreterInsertQuery.h b/src/Interpreters/InterpreterInsertQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterInsertQuery.h rename to src/Interpreters/InterpreterInsertQuery.h diff --git a/dbms/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp similarity index 97% rename from dbms/src/Interpreters/InterpreterKillQueryQuery.cpp rename to src/Interpreters/InterpreterKillQueryQuery.cpp index 196b2b4eef1..23f39ab3fc5 100644 --- a/dbms/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -267,7 +267,7 @@ BlockIO InterpreterKillQueryQuery::execute() else { ParserAlterCommand parser; - auto command_ast = parseQuery(parser, command_col.getDataAt(i).toString(), 0); + auto command_ast = parseQuery(parser, command_col.getDataAt(i).toString(), 0, context.getSettingsRef().max_parser_depth); required_access_rights = InterpreterAlterQuery::getRequiredAccessForCommand(command_ast->as(), table_id.database_name, table_id.table_name); if (!access->isGranted(&Poco::Logger::get("InterpreterKillQueryQuery"), required_access_rights)) { @@ -319,7 +319,7 @@ AccessRightsElements InterpreterKillQueryQuery::getRequiredAccessForDDLOnCluster if (query.type == ASTKillQueryQuery::Type::Query) required_access.emplace_back(AccessType::KILL_QUERY); else if (query.type == ASTKillQueryQuery::Type::Mutation) - required_access.emplace_back(AccessType::UPDATE | AccessType::DELETE | AccessType::MATERIALIZE_INDEX | AccessType::MATERIALIZE_TTL); + required_access.emplace_back(AccessType::ALTER_UPDATE | AccessType::ALTER_DELETE | AccessType::ALTER_MATERIALIZE_INDEX | AccessType::ALTER_MATERIALIZE_TTL); return required_access; } diff --git a/dbms/src/Interpreters/InterpreterKillQueryQuery.h b/src/Interpreters/InterpreterKillQueryQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterKillQueryQuery.h rename to src/Interpreters/InterpreterKillQueryQuery.h diff --git a/dbms/src/Interpreters/InterpreterOptimizeQuery.cpp b/src/Interpreters/InterpreterOptimizeQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterOptimizeQuery.cpp rename to src/Interpreters/InterpreterOptimizeQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterOptimizeQuery.h b/src/Interpreters/InterpreterOptimizeQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterOptimizeQuery.h rename to src/Interpreters/InterpreterOptimizeQuery.h 
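[Editor's note] A pattern worth calling out here: throughout this diff, lockStructureForShare() and lockExclusively() gain a timeout argument taken from context.getSettingsRef().lock_acquire_timeout, so a query waiting on a busy table's structure lock fails with a timeout instead of blocking indefinitely. Below is a minimal standard-library sketch of that timed-lock pattern; TimedSharedLock and the error text are illustrative, not ClickHouse's actual TableStructureReadLockHolder API.

#include <chrono>
#include <shared_mutex>
#include <stdexcept>

// Acquires a shared (reader) lock, but gives up after `timeout`.
class TimedSharedLock
{
public:
    TimedSharedLock(std::shared_timed_mutex & mutex, std::chrono::milliseconds timeout)
        : lock(mutex, std::defer_lock)
    {
        // try_lock_for() returns false if the timeout elapses before the lock is granted.
        if (!lock.try_lock_for(timeout))
            throw std::runtime_error("Locking attempt timed out");
    }

private:
    std::shared_lock<std::shared_timed_mutex> lock;  // released by the destructor
};

// Usage sketch: hold the lock for the scope of one query.
// std::shared_timed_mutex structure_mutex;
// TimedSharedLock guard(structure_mutex, std::chrono::milliseconds(120000));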
diff --git a/dbms/src/Interpreters/InterpreterRenameQuery.cpp b/src/Interpreters/InterpreterRenameQuery.cpp similarity index 95% rename from dbms/src/Interpreters/InterpreterRenameQuery.cpp rename to src/Interpreters/InterpreterRenameQuery.cpp index 4f54f759510..9a4f4b1b197 100644 --- a/dbms/src/Interpreters/InterpreterRenameQuery.cpp +++ b/src/Interpreters/InterpreterRenameQuery.cpp @@ -79,7 +79,8 @@ BlockIO InterpreterRenameQuery::execute() { database_catalog.assertTableDoesntExist(StorageID(elem.to_database_name, elem.to_table_name)); auto from_table = database_catalog.getTable({elem.from_database_name, elem.from_table_name}); - auto from_table_lock = from_table->lockExclusively(context.getCurrentQueryId()); + auto from_table_lock = + from_table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); database_catalog.getDatabase(elem.from_database_name)->renameTable( context, diff --git a/dbms/src/Interpreters/InterpreterRenameQuery.h b/src/Interpreters/InterpreterRenameQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterRenameQuery.h rename to src/Interpreters/InterpreterRenameQuery.h diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp similarity index 99% rename from dbms/src/Interpreters/InterpreterSelectQuery.cpp rename to src/Interpreters/InterpreterSelectQuery.cpp index b08e0ce1146..691b3c1045b 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -48,8 +48,8 @@ #include #include #include -#include -#include +#include +#include #include #include @@ -87,7 +87,6 @@ #include #include #include -#include #include #include #include @@ -138,7 +137,7 @@ String InterpreterSelectQuery::generateFilterActions(ExpressionActionsPtr & acti for (const auto & column_str : prerequisite_columns) { ParserExpression expr_parser; - expr_list->children.push_back(parseQuery(expr_parser, column_str, 0)); + expr_list->children.push_back(parseQuery(expr_parser, column_str, 0, context->getSettingsRef().max_parser_depth)); } select_ast->setExpression(ASTSelectQuery::Expression::TABLES, std::make_shared()); @@ -255,7 +254,8 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (storage) { - table_lock = storage->lockStructureForShare(context->getInitialQueryId()); + table_lock = storage->lockStructureForShare( + false, context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout); table_id = storage->getStorageID(); } @@ -510,7 +510,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl(bool try_move_to_prewhere) } if (storage && !options.only_analyze) - from_stage = storage->getQueryProcessingStage(*context, query_ptr); + from_stage = storage->getQueryProcessingStage(*context, options.to_stage, query_ptr); /// Do I need to perform the first part of the pipeline - running on remote servers during distributed processing. 
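[Editor's note] Another recurring change in these hunks: every parseQuery() call now passes an explicit depth limit (DBMS_DEFAULT_MAX_PARSER_DEPTH or the max_parser_depth setting). A recursive-descent parser without such a cap can overflow the stack on deeply nested input such as thousands of opening parentheses. A self-contained sketch of the guard on a toy parenthesized grammar follows; DepthGuard and parseExpr are illustrative names, not the ClickHouse parser.

#include <cstddef>
#include <stdexcept>
#include <string>

// RAII counter: one instance per recursion level.
struct DepthGuard
{
    size_t & depth;
    DepthGuard(size_t & depth_, size_t max_depth) : depth(depth_)
    {
        if (++depth > max_depth)
        {
            --depth;
            throw std::runtime_error("Maximum parse depth exceeded");
        }
    }
    ~DepthGuard() { --depth; }
};

// Parses arbitrarily nested parentheses around a single 'x', e.g. "((x))".
static void parseExpr(const std::string & s, size_t & pos, size_t & depth, size_t max_depth)
{
    DepthGuard guard(depth, max_depth);  // throws long before the stack overflows
    if (pos < s.size() && s[pos] == '(')
    {
        ++pos;
        parseExpr(s, pos, depth, max_depth);
        if (pos >= s.size() || s[pos] != ')')
            throw std::runtime_error("Expected ')'");
        ++pos;
    }
    else if (pos < s.size() && s[pos] == 'x')
        ++pos;
    else
        throw std::runtime_error("Expected '(' or 'x'");
}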
bool first_stage = from_stage < QueryProcessingStage::WithMergeableState @@ -893,7 +893,13 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS default_totals = true; } - bool inflating_join = join && !typeid_cast(join.get()); + bool inflating_join = false; + if (join) + { + inflating_join = true; + if (auto * hash_join = typeid_cast(join.get())) + inflating_join = isCross(hash_join->getKind()); + } pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType type) { @@ -1544,7 +1550,7 @@ void InterpreterSelectQuery::executeFetchColumns( auto header = stream->getHeader(); auto mode = ConvertingBlockInputStream::MatchColumnsMode::Name; if (!blocksHaveEqualStructure(first_header, header)) - stream = std::make_shared(*context, stream, first_header, mode); + stream = std::make_shared(stream, first_header, mode); } } @@ -2535,8 +2541,7 @@ void InterpreterSelectQuery::executeExtremes(QueryPipeline & pipeline) if (!context->getSettingsRef().extremes) return; - auto transform = std::make_shared(pipeline.getHeader()); - pipeline.addExtremesTransform(std::move(transform)); + pipeline.addExtremesTransform(); } @@ -2586,7 +2591,7 @@ void InterpreterSelectQuery::unifyStreams(Pipeline & pipeline, Block header) auto mode = ConvertingBlockInputStream::MatchColumnsMode::Name; if (!blocksHaveEqualStructure(header, stream_header)) - stream = std::make_shared(*context, stream, header, mode); + stream = std::make_shared(stream, header, mode); } } diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h similarity index 99% rename from dbms/src/Interpreters/InterpreterSelectQuery.h rename to src/Interpreters/InterpreterSelectQuery.h index 0208af2431f..c50f4a2f7b7 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -212,7 +212,7 @@ private: String generateFilterActions(ExpressionActionsPtr & actions, const ASTPtr & row_policy_filter, const Names & prerequisite_columns = {}) const; /// Add ConvertingBlockInputStream to specified header. 
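[Editor's note] The executeImpl() hunk above replaces a single typeid_cast with an explicit rule for when a join may "inflate" the row count: any non-hash join is assumed inflating, and a hash join inflates only when its kind is CROSS. A stripped-down sketch of that dispatch; the tiny class hierarchy below is illustrative, not the real IJoin/HashJoin interfaces.

enum class JoinKind { Inner, Left, Right, Full, Cross };

struct IJoin { virtual ~IJoin() = default; };

struct HashJoin : IJoin
{
    explicit HashJoin(JoinKind kind_) : kind(kind_) {}
    JoinKind getKind() const { return kind; }
    JoinKind kind;
};

struct MergeJoin : IJoin {};

static bool isCross(JoinKind kind) { return kind == JoinKind::Cross; }

static bool isInflatingJoin(const IJoin * join)
{
    if (!join)
        return false;  // no join in the pipeline at all
    if (const auto * hash_join = dynamic_cast<const HashJoin *>(join))
        return isCross(hash_join->getKind());  // hash joins inflate only for CROSS
    return true;  // anything else (e.g. a merge join) is treated as inflating
}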
- void unifyStreams(Pipeline & pipeline, Block header); + static void unifyStreams(Pipeline & pipeline, Block header); enum class Modificator { diff --git a/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp similarity index 98% rename from dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp rename to src/Interpreters/InterpreterSelectWithUnionQuery.cpp index feec18a1af3..9cdb19b1934 100644 --- a/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -193,7 +193,7 @@ BlockInputStreams InterpreterSelectWithUnionQuery::executeWithMultipleStreams(Qu if (nested_interpreters.size() > 1) { for (auto & stream : nested_streams) - stream = std::make_shared(*context, stream, result_header,ConvertingBlockInputStream::MatchColumnsMode::Position); + stream = std::make_shared(stream, result_header,ConvertingBlockInputStream::MatchColumnsMode::Position); parent_pipeline.addInterpreterContext(context); } @@ -269,7 +269,7 @@ QueryPipeline InterpreterSelectWithUnionQuery::executeWithProcessors() if (!pipelines.empty()) { auto common_header = getCommonHeaderForUnion(headers); - main_pipeline.unitePipelines(std::move(pipelines), common_header, *context); + main_pipeline.unitePipelines(std::move(pipelines), common_header); } main_pipeline.addInterpreterContext(context); diff --git a/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.h b/src/Interpreters/InterpreterSelectWithUnionQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterSelectWithUnionQuery.h rename to src/Interpreters/InterpreterSelectWithUnionQuery.h diff --git a/dbms/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterSetQuery.cpp rename to src/Interpreters/InterpreterSetQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterSetQuery.h b/src/Interpreters/InterpreterSetQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterSetQuery.h rename to src/Interpreters/InterpreterSetQuery.h diff --git a/dbms/src/Interpreters/InterpreterSetRoleQuery.cpp b/src/Interpreters/InterpreterSetRoleQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterSetRoleQuery.cpp rename to src/Interpreters/InterpreterSetRoleQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterSetRoleQuery.h b/src/Interpreters/InterpreterSetRoleQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterSetRoleQuery.h rename to src/Interpreters/InterpreterSetRoleQuery.h diff --git a/dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp similarity index 85% rename from dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp rename to src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp index 52126b0507e..0d3b88facce 100644 --- a/dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include @@ -36,7 +38,7 @@ namespace ASTPtr getCreateQueryImpl( const User & user, const AccessControlManager * manager /* not used if attach_mode == true */, - bool attach_mode = false) + bool attach_mode) { auto query = std::make_shared(); query->name = user.getName(); @@ -71,7 +73,7 @@ namespace } - 
ASTPtr getCreateQueryImpl(const Role & role, const AccessControlManager * manager, bool attach_mode = false) + ASTPtr getCreateQueryImpl(const Role & role, const AccessControlManager * manager, bool attach_mode) { auto query = std::make_shared(); query->name = role.getName(); @@ -89,7 +91,7 @@ namespace } - ASTPtr getCreateQueryImpl(const SettingsProfile & profile, const AccessControlManager * manager, bool attach_mode = false) + ASTPtr getCreateQueryImpl(const SettingsProfile & profile, const AccessControlManager * manager, bool attach_mode) { auto query = std::make_shared(); query->name = profile.getName(); @@ -101,6 +103,8 @@ namespace query->settings = profile.elements.toAST(); else query->settings = profile.elements.toASTWithNames(*manager); + if (query->settings) + query->settings->setUseInheritKeyword(true); } if (!profile.to_roles.empty()) @@ -118,7 +122,7 @@ namespace ASTPtr getCreateQueryImpl( const Quota & quota, const AccessControlManager * manager /* not used if attach_mode == true */, - bool attach_mode = false) + bool attach_mode) { auto query = std::make_shared(); query->name = quota.getName(); @@ -133,7 +137,7 @@ namespace create_query_limits.duration = limits.duration; create_query_limits.randomize_interval = limits.randomize_interval; for (auto resource_type : ext::range(Quota::MAX_RESOURCE_TYPE)) - if (limits.max[resource_type]) + if (limits.max[resource_type] != Quota::UNLIMITED) create_query_limits.max[resource_type] = limits.max[resource_type]; query->all_limits.push_back(create_query_limits); } @@ -153,7 +157,7 @@ namespace ASTPtr getCreateQueryImpl( const RowPolicy & policy, const AccessControlManager * manager /* not used if attach_mode == true */, - bool attach_mode = false) + bool attach_mode) { auto query = std::make_shared(); query->name_parts = RowPolicy::FullNameParts{policy.getDatabase(), policy.getTableName(), policy.getName()}; @@ -168,7 +172,7 @@ namespace if (!condition.empty()) { ParserExpression parser; - ASTPtr expr = parseQuery(parser, condition, 0); + ASTPtr expr = parseQuery(parser, condition, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); query->conditions.push_back(std::pair{index, expr}); } } @@ -187,7 +191,7 @@ namespace ASTPtr getCreateQueryImpl( const IAccessEntity & entity, const AccessControlManager * manager /* not used if attach_mode == true */, - bool attach_mode = false) + bool attach_mode) { if (const User * user = typeid_cast(&entity)) return getCreateQueryImpl(*user, manager, attach_mode); @@ -256,28 +260,45 @@ BlockInputStreamPtr InterpreterShowCreateAccessEntityQuery::executeImpl() ASTPtr InterpreterShowCreateAccessEntityQuery::getCreateQuery(const ASTShowCreateAccessEntityQuery & show_query) const { const auto & access_control = context.getAccessControlManager(); + context.checkAccess(getRequiredAccess()); if (show_query.current_user) { auto user = context.getUser(); - return getCreateQueryImpl(*user, &access_control); + return getCreateQueryImpl(*user, &access_control, false); } if (show_query.current_quota) { auto quota = access_control.read(context.getQuota()->getUsageInfo().quota_id); - return getCreateQueryImpl(*quota, &access_control); + return getCreateQueryImpl(*quota, &access_control, false); } auto type = getType(show_query.kind); if (show_query.kind == Kind::ROW_POLICY) { RowPolicyPtr policy = access_control.read(show_query.row_policy_name.getFullName(context)); - return getCreateQueryImpl(*policy, &access_control); + return getCreateQueryImpl(*policy, &access_control, false); } auto entity = 
access_control.read(access_control.getID(type, show_query.name)); - return getCreateQueryImpl(*entity, &access_control); + return getCreateQueryImpl(*entity, &access_control, false); +} + + +AccessRightsElements InterpreterShowCreateAccessEntityQuery::getRequiredAccess() const +{ + const auto & show_query = query_ptr->as(); + AccessRightsElements res; + switch (show_query.kind) + { + case Kind::USER: res.emplace_back(AccessType::SHOW_USERS); break; + case Kind::ROLE: res.emplace_back(AccessType::SHOW_ROLES); break; + case Kind::ROW_POLICY: res.emplace_back(AccessType::SHOW_ROW_POLICIES); break; + case Kind::SETTINGS_PROFILE: res.emplace_back(AccessType::SHOW_SETTINGS_PROFILES); break; + case Kind::QUOTA: res.emplace_back(AccessType::SHOW_QUOTAS); break; + } + return res; } diff --git a/dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h similarity index 92% rename from dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h rename to src/Interpreters/InterpreterShowCreateAccessEntityQuery.h index 92025bedb6c..0183c59766f 100644 --- a/dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h @@ -9,6 +9,7 @@ namespace DB { class Context; class ASTShowCreateAccessEntityQuery; +class AccessRightsElements; struct IAccessEntity; @@ -30,6 +31,7 @@ public: private: BlockInputStreamPtr executeImpl(); ASTPtr getCreateQuery(const ASTShowCreateAccessEntityQuery & show_query) const; + AccessRightsElements getRequiredAccess() const; ASTPtr query_ptr; const Context & context; diff --git a/dbms/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp similarity index 98% rename from dbms/src/Interpreters/InterpreterShowCreateQuery.cpp rename to src/Interpreters/InterpreterShowCreateQuery.cpp index 8bee0b88fe8..4161b3500bd 100644 --- a/dbms/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -73,7 +73,7 @@ BlockInputStreamPtr InterpreterShowCreateQuery::executeImpl() throw Exception("Unable to show the create query of " + show_query->table + ". 
Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY); std::stringstream stream; - formatAST(*create_query, stream, false, true); + formatAST(*create_query, stream, false, false); String res = stream.str(); MutableColumnPtr column = ColumnString::create(); diff --git a/dbms/src/Interpreters/InterpreterShowCreateQuery.h b/src/Interpreters/InterpreterShowCreateQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterShowCreateQuery.h rename to src/Interpreters/InterpreterShowCreateQuery.h diff --git a/dbms/src/Interpreters/InterpreterShowGrantsQuery.cpp b/src/Interpreters/InterpreterShowGrantsQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterShowGrantsQuery.cpp rename to src/Interpreters/InterpreterShowGrantsQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterShowGrantsQuery.h b/src/Interpreters/InterpreterShowGrantsQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterShowGrantsQuery.h rename to src/Interpreters/InterpreterShowGrantsQuery.h diff --git a/dbms/src/Interpreters/InterpreterShowProcesslistQuery.cpp b/src/Interpreters/InterpreterShowProcesslistQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterShowProcesslistQuery.cpp rename to src/Interpreters/InterpreterShowProcesslistQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterShowProcesslistQuery.h b/src/Interpreters/InterpreterShowProcesslistQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterShowProcesslistQuery.h rename to src/Interpreters/InterpreterShowProcesslistQuery.h diff --git a/dbms/src/Interpreters/InterpreterShowQuotasQuery.cpp b/src/Interpreters/InterpreterShowQuotasQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterShowQuotasQuery.cpp rename to src/Interpreters/InterpreterShowQuotasQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterShowQuotasQuery.h b/src/Interpreters/InterpreterShowQuotasQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterShowQuotasQuery.h rename to src/Interpreters/InterpreterShowQuotasQuery.h diff --git a/dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.cpp b/src/Interpreters/InterpreterShowRowPoliciesQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.cpp rename to src/Interpreters/InterpreterShowRowPoliciesQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.h b/src/Interpreters/InterpreterShowRowPoliciesQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.h rename to src/Interpreters/InterpreterShowRowPoliciesQuery.h diff --git a/dbms/src/Interpreters/InterpreterShowTablesQuery.cpp b/src/Interpreters/InterpreterShowTablesQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterShowTablesQuery.cpp rename to src/Interpreters/InterpreterShowTablesQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterShowTablesQuery.h b/src/Interpreters/InterpreterShowTablesQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterShowTablesQuery.h rename to src/Interpreters/InterpreterShowTablesQuery.h diff --git a/dbms/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp similarity index 83% rename from dbms/src/Interpreters/InterpreterSystemQuery.cpp rename to src/Interpreters/InterpreterSystemQuery.cpp index ff2001e4bc4..36713be1ff7 100644 --- a/dbms/src/Interpreters/InterpreterSystemQuery.cpp +++ 
b/src/Interpreters/InterpreterSystemQuery.cpp @@ -102,19 +102,19 @@ void executeCommandsAndThrowIfError(Callables && ... commands) AccessType getRequiredAccessType(StorageActionBlockType action_type) { if (action_type == ActionLocks::PartsMerge) - return AccessType::STOP_MERGES; + return AccessType::SYSTEM_MERGES; else if (action_type == ActionLocks::PartsFetch) - return AccessType::STOP_FETCHES; + return AccessType::SYSTEM_FETCHES; else if (action_type == ActionLocks::PartsSend) - return AccessType::STOP_REPLICATED_SENDS; + return AccessType::SYSTEM_REPLICATED_SENDS; else if (action_type == ActionLocks::ReplicationQueue) - return AccessType::STOP_REPLICATION_QUEUES; + return AccessType::SYSTEM_REPLICATION_QUEUES; else if (action_type == ActionLocks::DistributedSend) - return AccessType::STOP_DISTRIBUTED_SENDS; + return AccessType::SYSTEM_DISTRIBUTED_SENDS; else if (action_type == ActionLocks::PartsTTLMerge) - return AccessType::STOP_TTL_MERGES; + return AccessType::SYSTEM_TTL_MERGES; else if (action_type == ActionLocks::PartsMove) - return AccessType::STOP_MOVES; + return AccessType::SYSTEM_MOVES; else throw Exception("Unknown action type: " + std::to_string(action_type), ErrorCodes::LOGICAL_ERROR); } @@ -183,42 +183,42 @@ BlockIO InterpreterSystemQuery::execute() switch (query.type) { case Type::SHUTDOWN: - context.checkAccess(AccessType::SHUTDOWN); + context.checkAccess(AccessType::SYSTEM_SHUTDOWN); if (kill(0, SIGTERM)) throwFromErrno("System call kill(0, SIGTERM) failed", ErrorCodes::CANNOT_KILL); break; case Type::KILL: - context.checkAccess(AccessType::SHUTDOWN); + context.checkAccess(AccessType::SYSTEM_SHUTDOWN); if (kill(0, SIGKILL)) throwFromErrno("System call kill(0, SIGKILL) failed", ErrorCodes::CANNOT_KILL); break; case Type::DROP_DNS_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_DNS_CACHE); DNSResolver::instance().dropCache(); /// Reinitialize clusters to update their resolved_addresses system_context.reloadClusterConfig(); break; case Type::DROP_MARK_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_MARK_CACHE); system_context.dropMarkCache(); break; case Type::DROP_UNCOMPRESSED_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_UNCOMPRESSED_CACHE); system_context.dropUncompressedCache(); break; #if USE_EMBEDDED_COMPILER case Type::DROP_COMPILED_EXPRESSION_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_COMPILED_EXPRESSION_CACHE); system_context.dropCompiledExpressionCache(); break; #endif case Type::RELOAD_DICTIONARY: - context.checkAccess(AccessType::RELOAD_DICTIONARY); + context.checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY); system_context.getExternalDictionariesLoader().loadOrReload(query.target_dictionary); ExternalDictionariesLoader::resetAll(); break; case Type::RELOAD_DICTIONARIES: - context.checkAccess(AccessType::RELOAD_DICTIONARY); + context.checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY); executeCommandsAndThrowIfError( [&] () { system_context.getExternalDictionariesLoader().reloadAllTriedToLoad(); }, [&] () { system_context.getEmbeddedDictionaries().reload(); } @@ -226,11 +226,11 @@ BlockIO InterpreterSystemQuery::execute() ExternalDictionariesLoader::resetAll(); break; case Type::RELOAD_EMBEDDED_DICTIONARIES: - context.checkAccess(AccessType::RELOAD_DICTIONARY); + context.checkAccess(AccessType::SYSTEM_RELOAD_EMBEDDED_DICTIONARIES); 
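[Editor's note] The theme of this file's hunks: the old flat privileges (SHUTDOWN, DROP_CACHE, STOP_MERGES, ...) are folded into a dedicated SYSTEM_* family, and a single helper maps a runtime action-lock type to the privilege that guards it. A compilable sketch of that mapping; the two enums below stand in for ClickHouse's ActionLocks constants and AccessType values.

#include <stdexcept>

enum class ActionLock { PartsMerge, PartsFetch, PartsSend, DistributedSend };

enum class AccessType { SYSTEM_MERGES, SYSTEM_FETCHES, SYSTEM_REPLICATED_SENDS, SYSTEM_DISTRIBUTED_SENDS };

static AccessType getRequiredAccessType(ActionLock action)
{
    switch (action)
    {
        case ActionLock::PartsMerge:      return AccessType::SYSTEM_MERGES;
        case ActionLock::PartsFetch:      return AccessType::SYSTEM_FETCHES;
        case ActionLock::PartsSend:       return AccessType::SYSTEM_REPLICATED_SENDS;
        case ActionLock::DistributedSend: return AccessType::SYSTEM_DISTRIBUTED_SENDS;
    }
    // An enum switch lets the compiler warn on unhandled values;
    // the throw keeps the function total at runtime, as in the diff.
    throw std::logic_error("Unknown action type");
}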
system_context.getEmbeddedDictionaries().reload(); break; case Type::RELOAD_CONFIG: - context.checkAccess(AccessType::RELOAD_CONFIG); + context.checkAccess(AccessType::SYSTEM_RELOAD_CONFIG); system_context.reloadConfig(); break; case Type::STOP_MERGES: @@ -290,7 +290,7 @@ BlockIO InterpreterSystemQuery::execute() ErrorCodes::BAD_ARGUMENTS); break; case Type::FLUSH_LOGS: - context.checkAccess(AccessType::FLUSH_LOGS); + context.checkAccess(AccessType::SYSTEM_FLUSH_LOGS); executeCommandsAndThrowIfError( [&] () { if (auto query_log = context.getQueryLog()) query_log->flush(); }, [&] () { if (auto part_log = context.getPartLog("")) part_log->flush(); }, @@ -313,7 +313,7 @@ BlockIO InterpreterSystemQuery::execute() StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, Context & system_context, bool need_ddl_guard) { - context.checkAccess(AccessType::RESTART_REPLICA, replica); + context.checkAccess(AccessType::SYSTEM_RESTART_REPLICA, replica); auto table_ddl_guard = need_ddl_guard ? DatabaseCatalog::instance().getDDLGuard(replica.getDatabaseName(), replica.getTableName()) : nullptr; auto [database, table] = DatabaseCatalog::instance().tryGetDatabaseAndTable(replica); @@ -326,7 +326,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, table->shutdown(); { /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); create_ast = database->getCreateTableQuery(system_context, replica.table_name); database->detachTable(replica.table_name); @@ -366,7 +366,7 @@ void InterpreterSystemQuery::restartReplicas(Context & system_context) for (auto iterator = database->getTablesIterator(system_context); iterator->isValid(); iterator->next()) { if (dynamic_cast(iterator->table().get())) - replica_names.emplace_back(iterator->table()->getStorageID()); + replica_names.emplace_back(StorageID{database->getDatabaseName(), iterator->name()}); } } @@ -387,7 +387,7 @@ void InterpreterSystemQuery::restartReplicas(Context & system_context) void InterpreterSystemQuery::syncReplica(ASTSystemQuery &) { - context.checkAccess(AccessType::SYNC_REPLICA, table_id); + context.checkAccess(AccessType::SYSTEM_SYNC_REPLICA, table_id); StoragePtr table = DatabaseCatalog::instance().getTable(table_id); if (auto storage_replicated = dynamic_cast(table.get())) @@ -408,7 +408,7 @@ void InterpreterSystemQuery::syncReplica(ASTSystemQuery &) void InterpreterSystemQuery::flushDistributed(ASTSystemQuery &) { - context.checkAccess(AccessType::FLUSH_DISTRIBUTED, table_id); + context.checkAccess(AccessType::SYSTEM_FLUSH_DISTRIBUTED, table_id); if (auto storage_distributed = dynamic_cast(DatabaseCatalog::instance().getTable(table_id).get())) storage_distributed->flushClusterNodesAllData(); @@ -427,7 +427,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() case Type::SHUTDOWN: [[fallthrough]]; case Type::KILL: { - required_access.emplace_back(AccessType::SHUTDOWN); + required_access.emplace_back(AccessType::SYSTEM_SHUTDOWN); break; } case Type::DROP_DNS_CACHE: [[fallthrough]]; @@ -437,107 +437,107 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() #endif case Type::DROP_UNCOMPRESSED_CACHE: { - required_access.emplace_back(AccessType::DROP_CACHE); + required_access.emplace_back(AccessType::SYSTEM_DROP_CACHE); break; } case 
Type::RELOAD_DICTIONARY: [[fallthrough]]; case Type::RELOAD_DICTIONARIES: [[fallthrough]]; case Type::RELOAD_EMBEDDED_DICTIONARIES: { - required_access.emplace_back(AccessType::RELOAD_DICTIONARY); + required_access.emplace_back(AccessType::SYSTEM_RELOAD_DICTIONARY); break; } case Type::RELOAD_CONFIG: { - required_access.emplace_back(AccessType::RELOAD_CONFIG); + required_access.emplace_back(AccessType::SYSTEM_RELOAD_CONFIG); break; } case Type::STOP_MERGES: [[fallthrough]]; case Type::START_MERGES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_MERGES); + required_access.emplace_back(AccessType::SYSTEM_MERGES); else - required_access.emplace_back(AccessType::STOP_MERGES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_MERGES, query.database, query.table); break; } case Type::STOP_TTL_MERGES: [[fallthrough]]; case Type::START_TTL_MERGES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_TTL_MERGES); + required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES); else - required_access.emplace_back(AccessType::STOP_TTL_MERGES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.database, query.table); break; } case Type::STOP_MOVES: [[fallthrough]]; case Type::START_MOVES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_MOVES); + required_access.emplace_back(AccessType::SYSTEM_MOVES); else - required_access.emplace_back(AccessType::STOP_MOVES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_MOVES, query.database, query.table); break; } case Type::STOP_FETCHES: [[fallthrough]]; case Type::START_FETCHES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_FETCHES); + required_access.emplace_back(AccessType::SYSTEM_FETCHES); else - required_access.emplace_back(AccessType::STOP_FETCHES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.database, query.table); break; } case Type::STOP_DISTRIBUTED_SENDS: [[fallthrough]]; case Type::START_DISTRIBUTED_SENDS: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_DISTRIBUTED_SENDS); + required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS); else - required_access.emplace_back(AccessType::STOP_DISTRIBUTED_SENDS, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.database, query.table); break; } case Type::STOP_REPLICATED_SENDS: [[fallthrough]]; case Type::START_REPLICATED_SENDS: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_REPLICATED_SENDS); + required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS); else - required_access.emplace_back(AccessType::STOP_REPLICATED_SENDS, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.database, query.table); break; } case Type::STOP_REPLICATION_QUEUES: [[fallthrough]]; case Type::START_REPLICATION_QUEUES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_REPLICATION_QUEUES); + required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES); else - required_access.emplace_back(AccessType::STOP_REPLICATION_QUEUES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.database, query.table); break; } case Type::SYNC_REPLICA: { - required_access.emplace_back(AccessType::SYNC_REPLICA, query.database, query.table); + 
required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.database, query.table); break; } case Type::RESTART_REPLICA: { - required_access.emplace_back(AccessType::RESTART_REPLICA, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.database, query.table); break; } case Type::RESTART_REPLICAS: { - required_access.emplace_back(AccessType::RESTART_REPLICA); + required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA); break; } case Type::FLUSH_DISTRIBUTED: { - required_access.emplace_back(AccessType::FLUSH_DISTRIBUTED, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.database, query.table); break; } case Type::FLUSH_LOGS: { - required_access.emplace_back(AccessType::FLUSH_LOGS); + required_access.emplace_back(AccessType::SYSTEM_FLUSH_LOGS); break; } case Type::STOP_LISTEN_QUERIES: break; diff --git a/dbms/src/Interpreters/InterpreterSystemQuery.h b/src/Interpreters/InterpreterSystemQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterSystemQuery.h rename to src/Interpreters/InterpreterSystemQuery.h diff --git a/dbms/src/Interpreters/InterpreterUseQuery.cpp b/src/Interpreters/InterpreterUseQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterUseQuery.cpp rename to src/Interpreters/InterpreterUseQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterUseQuery.h b/src/Interpreters/InterpreterUseQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterUseQuery.h rename to src/Interpreters/InterpreterUseQuery.h diff --git a/dbms/src/Interpreters/InterpreterWatchQuery.cpp b/src/Interpreters/InterpreterWatchQuery.cpp similarity index 100% rename from dbms/src/Interpreters/InterpreterWatchQuery.cpp rename to src/Interpreters/InterpreterWatchQuery.cpp diff --git a/dbms/src/Interpreters/InterpreterWatchQuery.h b/src/Interpreters/InterpreterWatchQuery.h similarity index 100% rename from dbms/src/Interpreters/InterpreterWatchQuery.h rename to src/Interpreters/InterpreterWatchQuery.h diff --git a/dbms/src/Interpreters/InterserverIOHandler.h b/src/Interpreters/InterserverIOHandler.h similarity index 100% rename from dbms/src/Interpreters/InterserverIOHandler.h rename to src/Interpreters/InterserverIOHandler.h diff --git a/dbms/src/Interpreters/JoinSwitcher.cpp b/src/Interpreters/JoinSwitcher.cpp similarity index 87% rename from dbms/src/Interpreters/JoinSwitcher.cpp rename to src/Interpreters/JoinSwitcher.cpp index 5636022b563..480d105ebb6 100644 --- a/dbms/src/Interpreters/JoinSwitcher.cpp +++ b/src/Interpreters/JoinSwitcher.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include @@ -17,13 +17,13 @@ static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, return std::move(column); } -JoinSwitcher::JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_) +JoinSwitcher::JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_) : limits(table_join_->sizeLimits()) , switched(false) , table_join(table_join_) , right_sample_block(right_sample_block_.cloneEmpty()) { - join = std::make_shared(table_join, right_sample_block); + join = std::make_shared(table_join, right_sample_block); if (!limits.hasLimits()) limits.max_bytes = table_join->defaultMaxBytes(); @@ -50,7 +50,7 @@ bool JoinSwitcher::addJoinedBlock(const Block & block, bool) void JoinSwitcher::switchJoin() { - std::shared_ptr joined_data = static_cast(*join).getJoinedData(); + 
std::shared_ptr joined_data = static_cast(*join).getJoinedData(); BlocksList right_blocks = std::move(joined_data->blocks); /// Destroy old join & create new one. Early destroy for memory saving. diff --git a/dbms/src/Interpreters/JoinSwitcher.h b/src/Interpreters/JoinSwitcher.h similarity index 92% rename from dbms/src/Interpreters/JoinSwitcher.h rename to src/Interpreters/JoinSwitcher.h index ecf042fb7ac..c0f03f08c4c 100644 --- a/dbms/src/Interpreters/JoinSwitcher.h +++ b/src/Interpreters/JoinSwitcher.h @@ -4,7 +4,7 @@ #include #include -#include +#include namespace DB { @@ -15,7 +15,7 @@ namespace DB class JoinSwitcher : public IJoin { public: - JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_); + JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_); /// Add block of data from right hand of JOIN into current join object. /// If join-in-memory memory limit exceeded switches to join-on-disk and continue with it. @@ -72,7 +72,7 @@ private: SizeLimits limits; bool switched; mutable std::mutex switch_mutex; - std::shared_ptr table_join; + std::shared_ptr table_join; const Block right_sample_block; /// Change join-in-memory to join-on-disk moving right hand JOIN data from one to another. diff --git a/dbms/src/Interpreters/JoinToSubqueryTransformVisitor.cpp b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp similarity index 96% rename from dbms/src/Interpreters/JoinToSubqueryTransformVisitor.cpp rename to src/Interpreters/JoinToSubqueryTransformVisitor.cpp index ca21a53b5b0..6a08e11ad9e 100644 --- a/dbms/src/Interpreters/JoinToSubqueryTransformVisitor.cpp +++ b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp @@ -9,10 +9,12 @@ #include #include #include +#include #include #include #include #include +#include namespace DB @@ -34,7 +36,7 @@ namespace ASTPtr makeSubqueryTemplate() { ParserTablesInSelectQueryElement parser(true); - ASTPtr subquery_template = parseQuery(parser, "(select * from _t) as `--.s`", 0); + ASTPtr subquery_template = parseQuery(parser, "(select * from _t) as `--.s`", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); if (!subquery_template) throw Exception("Cannot parse subquery template", ErrorCodes::LOGICAL_ERROR); return subquery_template; @@ -129,6 +131,8 @@ private: /// Make aliases maps (alias -> column_name, column_name -> alias) struct ColumnAliasesMatcher { + using Visitor = ConstInDepthNodeVisitor; + struct Data { const std::vector tables; @@ -137,6 +141,7 @@ struct ColumnAliasesMatcher std::unordered_map aliases; /// alias -> long_name std::vector> compound_identifiers; std::set allowed_long_names; /// original names allowed as aliases '--t.x as t.x' (select expressions only). + bool inside_function = false; explicit Data(const std::vector && tables_) : tables(tables_) @@ -192,6 +197,10 @@ struct ColumnAliasesMatcher static bool needChildVisit(const ASTPtr & node, const ASTPtr &) { + /// Do not go into subqueries. Function visits children itself. + if (node->as() || + node->as()) + return false; return !node->as(); } @@ -199,11 +208,24 @@ struct ColumnAliasesMatcher { if (auto * t = ast->as()) visit(*t, ast, data); + else if (auto * f = ast->as()) + visit(*f, ast, data); - if (ast->as() || ast->as()) + /// Do not allow asterisks but ignore them inside functions. I.e. allow 'count(*)'. 
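[Editor's note] The ColumnAliasesMatcher change above threads an inside_function flag through the visitor so that a bare asterisk in a multi-JOIN rewrite is rejected while 'count(*)' remains legal (the asterisk is a grandchild of a function node). A self-contained sketch of the same scoped-flag technique on a toy AST; the Node type is illustrative, not ClickHouse's IAST.

#include <memory>
#include <stdexcept>
#include <vector>

struct Node
{
    enum class Type { Function, Asterisk, Identifier } type;
    std::vector<std::shared_ptr<Node>> children;
};

static void visit(const Node & node, bool inside_function = false)
{
    // Bare '*' is only an error outside of a function's argument list.
    if (node.type == Node::Type::Asterisk && !inside_function)
        throw std::runtime_error("Asterisk is not supported in this position");

    // Entering a function scope flips the flag for the whole subtree,
    // so Function -> ExpressionList -> Asterisk (i.e. count(*)) passes.
    const bool child_flag = inside_function || node.type == Node::Type::Function;
    for (const auto & child : node.children)
        if (child)
            visit(*child, child_flag);
}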
+ if (!data.inside_function && (ast->as() || ast->as())) throw Exception("Multiple JOIN do not support asterisks for complex queries yet", ErrorCodes::NOT_IMPLEMENTED); } + static void visit(const ASTFunction &, const ASTPtr & ast, Data & data) + { + /// Grandchild case: Function -> (ExpressionList) -> Asterisk + data.inside_function = true; + Visitor visitor(data); + for (auto & child : ast->children) + visitor.visit(child); + data.inside_function = false; + } + static void visit(const ASTIdentifier & const_node, const ASTPtr &, Data & data) { ASTIdentifier & node = const_cast(const_node); /// we know it's not const @@ -348,7 +370,7 @@ bool needRewrite(ASTSelectQuery & select, std::vector; using RewriteVisitor = InDepthNodeVisitor; using ExtractAsterisksVisitor = ConstInDepthNodeVisitor; -using ColumnAliasesVisitor = ConstInDepthNodeVisitor; +using ColumnAliasesVisitor = ColumnAliasesMatcher::Visitor; using AppendSemanticMatcher = OneTypeMatcher; using AppendSemanticVisitor = InDepthNodeVisitor; diff --git a/dbms/src/Interpreters/JoinToSubqueryTransformVisitor.h b/src/Interpreters/JoinToSubqueryTransformVisitor.h similarity index 100% rename from dbms/src/Interpreters/JoinToSubqueryTransformVisitor.h rename to src/Interpreters/JoinToSubqueryTransformVisitor.h diff --git a/dbms/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp similarity index 100% rename from dbms/src/Interpreters/JoinedTables.cpp rename to src/Interpreters/JoinedTables.cpp diff --git a/dbms/src/Interpreters/JoinedTables.h b/src/Interpreters/JoinedTables.h similarity index 94% rename from dbms/src/Interpreters/JoinedTables.h rename to src/Interpreters/JoinedTables.h index 66b3c8de609..3bcec883f30 100644 --- a/dbms/src/Interpreters/JoinedTables.h +++ b/src/Interpreters/JoinedTables.h @@ -27,6 +27,8 @@ public: StoragePtr getLeftTableStorage(); bool resolveTables(); + + /// Make fake tables_with_columns[0] in case we have predefined input in InterpreterSelectQuery void makeFakeTable(StoragePtr storage, const Block & source_header); const std::vector & tablesWithColumns() const { return tables_with_columns; } diff --git a/dbms/src/Interpreters/LogicalExpressionsOptimizer.cpp b/src/Interpreters/LogicalExpressionsOptimizer.cpp similarity index 100% rename from dbms/src/Interpreters/LogicalExpressionsOptimizer.cpp rename to src/Interpreters/LogicalExpressionsOptimizer.cpp diff --git a/dbms/src/Interpreters/LogicalExpressionsOptimizer.h b/src/Interpreters/LogicalExpressionsOptimizer.h similarity index 100% rename from dbms/src/Interpreters/LogicalExpressionsOptimizer.h rename to src/Interpreters/LogicalExpressionsOptimizer.h diff --git a/dbms/src/Interpreters/MarkTableIdentifiersVisitor.cpp b/src/Interpreters/MarkTableIdentifiersVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/MarkTableIdentifiersVisitor.cpp rename to src/Interpreters/MarkTableIdentifiersVisitor.cpp diff --git a/dbms/src/Interpreters/MarkTableIdentifiersVisitor.h b/src/Interpreters/MarkTableIdentifiersVisitor.h similarity index 100% rename from dbms/src/Interpreters/MarkTableIdentifiersVisitor.h rename to src/Interpreters/MarkTableIdentifiersVisitor.h diff --git a/dbms/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp similarity index 99% rename from dbms/src/Interpreters/MergeJoin.cpp rename to src/Interpreters/MergeJoin.cpp index fde6ba2003d..1a3a84004dd 100644 --- a/dbms/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include 
#include #include @@ -445,7 +445,7 @@ void MiniLSM::merge(std::function callback) } -MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block_) +MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block_) : table_join(table_join_) , size_limits(table_join->sizeLimits()) , right_sample_block(right_sample_block_) diff --git a/dbms/src/Interpreters/MergeJoin.h b/src/Interpreters/MergeJoin.h similarity index 96% rename from dbms/src/Interpreters/MergeJoin.h rename to src/Interpreters/MergeJoin.h index 74a11fc05e4..d62083df38e 100644 --- a/dbms/src/Interpreters/MergeJoin.h +++ b/src/Interpreters/MergeJoin.h @@ -13,7 +13,7 @@ namespace DB { -class AnalyzedJoin; +class TableJoin; class MergeJoinCursor; struct MergeJoinEqualRange; @@ -48,7 +48,7 @@ struct MiniLSM class MergeJoin : public IJoin { public: - MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block); + MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block); bool addJoinedBlock(const Block & block, bool check_limits) override; void joinBlock(Block &, ExtraBlockPtr & not_processed) override; @@ -76,7 +76,7 @@ private: using Cache = LRUCache, BlockByteWeight>; mutable std::shared_mutex rwlock; - std::shared_ptr table_join; + std::shared_ptr table_join; SizeLimits size_limits; SortDescription left_sort_description; SortDescription right_sort_description; diff --git a/dbms/src/Interpreters/MetricLog.cpp b/src/Interpreters/MetricLog.cpp similarity index 98% rename from dbms/src/Interpreters/MetricLog.cpp rename to src/Interpreters/MetricLog.cpp index 5622e0c65b0..bd898170705 100644 --- a/dbms/src/Interpreters/MetricLog.cpp +++ b/src/Interpreters/MetricLog.cpp @@ -70,6 +70,13 @@ void MetricLog::stopCollectMetric() } +void MetricLog::shutdown() +{ + stopCollectMetric(); + stopFlushThread(); +} + + inline UInt64 time_in_milliseconds(std::chrono::time_point timepoint) { return std::chrono::duration_cast(timepoint.time_since_epoch()).count(); diff --git a/dbms/src/Interpreters/MetricLog.h b/src/Interpreters/MetricLog.h similarity index 97% rename from dbms/src/Interpreters/MetricLog.h rename to src/Interpreters/MetricLog.h index c55bad2c12f..a90ce923494 100644 --- a/dbms/src/Interpreters/MetricLog.h +++ b/src/Interpreters/MetricLog.h @@ -34,6 +34,8 @@ class MetricLog : public SystemLog using SystemLog::SystemLog; public: + void shutdown() override; + /// Launches a background thread to collect metrics with interval void startCollectMetric(size_t collect_interval_milliseconds_); diff --git a/dbms/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp similarity index 95% rename from dbms/src/Interpreters/MutationsInterpreter.cpp rename to src/Interpreters/MutationsInterpreter.cpp index 669b72c6317..b1b226b157f 100644 --- a/dbms/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -36,34 +36,46 @@ namespace ErrorCodes namespace { -struct FirstNonDeterministicFuncData +/// Helps to detect situations, where non-deterministic functions may be used in mutations of Replicated*MergeTree. 
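// Context for the matcher introduced below (illustrative, not from the patch): mutations on
// Replicated*MergeTree tables are executed independently on every replica, so a
// non-deterministic function in a mutation can leave replicas with different data, e.g.:
//
//     ALTER TABLE replicated_table UPDATE value = rand() WHERE 1;  -- each replica computes its own rand()
//
// The matcher records the first such function name so that validate() can reject the command;
// the new allow_nondeterministic_mutations setting (used further down) opts out of the check.
// Lambdas themselves are not looked up in FunctionFactory; only their bodies are inspected.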
+class FirstNonDeterministicFuncMatcher { - using TypeToVisit = ASTFunction; - - explicit FirstNonDeterministicFuncData(const Context & context_) - : context{context_} - {} - - const Context & context; - std::optional nondeterministic_function_name; - - void visit(ASTFunction & function, ASTPtr &) +public: + struct Data { - if (nondeterministic_function_name) + const Context & context; + std::optional nondeterministic_function_name; + }; + +public: + static bool needChildVisit(const ASTPtr & /*node*/, const ASTPtr & child) + { + return child != nullptr; + } + + static void visit(const ASTPtr & node, Data & data) + { + if (data.nondeterministic_function_name) return; - const auto func = FunctionFactory::instance().get(function.name, context); - if (!func->isDeterministic()) - nondeterministic_function_name = func->getName(); + if (const auto * function = typeid_cast(node.get())) + { + /// Property of being deterministic for lambda expression is completely determined + /// by the contents of its definition, so we just proceed to it. + if (function->name != "lambda") + { + const auto func = FunctionFactory::instance().get(function->name, data.context); + if (!func->isDeterministic()) + data.nondeterministic_function_name = func->getName(); + } + } } }; -using FirstNonDeterministicFuncFinder = - InDepthNodeVisitor, true>; +using FirstNonDeterministicFuncFinder = InDepthNodeVisitor; std::optional findFirstNonDeterministicFuncName(const MutationCommand & command, const Context & context) { - FirstNonDeterministicFuncData finder_data(context); + FirstNonDeterministicFuncMatcher::Data finder_data{context, std::nullopt}; switch (command.type) { @@ -661,9 +673,11 @@ BlockInputStreamPtr MutationsInterpreter::addStreamsForLaterStages(const std::ve void MutationsInterpreter::validate(TableStructureReadLockHolder &) { + const Settings & settings = context.getSettingsRef(); + /// For Replicated* storages mutations cannot employ non-deterministic functions /// because that produces inconsistencies between replicas - if (startsWith(storage->getName(), "Replicated")) + if (startsWith(storage->getName(), "Replicated") && !settings.allow_nondeterministic_mutations) { for (const auto & command : commands) { diff --git a/dbms/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h similarity index 100% rename from dbms/src/Interpreters/MutationsInterpreter.h rename to src/Interpreters/MutationsInterpreter.h diff --git a/dbms/src/Interpreters/NullableUtils.cpp b/src/Interpreters/NullableUtils.cpp similarity index 84% rename from dbms/src/Interpreters/NullableUtils.cpp rename to src/Interpreters/NullableUtils.cpp index fe2801f5d11..5c0202d1de3 100644 --- a/dbms/src/Interpreters/NullableUtils.cpp +++ b/src/Interpreters/NullableUtils.cpp @@ -5,7 +5,7 @@ namespace DB { -ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map) +ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map, bool exact_null) { ColumnPtr null_map_holder; @@ -38,7 +38,12 @@ ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullM PaddedPODArray & mutable_null_map = assert_cast(*mutable_null_map_holder).getData(); const PaddedPODArray & other_null_map = column_nullable->getNullMapData(); for (size_t i = 0, size = mutable_null_map.size(); i < size; ++i) - mutable_null_map[i] |= other_null_map[i]; + { + if (exact_null) + mutable_null_map[i] &= other_null_map[i]; + else + mutable_null_map[i] |= other_null_map[i]; + } 
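// Worked example for the loop above (illustrative): combining the null maps of two nullable
// key columns, where 1 marks a NULL value:
//
//     col0 null map:  1 0 1 0
//     col1 null map:  1 1 0 0
//
//     exact_null = false (|=):  1 1 1 0   // row flagged if ANY key component is NULL (previous behaviour)
//     exact_null = true  (&=):  1 0 0 0   // row flagged only if ALL key components are NULL
//
// The `true` mode is what the transform_null_in changes further below rely on, so that a NULL
// key can be matched as a value instead of being unconditionally skipped.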
null_map_holder = std::move(mutable_null_map_holder); } diff --git a/dbms/src/Interpreters/NullableUtils.h b/src/Interpreters/NullableUtils.h similarity index 84% rename from dbms/src/Interpreters/NullableUtils.h rename to src/Interpreters/NullableUtils.h index ee3193919cd..054835f8bef 100644 --- a/dbms/src/Interpreters/NullableUtils.h +++ b/src/Interpreters/NullableUtils.h @@ -8,6 +8,6 @@ namespace DB * In 'null_map' return a map of positions where at least one column was NULL. * @returns ownership column of null_map. */ -ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map); +ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map, bool exact_null = false); } diff --git a/dbms/src/Interpreters/OptimizeIfChains.cpp b/src/Interpreters/OptimizeIfChains.cpp similarity index 100% rename from dbms/src/Interpreters/OptimizeIfChains.cpp rename to src/Interpreters/OptimizeIfChains.cpp diff --git a/dbms/src/Interpreters/OptimizeIfChains.h b/src/Interpreters/OptimizeIfChains.h similarity index 100% rename from dbms/src/Interpreters/OptimizeIfChains.h rename to src/Interpreters/OptimizeIfChains.h diff --git a/dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp rename to src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp diff --git a/dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.h b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.h similarity index 100% rename from dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.h rename to src/Interpreters/OptimizeIfWithConstantConditionVisitor.h diff --git a/dbms/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp similarity index 100% rename from dbms/src/Interpreters/PartLog.cpp rename to src/Interpreters/PartLog.cpp diff --git a/dbms/src/Interpreters/PartLog.h b/src/Interpreters/PartLog.h similarity index 100% rename from dbms/src/Interpreters/PartLog.h rename to src/Interpreters/PartLog.h diff --git a/dbms/src/Interpreters/PredicateExpressionsOptimizer.cpp b/src/Interpreters/PredicateExpressionsOptimizer.cpp similarity index 100% rename from dbms/src/Interpreters/PredicateExpressionsOptimizer.cpp rename to src/Interpreters/PredicateExpressionsOptimizer.cpp diff --git a/dbms/src/Interpreters/PredicateExpressionsOptimizer.h b/src/Interpreters/PredicateExpressionsOptimizer.h similarity index 100% rename from dbms/src/Interpreters/PredicateExpressionsOptimizer.h rename to src/Interpreters/PredicateExpressionsOptimizer.h diff --git a/dbms/src/Interpreters/PredicateRewriteVisitor.cpp b/src/Interpreters/PredicateRewriteVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/PredicateRewriteVisitor.cpp rename to src/Interpreters/PredicateRewriteVisitor.cpp diff --git a/dbms/src/Interpreters/PredicateRewriteVisitor.h b/src/Interpreters/PredicateRewriteVisitor.h similarity index 82% rename from dbms/src/Interpreters/PredicateRewriteVisitor.h rename to src/Interpreters/PredicateRewriteVisitor.h index e07df922c15..cc1b6472a4c 100644 --- a/dbms/src/Interpreters/PredicateRewriteVisitor.h +++ b/src/Interpreters/PredicateRewriteVisitor.h @@ -16,6 +16,14 @@ public: void visit(ASTSelectWithUnionQuery & union_select_query, ASTPtr &); + static bool needChild(const ASTPtr & node, const ASTPtr &) + { + if (node && node->as()) + return false; + + return true; + } + 
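// The needChild()/visit() split above follows the same contract that InDepthNodeVisitor
// matchers use throughout these files: a Matcher supplies a Data struct, a descent predicate
// and a visit callback, and the visitor drives the traversal. A minimal sketch with a
// hypothetical matcher (the contract is real, the example matcher is not):
//
//     struct CountSelectsMatcher
//     {
//         struct Data { size_t selects = 0; };
//
//         static bool needChildVisit(const ASTPtr & /*node*/, const ASTPtr & child)
//         {
//             return child != nullptr;   // return false to prune a subtree
//         }
//
//         static void visit(const ASTPtr & node, Data & data)
//         {
//             if (node->as<ASTSelectQuery>())
//                 ++data.selects;
//         }
//     };
//
//     using CountSelectsVisitor = InDepthNodeVisitor<CountSelectsMatcher, true>;
//     // Usage: CountSelectsMatcher::Data data; CountSelectsVisitor(data).visit(query);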
PredicateRewriteVisitorData(const Context & context_, const ASTs & predicates_, const Names & column_names_, bool optimize_final_); private: @@ -31,6 +39,6 @@ private: bool rewriteSubquery(ASTSelectQuery & subquery, const Names & outer_columns, const Names & inner_columns); }; -using PredicateRewriteMatcher = OneTypeMatcher; +using PredicateRewriteMatcher = OneTypeMatcher; using PredicateRewriteVisitor = InDepthNodeVisitor; } diff --git a/dbms/src/Interpreters/PreparedSets.h b/src/Interpreters/PreparedSets.h similarity index 100% rename from dbms/src/Interpreters/PreparedSets.h rename to src/Interpreters/PreparedSets.h diff --git a/dbms/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp similarity index 100% rename from dbms/src/Interpreters/ProcessList.cpp rename to src/Interpreters/ProcessList.cpp diff --git a/dbms/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h similarity index 100% rename from dbms/src/Interpreters/ProcessList.h rename to src/Interpreters/ProcessList.h diff --git a/dbms/src/Interpreters/ProfileEventsExt.cpp b/src/Interpreters/ProfileEventsExt.cpp similarity index 100% rename from dbms/src/Interpreters/ProfileEventsExt.cpp rename to src/Interpreters/ProfileEventsExt.cpp diff --git a/dbms/src/Interpreters/ProfileEventsExt.h b/src/Interpreters/ProfileEventsExt.h similarity index 100% rename from dbms/src/Interpreters/ProfileEventsExt.h rename to src/Interpreters/ProfileEventsExt.h diff --git a/dbms/src/Interpreters/QueryAliasesVisitor.cpp b/src/Interpreters/QueryAliasesVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/QueryAliasesVisitor.cpp rename to src/Interpreters/QueryAliasesVisitor.cpp diff --git a/dbms/src/Interpreters/QueryAliasesVisitor.h b/src/Interpreters/QueryAliasesVisitor.h similarity index 100% rename from dbms/src/Interpreters/QueryAliasesVisitor.h rename to src/Interpreters/QueryAliasesVisitor.h diff --git a/dbms/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp similarity index 100% rename from dbms/src/Interpreters/QueryLog.cpp rename to src/Interpreters/QueryLog.cpp diff --git a/dbms/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h similarity index 88% rename from dbms/src/Interpreters/QueryLog.h rename to src/Interpreters/QueryLog.h index 836b37095e9..ec14f5e97fb 100644 --- a/dbms/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace ProfileEvents @@ -22,13 +23,7 @@ namespace DB /// A struct which will be inserted as row into query_log table struct QueryLogElement { - enum Type : int8_t // Make it signed for compatibility with DataTypeEnum8 - { - QUERY_START = 1, - QUERY_FINISH = 2, - EXCEPTION_BEFORE_START = 3, - EXCEPTION_WHILE_PROCESSING = 4, - }; + using Type = QueryLogElementType; Type type = QUERY_START; diff --git a/dbms/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp similarity index 92% rename from dbms/src/Interpreters/QueryNormalizer.cpp rename to src/Interpreters/QueryNormalizer.cpp index 568b08b8f5a..86fbd108f51 100644 --- a/dbms/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -76,20 +76,7 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) if (it_alias != data.aliases.end() && current_alias != node.name) { if (!IdentifierSemantic::canBeAlias(node)) - { - /// This means that column had qualified name, which was translated (so, canBeAlias() returns false). - /// But there is an alias with the same name. 
So, let's use original name for that column. - /// If alias wasn't set, use original column name as alias. - /// That helps to avoid result set with columns which have same names but different values. - if (node.alias.empty()) - { - node.name.swap(node.alias); - node.restoreCompoundName(); - node.name.swap(node.alias); - } - return; - } /// We are alias for other column (node.name), but we are alias by /// ourselves to some other column diff --git a/dbms/src/Interpreters/QueryNormalizer.h b/src/Interpreters/QueryNormalizer.h similarity index 100% rename from dbms/src/Interpreters/QueryNormalizer.h rename to src/Interpreters/QueryNormalizer.h diff --git a/dbms/src/Interpreters/QueryPriorities.h b/src/Interpreters/QueryPriorities.h similarity index 100% rename from dbms/src/Interpreters/QueryPriorities.h rename to src/Interpreters/QueryPriorities.h diff --git a/dbms/src/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp similarity index 100% rename from dbms/src/Interpreters/QueryThreadLog.cpp rename to src/Interpreters/QueryThreadLog.cpp diff --git a/dbms/src/Interpreters/QueryThreadLog.h b/src/Interpreters/QueryThreadLog.h similarity index 100% rename from dbms/src/Interpreters/QueryThreadLog.h rename to src/Interpreters/QueryThreadLog.h diff --git a/src/Interpreters/RenameColumnVisitor.cpp b/src/Interpreters/RenameColumnVisitor.cpp new file mode 100644 index 00000000000..a22fa78e2cc --- /dev/null +++ b/src/Interpreters/RenameColumnVisitor.cpp @@ -0,0 +1,12 @@ +#include +#include + +namespace DB +{ +void RenameColumnData::visit(ASTIdentifier & identifier, ASTPtr &) +{ + std::optional identifier_column_name = IdentifierSemantic::getColumnName(identifier); + if (identifier_column_name && identifier_column_name == column_name) + identifier.name = rename_to; +} +} diff --git a/src/Interpreters/RenameColumnVisitor.h b/src/Interpreters/RenameColumnVisitor.h new file mode 100644 index 00000000000..318cde84f56 --- /dev/null +++ b/src/Interpreters/RenameColumnVisitor.h @@ -0,0 +1,22 @@ +#pragma once + +#include +#include + +namespace DB +{ +/// Data for RenameColumnVisitor, which traverses the tree and renames all columns with +/// name column_name to rename_to +struct RenameColumnData +{ + using TypeToVisit = ASTIdentifier; + + String column_name; + String rename_to; + + void visit(ASTIdentifier & identifier, ASTPtr & ast); +}; + +using RenameColumnMatcher = OneTypeMatcher; +using RenameColumnVisitor = InDepthNodeVisitor; +} diff --git a/dbms/src/Interpreters/ReplaceQueryParameterVisitor.cpp b/src/Interpreters/ReplaceQueryParameterVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/ReplaceQueryParameterVisitor.cpp rename to src/Interpreters/ReplaceQueryParameterVisitor.cpp diff --git a/dbms/src/Interpreters/ReplaceQueryParameterVisitor.h b/src/Interpreters/ReplaceQueryParameterVisitor.h similarity index 100% rename from dbms/src/Interpreters/ReplaceQueryParameterVisitor.h rename to src/Interpreters/ReplaceQueryParameterVisitor.h diff --git a/src/Interpreters/RequiredSourceColumnsData.cpp b/src/Interpreters/RequiredSourceColumnsData.cpp new file mode 100644 index 00000000000..b5a3544f22f --- /dev/null +++ b/src/Interpreters/RequiredSourceColumnsData.cpp @@ -0,0 +1,96 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +bool RequiredSourceColumnsData::addColumnAliasIfAny(const IAST & ast) +{ + String alias = ast.tryGetAlias(); + if (alias.empty()) + return false; + + if (required_names.count(alias)) + masked_columns.insert(alias); + +
complex_aliases.insert(alias); + return true; +} + +void RequiredSourceColumnsData::addColumnIdentifier(const ASTIdentifier & node) +{ + if (!IdentifierSemantic::getColumnName(node)) + return; + + /// There should be no complex cases after query normalization. Names to aliases: one-to-many. + String alias = node.tryGetAlias(); + required_names[node.name].addInclusion(alias); +} + +bool RequiredSourceColumnsData::addArrayJoinAliasIfAny(const IAST & ast) +{ + String alias = ast.tryGetAlias(); + if (alias.empty()) + return false; + + array_join_columns.insert(alias); + return true; +} + +void RequiredSourceColumnsData::addArrayJoinIdentifier(const ASTIdentifier & node) +{ + array_join_columns.insert(node.name); +} + +size_t RequiredSourceColumnsData::nameInclusion(const String & name) const +{ + auto it = required_names.find(name); + if (it != required_names.end()) + return it->second.appears; + return 0; +} + +NameSet RequiredSourceColumnsData::requiredColumns() const +{ + NameSet required; + for (const auto & pr : required_names) + { + const auto & name = pr.first; + String table_name = Nested::extractTableName(name); + + /// Tech debt. There is separate logic for ARRAY JOIN columns. + if (array_join_columns.count(name) || array_join_columns.count(table_name)) + continue; + + if (!complex_aliases.count(name) || masked_columns.count(name)) + required.insert(name); + } + return required; +} + +std::ostream & operator << (std::ostream & os, const RequiredSourceColumnsData & cols) +{ + os << "required_names: "; + for (const auto & pr : cols.required_names) + { + os << "'" << pr.first << "'"; + for (auto & alias : pr.second.aliases) + os << "/'" << alias << "'"; + os << ", "; + } + os << "complex_aliases: "; + for (const auto & x : cols.complex_aliases) + os << "'" << x << "', "; + os << "masked_columns: "; + for (const auto & x : cols.masked_columns) + os << "'" << x << "', "; + os << "array_join_columns: "; + for (const auto & x : cols.array_join_columns) + os << "'" << x << "', "; + return os; +} + +} diff --git a/src/Interpreters/RequiredSourceColumnsData.h b/src/Interpreters/RequiredSourceColumnsData.h new file mode 100644 index 00000000000..de1f3bc2721 --- /dev/null +++ b/src/Interpreters/RequiredSourceColumnsData.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace DB +{ + +class ASTIdentifier; + +/// Information about table and column names extracted from ASTSelectQuery block. Does not include info from subselects.
+struct RequiredSourceColumnsData +{ + struct NameInfo + { + std::set aliases; + size_t appears = 0; + + void addInclusion(const String & alias) + { + if (!alias.empty()) + aliases.insert(alias); + ++appears; + } + }; + + std::unordered_map required_names; + NameSet private_aliases; /// lambda aliases that should not be interpreted as required columns + NameSet complex_aliases; /// aliases to function results: they are not required because they are calculated by the query itself + NameSet masked_columns; /// column names masked by function aliases: we still need them in required columns + NameSet array_join_columns; /// Tech debt: we exclude ArrayJoin columns from the general logic because they have their own logic outside + + bool has_table_join = false; + bool has_array_join = false; + + bool addColumnAliasIfAny(const IAST & ast); + void addColumnIdentifier(const ASTIdentifier & node); + bool addArrayJoinAliasIfAny(const IAST & ast); + void addArrayJoinIdentifier(const ASTIdentifier & node); + + NameSet requiredColumns() const; + size_t nameInclusion(const String & name) const; +}; + +std::ostream & operator << (std::ostream & os, const RequiredSourceColumnsData & cols); + +} diff --git a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp b/src/Interpreters/RequiredSourceColumnsVisitor.cpp similarity index 88% rename from dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp rename to src/Interpreters/RequiredSourceColumnsVisitor.cpp index 5a740805560..9542b2882c5 100644 --- a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp +++ b/src/Interpreters/RequiredSourceColumnsVisitor.cpp @@ -88,15 +88,15 @@ void RequiredSourceColumnsMatcher::visit(const ASTPtr & ast, Data & data) visit(*t, ast, data); return; } + if (auto * t = ast->as()) { - data.addTableAliasIfAny(*ast); visit(*t, ast, data); return; } + if (ast->as()) { - data.addTableAliasIfAny(*ast); return; } @@ -159,33 +159,14 @@ void RequiredSourceColumnsMatcher::visit(const ASTFunction & node, const ASTPtr void RequiredSourceColumnsMatcher::visit(const ASTTablesInSelectQueryElement & node, const ASTPtr &, Data & data) { - ASTTableExpression * expr = nullptr; - ASTTableJoin * join = nullptr; - for (auto & child : node.children) - { - if (auto * e = child->as()) - expr = e; - if (auto * j = child->as()) - join = j; - } - - if (join) - data.has_table_join = true; - data.tables.emplace_back(ColumnNamesContext::JoinedTable{expr, join}); + if (child->as()) + data.has_table_join = true; } /// ASTIdentifiers here are tables. Do not visit them as generic ones.
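// Illustrative reading of requiredColumns() above, for a hypothetical table t(a, b):
//
//     SELECT a + b AS c FROM t   -- required: a, b   (`c` is a complex alias, computed by the query)
//     SELECT a + 1 AS a FROM t   -- required: a      (the alias masks real column `a`, which is still read)
//
// A name is kept unless it is a complex alias, except when masked_columns records that a real
// source column of the same name is also referenced; ARRAY JOIN columns are filtered out here
// because they are resolved by their own logic.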
-void RequiredSourceColumnsMatcher::visit(const ASTTableExpression & node, const ASTPtr &, Data & data) +void RequiredSourceColumnsMatcher::visit(const ASTTableExpression &, const ASTPtr &, Data &) { - if (node.database_and_table_name) - data.addTableAliasIfAny(*node.database_and_table_name); - - if (node.table_function) - data.addTableAliasIfAny(*node.table_function); - - if (node.subquery) - data.addTableAliasIfAny(*node.subquery); } void RequiredSourceColumnsMatcher::visit(const ASTArrayJoin & node, const ASTPtr &, Data & data) diff --git a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.h b/src/Interpreters/RequiredSourceColumnsVisitor.h similarity index 91% rename from dbms/src/Interpreters/RequiredSourceColumnsVisitor.h rename to src/Interpreters/RequiredSourceColumnsVisitor.h index 1f6ff482e3a..53decb3a849 100644 --- a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.h +++ b/src/Interpreters/RequiredSourceColumnsVisitor.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include namespace DB @@ -21,7 +21,7 @@ class RequiredSourceColumnsMatcher { public: using Visitor = ConstInDepthNodeVisitor; - using Data = ColumnNamesContext; + using Data = RequiredSourceColumnsData; static bool needChildVisit(const ASTPtr & node, const ASTPtr & child); static void visit(const ASTPtr & ast, Data & data); @@ -35,7 +35,7 @@ private: static void visit(const ASTSelectQuery & select, const ASTPtr &, Data & data); }; -/// Extracts all the information about columns and tables from ASTSelectQuery block into ColumnNamesContext object. +/// Extracts all the information about columns and tables from ASTSelectQuery block into Data object. /// It doesn't use anything but AST. It visits nodes from bottom to top except ASTFunction content to get aliases in right manner. /// @note There's some ambiguousness with nested columns names that can't be solved without schema. using RequiredSourceColumnsVisitor = RequiredSourceColumnsMatcher::Visitor; diff --git a/dbms/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp similarity index 100% rename from dbms/src/Interpreters/RowRefs.cpp rename to src/Interpreters/RowRefs.cpp diff --git a/dbms/src/Interpreters/RowRefs.h b/src/Interpreters/RowRefs.h similarity index 100% rename from dbms/src/Interpreters/RowRefs.h rename to src/Interpreters/RowRefs.h diff --git a/dbms/src/Interpreters/SelectQueryOptions.h b/src/Interpreters/SelectQueryOptions.h similarity index 100% rename from dbms/src/Interpreters/SelectQueryOptions.h rename to src/Interpreters/SelectQueryOptions.h diff --git a/dbms/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp similarity index 96% rename from dbms/src/Interpreters/Set.cpp rename to src/Interpreters/Set.cpp index 3c79ea5174d..bbb7fa4303a 100644 --- a/dbms/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -87,6 +87,8 @@ void NO_INLINE Set::insertFromBlockImplCase( { if ((*null_map)[i]) { + has_null = true; + if constexpr (build_filter) { (*out_filter)[i] = false; @@ -138,7 +140,7 @@ void Set::setHeader(const Block & header) /// We will insert to the Set only keys, where all components are not NULL. ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, transform_null_in); if (fill_set_elements) { @@ -178,7 +180,7 @@ bool Set::insertFromBlock(const Block & block) /// We will insert to the Set only keys, where all components are not NULL. 
ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, transform_null_in); /// Filter to extract distinct values from the block. ColumnUInt8::MutablePtr filter; @@ -249,7 +251,7 @@ void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & co { Field value = extractValueFromNode(elem, *types[0], context); - if (!value.isNull()) + if (!value.isNull() || context.getSettingsRef().transform_null_in) columns[0]->insert(value); } else if (const auto * func = elem->as()) @@ -284,7 +286,7 @@ void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & co : extractValueFromNode(func->arguments->children[i], *types[i], context); /// If at least one of the elements of the tuple has an impossible (outside the range of the type) value, then the entire tuple too. - if (value.isNull()) + if (value.isNull() && !context.getSettings().transform_null_in) break; tuple_values[i] = value; @@ -348,7 +350,8 @@ ColumnPtr Set::execute(const Block & block, bool negative) const /// We will check existence in Set only for keys, where all components are not NULL. ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, transform_null_in); executeOrdinary(key_columns, vec_res, negative, null_map); @@ -390,7 +393,12 @@ void NO_INLINE Set::executeImplCase( for (size_t i = 0; i < rows; ++i) { if (has_null_map && (*null_map)[i]) - vec_res[i] = negative; + { + if (transform_null_in && has_null) + vec_res[i] = !negative; + else + vec_res[i] = negative; + } else { auto find_result = state.findKey(method.data, i, pool); @@ -588,6 +596,14 @@ BoolMask MergeTreeSetIndex::checkInRange(const std::vector & key_ranges, }; } +bool MergeTreeSetIndex::hasMonotonicFunctionsChain() const +{ + for (const auto & mapping : indexes_mapping) + if (!mapping.functions.empty()) + return true; + return false; +} + void ValueWithInfinity::update(const Field & x) { /// Keep at most one element in column. @@ -599,8 +615,11 @@ void ValueWithInfinity::update(const Field & x) const IColumn & ValueWithInfinity::getColumnIfFinite() const { +#ifndef NDEBUG if (type != NORMAL) throw Exception("Trying to get column of infinite type", ErrorCodes::LOGICAL_ERROR); +#endif + return *column; } diff --git a/dbms/src/Interpreters/Set.h b/src/Interpreters/Set.h similarity index 96% rename from dbms/src/Interpreters/Set.h rename to src/Interpreters/Set.h index c9605d4e11e..0f84c2f5da8 100644 --- a/dbms/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -30,9 +30,9 @@ public: /// (that is useful only for checking that some value is in the set and may not store the original values), /// store all set elements in explicit form. /// This is needed for subsequent use for index. - Set(const SizeLimits & limits_, bool fill_set_elements_) + Set(const SizeLimits & limits_, bool fill_set_elements_, bool transform_null_in_) : log(&Logger::get("Set")), - limits(limits_), fill_set_elements(fill_set_elements_) + limits(limits_), fill_set_elements(fill_set_elements_), transform_null_in(transform_null_in_) { } @@ -113,6 +113,10 @@ private: /// Do we need to additionally store all elements of the set in explicit form for subsequent use for index. bool fill_set_elements; + bool transform_null_in; + + bool has_null = false; + /// Check if set contains all the data. 
bool is_created = false; @@ -227,6 +231,8 @@ public: size_t size() const { return ordered_set.at(0)->size(); } + bool hasMonotonicFunctionsChain() const; + BoolMask checkInRange(const std::vector & key_ranges, const DataTypes & data_types); private: diff --git a/dbms/src/Interpreters/SetVariants.cpp b/src/Interpreters/SetVariants.cpp similarity index 100% rename from dbms/src/Interpreters/SetVariants.cpp rename to src/Interpreters/SetVariants.cpp diff --git a/dbms/src/Interpreters/SetVariants.h b/src/Interpreters/SetVariants.h similarity index 86% rename from dbms/src/Interpreters/SetVariants.h rename to src/Interpreters/SetVariants.h index 454de98d35b..118cf0115ab 100644 --- a/dbms/src/Interpreters/SetVariants.h +++ b/src/Interpreters/SetVariants.h @@ -190,24 +190,24 @@ struct NonClearableSet * As in Aggregator, using consecutive keys cache doesn't improve performance * for FixedHashTables. */ - std::unique_ptr, false /* use_cache */>> key8; - std::unique_ptr, false /* use_cache */>> key16; + std::unique_ptr, false /* use_cache */>> key8; + std::unique_ptr, false /* use_cache */>> key16; /** Also for the experiment was tested the ability to use SmallSet, * as long as the number of elements in the set is small (and, if necessary, converted to a full-fledged HashSet). * But this experiment showed that there is an advantage only in rare cases. */ - std::unique_ptr>>> key32; - std::unique_ptr>>> key64; - std::unique_ptr>> key_string; - std::unique_ptr>> key_fixed_string; - std::unique_ptr>> keys128; - std::unique_ptr>> keys256; - std::unique_ptr>> hashed; + std::unique_ptr>>> key32; + std::unique_ptr>>> key64; + std::unique_ptr>> key_string; + std::unique_ptr>> key_fixed_string; + std::unique_ptr>> keys128; + std::unique_ptr>> keys256; + std::unique_ptr>> hashed; /// Support for nullable keys (for DISTINCT implementation). - std::unique_ptr, true>> nullable_keys128; - std::unique_ptr, true>> nullable_keys256; + std::unique_ptr, true>> nullable_keys128; + std::unique_ptr, true>> nullable_keys256; /** Unlike Aggregator, `concat` method is not used here. * This is done because `hashed` method, although slower, but in this case, uses less RAM. * since when you use it, the key values themselves are not stored. @@ -216,20 +216,20 @@ struct NonClearableSet struct ClearableSet { - std::unique_ptr, false /* use_cache */>> key8; - std::unique_ptr, false /*use_cache */>> key16; + std::unique_ptr, false /* use_cache */>> key8; + std::unique_ptr, false /*use_cache */>> key16; - std::unique_ptr>>> key32; - std::unique_ptr>>> key64; - std::unique_ptr>> key_string; - std::unique_ptr>> key_fixed_string; - std::unique_ptr>> keys128; - std::unique_ptr>> keys256; - std::unique_ptr>> hashed; + std::unique_ptr>>> key32; + std::unique_ptr>>> key64; + std::unique_ptr>> key_string; + std::unique_ptr>> key_fixed_string; + std::unique_ptr>> keys128; + std::unique_ptr>> keys256; + std::unique_ptr>> hashed; /// Support for nullable keys (for DISTINCT implementation). - std::unique_ptr, true>> nullable_keys128; - std::unique_ptr, true>> nullable_keys256; + std::unique_ptr, true>> nullable_keys128; + std::unique_ptr, true>> nullable_keys256; /** Unlike Aggregator, `concat` method is not used here. * This is done because `hashed` method, although slower, but in this case, uses less RAM. * since when you use it, the key values themselves are not stored. 
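// Summary of the transform_null_in changes above (illustrative):
//
//     SELECT 1    IN (1, NULL)   -- 1 with either setting
//     SELECT NULL IN (1, NULL)   -- 0 by default, 1 with SET transform_null_in = 1
//
// With the setting enabled, NULL literals are kept while the Set is built (createFromAST),
// has_null remembers that the Set contains NULL, and a NULL probe row answers `!negative`
// instead of the default `negative` in executeImplCase.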
diff --git a/dbms/src/Interpreters/StorageID.cpp b/src/Interpreters/StorageID.cpp similarity index 100% rename from dbms/src/Interpreters/StorageID.cpp rename to src/Interpreters/StorageID.cpp diff --git a/dbms/src/Interpreters/StorageID.h b/src/Interpreters/StorageID.h similarity index 100% rename from dbms/src/Interpreters/StorageID.h rename to src/Interpreters/StorageID.h diff --git a/dbms/src/Interpreters/SubqueryForSet.cpp b/src/Interpreters/SubqueryForSet.cpp similarity index 98% rename from dbms/src/Interpreters/SubqueryForSet.cpp rename to src/Interpreters/SubqueryForSet.cpp index 98f670e5c64..72831970de7 100644 --- a/dbms/src/Interpreters/SubqueryForSet.cpp +++ b/src/Interpreters/SubqueryForSet.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include diff --git a/dbms/src/Interpreters/SubqueryForSet.h b/src/Interpreters/SubqueryForSet.h similarity index 100% rename from dbms/src/Interpreters/SubqueryForSet.h rename to src/Interpreters/SubqueryForSet.h diff --git a/dbms/src/Interpreters/SyntaxAnalyzer.cpp b/src/Interpreters/SyntaxAnalyzer.cpp similarity index 94% rename from dbms/src/Interpreters/SyntaxAnalyzer.cpp rename to src/Interpreters/SyntaxAnalyzer.cpp index f93d11fa1da..e19961e7a7c 100644 --- a/dbms/src/Interpreters/SyntaxAnalyzer.cpp +++ b/src/Interpreters/SyntaxAnalyzer.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -17,7 +18,7 @@ #include #include #include -#include +#include #include /// getSmallestColumn() #include #include @@ -60,25 +61,40 @@ namespace using LogAST = DebugASTLog; /// set to true to enable logs -/// Select implementation of countDistinct based on settings. +/// Select implementation of a function based on settings. /// Important that it is done as query rewrite. It means rewritten query /// will be sent to remote servers during distributed query execution, /// and on all remote servers, function implementation will be same. +template struct CustomizeFunctionsData { using TypeToVisit = ASTFunction; - const String & count_distinct; + const String & customized_func_name; void visit(ASTFunction & func, ASTPtr &) { - if (Poco::toLower(func.name) == "countdistinct") - func.name = count_distinct; + if (Poco::toLower(func.name) == func_name) + { + func.name = customized_func_name; + } } }; -using CustomizeFunctionsMatcher = OneTypeMatcher; -using CustomizeFunctionsVisitor = InDepthNodeVisitor; +char countdistinct[] = "countdistinct"; +using CustomizeFunctionsVisitor = InDepthNodeVisitor>, true>; + +char in[] = "in"; +using CustomizeInVisitor = InDepthNodeVisitor>, true>; + +char notIn[] = "notin"; +using CustomizeNotInVisitor = InDepthNodeVisitor>, true>; + +char globalIn[] = "globalin"; +using CustomizeGlobalInVisitor = InDepthNodeVisitor>, true>; + +char globalNotIn[] = "globalnotin"; +using CustomizeGlobalNotInVisitor = InDepthNodeVisitor>, true>; /// Translate qualified names such as db.table.column, table.column, table_alias.column to names' normal form. @@ -520,7 +536,7 @@ void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_defaul } /// Find the columns that are obtained by JOIN. 
-void collectJoinedColumns(AnalyzedJoin & analyzed_join, const ASTSelectQuery & select_query, +void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & select_query, const std::vector & tables, const Aliases & aliases) { const ASTTablesInSelectQueryElement * node = select_query.join(); @@ -572,7 +588,7 @@ void replaceJoinedTable(const ASTSelectQuery & select_query) if (table_id.alias.empty() && table_id.isShort()) { ParserTableExpression parser; - table_expr = parseQuery(parser, expr, 0)->as(); + table_expr = parseQuery(parser, expr, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH)->as(); } } } @@ -795,7 +811,7 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyzeSelect( const auto & settings = context.getSettingsRef(); const NameSet & source_columns_set = result.source_columns_set; - result.analyzed_join = std::make_shared(settings, context.getTemporaryVolume()); + result.analyzed_join = std::make_shared(settings, context.getTemporaryVolume()); if (remove_duplicates) renameDuplicatedColumns(select_query); @@ -889,6 +905,21 @@ void SyntaxAnalyzer::normalize(ASTPtr & query, Aliases & aliases, const Settings CustomizeFunctionsVisitor::Data data{settings.count_distinct_implementation}; CustomizeFunctionsVisitor(data).visit(query); + if (settings.transform_null_in) + { + CustomizeInVisitor::Data data_null_in{"nullIn"}; + CustomizeInVisitor(data_null_in).visit(query); + + CustomizeNotInVisitor::Data data_not_null_in{"notNullIn"}; + CustomizeNotInVisitor(data_not_null_in).visit(query); + + CustomizeGlobalInVisitor::Data data_global_null_in{"globalNullIn"}; + CustomizeGlobalInVisitor(data_global_null_in).visit(query); + + CustomizeGlobalNotInVisitor::Data data_global_not_null_in{"globalNotNullIn"}; + CustomizeGlobalNotInVisitor(data_global_not_null_in).visit(query); + } + /// Creates a dictionary `aliases`: alias -> ASTPtr QueryAliasesVisitor(aliases).visit(query); diff --git a/dbms/src/Interpreters/SyntaxAnalyzer.h b/src/Interpreters/SyntaxAnalyzer.h similarity index 98% rename from dbms/src/Interpreters/SyntaxAnalyzer.h rename to src/Interpreters/SyntaxAnalyzer.h index 01997a8f1ea..23e8a4b79aa 100644 --- a/dbms/src/Interpreters/SyntaxAnalyzer.h +++ b/src/Interpreters/SyntaxAnalyzer.h @@ -11,7 +11,7 @@ namespace DB { class ASTFunction; -class AnalyzedJoin; +class TableJoin; class Context; struct Settings; struct SelectQueryOptions; @@ -20,7 +20,7 @@ using Scalars = std::map; struct SyntaxAnalyzerResult { ConstStoragePtr storage; - std::shared_ptr analyzed_join; + std::shared_ptr analyzed_join; NamesAndTypesList source_columns; NameSet source_columns_set; /// Set of names of source_columns. 
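// The templated CustomizeFunctionsData above generalizes the old countDistinct-only rewrite:
// the lower-case name to match is a char-array non-type template parameter. A simplified
// sketch of the idiom, without the AST visitor plumbing:
//
//     template <const char * func_name>
//     struct RenameFunction
//     {
//         const String & new_name;
//         void visit(ASTFunction & func) const
//         {
//             if (Poco::toLower(func.name) == func_name)
//                 func.name = new_name;
//         }
//     };
//
//     char in[] = "in";   // namespace-scope array: has linkage, so it can be a template argument
//     using CustomizeIn = RenameFunction<in>;
//
// When transform_null_in is set, normalize() runs four such visitors mapping in, notIn,
// globalIn and globalNotIn to their NULL-aware counterparts (nullIn and friends); since this
// is a query rewrite, the same implementation is also used on remote servers in distributed
// queries.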
diff --git a/dbms/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp similarity index 92% rename from dbms/src/Interpreters/SystemLog.cpp rename to src/Interpreters/SystemLog.cpp index a78342f8b17..fc0f2f98125 100644 --- a/dbms/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -83,6 +83,19 @@ SystemLogs::SystemLogs(Context & global_context, const Poco::Util::AbstractConfi size_t collect_interval_milliseconds = config.getUInt64("metric_log.collect_interval_milliseconds"); metric_log->startCollectMetric(collect_interval_milliseconds); } + + if (query_log) + logs.emplace_back(query_log.get()); + if (query_thread_log) + logs.emplace_back(query_thread_log.get()); + if (part_log) + logs.emplace_back(part_log.get()); + if (trace_log) + logs.emplace_back(trace_log.get()); + if (text_log) + logs.emplace_back(text_log.get()); + if (metric_log) + logs.emplace_back(metric_log.get()); } @@ -93,21 +106,8 @@ SystemLogs::~SystemLogs() void SystemLogs::shutdown() { - if (query_log) - query_log->shutdown(); - if (query_thread_log) - query_thread_log->shutdown(); - if (part_log) - part_log->shutdown(); - if (trace_log) - trace_log->shutdown(); - if (text_log) - text_log->shutdown(); - if (metric_log) - { - metric_log->stopCollectMetric(); - metric_log->shutdown(); - } + for (auto & log : logs) + log->shutdown(); } } diff --git a/dbms/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h similarity index 89% rename from dbms/src/Interpreters/SystemLog.h rename to src/Interpreters/SystemLog.h index 87da342ae1f..9b0c273674c 100644 --- a/dbms/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -2,10 +2,14 @@ #include #include +#include +#include + #include #include #include #include +#include #include #include #include @@ -59,13 +63,20 @@ namespace ErrorCodes #define DBMS_SYSTEM_LOG_QUEUE_SIZE 1048576 + class Context; -class QueryLog; -class QueryThreadLog; -class PartLog; -class TextLog; -class TraceLog; -class MetricLog; + + +class ISystemLog +{ +public: + virtual String getName() = 0; + virtual ASTPtr getCreateTableQuery() = 0; + virtual void flush() = 0; + virtual void shutdown() = 0; + virtual ~ISystemLog() = default; +}; + /// System logs should be destroyed in destructor of the last Context and before tables, /// because SystemLog destruction makes insert query while flushing data into underlying tables @@ -82,11 +93,13 @@ struct SystemLogs std::shared_ptr trace_log; /// Used to log traces from query profiler std::shared_ptr text_log; /// Used to log all text messages. std::shared_ptr metric_log; /// Used to log all metrics. + + std::vector logs; }; template -class SystemLog : private boost::noncopyable +class SystemLog : public ISystemLog, private boost::noncopyable { public: using Self = SystemLog; @@ -106,18 +119,28 @@ public: const String & storage_def_, size_t flush_interval_milliseconds_); - ~SystemLog(); - /** Append a record into log. * Writing to table will be done asynchronously and in case of failure, record could be lost. */ void add(const LogElement & element); + void stopFlushThread(); + /// Flush data in the buffer to disk - void flush(); + void flush() override; /// Stop the background flush thread before destructor. No more data will be written. 
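// The ISystemLog interface introduced above lets SystemLogs handle all concrete logs
// uniformly: the constructor collects raw pointers into `logs`, and shutdown() becomes a
// single loop instead of six hand-written branches. Shape of the result (simplified sketch):
//
//     std::vector<ISystemLog *> logs;   // filled with query_log.get(), metric_log.get(), ...
//
//     void SystemLogs::shutdown()
//     {
//         for (auto & log : logs)
//             log->shutdown();   // MetricLog::shutdown() also stops its collecting thread first
//     }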
- void shutdown(); + void shutdown() override + { + stopFlushThread(); + } + + String getName() override + { + return LogElement::name(); + } + + ASTPtr getCreateTableQuery() override; protected: Logger * log; @@ -250,7 +273,7 @@ void SystemLog::flush() template -void SystemLog::shutdown() +void SystemLog::stopFlushThread() { { std::unique_lock lock(mutex); @@ -270,13 +293,6 @@ void SystemLog::shutdown() } -template -SystemLog::~SystemLog() -{ - shutdown(); -} - - template void SystemLog::savingThreadFunction() { @@ -399,7 +415,7 @@ void SystemLog::prepareTable() rename->elements.emplace_back(elem); LOG_DEBUG(log, "Existing table " << description << " for system log has obsolete or different structure." - " Renaming it to " << backQuoteIfNeed(to.table)); + " Renaming it to " << backQuoteIfNeed(to.table)); InterpreterRenameQuery(rename, context).execute(); @@ -415,22 +431,7 @@ void SystemLog::prepareTable() /// Create the table. LOG_DEBUG(log, "Creating new table " << description << " for " + LogElement::name()); - auto create = std::make_shared(); - - create->database = table_id.database_name; - create->table = table_id.table_name; - - Block sample = LogElement::createBlock(); - - auto new_columns_list = std::make_shared(); - new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(sample.getNamesAndTypesList())); - create->set(create->columns_list, new_columns_list); - - ParserStorage storage_parser; - ASTPtr storage_ast = parseQuery( - storage_parser, storage_def.data(), storage_def.data() + storage_def.size(), - "Storage to create table for " + LogElement::name(), 0); - create->set(create->storage, storage_ast); + auto create = getCreateTableQuery(); InterpreterCreateQuery interpreter(create, context); interpreter.setInternal(true); @@ -442,4 +443,28 @@ void SystemLog::prepareTable() is_prepared = true; } + +template +ASTPtr SystemLog::getCreateTableQuery() +{ + auto create = std::make_shared(); + + create->database = table_id.database_name; + create->table = table_id.table_name; + + Block sample = LogElement::createBlock(); + + auto new_columns_list = std::make_shared(); + new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(sample.getNamesAndTypesList())); + create->set(create->columns_list, new_columns_list); + + ParserStorage storage_parser; + ASTPtr storage_ast = parseQuery( + storage_parser, storage_def.data(), storage_def.data() + storage_def.size(), + "Storage to create table for " + LogElement::name(), 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + create->set(create->storage, storage_ast); + + return create; +} + } diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp new file mode 100644 index 00000000000..339fe2dceb3 --- /dev/null +++ b/src/Interpreters/TableJoin.cpp @@ -0,0 +1,245 @@ +#include + +#include + +#include +#include + +#include + +#include + + +namespace DB +{ + +namespace ErrorCodes +{ +} + +TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_) + : size_limits(SizeLimits{settings.max_rows_in_join, settings.max_bytes_in_join, settings.join_overflow_mode}) + , default_max_bytes(settings.default_max_bytes_in_join) + , join_use_nulls(settings.join_use_nulls) + , max_joined_block_rows(settings.max_joined_block_size_rows) + , join_algorithm(settings.join_algorithm) + , partial_merge_join_optimizations(settings.partial_merge_join_optimizations) + , partial_merge_join_rows_in_right_blocks(settings.partial_merge_join_rows_in_right_blocks) + , tmp_volume(tmp_volume_) +{ + if 
(settings.partial_merge_join) + join_algorithm = JoinAlgorithm::PREFER_PARTIAL_MERGE; +} + +void TableJoin::addUsingKey(const ASTPtr & ast) +{ + key_names_left.push_back(ast->getColumnName()); + key_names_right.push_back(ast->getAliasOrColumnName()); + + key_asts_left.push_back(ast); + key_asts_right.push_back(ast); + + auto & right_key = key_names_right.back(); + if (renames.count(right_key)) + right_key = renames[right_key]; +} + +void TableJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast) +{ + key_names_left.push_back(left_table_ast->getColumnName()); + key_names_right.push_back(right_table_ast->getAliasOrColumnName()); + + key_asts_left.push_back(left_table_ast); + key_asts_right.push_back(right_table_ast); +} + +/// @return how many times right key appears in ON section. +size_t TableJoin::rightKeyInclusion(const String & name) const +{ + if (hasUsing()) + return 0; + + size_t count = 0; + for (const auto & key_name : key_names_right) + if (name == key_name) + ++count; + return count; +} + +void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix) +{ + NameSet joined_columns; + NamesAndTypesList dedup_columns; + + for (auto & column : columns_from_joined_table) + { + if (joined_columns.count(column.name)) + continue; + + joined_columns.insert(column.name); + + dedup_columns.push_back(column); + auto & inserted = dedup_columns.back(); + + /// Also qualify unusual column names - that does not look like identifiers. + + if (left_table_columns.count(column.name) || !isValidIdentifierBegin(column.name.at(0))) + inserted.name = right_table_prefix + column.name; + + original_names[inserted.name] = column.name; + if (inserted.name != column.name) + renames[column.name] = inserted.name; + } + + columns_from_joined_table.swap(dedup_columns); +} + +NameSet TableJoin::getQualifiedColumnsSet() const +{ + NameSet out; + for (const auto & names : original_names) + out.insert(names.first); + return out; +} + +NamesWithAliases TableJoin::getNamesWithAliases(const NameSet & required_columns) const +{ + NamesWithAliases out; + for (const auto & column : required_columns) + { + auto it = original_names.find(column); + if (it != original_names.end()) + out.emplace_back(it->second, it->first); /// {original_name, name} + } + return out; +} + +ASTPtr TableJoin::leftKeysList() const +{ + ASTPtr keys_list = std::make_shared(); + keys_list->children = key_asts_left; + return keys_list; +} + +ASTPtr TableJoin::rightKeysList() const +{ + ASTPtr keys_list = std::make_shared(); + if (hasOn()) + keys_list->children = key_asts_right; + return keys_list; +} + +Names TableJoin::requiredJoinedNames() const +{ + NameSet required_columns_set(key_names_right.begin(), key_names_right.end()); + for (const auto & joined_column : columns_added_by_join) + required_columns_set.insert(joined_column.name); + + return Names(required_columns_set.begin(), required_columns_set.end()); +} + +NameSet TableJoin::requiredRightKeys() const +{ + NameSet required; + for (const auto & name : key_names_right) + for (const auto & column : columns_added_by_join) + if (name == column.name) + required.insert(name); + return required; +} + +NamesWithAliases TableJoin::getRequiredColumns(const Block & sample, const Names & action_required_columns) const +{ + NameSet required_columns(action_required_columns.begin(), action_required_columns.end()); + + for (auto & column : requiredJoinedNames()) + if (!sample.has(column)) + required_columns.insert(column); + + return 
getNamesWithAliases(required_columns); +} + +void TableJoin::addJoinedColumn(const NameAndTypePair & joined_column) +{ + if (join_use_nulls && isLeftOrFull(table_join.kind)) + { + auto type = joined_column.type->canBeInsideNullable() ? makeNullable(joined_column.type) : joined_column.type; + columns_added_by_join.emplace_back(NameAndTypePair(joined_column.name, std::move(type))); + } + else + columns_added_by_join.push_back(joined_column); +} + +void TableJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) const +{ + bool right_or_full_join = isRightOrFull(table_join.kind); + bool left_or_full_join = isLeftOrFull(table_join.kind); + + for (auto & col : sample_block) + { + /// Materialize column. + /// Column is not empty if it is constant, but after Join all constants will be materialized. + /// So, we need remove constants from header. + if (col.column) + col.column = nullptr; + + bool make_nullable = join_use_nulls && right_or_full_join; + + if (make_nullable && col.type->canBeInsideNullable()) + col.type = makeNullable(col.type); + } + + for (const auto & col : columns_added_by_join) + { + auto res_type = col.type; + + bool make_nullable = join_use_nulls && left_or_full_join; + + if (make_nullable && res_type->canBeInsideNullable()) + res_type = makeNullable(res_type); + + sample_block.insert(ColumnWithTypeAndName(nullptr, res_type, col.name)); + } +} + +bool TableJoin::sameJoin(const TableJoin * x, const TableJoin * y) +{ + if (!x && !y) + return true; + if (!x || !y) + return false; + + return x->table_join.kind == y->table_join.kind + && x->table_join.strictness == y->table_join.strictness + && x->key_names_left == y->key_names_left + && x->key_names_right == y->key_names_right + && x->columns_added_by_join == y->columns_added_by_join; +} + +bool TableJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, ASTTableJoin::Kind kind_) const +{ + if (strictness_ == strictness() && kind_ == kind()) + return true; + + /// Compatibility: old ANY INNER == new SEMI LEFT + if (strictness_ == ASTTableJoin::Strictness::Semi && isLeft(kind_) && + strictness() == ASTTableJoin::Strictness::RightAny && isInner(kind())) + return true; + if (strictness() == ASTTableJoin::Strictness::Semi && isLeft(kind()) && + strictness_ == ASTTableJoin::Strictness::RightAny && isInner(kind_)) + return true; + + return false; +} + +bool TableJoin::allowMergeJoin() const +{ + bool is_any = (strictness() == ASTTableJoin::Strictness::Any); + bool is_all = (strictness() == ASTTableJoin::Strictness::All); + bool is_semi = (strictness() == ASTTableJoin::Strictness::Semi); + + bool allow_merge_join = (isLeft(kind()) && (is_any || is_all || is_semi)) || (isInner(kind()) && is_all); + return allow_merge_join; +} + +} diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h new file mode 100644 index 00000000000..0b5ed82411a --- /dev/null +++ b/src/Interpreters/TableJoin.h @@ -0,0 +1,139 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace DB +{ + +class Context; +class ASTSelectQuery; +struct DatabaseAndTableWithAlias; +class Block; + +struct Settings; + +class Volume; +using VolumePtr = std::shared_ptr; + +class TableJoin +{ + /** Query of the form `SELECT expr(x) AS k FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k` + * The join is made by column k. + * During the JOIN, + * - in the "right" table, it will be available by alias `k`, since `Project` action for the subquery was executed. 
+ * - in the "left" table, it will be accessible by the name `expr(x)`, since `Project` action has not been executed yet. + * You must remember both of these options. + * + * Query of the form `SELECT ... from t1 ANY LEFT JOIN (SELECT ... from t2) ON expr(t1 columns) = expr(t2 columns)` + * the expression `expr(t2 columns)` will be added to the subquery. + * It's possible to use name `expr(t2 columns)`. + */ + + friend class SyntaxAnalyzer; + + const SizeLimits size_limits; + const size_t default_max_bytes; + const bool join_use_nulls; + const size_t max_joined_block_rows = 0; + JoinAlgorithm join_algorithm; + const bool partial_merge_join_optimizations = false; + const size_t partial_merge_join_rows_in_right_blocks = 0; + + Names key_names_left; + Names key_names_right; /// Duplicating names are qualified. + ASTs key_asts_left; + ASTs key_asts_right; + ASTTableJoin table_join; + ASOF::Inequality asof_inequality = ASOF::Inequality::GreaterOrEquals; + + /// All columns which can be read from joined table. Duplicating names are qualified. + NamesAndTypesList columns_from_joined_table; + /// Columns will be added to block by JOIN. It's a subset of columns_from_joined_table with corrected Nullability + NamesAndTypesList columns_added_by_join; + + /// Name -> original name. Names are the same as in columns_from_joined_table list. + std::unordered_map original_names; + /// Original name -> name. Only renamed columns. + std::unordered_map renames; + + VolumePtr tmp_volume; + +public: + TableJoin(const Settings &, VolumePtr tmp_volume); + + /// for StorageJoin + TableJoin(SizeLimits limits, bool use_nulls, ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, + const Names & key_names_right_) + : size_limits(limits) + , default_max_bytes(0) + , join_use_nulls(use_nulls) + , join_algorithm(JoinAlgorithm::HASH) + , key_names_right(key_names_right_) + { + table_join.kind = kind; + table_join.strictness = strictness; + } + + ASTTableJoin::Kind kind() const { return table_join.kind; } + ASTTableJoin::Strictness strictness() const { return table_join.strictness; } + bool sameStrictnessAndKind(ASTTableJoin::Strictness, ASTTableJoin::Kind) const; + const SizeLimits & sizeLimits() const { return size_limits; } + VolumePtr getTemporaryVolume() { return tmp_volume; } + bool allowMergeJoin() const; + bool preferMergeJoin() const { return join_algorithm == JoinAlgorithm::PREFER_PARTIAL_MERGE; } + bool forceMergeJoin() const { return join_algorithm == JoinAlgorithm::PARTIAL_MERGE; } + bool forceHashJoin() const { return join_algorithm == JoinAlgorithm::HASH; } + + bool forceNullableRight() const { return join_use_nulls && isLeftOrFull(table_join.kind); } + bool forceNullableLeft() const { return join_use_nulls && isRightOrFull(table_join.kind); } + size_t defaultMaxBytes() const { return default_max_bytes; } + size_t maxJoinedBlockRows() const { return max_joined_block_rows; } + size_t maxRowsInRightBlock() const { return partial_merge_join_rows_in_right_blocks; } + bool enablePartialMergeJoinOptimizations() const { return partial_merge_join_optimizations; } + + void addUsingKey(const ASTPtr & ast); + void addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast); + + bool hasUsing() const { return table_join.using_expression_list != nullptr; } + bool hasOn() const { return table_join.on_expression != nullptr; } + + NameSet getQualifiedColumnsSet() const; + NamesWithAliases getNamesWithAliases(const NameSet & required_columns) const; + NamesWithAliases getRequiredColumns(const Block & sample, const Names
& action_required_columns) const; + + void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix); + size_t rightKeyInclusion(const String & name) const; + NameSet requiredRightKeys() const; + + void addJoinedColumn(const NameAndTypePair & joined_column); + void addJoinedColumnsAndCorrectNullability(Block & sample_block) const; + + void setAsofInequality(ASOF::Inequality inequality) { asof_inequality = inequality; } + ASOF::Inequality getAsofInequality() { return asof_inequality; } + + ASTPtr leftKeysList() const; + ASTPtr rightKeysList() const; /// For ON syntax only + + Names requiredJoinedNames() const; + const Names & keyNamesLeft() const { return key_names_left; } + const Names & keyNamesRight() const { return key_names_right; } + const NamesAndTypesList & columnsFromJoinedTable() const { return columns_from_joined_table; } + const NamesAndTypesList & columnsAddedByJoin() const { return columns_added_by_join; } + + /// StorageJoin overrides key names (cause of different names qualification) + void setRightKeys(const Names & keys) { key_names_right = keys; } + + static bool sameJoin(const TableJoin * x, const TableJoin * y); +}; + +} diff --git a/dbms/src/Interpreters/TablesStatus.cpp b/src/Interpreters/TablesStatus.cpp similarity index 100% rename from dbms/src/Interpreters/TablesStatus.cpp rename to src/Interpreters/TablesStatus.cpp diff --git a/dbms/src/Interpreters/TablesStatus.h b/src/Interpreters/TablesStatus.h similarity index 100% rename from dbms/src/Interpreters/TablesStatus.h rename to src/Interpreters/TablesStatus.h diff --git a/dbms/src/Interpreters/TextLog.cpp b/src/Interpreters/TextLog.cpp similarity index 100% rename from dbms/src/Interpreters/TextLog.cpp rename to src/Interpreters/TextLog.cpp diff --git a/dbms/src/Interpreters/TextLog.h b/src/Interpreters/TextLog.h similarity index 100% rename from dbms/src/Interpreters/TextLog.h rename to src/Interpreters/TextLog.h diff --git a/dbms/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp similarity index 100% rename from dbms/src/Interpreters/ThreadStatusExt.cpp rename to src/Interpreters/ThreadStatusExt.cpp diff --git a/dbms/src/Interpreters/TraceLog.cpp b/src/Interpreters/TraceLog.cpp similarity index 100% rename from dbms/src/Interpreters/TraceLog.cpp rename to src/Interpreters/TraceLog.cpp diff --git a/dbms/src/Interpreters/TraceLog.h b/src/Interpreters/TraceLog.h similarity index 100% rename from dbms/src/Interpreters/TraceLog.h rename to src/Interpreters/TraceLog.h diff --git a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp similarity index 88% rename from dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp rename to src/Interpreters/TranslateQualifiedNamesVisitor.cpp index a0f411dcc96..7c31a6db546 100644 --- a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -107,8 +108,9 @@ void TranslateQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &, IdentifierSemantic::setMembership(identifier, table_pos); /// In case if column from the joined table are in source columns, change it's name to qualified. + /// Also always leave unusual identifiers qualified. 
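// Illustrative effect of keeping unusual identifiers qualified (see the widened condition
// just below): a joined table's column stays in long (qualified) form not only when its
// short name collides with a source column, but now also when the name does not begin like
// a valid identifier. For hypothetical tables t1(a) and t2(a, `1c`) joined together:
//
//     a  from t2  ->  t2.a      (collision with t1.a)
//     1c from t2  ->  t2.`1c`   (unusual first character)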
auto & table = data.tables[table_pos].table; - if (table_pos && data.hasColumn(short_name)) + if (table_pos && (data.hasColumn(short_name) || !isValidIdentifierBegin(short_name.at(0)))) IdentifierSemantic::setColumnLongName(identifier, table); else IdentifierSemantic::setColumnShortName(identifier, table); @@ -128,7 +130,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTFunction & node, const ASTPtr &, D func_arguments->children.clear(); } -void TranslateQualifiedNamesMatcher::visit(const ASTQualifiedAsterisk & , const ASTPtr & ast, Data & data) +void TranslateQualifiedNamesMatcher::visit(const ASTQualifiedAsterisk &, const ASTPtr & ast, Data & data) { if (ast->children.size() != 1) throw Exception("Logical error: qualified asterisk must have exactly one child", ErrorCodes::LOGICAL_ERROR); @@ -300,14 +302,45 @@ void TranslateQualifiedNamesMatcher::extractJoinUsingColumns(const ASTPtr ast, D } } -void RestoreQualifiedNamesData::visit(ASTIdentifier & identifier, ASTPtr & ast) + +void RestoreQualifiedNamesMatcher::Data::changeTable(ASTIdentifier & identifier) const +{ + auto match = IdentifierSemantic::canReferColumnToTable(identifier, distributed_table); + switch (match) + { + case IdentifierSemantic::ColumnMatch::AliasedTableName: + case IdentifierSemantic::ColumnMatch::TableName: + case IdentifierSemantic::ColumnMatch::DbAndTable: + IdentifierSemantic::setColumnLongName(identifier, remote_table); + break; + default: + break; + } +} + +bool RestoreQualifiedNamesMatcher::needChildVisit(ASTPtr &, const ASTPtr & child) +{ + /// Do not go into subqueries. + if (child->as()) + return false; // NOLINT + return true; +} + +void RestoreQualifiedNamesMatcher::visit(ASTPtr & ast, Data & data) +{ + if (auto * t = ast->as()) + visit(*t, ast, data); +} + +void RestoreQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &, Data & data) { if (IdentifierSemantic::getColumnName(identifier)) { if (IdentifierSemantic::getMembership(identifier)) { - ast = identifier.clone(); - ast->as()->restoreCompoundName(); + identifier.restoreCompoundName(); + if (data.rename) + data.changeTable(identifier); } } }
diff --git a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h b/src/Interpreters/TranslateQualifiedNamesVisitor.h similarity index 80% rename from dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h rename to src/Interpreters/TranslateQualifiedNamesVisitor.h index 51c6c2c42f0..e8c320671bf 100644 --- a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.h @@ -59,15 +59,24 @@ private: /// It finds columns and translates their names to the normal form. Expands asterisks and qualified asterisks with column names. using TranslateQualifiedNamesVisitor = TranslateQualifiedNamesMatcher::Visitor; -/// Restore ASTIdentifiers to long form -struct RestoreQualifiedNamesData -{ - using TypeToVisit = ASTIdentifier; - static void visit(ASTIdentifier & identifier, ASTPtr & ast); +/// Restore ASTIdentifiers to the long form; change the table name in case of a distributed table.
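The matcher declared just below replaces the old `OneTypeMatcher`-based `RestoreQualifiedNamesData` removed above. A sketch of the intended call pattern; the wrapper function, its arguments, and the way the two table descriptors are obtained are assumptions of the example, not code from this patch:

```cpp
#include <Interpreters/TranslateQualifiedNamesVisitor.h>

// Rewrite identifiers that were resolved against a Distributed table so that
// they refer to the underlying remote table instead (all names here invented).
void restoreForRemote(ASTPtr & query_ast,
                      const DatabaseAndTableWithAlias & distributed,
                      const DatabaseAndTableWithAlias & remote)
{
    RestoreQualifiedNamesMatcher::Data data{distributed, remote, /*rename=*/ true};
    RestoreQualifiedNamesVisitor(data).visit(query_ast);
}
```

`needChildVisit` stops the traversal at subqueries, so only identifiers of the outer query get their compound names restored and, with `rename` set, their table part swapped by `changeTable`.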
+struct RestoreQualifiedNamesMatcher +{ + struct Data + { + DatabaseAndTableWithAlias distributed_table; + DatabaseAndTableWithAlias remote_table; + bool rename = false; + + void changeTable(ASTIdentifier & identifier) const; + }; + + static bool needChildVisit(ASTPtr & node, const ASTPtr & child); + static void visit(ASTPtr & ast, Data & data); + static void visit(ASTIdentifier & identifier, ASTPtr & ast, Data & data); }; -using RestoreQualifiedNamesMatcher = OneTypeMatcher; using RestoreQualifiedNamesVisitor = InDepthNodeVisitor; } diff --git a/dbms/src/Interpreters/addMissingDefaults.cpp b/src/Interpreters/addMissingDefaults.cpp similarity index 100% rename from dbms/src/Interpreters/addMissingDefaults.cpp rename to src/Interpreters/addMissingDefaults.cpp diff --git a/dbms/src/Interpreters/addMissingDefaults.h b/src/Interpreters/addMissingDefaults.h similarity index 100% rename from dbms/src/Interpreters/addMissingDefaults.h rename to src/Interpreters/addMissingDefaults.h diff --git a/dbms/src/Interpreters/addTypeConversionToAST.cpp b/src/Interpreters/addTypeConversionToAST.cpp similarity index 100% rename from dbms/src/Interpreters/addTypeConversionToAST.cpp rename to src/Interpreters/addTypeConversionToAST.cpp diff --git a/dbms/src/Interpreters/addTypeConversionToAST.h b/src/Interpreters/addTypeConversionToAST.h similarity index 100% rename from dbms/src/Interpreters/addTypeConversionToAST.h rename to src/Interpreters/addTypeConversionToAST.h diff --git a/dbms/src/Interpreters/asof.h b/src/Interpreters/asof.h similarity index 100% rename from dbms/src/Interpreters/asof.h rename to src/Interpreters/asof.h diff --git a/dbms/src/Interpreters/castColumn.cpp b/src/Interpreters/castColumn.cpp similarity index 89% rename from dbms/src/Interpreters/castColumn.cpp rename to src/Interpreters/castColumn.cpp index dd281540b51..2e6604f7df5 100644 --- a/dbms/src/Interpreters/castColumn.cpp +++ b/src/Interpreters/castColumn.cpp @@ -38,9 +38,4 @@ ColumnPtr castColumn(const ColumnWithTypeAndName & arg, const DataTypePtr & type return temporary_block.getByPosition(2).column; } -ColumnPtr castColumn(const ColumnWithTypeAndName & arg, const DataTypePtr & type, const Context &) -{ - return castColumn(arg, type); -} - } diff --git a/src/Interpreters/castColumn.h b/src/Interpreters/castColumn.h new file mode 100644 index 00000000000..44c7f071c26 --- /dev/null +++ b/src/Interpreters/castColumn.h @@ -0,0 +1,9 @@ +#pragma once + +#include + + +namespace DB +{ +ColumnPtr castColumn(const ColumnWithTypeAndName & arg, const DataTypePtr & type); +} diff --git a/dbms/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp similarity index 100% rename from dbms/src/Interpreters/convertFieldToType.cpp rename to src/Interpreters/convertFieldToType.cpp diff --git a/dbms/src/Interpreters/convertFieldToType.h b/src/Interpreters/convertFieldToType.h similarity index 100% rename from dbms/src/Interpreters/convertFieldToType.h rename to src/Interpreters/convertFieldToType.h diff --git a/dbms/src/Interpreters/createBlockSelector.cpp b/src/Interpreters/createBlockSelector.cpp similarity index 98% rename from dbms/src/Interpreters/createBlockSelector.cpp rename to src/Interpreters/createBlockSelector.cpp index 2b08ca0845c..0759b9d9601 100644 --- a/dbms/src/Interpreters/createBlockSelector.cpp +++ b/src/Interpreters/createBlockSelector.cpp @@ -5,8 +5,8 @@ #include -#ifdef __SSE2__ - #define LIBDIVIDE_USE_SSE2 1 +#if defined(__SSE2__) +# define LIBDIVIDE_SSE2 1 #endif #include diff --git 
a/dbms/src/Interpreters/createBlockSelector.h b/src/Interpreters/createBlockSelector.h similarity index 100% rename from dbms/src/Interpreters/createBlockSelector.h rename to src/Interpreters/createBlockSelector.h
diff --git a/dbms/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp similarity index 100% rename from dbms/src/Interpreters/evaluateConstantExpression.cpp rename to src/Interpreters/evaluateConstantExpression.cpp
diff --git a/dbms/src/Interpreters/evaluateConstantExpression.h b/src/Interpreters/evaluateConstantExpression.h similarity index 100% rename from dbms/src/Interpreters/evaluateConstantExpression.h rename to src/Interpreters/evaluateConstantExpression.h
diff --git a/dbms/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp similarity index 97% rename from dbms/src/Interpreters/executeQuery.cpp rename to src/Interpreters/executeQuery.cpp index c9c66832f08..68bebb83619 100644 --- a/dbms/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -157,7 +157,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c /// Log the start of query execution into the table if necessary. QueryLogElement elem; - elem.type = QueryLogElement::EXCEPTION_BEFORE_START; + elem.type = QueryLogElementType::EXCEPTION_BEFORE_START; elem.event_time = current_time; elem.query_start_time = current_time; @@ -175,7 +175,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c /// Update performance counters before logging to query_log CurrentThread::finalizePerformanceCounters(); - if (settings.log_queries) + if (settings.log_queries && elem.type >= settings.log_queries_min_type) if (auto query_log = context.getQueryLog()) query_log->add(elem); } @@ -400,7 +400,7 @@ static std::tuple executeQueryImpl( { QueryLogElement elem; - elem.type = QueryLogElement::QUERY_START; + elem.type = QueryLogElementType::QUERY_START; elem.event_time = current_time; elem.query_start_time = current_time; @@ -412,7 +412,7 @@ static std::tuple executeQueryImpl( bool log_queries = settings.log_queries && !internal; /// Log the start of query execution into the system table, if needed. - if (log_queries) + if (log_queries && elem.type >= settings.log_queries_min_type) { if (settings.log_query_settings) elem.query_settings = std::make_shared(context.getSettingsRef()); @@ -422,7 +422,7 @@ static std::tuple executeQueryImpl( } /// Also make it possible for the caller to log successful query finish and exceptions during execution.
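All four `log_queries` call sites touched in this file gain the same guard. Distilled into a single predicate for clarity (the helper name is invented; the `>=` comparison relies on the enum declaration order stated in the comment):

```cpp
#include <Core/Settings.h>
#include <Interpreters/QueryLog.h>

// An element reaches system.query_log only when query logging is enabled and the
// element's type is at least log_queries_min_type. This assumes the declaration
// order QUERY_START < QUERY_FINISH < EXCEPTION_BEFORE_START < EXCEPTION_WHILE_PROCESSING.
static bool shouldLogToQueryLog(const Settings & settings, QueryLogElementType type)
{
    return settings.log_queries && type >= settings.log_queries_min_type;
}
```

Under that rule, `SET log_queries_min_type = 'QUERY_FINISH'` suppresses QUERY_START records while keeping query finishes and both kinds of exceptions.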
- auto finish_callback = [elem, &context, log_queries] (IBlockInputStream * stream_in, IBlockOutputStream * stream_out) mutable + auto finish_callback = [elem, &context, log_queries, log_queries_min_type = settings.log_queries_min_type] (IBlockInputStream * stream_in, IBlockOutputStream * stream_out) mutable { QueryStatus * process_list_elem = context.getProcessListElement(); @@ -436,7 +436,7 @@ static std::tuple executeQueryImpl( double elapsed_seconds = info.elapsed_seconds; - elem.type = QueryLogElement::QUERY_FINISH; + elem.type = QueryLogElementType::QUERY_FINISH; elem.event_time = time(nullptr); elem.query_duration_ms = elapsed_seconds * 1000; @@ -484,19 +484,19 @@ static std::tuple executeQueryImpl( elem.thread_ids = std::move(info.thread_ids); elem.profile_counters = std::move(info.profile_counters); - if (log_queries) + if (log_queries && elem.type >= log_queries_min_type) { if (auto query_log = context.getQueryLog()) query_log->add(elem); } }; - auto exception_callback = [elem, &context, log_queries, quota(quota)] () mutable + auto exception_callback = [elem, &context, log_queries, log_queries_min_type = settings.log_queries_min_type, quota(quota)] () mutable { if (quota) quota->used(Quota::ERRORS, 1, /* check_exceeded = */ false); - elem.type = QueryLogElement::EXCEPTION_WHILE_PROCESSING; + elem.type = QueryLogElementType::EXCEPTION_WHILE_PROCESSING; elem.event_time = time(nullptr); elem.query_duration_ms = 1000 * (elem.event_time - elem.query_start_time); @@ -529,7 +529,7 @@ static std::tuple executeQueryImpl( logException(context, elem); /// In case of exception we log internal queries also - if (log_queries) + if (log_queries && elem.type >= log_queries_min_type) { if (auto query_log = context.getQueryLog()) query_log->add(elem); diff --git a/dbms/src/Interpreters/executeQuery.h b/src/Interpreters/executeQuery.h similarity index 100% rename from dbms/src/Interpreters/executeQuery.h rename to src/Interpreters/executeQuery.h diff --git a/dbms/src/Interpreters/getClusterName.cpp b/src/Interpreters/getClusterName.cpp similarity index 100% rename from dbms/src/Interpreters/getClusterName.cpp rename to src/Interpreters/getClusterName.cpp diff --git a/dbms/src/Interpreters/getClusterName.h b/src/Interpreters/getClusterName.h similarity index 100% rename from dbms/src/Interpreters/getClusterName.h rename to src/Interpreters/getClusterName.h diff --git a/dbms/src/Interpreters/getTableExpressions.cpp b/src/Interpreters/getTableExpressions.cpp similarity index 100% rename from dbms/src/Interpreters/getTableExpressions.cpp rename to src/Interpreters/getTableExpressions.cpp diff --git a/dbms/src/Interpreters/getTableExpressions.h b/src/Interpreters/getTableExpressions.h similarity index 100% rename from dbms/src/Interpreters/getTableExpressions.h rename to src/Interpreters/getTableExpressions.h diff --git a/dbms/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp similarity index 100% rename from dbms/src/Interpreters/inplaceBlockConversions.cpp rename to src/Interpreters/inplaceBlockConversions.cpp diff --git a/dbms/src/Interpreters/inplaceBlockConversions.h b/src/Interpreters/inplaceBlockConversions.h similarity index 100% rename from dbms/src/Interpreters/inplaceBlockConversions.h rename to src/Interpreters/inplaceBlockConversions.h diff --git a/dbms/src/Interpreters/interpretSubquery.cpp b/src/Interpreters/interpretSubquery.cpp similarity index 100% rename from dbms/src/Interpreters/interpretSubquery.cpp rename to 
src/Interpreters/interpretSubquery.cpp
diff --git a/dbms/src/Interpreters/interpretSubquery.h b/src/Interpreters/interpretSubquery.h similarity index 100% rename from dbms/src/Interpreters/interpretSubquery.h rename to src/Interpreters/interpretSubquery.h
diff --git a/src/Interpreters/joinDispatch.h b/src/Interpreters/joinDispatch.h new file mode 100644 index 00000000000..af16550e17e --- /dev/null +++ b/src/Interpreters/joinDispatch.h @@ -0,0 +1,106 @@ +#pragma once + +#include +#include + +#include + + +/** Used in the implementation of Join to process different data structures. + */ + +namespace DB +{ + +template +struct MapGetter; + +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOneFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOneFlagged; }; + +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOneFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; + +template <> struct MapGetter { using Map = HashJoin::MapsAll; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; + +/// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for template instantiation. +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; + +/// Only ANTI LEFT and ANTI RIGHT are valid. INNER and FULL are here for template instantiation.
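Read as a lookup table, `MapGetter` turns a (kind, strictness) pair into the concrete hash-map type at compile time, and `joinDispatchInit`, defined a little further below, walks the same grid at run time. A small usage sketch before the remaining specializations; the template arguments are spelled out here on the assumption that they match the parameter lists elided above:

```cpp
#include <type_traits>
#include <Interpreters/joinDispatch.h>

// Compile time: ANY LEFT JOIN keeps a single row per key and needs no per-row
// "used" flag on the right side, so it resolves to MapsOne (per the table above).
using AnyLeftMap = MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Any>::Map;
static_assert(std::is_same_v<AnyLeftMap, HashJoin::MapsOne>);

// Run time: default-construct the matching variant alternative.
void initAnyLeft(HashJoin::MapsVariant & maps)
{
    bool inited = joinDispatchInit(ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Any, maps);
    (void)inited; // true: (Left, Any) is in the KINDS x STRICTNESSES grid
}
```

The NOTE inside `joinDispatch` below explains why the grid is walked with one flat `static_for` rather than two nested constexpr loops.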
+template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; + +template +struct MapGetter +{ + using Map = HashJoin::MapsAsof; +}; + + +static constexpr std::array STRICTNESSES = { + ASTTableJoin::Strictness::RightAny, + ASTTableJoin::Strictness::Any, + ASTTableJoin::Strictness::All, + ASTTableJoin::Strictness::Asof, + ASTTableJoin::Strictness::Semi, + ASTTableJoin::Strictness::Anti, +}; + +static constexpr std::array KINDS = { + ASTTableJoin::Kind::Left, + ASTTableJoin::Kind::Inner, + ASTTableJoin::Kind::Full, + ASTTableJoin::Kind::Right +}; + +/// Init specified join map +inline bool joinDispatchInit(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, HashJoin::MapsVariant & maps) +{ + return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) + { + constexpr auto i = ij / STRICTNESSES.size(); + constexpr auto j = ij % STRICTNESSES.size(); + if (kind == KINDS[i] && strictness == STRICTNESSES[j]) + { + maps = typename MapGetter::Map(); + return true; + } + return false; + }); +} + +/// Call function on specified join map +template +inline bool joinDispatch(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, MapsVariant & maps, Func && func) +{ + return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) + { + // NOTE: Avoid using nested static loop as GCC and CLANG have bugs in different ways + // See https://stackoverflow.com/questions/44386415/gcc-and-clang-disagree-about-c17-constexpr-lambda-captures + constexpr auto i = ij / STRICTNESSES.size(); + constexpr auto j = ij % STRICTNESSES.size(); + if (kind == KINDS[i] && strictness == STRICTNESSES[j]) + { + func( + std::integral_constant(), + std::integral_constant(), + std::get::Map>(maps)); + return true; + } + return false; + }); +} + +} diff --git a/dbms/src/Interpreters/join_common.cpp b/src/Interpreters/join_common.cpp similarity index 100% rename from dbms/src/Interpreters/join_common.cpp rename to src/Interpreters/join_common.cpp diff --git a/dbms/src/Interpreters/join_common.h b/src/Interpreters/join_common.h similarity index 100% rename from dbms/src/Interpreters/join_common.h rename to src/Interpreters/join_common.h diff --git a/dbms/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp similarity index 98% rename from dbms/src/Interpreters/loadMetadata.cpp rename to src/Interpreters/loadMetadata.cpp index 53954faa2c0..42bef47a501 100644 --- a/dbms/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -36,7 +36,7 @@ static void executeCreateQuery( bool has_force_restore_data_flag) { ParserCreateQuery parser; - ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "in file " + file_name, 0); + ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "in file " + file_name, 0, context.getSettingsRef().max_parser_depth); auto & ast_create_query = ast->as(); ast_create_query.attach = true; diff --git a/dbms/src/Interpreters/loadMetadata.h b/src/Interpreters/loadMetadata.h similarity index 100% rename from dbms/src/Interpreters/loadMetadata.h rename to src/Interpreters/loadMetadata.h diff --git a/src/Interpreters/misc.h b/src/Interpreters/misc.h new file mode 100644 index 00000000000..0fd0e12a4bb --- /dev/null +++ b/src/Interpreters/misc.h @@ -0,0 +1,21 @@ +#pragma once + +namespace DB +{ + +inline bool 
functionIsInOperator(const std::string & name) +{ + return name == "in" || name == "notIn" || name == "nullIn" || name == "notNullIn"; +} + +inline bool functionIsInOrGlobalInOperator(const std::string & name) +{ + return functionIsInOperator(name) || name == "globalIn" || name == "globalNotIn" || name == "globalNullIn" || name == "globalNotNullIn"; +} + +inline bool functionIsLikeOperator(const std::string & name) +{ + return name == "like" || name == "notLike"; +} + +} diff --git a/dbms/src/Interpreters/sortBlock.cpp b/src/Interpreters/sortBlock.cpp similarity index 100% rename from dbms/src/Interpreters/sortBlock.cpp rename to src/Interpreters/sortBlock.cpp diff --git a/dbms/src/Interpreters/sortBlock.h b/src/Interpreters/sortBlock.h similarity index 100% rename from dbms/src/Interpreters/sortBlock.h rename to src/Interpreters/sortBlock.h diff --git a/src/Interpreters/tests/CMakeLists.txt b/src/Interpreters/tests/CMakeLists.txt new file mode 100644 index 00000000000..26ebf007e6c --- /dev/null +++ b/src/Interpreters/tests/CMakeLists.txt @@ -0,0 +1,64 @@ +add_executable (expression expression.cpp) +target_link_libraries (expression PRIVATE dbms clickhouse_parsers) + +add_executable (create_query create_query.cpp) +target_link_libraries (create_query PRIVATE dbms clickhouse_parsers) + +add_executable (select_query select_query.cpp) +target_link_libraries (select_query PRIVATE clickhouse_storages_system dbms clickhouse_common_io) + +add_executable (aggregate aggregate.cpp) +target_link_libraries (aggregate PRIVATE dbms) + +add_executable (hash_map hash_map.cpp) +target_include_directories (hash_map SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) +target_link_libraries (hash_map PRIVATE dbms) + +add_executable (hash_map_lookup hash_map_lookup.cpp) +target_include_directories (hash_map_lookup SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) +target_link_libraries (hash_map_lookup PRIVATE dbms) + +add_executable (hash_map3 hash_map3.cpp) +target_include_directories(hash_map3 SYSTEM BEFORE PRIVATE ${METROHASH_INCLUDE_DIR}) +target_link_libraries (hash_map3 PRIVATE dbms ${FARMHASH_LIBRARIES} ${METROHASH_LIBRARIES}) + +add_executable (hash_map_string hash_map_string.cpp) +target_include_directories (hash_map_string SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) +target_link_libraries (hash_map_string PRIVATE dbms) + +add_executable (hash_map_string_2 hash_map_string_2.cpp) +target_link_libraries (hash_map_string_2 PRIVATE dbms) + +add_executable (hash_map_string_3 hash_map_string_3.cpp) +target_include_directories(hash_map_string_3 SYSTEM BEFORE PRIVATE ${METROHASH_INCLUDE_DIR}) +target_link_libraries (hash_map_string_3 PRIVATE dbms ${FARMHASH_LIBRARIES} ${METROHASH_LIBRARIES}) + +add_executable (hash_map_string_small hash_map_string_small.cpp) +target_include_directories (hash_map_string_small SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) +target_link_libraries (hash_map_string_small PRIVATE dbms) + +add_executable (string_hash_map string_hash_map.cpp) +target_link_libraries (string_hash_map PRIVATE dbms) + +add_executable (string_hash_map_aggregation string_hash_map.cpp) +target_link_libraries (string_hash_map_aggregation PRIVATE dbms) + +add_executable (two_level_hash_map two_level_hash_map.cpp) +target_include_directories (two_level_hash_map SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) +target_link_libraries (two_level_hash_map PRIVATE dbms) + +add_executable (in_join_subqueries_preprocessor in_join_subqueries_preprocessor.cpp) +target_link_libraries (in_join_subqueries_preprocessor 
PRIVATE dbms clickhouse_parsers) +add_check(in_join_subqueries_preprocessor) + +add_executable (expression_analyzer expression_analyzer.cpp) +target_link_libraries (expression_analyzer PRIVATE dbms clickhouse_storages_system clickhouse_parsers clickhouse_common_io) +add_check(expression_analyzer) + +add_executable (users users.cpp) +target_link_libraries (users PRIVATE dbms clickhouse_common_config) + +if (OS_LINUX) + add_executable (internal_iotop internal_iotop.cpp) + target_link_libraries (internal_iotop PRIVATE dbms) +endif () diff --git a/dbms/src/Interpreters/tests/aggregate.cpp b/src/Interpreters/tests/aggregate.cpp similarity index 100% rename from dbms/src/Interpreters/tests/aggregate.cpp rename to src/Interpreters/tests/aggregate.cpp diff --git a/dbms/src/Interpreters/tests/create_query.cpp b/src/Interpreters/tests/create_query.cpp similarity index 99% rename from dbms/src/Interpreters/tests/create_query.cpp rename to src/Interpreters/tests/create_query.cpp index 20a0bfcb062..ad006de3be6 100644 --- a/dbms/src/Interpreters/tests/create_query.cpp +++ b/src/Interpreters/tests/create_query.cpp @@ -76,7 +76,7 @@ try ") ENGINE = Log"; ParserCreateQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); Context context = Context::createGlobal(); context.makeGlobalContext(); diff --git a/dbms/src/Interpreters/tests/expression.cpp b/src/Interpreters/tests/expression.cpp similarity index 99% rename from dbms/src/Interpreters/tests/expression.cpp rename to src/Interpreters/tests/expression.cpp index 8d64b4f64ce..5432e405d1c 100644 --- a/dbms/src/Interpreters/tests/expression.cpp +++ b/src/Interpreters/tests/expression.cpp @@ -41,7 +41,7 @@ int main(int argc, char ** argv) "s1 < s2 AND x % 3 < x % 5"; ParserSelectQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); formatAST(*ast, std::cerr); std::cerr << std::endl; diff --git a/dbms/src/Interpreters/tests/expression_analyzer.cpp b/src/Interpreters/tests/expression_analyzer.cpp similarity index 98% rename from dbms/src/Interpreters/tests/expression_analyzer.cpp rename to src/Interpreters/tests/expression_analyzer.cpp index 2f8b6b2aef2..66b4900584f 100644 --- a/dbms/src/Interpreters/tests/expression_analyzer.cpp +++ b/src/Interpreters/tests/expression_analyzer.cpp @@ -72,7 +72,7 @@ private: ParserSelectQuery parser; std::string message; auto text = query.data(); - if (ASTPtr ast = tryParseQuery(parser, text, text + query.size(), message, false, "", false, 0)) + if (ASTPtr ast = tryParseQuery(parser, text, text + query.size(), message, false, "", false, 0, 0)) return ast; throw Exception(message, ErrorCodes::SYNTAX_ERROR); } diff --git a/src/Interpreters/tests/gtest_cycle_aliases.cpp b/src/Interpreters/tests/gtest_cycle_aliases.cpp new file mode 100644 index 00000000000..593db93de3e --- /dev/null +++ b/src/Interpreters/tests/gtest_cycle_aliases.cpp @@ -0,0 +1,24 @@ +#include + +#include +#include +#include +#include +#include + +using namespace DB; + +TEST(QueryNormalizer, SimpleCycleAlias) +{ + String query = "a as b, b as a"; + ParserExpressionList parser(false); + ASTPtr ast = parseQuery(parser, query, 0, 0); + + Aliases aliases; + aliases["a"] = parseQuery(parser, "b as a", 0, 0)->children[0]; + aliases["b"] = parseQuery(parser, "a as b", 0, 0)->children[0]; + + Settings settings; 
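+    /// The two aliases form a cycle (a -> b -> a): each substitution reintroduces the other alias, so QueryNormalizer must detect this and throw instead of substituting forever.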
+ QueryNormalizer::Data normalizer_data(aliases, settings); + EXPECT_THROW(QueryNormalizer(normalizer_data).visit(ast), Exception); +} diff --git a/dbms/src/Interpreters/tests/gtest_merge_tree_set_index.cpp b/src/Interpreters/tests/gtest_merge_tree_set_index.cpp similarity index 100% rename from dbms/src/Interpreters/tests/gtest_merge_tree_set_index.cpp rename to src/Interpreters/tests/gtest_merge_tree_set_index.cpp diff --git a/dbms/src/Interpreters/tests/hash_map.cpp b/src/Interpreters/tests/hash_map.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map.cpp rename to src/Interpreters/tests/hash_map.cpp diff --git a/dbms/src/Interpreters/tests/hash_map3.cpp b/src/Interpreters/tests/hash_map3.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map3.cpp rename to src/Interpreters/tests/hash_map3.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_lookup.cpp b/src/Interpreters/tests/hash_map_lookup.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_lookup.cpp rename to src/Interpreters/tests/hash_map_lookup.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_string.cpp b/src/Interpreters/tests/hash_map_string.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string.cpp rename to src/Interpreters/tests/hash_map_string.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_string_2.cpp b/src/Interpreters/tests/hash_map_string_2.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string_2.cpp rename to src/Interpreters/tests/hash_map_string_2.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_string_3.cpp b/src/Interpreters/tests/hash_map_string_3.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string_3.cpp rename to src/Interpreters/tests/hash_map_string_3.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_string_small.cpp b/src/Interpreters/tests/hash_map_string_small.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string_small.cpp rename to src/Interpreters/tests/hash_map_string_small.cpp diff --git a/dbms/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp b/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp similarity index 99% rename from dbms/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp rename to src/Interpreters/tests/in_join_subqueries_preprocessor.cpp index 9a17f03f32a..412557f13b8 100644 --- a/dbms/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp +++ b/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp @@ -1224,7 +1224,7 @@ bool parse(DB::ASTPtr & ast, const std::string & query) std::string message; auto begin = query.data(); auto end = begin + query.size(); - ast = DB::tryParseQuery(parser, begin, end, message, false, "", false, 0); + ast = DB::tryParseQuery(parser, begin, end, message, false, "", false, 0, 0); return ast != nullptr; } diff --git a/dbms/src/Interpreters/tests/internal_iotop.cpp b/src/Interpreters/tests/internal_iotop.cpp similarity index 100% rename from dbms/src/Interpreters/tests/internal_iotop.cpp rename to src/Interpreters/tests/internal_iotop.cpp diff --git a/dbms/src/Interpreters/tests/select_query.cpp b/src/Interpreters/tests/select_query.cpp similarity index 100% rename from dbms/src/Interpreters/tests/select_query.cpp rename to src/Interpreters/tests/select_query.cpp diff --git a/dbms/src/Interpreters/tests/string_hash_map.cpp b/src/Interpreters/tests/string_hash_map.cpp similarity index 100% rename from 
dbms/src/Interpreters/tests/string_hash_map.cpp rename to src/Interpreters/tests/string_hash_map.cpp diff --git a/dbms/src/Interpreters/tests/two_level_hash_map.cpp b/src/Interpreters/tests/two_level_hash_map.cpp similarity index 100% rename from dbms/src/Interpreters/tests/two_level_hash_map.cpp rename to src/Interpreters/tests/two_level_hash_map.cpp diff --git a/dbms/src/Interpreters/tests/users.cpp b/src/Interpreters/tests/users.cpp similarity index 100% rename from dbms/src/Interpreters/tests/users.cpp rename to src/Interpreters/tests/users.cpp diff --git a/dbms/src/NOTICE b/src/NOTICE similarity index 100% rename from dbms/src/NOTICE rename to src/NOTICE diff --git a/dbms/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp similarity index 96% rename from dbms/src/Parsers/ASTAlterQuery.cpp rename to src/Parsers/ASTAlterQuery.cpp index 50d751a9c3b..9ec2fad5768 100644 --- a/dbms/src/Parsers/ASTAlterQuery.cpp +++ b/src/Parsers/ASTAlterQuery.cpp @@ -56,6 +56,11 @@ ASTPtr ASTAlterCommand::clone() const res->values = values->clone(); res->children.push_back(res->values); } + if (rename_to) + { + res->rename_to = rename_to->clone(); + res->children.push_back(res->rename_to); + } return res; } @@ -285,6 +290,15 @@ void ASTAlterCommand::formatImpl( { settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "REFRESH " << (settings.hilite ? hilite_none : ""); } + else if (type == ASTAlterCommand::RENAME_COLUMN) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "RENAME COLUMN " << (if_exists ? "IF EXISTS " : "") + << (settings.hilite ? hilite_none : ""); + column->formatImpl(settings, state, frame); + + settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO "; + rename_to->formatImpl(settings, state, frame); + } else throw Exception("Unexpected type of ALTER", ErrorCodes::UNEXPECTED_AST_STRUCTURE); } diff --git a/dbms/src/Parsers/ASTAlterQuery.h b/src/Parsers/ASTAlterQuery.h similarity index 98% rename from dbms/src/Parsers/ASTAlterQuery.h rename to src/Parsers/ASTAlterQuery.h index de36394a9c3..85e9a4d7552 100644 --- a/dbms/src/Parsers/ASTAlterQuery.h +++ b/src/Parsers/ASTAlterQuery.h @@ -29,6 +29,7 @@ public: DROP_COLUMN, MODIFY_COLUMN, COMMENT_COLUMN, + RENAME_COLUMN, MODIFY_ORDER_BY, MODIFY_TTL, MATERIALIZE_TTL, @@ -69,6 +70,7 @@ public: /** The ADD COLUMN query here optionally stores the name of the column following AFTER * The DROP query stores the column name for deletion here + * Also used for RENAME COLUMN. 
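+     * For example, ALTER TABLE t RENAME COLUMN a TO b stores the identifier `a` here and `b` in rename_to.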
*/ ASTPtr column; @@ -155,6 +157,9 @@ public: String to_database; String to_table; + /// Target column name + ASTPtr rename_to; + String getID(char delim) const override { return "AlterCommand" + (delim + std::to_string(static_cast(type))); } ASTPtr clone() const override; diff --git a/dbms/src/Parsers/ASTAssignment.h b/src/Parsers/ASTAssignment.h similarity index 100% rename from dbms/src/Parsers/ASTAssignment.h rename to src/Parsers/ASTAssignment.h diff --git a/dbms/src/Parsers/ASTAsterisk.cpp b/src/Parsers/ASTAsterisk.cpp similarity index 100% rename from dbms/src/Parsers/ASTAsterisk.cpp rename to src/Parsers/ASTAsterisk.cpp diff --git a/dbms/src/Parsers/ASTAsterisk.h b/src/Parsers/ASTAsterisk.h similarity index 100% rename from dbms/src/Parsers/ASTAsterisk.h rename to src/Parsers/ASTAsterisk.h diff --git a/dbms/src/Parsers/ASTCheckQuery.h b/src/Parsers/ASTCheckQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCheckQuery.h rename to src/Parsers/ASTCheckQuery.h diff --git a/dbms/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ASTColumnDeclaration.cpp rename to src/Parsers/ASTColumnDeclaration.cpp diff --git a/dbms/src/Parsers/ASTColumnDeclaration.h b/src/Parsers/ASTColumnDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTColumnDeclaration.h rename to src/Parsers/ASTColumnDeclaration.h diff --git a/dbms/src/Parsers/ASTColumnsMatcher.cpp b/src/Parsers/ASTColumnsMatcher.cpp similarity index 100% rename from dbms/src/Parsers/ASTColumnsMatcher.cpp rename to src/Parsers/ASTColumnsMatcher.cpp diff --git a/dbms/src/Parsers/ASTColumnsMatcher.h b/src/Parsers/ASTColumnsMatcher.h similarity index 100% rename from dbms/src/Parsers/ASTColumnsMatcher.h rename to src/Parsers/ASTColumnsMatcher.h diff --git a/dbms/src/Parsers/ASTConstraintDeclaration.cpp b/src/Parsers/ASTConstraintDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ASTConstraintDeclaration.cpp rename to src/Parsers/ASTConstraintDeclaration.cpp diff --git a/dbms/src/Parsers/ASTConstraintDeclaration.h b/src/Parsers/ASTConstraintDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTConstraintDeclaration.h rename to src/Parsers/ASTConstraintDeclaration.h diff --git a/dbms/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTCreateQuery.cpp rename to src/Parsers/ASTCreateQuery.cpp diff --git a/dbms/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCreateQuery.h rename to src/Parsers/ASTCreateQuery.h diff --git a/dbms/src/Parsers/ASTCreateQuotaQuery.cpp b/src/Parsers/ASTCreateQuotaQuery.cpp similarity index 85% rename from dbms/src/Parsers/ASTCreateQuotaQuery.cpp rename to src/Parsers/ASTCreateQuotaQuery.cpp index 7613fce6167..cd064756fb6 100644 --- a/dbms/src/Parsers/ASTCreateQuotaQuery.cpp +++ b/src/Parsers/ASTCreateQuotaQuery.cpp @@ -28,16 +28,17 @@ namespace } - void formatLimit(ResourceType resource_type, ResourceAmount max, const IAST::FormatSettings & settings) + void formatLimit(ResourceType resource_type, ResourceAmount max, bool first, const IAST::FormatSettings & settings) { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " MAX " << Quota::resourceTypeToKeyword(resource_type) - << (settings.hilite ? IAST::hilite_none : ""); + if (first) + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " MAX" << (settings.hilite ? 
IAST::hilite_none : ""); + else + settings.ostr << ","; - settings.ostr << (settings.hilite ? IAST::hilite_operator : "") << " = " << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << " " << (settings.hilite ? IAST::hilite_keyword : "") << Quota::resourceTypeToKeyword(resource_type) + << (settings.hilite ? IAST::hilite_none : "") << " "; - if (max == Quota::UNLIMITED) - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "ANY" << (settings.hilite ? IAST::hilite_none : ""); - else if (resource_type == Quota::EXECUTION_TIME) + if (resource_type == Quota::EXECUTION_TIME) settings.ostr << Quota::executionTimeToSeconds(max); else settings.ostr << max; @@ -59,9 +60,9 @@ namespace << interval_kind.toKeyword() << (settings.hilite ? IAST::hilite_none : ""); - if (limits.unset_tracking) + if (limits.drop) { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " UNSET TRACKING" << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " NO LIMITS" << (settings.hilite ? IAST::hilite_none : ""); } else { @@ -70,14 +71,12 @@ namespace { if (limits.max[resource_type]) { - if (limit_found) - settings.ostr << ","; + formatLimit(resource_type, *limits.max[resource_type], !limit_found, settings); limit_found = true; - formatLimit(resource_type, *limits.max[resource_type], settings); } } if (!limit_found) - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " TRACKING" << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " TRACKING ONLY" << (settings.hilite ? IAST::hilite_none : ""); } } @@ -135,6 +134,8 @@ void ASTCreateQuotaQuery::formatImpl(const FormatSettings & settings, FormatStat settings.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(settings); + if (!new_name.empty()) formatRenameTo(new_name, settings); @@ -146,4 +147,12 @@ void ASTCreateQuotaQuery::formatImpl(const FormatSettings & settings, FormatStat if (roles && (!roles->empty() || alter)) formatToRoles(*roles, settings); } + + +void ASTCreateQuotaQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (roles) + roles->replaceCurrentUserTagWithName(current_user_name); +} + } diff --git a/src/Parsers/ASTCreateQuotaQuery.h b/src/Parsers/ASTCreateQuotaQuery.h new file mode 100644 index 00000000000..70f8cba6de0 --- /dev/null +++ b/src/Parsers/ASTCreateQuotaQuery.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ +class ASTExtendedRoleSet; + + +/** CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name + * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] + * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] + * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + * + * ALTER QUOTA [IF EXISTS] name + * [RENAME TO new_name] + * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] + * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] + * [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] + */ +class ASTCreateQuotaQuery : public IAST, public ASTQueryWithOnCluster +{ +public: + bool alter = false; + bool attach = false; + + bool if_exists = false; + bool if_not_exists = false; + bool or_replace = false; + + String name; + String new_name; + using KeyType = Quota::KeyType; + std::optional key_type; + + using ResourceType = Quota::ResourceType; + using ResourceAmount = Quota::ResourceAmount; + static constexpr size_t MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; + + struct Limits + { + std::optional max[MAX_RESOURCE_TYPE]; + bool drop = false; + std::chrono::seconds duration = std::chrono::seconds::zero(); + bool randomize_interval = false; + }; + std::vector all_limits; + + std::shared_ptr roles; + + String getID(char) const override; + ASTPtr clone() const override; + void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } +}; +} diff --git a/dbms/src/Parsers/ASTCreateRoleQuery.cpp b/src/Parsers/ASTCreateRoleQuery.cpp similarity index 98% rename from dbms/src/Parsers/ASTCreateRoleQuery.cpp rename to src/Parsers/ASTCreateRoleQuery.cpp index 3d69e4dac59..f3873f7a3eb 100644 --- a/dbms/src/Parsers/ASTCreateRoleQuery.cpp +++ b/src/Parsers/ASTCreateRoleQuery.cpp @@ -54,6 +54,8 @@ void ASTCreateRoleQuery::formatImpl(const FormatSettings & format, FormatState & format.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(format); + if (!new_name.empty()) formatRenameTo(new_name, format); diff --git a/dbms/src/Parsers/ASTCreateRoleQuery.h b/src/Parsers/ASTCreateRoleQuery.h similarity index 78% rename from dbms/src/Parsers/ASTCreateRoleQuery.h rename to src/Parsers/ASTCreateRoleQuery.h index 69bb9896fa3..ab306dd5dec 100644 --- a/dbms/src/Parsers/ASTCreateRoleQuery.h +++ b/src/Parsers/ASTCreateRoleQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -15,7 +16,7 @@ class ASTSettingsProfileElements; * [RENAME TO new_name] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ -class ASTCreateRoleQuery : public IAST +class ASTCreateRoleQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -33,5 +34,6 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/dbms/src/Parsers/ASTCreateRowPolicyQuery.cpp b/src/Parsers/ASTCreateRowPolicyQuery.cpp similarity index 95% rename from dbms/src/Parsers/ASTCreateRowPolicyQuery.cpp rename to src/Parsers/ASTCreateRowPolicyQuery.cpp index ac3d859e66f..9b36f5500c1 100644 --- a/dbms/src/Parsers/ASTCreateRowPolicyQuery.cpp +++ b/src/Parsers/ASTCreateRowPolicyQuery.cpp @@ -122,7 +122,7 @@ namespace String ASTCreateRowPolicyQuery::getID(char) const { - return "CREATE POLICY or ALTER POLICY query"; + return "CREATE ROW POLICY or ALTER ROW POLICY query"; } @@ -136,11 +136,11 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format { if (attach) { - settings.ostr << (settings.hilite ? hilite_keyword : "") << "ATTACH POLICY"; + settings.ostr << (settings.hilite ? 
hilite_keyword : "") << "ATTACH ROW POLICY"; } else { - settings.ostr << (settings.hilite ? hilite_keyword : "") << (alter ? "ALTER POLICY" : "CREATE POLICY") + settings.ostr << (settings.hilite ? hilite_keyword : "") << (alter ? "ALTER ROW POLICY" : "CREATE ROW POLICY") << (settings.hilite ? hilite_none : ""); } @@ -157,6 +157,8 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format settings.ostr << " " << backQuoteIfNeed(policy_name) << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? hilite_none : "") << (database.empty() ? String{} : backQuoteIfNeed(database) + ".") << table_name; + formatOnCluster(settings); + if (!new_policy_name.empty()) formatRenameTo(new_policy_name, settings); @@ -168,4 +170,11 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format if (roles && (!roles->empty() || alter)) formatToRoles(*roles, settings); } + + +void ASTCreateRowPolicyQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (roles) + roles->replaceCurrentUserTagWithName(current_user_name); +} } diff --git a/dbms/src/Parsers/ASTCreateRowPolicyQuery.h b/src/Parsers/ASTCreateRowPolicyQuery.h similarity index 80% rename from dbms/src/Parsers/ASTCreateRowPolicyQuery.h rename to src/Parsers/ASTCreateRowPolicyQuery.h index e58ed0ec46c..85ba674eeb0 100644 --- a/dbms/src/Parsers/ASTCreateRowPolicyQuery.h +++ b/src/Parsers/ASTCreateRowPolicyQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -25,7 +26,7 @@ class ASTExtendedRoleSet; * [WITH CHECK {condition | NONE}] [,...] * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] */ -class ASTCreateRowPolicyQuery : public IAST +class ASTCreateRowPolicyQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -47,5 +48,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/dbms/src/Parsers/ASTCreateSettingsProfileQuery.cpp b/src/Parsers/ASTCreateSettingsProfileQuery.cpp similarity index 91% rename from dbms/src/Parsers/ASTCreateSettingsProfileQuery.cpp rename to src/Parsers/ASTCreateSettingsProfileQuery.cpp index a5a5556baf3..8db82b0e1cb 100644 --- a/dbms/src/Parsers/ASTCreateSettingsProfileQuery.cpp +++ b/src/Parsers/ASTCreateSettingsProfileQuery.cpp @@ -61,6 +61,8 @@ void ASTCreateSettingsProfileQuery::formatImpl(const FormatSettings & format, Fo format.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(format); + if (!new_name.empty()) formatRenameTo(new_name, format); @@ -71,4 +73,10 @@ void ASTCreateSettingsProfileQuery::formatImpl(const FormatSettings & format, Fo formatToRoles(*to_roles, format); } + +void ASTCreateSettingsProfileQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (to_roles) + to_roles->replaceCurrentUserTagWithName(current_user_name); +} } diff --git a/src/Parsers/ASTCreateSettingsProfileQuery.h b/src/Parsers/ASTCreateSettingsProfileQuery.h new file mode 100644 index 00000000000..eabe1ba441b --- /dev/null +++ b/src/Parsers/ASTCreateSettingsProfileQuery.h @@ -0,0 +1,45 @@ +#pragma once + +#include +#include + + +namespace DB +{ +class ASTSettingsProfileElements; +class ASTExtendedRoleSet; + + +/** CREATE SETTINGS PROFILE [IF NOT 
EXISTS | OR REPLACE] name + * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + * + * ALTER SETTINGS PROFILE [IF EXISTS] name + * [RENAME TO new_name] + * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + */ +class ASTCreateSettingsProfileQuery : public IAST, public ASTQueryWithOnCluster +{ +public: + bool alter = false; + bool attach = false; + + bool if_exists = false; + bool if_not_exists = false; + bool or_replace = false; + + String name; + String new_name; + + std::shared_ptr settings; + + std::shared_ptr to_roles; + + String getID(char) const override; + ASTPtr clone() const override; + void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } +}; +} diff --git a/dbms/src/Parsers/ASTCreateUserQuery.cpp b/src/Parsers/ASTCreateUserQuery.cpp similarity index 98% rename from dbms/src/Parsers/ASTCreateUserQuery.cpp rename to src/Parsers/ASTCreateUserQuery.cpp index 0631d08ae74..c8e2a76dfa2 100644 --- a/dbms/src/Parsers/ASTCreateUserQuery.cpp +++ b/src/Parsers/ASTCreateUserQuery.cpp @@ -109,7 +109,7 @@ namespace { if (std::exchange(need_comma, true)) settings.ostr << ", "; - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "NAME REGEXP " << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "REGEXP " << (settings.hilite ? IAST::hilite_none : ""); bool need_comma2 = false; for (const auto & host_regexp : name_regexps) { @@ -184,6 +184,8 @@ void ASTCreateUserQuery::formatImpl(const FormatSettings & format, FormatState & format.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(format); + if (!new_name.empty()) formatRenameTo(new_name, format); @@ -195,7 +197,7 @@ void ASTCreateUserQuery::formatImpl(const FormatSettings & format, FormatState & if (add_hosts) formatHosts("ADD", *add_hosts, format); if (remove_hosts) - formatHosts("REMOVE", *remove_hosts, format); + formatHosts("DROP", *remove_hosts, format); if (default_roles) formatDefaultRoles(*default_roles, format); diff --git a/dbms/src/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h similarity index 76% rename from dbms/src/Parsers/ASTCreateUserQuery.h rename to src/Parsers/ASTCreateUserQuery.h index fc2aa0121ed..54dc51d783b 100644 --- a/dbms/src/Parsers/ASTCreateUserQuery.h +++ b/src/Parsers/ASTCreateUserQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -12,18 +13,18 @@ class ASTSettingsProfileElements; /** CREATE USER [IF NOT EXISTS | OR REPLACE] name * [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}] - * [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...]] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] 
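 * (Hypothetical example of the new spelling: CREATE USER mira IDENTIFIED BY 'password' HOST REGEXP 'mira[0-9]+', formerly written HOST NAME REGEXP.)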
* * ALTER USER [IF EXISTS] name * [RENAME TO new_name] * [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] - * [[ADD|REMOVE] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ -class ASTCreateUserQuery : public IAST +class ASTCreateUserQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -49,5 +50,6 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/dbms/src/Parsers/ASTDictionary.cpp b/src/Parsers/ASTDictionary.cpp similarity index 95% rename from dbms/src/Parsers/ASTDictionary.cpp rename to src/Parsers/ASTDictionary.cpp index 5c477c2aab7..9ff600333c5 100644 --- a/dbms/src/Parsers/ASTDictionary.cpp +++ b/src/Parsers/ASTDictionary.cpp @@ -24,6 +24,7 @@ void ASTDictionaryRange::formatImpl(const FormatSettings & settings, << "(" << (settings.hilite ? hilite_keyword : "") << "MIN " + << (settings.hilite ? hilite_none : "") << min_attr_name << " " << (settings.hilite ? hilite_keyword : "") << "MAX " @@ -52,6 +53,7 @@ void ASTDictionaryLifetime::formatImpl(const FormatSettings & settings, << "(" << (settings.hilite ? hilite_keyword : "") << "MIN " + << (settings.hilite ? hilite_none : "") << min_sec << " " << (settings.hilite ? hilite_keyword : "") << "MAX " @@ -86,7 +88,9 @@ void ASTDictionaryLayout::formatImpl(const FormatSettings & settings, << Poco::toUpper(layout_type) << (settings.hilite ? hilite_none : ""); - settings.ostr << "("; + if (has_brackets) + settings.ostr << "("; + if (parameter) { settings.ostr << (settings.hilite ? 
hilite_keyword : "") @@ -96,7 +100,10 @@ void ASTDictionaryLayout::formatImpl(const FormatSettings & settings, parameter->second->formatImpl(settings, state, frame); } - settings.ostr << ")"; + + if (has_brackets) + settings.ostr << ")"; + settings.ostr << ")"; } diff --git a/dbms/src/Parsers/ASTDictionary.h b/src/Parsers/ASTDictionary.h similarity index 97% rename from dbms/src/Parsers/ASTDictionary.h rename to src/Parsers/ASTDictionary.h index e146162cbdf..6982381f14d 100644 --- a/dbms/src/Parsers/ASTDictionary.h +++ b/src/Parsers/ASTDictionary.h @@ -33,6 +33,8 @@ public: String layout_type; /// optional parameter (size_in_cells) std::optional parameter; + /// has brackets after layout type + bool has_brackets = true; String getID(char) const override { return "Dictionary layout"; } diff --git a/dbms/src/Parsers/ASTDictionaryAttributeDeclaration.cpp b/src/Parsers/ASTDictionaryAttributeDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ASTDictionaryAttributeDeclaration.cpp rename to src/Parsers/ASTDictionaryAttributeDeclaration.cpp diff --git a/dbms/src/Parsers/ASTDictionaryAttributeDeclaration.h b/src/Parsers/ASTDictionaryAttributeDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTDictionaryAttributeDeclaration.h rename to src/Parsers/ASTDictionaryAttributeDeclaration.h diff --git a/dbms/src/Parsers/ASTDropAccessEntityQuery.cpp b/src/Parsers/ASTDropAccessEntityQuery.cpp similarity index 98% rename from dbms/src/Parsers/ASTDropAccessEntityQuery.cpp rename to src/Parsers/ASTDropAccessEntityQuery.cpp index 3896128ceb5..06a820bfbb5 100644 --- a/dbms/src/Parsers/ASTDropAccessEntityQuery.cpp +++ b/src/Parsers/ASTDropAccessEntityQuery.cpp @@ -75,5 +75,7 @@ void ASTDropAccessEntityQuery::formatImpl(const FormatSettings & settings, Forma settings.ostr << ' ' << backQuoteIfNeed(name); } } + + formatOnCluster(settings); } } diff --git a/dbms/src/Parsers/ASTDropAccessEntityQuery.h b/src/Parsers/ASTDropAccessEntityQuery.h similarity index 75% rename from dbms/src/Parsers/ASTDropAccessEntityQuery.h rename to src/Parsers/ASTDropAccessEntityQuery.h index 5f0b46bd896..a3b358dcfb9 100644 --- a/dbms/src/Parsers/ASTDropAccessEntityQuery.h +++ b/src/Parsers/ASTDropAccessEntityQuery.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -13,7 +14,7 @@ namespace DB * DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] * DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] 
*/ -class ASTDropAccessEntityQuery : public IAST +class ASTDropAccessEntityQuery : public IAST, public ASTQueryWithOnCluster { public: enum class Kind @@ -34,5 +35,6 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; }
diff --git a/dbms/src/Parsers/ASTDropQuery.cpp b/src/Parsers/ASTDropQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTDropQuery.cpp rename to src/Parsers/ASTDropQuery.cpp
diff --git a/dbms/src/Parsers/ASTDropQuery.h b/src/Parsers/ASTDropQuery.h similarity index 100% rename from dbms/src/Parsers/ASTDropQuery.h rename to src/Parsers/ASTDropQuery.h
diff --git a/dbms/src/Parsers/ASTEnumElement.h b/src/Parsers/ASTEnumElement.h similarity index 100% rename from dbms/src/Parsers/ASTEnumElement.h rename to src/Parsers/ASTEnumElement.h
diff --git a/dbms/src/Parsers/ASTExplainQuery.h b/src/Parsers/ASTExplainQuery.h similarity index 100% rename from dbms/src/Parsers/ASTExplainQuery.h rename to src/Parsers/ASTExplainQuery.h
diff --git a/dbms/src/Parsers/ASTExpressionList.cpp b/src/Parsers/ASTExpressionList.cpp similarity index 100% rename from dbms/src/Parsers/ASTExpressionList.cpp rename to src/Parsers/ASTExpressionList.cpp
diff --git a/dbms/src/Parsers/ASTExpressionList.h b/src/Parsers/ASTExpressionList.h similarity index 100% rename from dbms/src/Parsers/ASTExpressionList.h rename to src/Parsers/ASTExpressionList.h
diff --git a/dbms/src/Parsers/ASTExtendedRoleSet.cpp b/src/Parsers/ASTExtendedRoleSet.cpp similarity index 87% rename from dbms/src/Parsers/ASTExtendedRoleSet.cpp rename to src/Parsers/ASTExtendedRoleSet.cpp index 3ac1052897d..9eb06a6a101 100644 --- a/dbms/src/Parsers/ASTExtendedRoleSet.cpp +++ b/src/Parsers/ASTExtendedRoleSet.cpp @@ -72,4 +72,21 @@ void ASTExtendedRoleSet::formatImpl(const FormatSettings & settings, FormatState } } } + + +void ASTExtendedRoleSet::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (current_user) + { + names.push_back(current_user_name); + current_user = false; + } + + if (except_current_user) + { + except_names.push_back(current_user_name); + except_current_user = false; + } +} + }
diff --git a/dbms/src/Parsers/ASTExtendedRoleSet.h b/src/Parsers/ASTExtendedRoleSet.h similarity index 91% rename from dbms/src/Parsers/ASTExtendedRoleSet.h rename to src/Parsers/ASTExtendedRoleSet.h index 84190211087..8d619e5d6a0 100644 --- a/dbms/src/Parsers/ASTExtendedRoleSet.h +++ b/src/Parsers/ASTExtendedRoleSet.h @@ -18,6 +18,7 @@ public: bool id_mode = false; /// If true then `names` and `except_names` keep UUIDs, not names.
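The `.cpp` hunk above shows what `replaceCurrentUserTagWithName` does mechanically; here is the intended effect on a throwaway instance (the user name is invented for the sketch):

```cpp
#include <Parsers/ASTExtendedRoleSet.h>

void example()
{
    // "GRANT ... TO CURRENT_USER" parsed on behalf of user "alice":
    ASTExtendedRoleSet set;
    set.current_user = true;                      // the CURRENT_USER tag from the query
    set.replaceCurrentUserTagWithName("alice");   // resolved when the query is executed
    // Now set.current_user == false and set.names == {"alice"}.
}
```

This resolution matters for the new ON CLUSTER support: presumably the statement forwarded to other nodes must name the user explicitly rather than re-resolve CURRENT_USER remotely.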
bool empty() const { return names.empty() && !current_user && !all; } + void replaceCurrentUserTagWithName(const String & current_user_name); String getID(char) const override { return "ExtendedRoleSet"; } ASTPtr clone() const override { return std::make_shared(*this); } diff --git a/dbms/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp similarity index 100% rename from dbms/src/Parsers/ASTFunction.cpp rename to src/Parsers/ASTFunction.cpp diff --git a/dbms/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h similarity index 100% rename from dbms/src/Parsers/ASTFunction.h rename to src/Parsers/ASTFunction.h diff --git a/dbms/src/Parsers/ASTFunctionWithKeyValueArguments.cpp b/src/Parsers/ASTFunctionWithKeyValueArguments.cpp similarity index 92% rename from dbms/src/Parsers/ASTFunctionWithKeyValueArguments.cpp rename to src/Parsers/ASTFunctionWithKeyValueArguments.cpp index 8fdeb90c25b..0843bddac7d 100644 --- a/dbms/src/Parsers/ASTFunctionWithKeyValueArguments.cpp +++ b/src/Parsers/ASTFunctionWithKeyValueArguments.cpp @@ -64,9 +64,9 @@ ASTPtr ASTFunctionWithKeyValueArguments::clone() const void ASTFunctionWithKeyValueArguments::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { - settings.ostr << (settings.hilite ? hilite_keyword : "") << Poco::toUpper(name) << (settings.hilite ? hilite_none : "") << "("; + settings.ostr << (settings.hilite ? hilite_keyword : "") << Poco::toUpper(name) << (settings.hilite ? hilite_none : "") << (has_brackets ? "(" : ""); elements->formatImpl(settings, state, frame); - settings.ostr << ")"; + settings.ostr << (has_brackets ? ")" : ""); settings.ostr << (settings.hilite ? hilite_none : ""); } diff --git a/dbms/src/Parsers/ASTFunctionWithKeyValueArguments.h b/src/Parsers/ASTFunctionWithKeyValueArguments.h similarity index 89% rename from dbms/src/Parsers/ASTFunctionWithKeyValueArguments.h rename to src/Parsers/ASTFunctionWithKeyValueArguments.h index e09e477417f..3f31b4a7c5b 100644 --- a/dbms/src/Parsers/ASTFunctionWithKeyValueArguments.h +++ b/src/Parsers/ASTFunctionWithKeyValueArguments.h @@ -44,6 +44,13 @@ public: String name; /// Expression list ASTPtr elements; + /// Has brackets around arguments + bool has_brackets; + + ASTFunctionWithKeyValueArguments(bool has_brackets_ = true) + : has_brackets(has_brackets_) + { + } public: String getID(char delim) const override; diff --git a/dbms/src/Parsers/ASTGrantQuery.cpp b/src/Parsers/ASTGrantQuery.cpp similarity index 92% rename from dbms/src/Parsers/ASTGrantQuery.cpp rename to src/Parsers/ASTGrantQuery.cpp index 94521d790f2..f91a5416011 100644 --- a/dbms/src/Parsers/ASTGrantQuery.cpp +++ b/src/Parsers/ASTGrantQuery.cpp @@ -122,19 +122,22 @@ ASTPtr ASTGrantQuery::clone() const void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (attach ? "ATTACH " : "") << ((kind == Kind::GRANT) ? "GRANT" : "REVOKE") - << (settings.hilite ? IAST::hilite_none : "") << " "; + << (settings.hilite ? IAST::hilite_none : ""); + + formatOnCluster(settings); if (kind == Kind::REVOKE) { if (grant_option) - settings.ostr << (settings.hilite ? hilite_keyword : "") << "GRANT OPTION FOR " << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << " GRANT OPTION FOR" << (settings.hilite ? hilite_none : ""); else if (admin_option) - settings.ostr << (settings.hilite ? 
hilite_keyword : "") << "ADMIN OPTION FOR " << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << " ADMIN OPTION FOR" << (settings.hilite ? hilite_none : ""); } if ((!!roles + !access_rights_elements.empty()) != 1) throw Exception("Either roles or access rights elements should be set", ErrorCodes::LOGICAL_ERROR); + settings.ostr << " "; if (roles) roles->format(settings); else @@ -150,4 +153,11 @@ void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, F settings.ostr << (settings.hilite ? hilite_keyword : "") << " WITH ADMIN OPTION" << (settings.hilite ? hilite_none : ""); } } + + +void ASTGrantQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (to_roles) + to_roles->replaceCurrentUserTagWithName(current_user_name); +} } diff --git a/dbms/src/Parsers/ASTGrantQuery.h b/src/Parsers/ASTGrantQuery.h similarity index 79% rename from dbms/src/Parsers/ASTGrantQuery.h rename to src/Parsers/ASTGrantQuery.h index 95b5f0b8448..e1ad8dc5dc5 100644 --- a/dbms/src/Parsers/ASTGrantQuery.h +++ b/src/Parsers/ASTGrantQuery.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -15,7 +16,7 @@ class ASTExtendedRoleSet; * GRANT role [,...] TO {user_name | role_name | CURRENT_USER} [,...] [WITH ADMIN OPTION] * REVOKE [ADMIN OPTION FOR] role [,...] FROM {user_name | role_name | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] */ -class ASTGrantQuery : public IAST +class ASTGrantQuery : public IAST, public ASTQueryWithOnCluster { public: enum class Kind @@ -34,5 +35,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/dbms/src/Parsers/ASTIdentifier.cpp b/src/Parsers/ASTIdentifier.cpp similarity index 100% rename from dbms/src/Parsers/ASTIdentifier.cpp rename to src/Parsers/ASTIdentifier.cpp diff --git a/dbms/src/Parsers/ASTIdentifier.h b/src/Parsers/ASTIdentifier.h similarity index 95% rename from dbms/src/Parsers/ASTIdentifier.h rename to src/Parsers/ASTIdentifier.h index 9e28a1461ca..c13c2c3f977 100644 --- a/dbms/src/Parsers/ASTIdentifier.h +++ b/src/Parsers/ASTIdentifier.h @@ -40,6 +40,8 @@ public: bool isShort() const { return name_parts.empty() || name == name_parts.back(); } void setShortName(const String & new_name); + + /// Restore name field from name_parts in case it was cropped by analyzer but we need a full form for future (re)analyze. 
void restoreCompoundName(); const String & shortName() const diff --git a/dbms/src/Parsers/ASTIndexDeclaration.h b/src/Parsers/ASTIndexDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTIndexDeclaration.h rename to src/Parsers/ASTIndexDeclaration.h diff --git a/dbms/src/Parsers/ASTInsertQuery.cpp b/src/Parsers/ASTInsertQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTInsertQuery.cpp rename to src/Parsers/ASTInsertQuery.cpp diff --git a/dbms/src/Parsers/ASTInsertQuery.h b/src/Parsers/ASTInsertQuery.h similarity index 100% rename from dbms/src/Parsers/ASTInsertQuery.h rename to src/Parsers/ASTInsertQuery.h diff --git a/dbms/src/Parsers/ASTKillQueryQuery.cpp b/src/Parsers/ASTKillQueryQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTKillQueryQuery.cpp rename to src/Parsers/ASTKillQueryQuery.cpp diff --git a/dbms/src/Parsers/ASTKillQueryQuery.h b/src/Parsers/ASTKillQueryQuery.h similarity index 100% rename from dbms/src/Parsers/ASTKillQueryQuery.h rename to src/Parsers/ASTKillQueryQuery.h diff --git a/dbms/src/Parsers/ASTLiteral.cpp b/src/Parsers/ASTLiteral.cpp similarity index 100% rename from dbms/src/Parsers/ASTLiteral.cpp rename to src/Parsers/ASTLiteral.cpp diff --git a/dbms/src/Parsers/ASTLiteral.h b/src/Parsers/ASTLiteral.h similarity index 100% rename from dbms/src/Parsers/ASTLiteral.h rename to src/Parsers/ASTLiteral.h diff --git a/dbms/src/Parsers/ASTNameTypePair.h b/src/Parsers/ASTNameTypePair.h similarity index 100% rename from dbms/src/Parsers/ASTNameTypePair.h rename to src/Parsers/ASTNameTypePair.h diff --git a/dbms/src/Parsers/ASTOptimizeQuery.cpp b/src/Parsers/ASTOptimizeQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTOptimizeQuery.cpp rename to src/Parsers/ASTOptimizeQuery.cpp diff --git a/dbms/src/Parsers/ASTOptimizeQuery.h b/src/Parsers/ASTOptimizeQuery.h similarity index 100% rename from dbms/src/Parsers/ASTOptimizeQuery.h rename to src/Parsers/ASTOptimizeQuery.h diff --git a/dbms/src/Parsers/ASTOrderByElement.cpp b/src/Parsers/ASTOrderByElement.cpp similarity index 100% rename from dbms/src/Parsers/ASTOrderByElement.cpp rename to src/Parsers/ASTOrderByElement.cpp diff --git a/dbms/src/Parsers/ASTOrderByElement.h b/src/Parsers/ASTOrderByElement.h similarity index 100% rename from dbms/src/Parsers/ASTOrderByElement.h rename to src/Parsers/ASTOrderByElement.h diff --git a/dbms/src/Parsers/ASTPartition.cpp b/src/Parsers/ASTPartition.cpp similarity index 100% rename from dbms/src/Parsers/ASTPartition.cpp rename to src/Parsers/ASTPartition.cpp diff --git a/dbms/src/Parsers/ASTPartition.h b/src/Parsers/ASTPartition.h similarity index 100% rename from dbms/src/Parsers/ASTPartition.h rename to src/Parsers/ASTPartition.h diff --git a/dbms/src/Parsers/ASTQualifiedAsterisk.cpp b/src/Parsers/ASTQualifiedAsterisk.cpp similarity index 100% rename from dbms/src/Parsers/ASTQualifiedAsterisk.cpp rename to src/Parsers/ASTQualifiedAsterisk.cpp diff --git a/dbms/src/Parsers/ASTQualifiedAsterisk.h b/src/Parsers/ASTQualifiedAsterisk.h similarity index 100% rename from dbms/src/Parsers/ASTQualifiedAsterisk.h rename to src/Parsers/ASTQualifiedAsterisk.h diff --git a/dbms/src/Parsers/ASTQueryParameter.cpp b/src/Parsers/ASTQueryParameter.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryParameter.cpp rename to src/Parsers/ASTQueryParameter.cpp diff --git a/dbms/src/Parsers/ASTQueryParameter.h b/src/Parsers/ASTQueryParameter.h similarity index 100% rename from dbms/src/Parsers/ASTQueryParameter.h rename to 
src/Parsers/ASTQueryParameter.h diff --git a/dbms/src/Parsers/ASTQueryWithOnCluster.cpp b/src/Parsers/ASTQueryWithOnCluster.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOnCluster.cpp rename to src/Parsers/ASTQueryWithOnCluster.cpp diff --git a/dbms/src/Parsers/ASTQueryWithOnCluster.h b/src/Parsers/ASTQueryWithOnCluster.h similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOnCluster.h rename to src/Parsers/ASTQueryWithOnCluster.h diff --git a/dbms/src/Parsers/ASTQueryWithOutput.cpp b/src/Parsers/ASTQueryWithOutput.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOutput.cpp rename to src/Parsers/ASTQueryWithOutput.cpp diff --git a/dbms/src/Parsers/ASTQueryWithOutput.h b/src/Parsers/ASTQueryWithOutput.h similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOutput.h rename to src/Parsers/ASTQueryWithOutput.h diff --git a/dbms/src/Parsers/ASTQueryWithTableAndOutput.cpp b/src/Parsers/ASTQueryWithTableAndOutput.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryWithTableAndOutput.cpp rename to src/Parsers/ASTQueryWithTableAndOutput.cpp diff --git a/dbms/src/Parsers/ASTQueryWithTableAndOutput.h b/src/Parsers/ASTQueryWithTableAndOutput.h similarity index 100% rename from dbms/src/Parsers/ASTQueryWithTableAndOutput.h rename to src/Parsers/ASTQueryWithTableAndOutput.h diff --git a/dbms/src/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h similarity index 100% rename from dbms/src/Parsers/ASTRenameQuery.h rename to src/Parsers/ASTRenameQuery.h diff --git a/dbms/src/Parsers/ASTSampleRatio.cpp b/src/Parsers/ASTSampleRatio.cpp similarity index 100% rename from dbms/src/Parsers/ASTSampleRatio.cpp rename to src/Parsers/ASTSampleRatio.cpp diff --git a/dbms/src/Parsers/ASTSampleRatio.h b/src/Parsers/ASTSampleRatio.h similarity index 100% rename from dbms/src/Parsers/ASTSampleRatio.h rename to src/Parsers/ASTSampleRatio.h diff --git a/dbms/src/Parsers/ASTSelectQuery.cpp b/src/Parsers/ASTSelectQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSelectQuery.cpp rename to src/Parsers/ASTSelectQuery.cpp diff --git a/dbms/src/Parsers/ASTSelectQuery.h b/src/Parsers/ASTSelectQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSelectQuery.h rename to src/Parsers/ASTSelectQuery.h diff --git a/dbms/src/Parsers/ASTSelectWithUnionQuery.cpp b/src/Parsers/ASTSelectWithUnionQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSelectWithUnionQuery.cpp rename to src/Parsers/ASTSelectWithUnionQuery.cpp diff --git a/dbms/src/Parsers/ASTSelectWithUnionQuery.h b/src/Parsers/ASTSelectWithUnionQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSelectWithUnionQuery.h rename to src/Parsers/ASTSelectWithUnionQuery.h diff --git a/dbms/src/Parsers/ASTSetQuery.h b/src/Parsers/ASTSetQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSetQuery.h rename to src/Parsers/ASTSetQuery.h diff --git a/dbms/src/Parsers/ASTSetRoleQuery.cpp b/src/Parsers/ASTSetRoleQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSetRoleQuery.cpp rename to src/Parsers/ASTSetRoleQuery.cpp diff --git a/dbms/src/Parsers/ASTSetRoleQuery.h b/src/Parsers/ASTSetRoleQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSetRoleQuery.h rename to src/Parsers/ASTSetRoleQuery.h diff --git a/dbms/src/Parsers/ASTSettingsProfileElement.cpp b/src/Parsers/ASTSettingsProfileElement.cpp similarity index 88% rename from dbms/src/Parsers/ASTSettingsProfileElement.cpp rename to src/Parsers/ASTSettingsProfileElement.cpp index 
b3f4032d14c..24f1aa60813 100644 --- a/dbms/src/Parsers/ASTSettingsProfileElement.cpp +++ b/src/Parsers/ASTSettingsProfileElement.cpp @@ -25,7 +25,8 @@ void ASTSettingsProfileElement::formatImpl(const FormatSettings & settings, Form { if (!parent_profile.empty()) { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "PROFILE " << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (use_inherit_keyword ? "INHERIT" : "PROFILE") << " " + << (settings.hilite ? IAST::hilite_none : ""); formatProfileNameOrID(parent_profile, id_mode, settings); return; } @@ -85,4 +86,11 @@ void ASTSettingsProfileElements::formatImpl(const FormatSettings & settings, For } } + +void ASTSettingsProfileElements::setUseInheritKeyword(bool use_inherit_keyword_) +{ + for (auto & element : elements) + element->use_inherit_keyword = use_inherit_keyword_; +} + } diff --git a/dbms/src/Parsers/ASTSettingsProfileElement.h b/src/Parsers/ASTSettingsProfileElement.h similarity index 89% rename from dbms/src/Parsers/ASTSettingsProfileElement.h rename to src/Parsers/ASTSettingsProfileElement.h index 0470b51cf85..ee1ee28c383 100644 --- a/dbms/src/Parsers/ASTSettingsProfileElement.h +++ b/src/Parsers/ASTSettingsProfileElement.h @@ -19,6 +19,7 @@ public: Field max_value; std::optional<bool> readonly; bool id_mode = false; /// If true then `parent_profile` keeps UUID, not a name. + bool use_inherit_keyword = false; /// If true then this element is a part of ASTCreateSettingsProfileQuery. bool empty() const { return parent_profile.empty() && name.empty(); } @@ -41,5 +42,7 @@ public: String getID(char) const override { return "SettingsProfileElements"; } ASTPtr clone() const override { return std::make_shared<ASTSettingsProfileElements>(*this); } void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + + void setUseInheritKeyword(bool use_inherit_keyword_); }; } diff --git a/dbms/src/Parsers/ASTShowCreateAccessEntityQuery.cpp b/src/Parsers/ASTShowCreateAccessEntityQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowCreateAccessEntityQuery.cpp rename to src/Parsers/ASTShowCreateAccessEntityQuery.cpp diff --git a/dbms/src/Parsers/ASTShowCreateAccessEntityQuery.h b/src/Parsers/ASTShowCreateAccessEntityQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowCreateAccessEntityQuery.h rename to src/Parsers/ASTShowCreateAccessEntityQuery.h diff --git a/dbms/src/Parsers/ASTShowGrantsQuery.cpp b/src/Parsers/ASTShowGrantsQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowGrantsQuery.cpp rename to src/Parsers/ASTShowGrantsQuery.cpp diff --git a/dbms/src/Parsers/ASTShowGrantsQuery.h b/src/Parsers/ASTShowGrantsQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowGrantsQuery.h rename to src/Parsers/ASTShowGrantsQuery.h diff --git a/dbms/src/Parsers/ASTShowProcesslistQuery.h b/src/Parsers/ASTShowProcesslistQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowProcesslistQuery.h rename to src/Parsers/ASTShowProcesslistQuery.h diff --git a/dbms/src/Parsers/ASTShowQuotasQuery.cpp b/src/Parsers/ASTShowQuotasQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowQuotasQuery.cpp rename to src/Parsers/ASTShowQuotasQuery.cpp diff --git a/dbms/src/Parsers/ASTShowQuotasQuery.h b/src/Parsers/ASTShowQuotasQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowQuotasQuery.h rename to src/Parsers/ASTShowQuotasQuery.h diff --git a/dbms/src/Parsers/ASTShowRowPoliciesQuery.cpp
b/src/Parsers/ASTShowRowPoliciesQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowRowPoliciesQuery.cpp rename to src/Parsers/ASTShowRowPoliciesQuery.cpp diff --git a/dbms/src/Parsers/ASTShowRowPoliciesQuery.h b/src/Parsers/ASTShowRowPoliciesQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowRowPoliciesQuery.h rename to src/Parsers/ASTShowRowPoliciesQuery.h diff --git a/dbms/src/Parsers/ASTShowTablesQuery.cpp b/src/Parsers/ASTShowTablesQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowTablesQuery.cpp rename to src/Parsers/ASTShowTablesQuery.cpp diff --git a/dbms/src/Parsers/ASTShowTablesQuery.h b/src/Parsers/ASTShowTablesQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowTablesQuery.h rename to src/Parsers/ASTShowTablesQuery.h diff --git a/dbms/src/Parsers/ASTSubquery.cpp b/src/Parsers/ASTSubquery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSubquery.cpp rename to src/Parsers/ASTSubquery.cpp diff --git a/dbms/src/Parsers/ASTSubquery.h b/src/Parsers/ASTSubquery.h similarity index 100% rename from dbms/src/Parsers/ASTSubquery.h rename to src/Parsers/ASTSubquery.h diff --git a/dbms/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSystemQuery.cpp rename to src/Parsers/ASTSystemQuery.cpp diff --git a/dbms/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSystemQuery.h rename to src/Parsers/ASTSystemQuery.h diff --git a/dbms/src/Parsers/ASTTTLElement.cpp b/src/Parsers/ASTTTLElement.cpp similarity index 100% rename from dbms/src/Parsers/ASTTTLElement.cpp rename to src/Parsers/ASTTTLElement.cpp diff --git a/dbms/src/Parsers/ASTTTLElement.h b/src/Parsers/ASTTTLElement.h similarity index 100% rename from dbms/src/Parsers/ASTTTLElement.h rename to src/Parsers/ASTTTLElement.h diff --git a/dbms/src/Parsers/ASTTablesInSelectQuery.cpp b/src/Parsers/ASTTablesInSelectQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTTablesInSelectQuery.cpp rename to src/Parsers/ASTTablesInSelectQuery.cpp diff --git a/dbms/src/Parsers/ASTTablesInSelectQuery.h b/src/Parsers/ASTTablesInSelectQuery.h similarity index 100% rename from dbms/src/Parsers/ASTTablesInSelectQuery.h rename to src/Parsers/ASTTablesInSelectQuery.h diff --git a/dbms/src/Parsers/ASTUseQuery.h b/src/Parsers/ASTUseQuery.h similarity index 100% rename from dbms/src/Parsers/ASTUseQuery.h rename to src/Parsers/ASTUseQuery.h diff --git a/dbms/src/Parsers/ASTWatchQuery.h b/src/Parsers/ASTWatchQuery.h similarity index 100% rename from dbms/src/Parsers/ASTWatchQuery.h rename to src/Parsers/ASTWatchQuery.h diff --git a/dbms/src/Parsers/ASTWithAlias.cpp b/src/Parsers/ASTWithAlias.cpp similarity index 100% rename from dbms/src/Parsers/ASTWithAlias.cpp rename to src/Parsers/ASTWithAlias.cpp diff --git a/dbms/src/Parsers/ASTWithAlias.h b/src/Parsers/ASTWithAlias.h similarity index 100% rename from dbms/src/Parsers/ASTWithAlias.h rename to src/Parsers/ASTWithAlias.h diff --git a/src/Parsers/CMakeLists.txt b/src/Parsers/CMakeLists.txt new file mode 100644 index 00000000000..6e3ab9decb7 --- /dev/null +++ b/src/Parsers/CMakeLists.txt @@ -0,0 +1,14 @@ +include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) +add_headers_and_sources(clickhouse_parsers .) 
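# Explanatory note, not in the original file: add_headers_and_sources() comes from the
# dbms_glob_sources.cmake helper included above; it is assumed to glob this directory's
# *.cpp/*.h files into the clickhouse_parsers_sources / clickhouse_parsers_headers
# variables that the add_library() call below consumes.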
+add_library(clickhouse_parsers ${clickhouse_parsers_headers} ${clickhouse_parsers_sources}) +target_link_libraries(clickhouse_parsers PUBLIC clickhouse_common_io) +target_include_directories(clickhouse_parsers PUBLIC ${DBMS_INCLUDE_DIR}) + +if (USE_DEBUG_HELPERS) + set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Parsers/iostream_debug_helpers.h") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") +endif () + +if(ENABLE_TESTS) + add_subdirectory(tests) +endif() diff --git a/dbms/src/Parsers/CommonParsers.cpp b/src/Parsers/CommonParsers.cpp similarity index 100% rename from dbms/src/Parsers/CommonParsers.cpp rename to src/Parsers/CommonParsers.cpp diff --git a/dbms/src/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h similarity index 100% rename from dbms/src/Parsers/CommonParsers.h rename to src/Parsers/CommonParsers.h diff --git a/dbms/src/Parsers/DumpASTNode.h b/src/Parsers/DumpASTNode.h similarity index 100% rename from dbms/src/Parsers/DumpASTNode.h rename to src/Parsers/DumpASTNode.h diff --git a/dbms/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp similarity index 99% rename from dbms/src/Parsers/ExpressionElementParsers.cpp rename to src/Parsers/ExpressionElementParsers.cpp index c4e43829da9..30fa4a2e9fb 100644 --- a/dbms/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -1400,18 +1400,30 @@ bool ParserFunctionWithKeyValueArguments::parseImpl(Pos & pos, ASTPtr & node, Ex if (!id_parser.parse(pos, identifier, expected)) return false; - if (pos.get().type != TokenType::OpeningRoundBracket) - return false; - ++pos; + bool left_bracket_found = false; + if (pos.get().type != TokenType::OpeningRoundBracket) + { + if (!brackets_can_be_omitted) + return false; + } + else + { + ++pos; + left_bracket_found = true; + } + if (!pairs_list_parser.parse(pos, expr_list_args, expected)) return false; - if (pos.get().type != TokenType::ClosingRoundBracket) - return false; + if (left_bracket_found) + { + if (pos.get().type != TokenType::ClosingRoundBracket) + return false; + ++pos; + } - ++pos; - auto function = std::make_shared<ASTFunctionWithKeyValueArguments>(); + auto function = std::make_shared<ASTFunctionWithKeyValueArguments>(left_bracket_found); function->name = Poco::toLower(typeid_cast<const ASTIdentifier &>(*identifier.get()).name); function->elements = expr_list_args; function->children.push_back(function->elements); diff --git a/dbms/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h similarity index 98% rename from dbms/src/Parsers/ExpressionElementParsers.h rename to src/Parsers/ExpressionElementParsers.h index b9d8d5db42c..b02b29fb2e5 100644 --- a/dbms/src/Parsers/ExpressionElementParsers.h +++ b/src/Parsers/ExpressionElementParsers.h @@ -346,9 +346,16 @@ protected: */ class ParserFunctionWithKeyValueArguments : public IParserBase { +public: + ParserFunctionWithKeyValueArguments(bool brackets_can_be_omitted_ = false) + : brackets_can_be_omitted(brackets_can_be_omitted_) {} protected: + const char * getName() const override { return "function with key-value arguments"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + + /// brackets for function arguments can be omitted + bool brackets_can_be_omitted; }; /** Data type or table engine, possibly with parameters.
For example, UInt8 or see examples from ParserIdentifierWithParameters diff --git a/dbms/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp similarity index 100% rename from dbms/src/Parsers/ExpressionListParsers.cpp rename to src/Parsers/ExpressionListParsers.cpp diff --git a/dbms/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h similarity index 100% rename from dbms/src/Parsers/ExpressionListParsers.h rename to src/Parsers/ExpressionListParsers.h diff --git a/dbms/src/Parsers/IAST.cpp b/src/Parsers/IAST.cpp similarity index 100% rename from dbms/src/Parsers/IAST.cpp rename to src/Parsers/IAST.cpp diff --git a/dbms/src/Parsers/IAST.h b/src/Parsers/IAST.h similarity index 100% rename from dbms/src/Parsers/IAST.h rename to src/Parsers/IAST.h diff --git a/dbms/src/Parsers/IAST_fwd.h b/src/Parsers/IAST_fwd.h similarity index 100% rename from dbms/src/Parsers/IAST_fwd.h rename to src/Parsers/IAST_fwd.h diff --git a/dbms/src/Parsers/IParser.h b/src/Parsers/IParser.h similarity index 98% rename from dbms/src/Parsers/IParser.h rename to src/Parsers/IParser.h index 925140bd25e..69c199c201e 100644 --- a/dbms/src/Parsers/IParser.h +++ b/src/Parsers/IParser.h @@ -56,8 +56,6 @@ public: /// Token iterator augmented with depth information. This allows to control recursion depth. struct Pos : TokenIterator { - using TokenIterator::TokenIterator; - uint32_t depth = 0; uint32_t max_depth = 0; @@ -126,7 +124,7 @@ public: return parse(pos, node, expected); } - virtual ~IParser() {} + virtual ~IParser() = default; }; using ParserPtr = std::unique_ptr; diff --git a/dbms/src/Parsers/IParserBase.cpp b/src/Parsers/IParserBase.cpp similarity index 100% rename from dbms/src/Parsers/IParserBase.cpp rename to src/Parsers/IParserBase.cpp diff --git a/dbms/src/Parsers/IParserBase.h b/src/Parsers/IParserBase.h similarity index 100% rename from dbms/src/Parsers/IParserBase.h rename to src/Parsers/IParserBase.h diff --git a/dbms/src/Parsers/IdentifierQuotingStyle.h b/src/Parsers/IdentifierQuotingStyle.h similarity index 100% rename from dbms/src/Parsers/IdentifierQuotingStyle.h rename to src/Parsers/IdentifierQuotingStyle.h diff --git a/dbms/src/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp similarity index 100% rename from dbms/src/Parsers/Lexer.cpp rename to src/Parsers/Lexer.cpp diff --git a/dbms/src/Parsers/Lexer.h b/src/Parsers/Lexer.h similarity index 100% rename from dbms/src/Parsers/Lexer.h rename to src/Parsers/Lexer.h diff --git a/dbms/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp similarity index 97% rename from dbms/src/Parsers/ParserAlterQuery.cpp rename to src/Parsers/ParserAlterQuery.cpp index 1b647cf067c..623bca440bb 100644 --- a/dbms/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -27,6 +27,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_drop_column("DROP COLUMN"); ParserKeyword s_clear_column("CLEAR COLUMN"); ParserKeyword s_modify_column("MODIFY COLUMN"); + ParserKeyword s_rename_column("RENAME COLUMN"); ParserKeyword s_comment_column("COMMENT COLUMN"); ParserKeyword s_modify_order_by("MODIFY ORDER BY"); ParserKeyword s_modify_ttl("MODIFY TTL"); @@ -77,6 +78,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_delete_where("DELETE WHERE"); ParserKeyword s_update("UPDATE"); ParserKeyword s_where("WHERE"); + ParserKeyword s_to("TO"); ParserCompoundIdentifier parser_name; ParserStringLiteral 
parser_string_literal; @@ -121,6 +123,22 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected command->type = ASTAlterCommand::ADD_COLUMN; } + else if (s_rename_column.ignore(pos, expected)) + { + if (s_if_exists.ignore(pos, expected)) + command->if_exists = true; + + if (!parser_name.parse(pos, command->column, expected)) + return false; + + if (!s_to.ignore(pos, expected)) + return false; + + if (!parser_name.parse(pos, command->rename_to, expected)) + return false; + + command->type = ASTAlterCommand::RENAME_COLUMN; + } else if (s_drop_partition.ignore(pos, expected)) { if (!parser_partition.parse(pos, command->partition, expected)) diff --git a/dbms/src/Parsers/ParserAlterQuery.h b/src/Parsers/ParserAlterQuery.h similarity index 97% rename from dbms/src/Parsers/ParserAlterQuery.h rename to src/Parsers/ParserAlterQuery.h index 61a25b9b387..a0981c77ca6 100644 --- a/dbms/src/Parsers/ParserAlterQuery.h +++ b/src/Parsers/ParserAlterQuery.h @@ -12,6 +12,7 @@ namespace DB * [DROP COLUMN [IF EXISTS] col_to_drop, ...] * [CLEAR COLUMN [IF EXISTS] col_to_clear [IN PARTITION partition],] * [MODIFY COLUMN [IF EXISTS] col_to_modify type, ...] + * [RENAME COLUMN [IF EXISTS] col_name TO col_name] * [MODIFY PRIMARY KEY (a, b, c...)] * [MODIFY SETTING setting_name=setting_value, ...] * [COMMENT COLUMN [IF EXISTS] col_name string] diff --git a/dbms/src/Parsers/ParserCase.cpp b/src/Parsers/ParserCase.cpp similarity index 100% rename from dbms/src/Parsers/ParserCase.cpp rename to src/Parsers/ParserCase.cpp diff --git a/dbms/src/Parsers/ParserCase.h b/src/Parsers/ParserCase.h similarity index 100% rename from dbms/src/Parsers/ParserCase.h rename to src/Parsers/ParserCase.h diff --git a/dbms/src/Parsers/ParserCheckQuery.cpp b/src/Parsers/ParserCheckQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCheckQuery.cpp rename to src/Parsers/ParserCheckQuery.cpp diff --git a/dbms/src/Parsers/ParserCheckQuery.h b/src/Parsers/ParserCheckQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCheckQuery.h rename to src/Parsers/ParserCheckQuery.h diff --git a/dbms/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCreateQuery.cpp rename to src/Parsers/ParserCreateQuery.cpp diff --git a/dbms/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateQuery.h rename to src/Parsers/ParserCreateQuery.h diff --git a/dbms/src/Parsers/ParserCreateQuotaQuery.cpp b/src/Parsers/ParserCreateQuotaQuery.cpp similarity index 85% rename from dbms/src/Parsers/ParserCreateQuotaQuery.cpp rename to src/Parsers/ParserCreateQuotaQuery.cpp index 9a6afec6941..6007d6206ec 100644 --- a/dbms/src/Parsers/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/ParserCreateQuotaQuery.cpp @@ -63,12 +63,22 @@ namespace }); } - bool parseLimit(IParserBase::Pos & pos, Expected & expected, ResourceType & resource_type, ResourceAmount & max) + bool parseLimit(IParserBase::Pos & pos, Expected & expected, bool first, ResourceType & resource_type, ResourceAmount & max) { return IParserBase::wrapParseImpl(pos, [&] { - if (!ParserKeyword{"MAX"}.ignore(pos, expected)) - return false; + if (first) + { + if (!ParserKeyword{"MAX"}.ignore(pos, expected)) + return false; + } + else + { + if (!ParserToken{TokenType::Comma}.ignore(pos, expected)) + return false; + + ParserKeyword{"MAX"}.ignore(pos, expected); + } bool resource_type_set = false; for (auto rt : 
ext::range_with_static_cast(Quota::MAX_RESOURCE_TYPE)) @@ -83,9 +93,6 @@ namespace if (!resource_type_set) return false; - if (!ParserToken{TokenType::Equals}.ignore(pos, expected)) - return false; - ASTPtr max_ast; if (ParserNumber{}.parse(pos, max_ast, expected)) { @@ -95,10 +102,6 @@ namespace else max = applyVisitor(FieldVisitorConvertToNumber(), max_field); } - else if (ParserKeyword{"ANY"}.ignore(pos, expected)) - { - max = Quota::UNLIMITED; - } else return false; @@ -106,18 +109,7 @@ namespace }); } - bool parseCommaAndLimit(IParserBase::Pos & pos, Expected & expected, ResourceType & resource_type, ResourceAmount & max) - { - return IParserBase::wrapParseImpl(pos, [&] - { - if (!ParserToken{TokenType::Comma}.ignore(pos, expected)) - return false; - - return parseLimit(pos, expected, resource_type, max); - }); - } - - bool parseLimits(IParserBase::Pos & pos, Expected & expected, bool alter, ASTCreateQuotaQuery::Limits & limits) + bool parseLimits(IParserBase::Pos & pos, Expected & expected, ASTCreateQuotaQuery::Limits & limits) { return IParserBase::wrapParseImpl(pos, [&] { @@ -142,23 +134,22 @@ namespace new_limits.duration = std::chrono::seconds(static_cast(num_intervals * interval_kind.toAvgSeconds())); - if (alter && ParserKeyword{"UNSET TRACKING"}.ignore(pos, expected)) + if (ParserKeyword{"NO LIMITS"}.ignore(pos, expected)) { - new_limits.unset_tracking = true; + new_limits.drop = true; } - else if (ParserKeyword{"SET TRACKING"}.ignore(pos, expected) || ParserKeyword{"TRACKING"}.ignore(pos, expected)) + else if (ParserKeyword{"TRACKING ONLY"}.ignore(pos, expected)) { } else { - ParserKeyword{"SET"}.ignore(pos, expected); ResourceType resource_type; ResourceAmount max; - if (!parseLimit(pos, expected, resource_type, max)) + if (!parseLimit(pos, expected, true, resource_type, max)) return false; new_limits.max[resource_type] = max; - while (parseCommaAndLimit(pos, expected, resource_type, max)) + while (parseLimit(pos, expected, false, resource_type, max)) new_limits.max[resource_type] = max; } @@ -167,7 +158,7 @@ namespace }); } - bool parseAllLimits(IParserBase::Pos & pos, Expected & expected, bool alter, std::vector & all_limits) + bool parseAllLimits(IParserBase::Pos & pos, Expected & expected, std::vector & all_limits) { return IParserBase::wrapParseImpl(pos, [&] { @@ -175,7 +166,7 @@ namespace do { ASTCreateQuotaQuery::Limits limits; - if (!parseLimits(pos, expected, alter, limits)) + if (!parseLimits(pos, expected, limits)) { all_limits.resize(old_size); return false; @@ -199,6 +190,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -241,6 +240,7 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe String new_name; std::optional key_type; std::vector all_limits; + String cluster; while (true) { @@ -250,7 +250,10 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!key_type && parseKeyType(pos, expected, key_type)) continue; - if (parseAllLimits(pos, expected, alter, all_limits)) + if (parseAllLimits(pos, expected, all_limits)) + continue; + + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) continue; break; @@ -259,6 +262,9 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe std::shared_ptr roles; parseToRoles(pos, 
expected, attach_mode, roles); + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + auto query = std::make_shared<ASTCreateQuotaQuery>(); node = query; @@ -266,6 +272,7 @@ query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = std::move(new_name); query->key_type = key_type; diff --git a/src/Parsers/ParserCreateQuotaQuery.h b/src/Parsers/ParserCreateQuotaQuery.h new file mode 100644 index 00000000000..786c8292b15 --- /dev/null +++ b/src/Parsers/ParserCreateQuotaQuery.h @@ -0,0 +1,36 @@ +#pragma once + +#include <Parsers/IParserBase.h> + + +namespace DB +{ +/** Parses queries like + * CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name + * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] + * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] + * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + * + * ALTER QUOTA [IF EXISTS] name + * [RENAME TO new_name] + * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] + * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} } [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] + * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + */ +class ParserCreateQuotaQuery : public IParserBase +{ +public: + ParserCreateQuotaQuery & enableAttachMode(bool enable_) { attach_mode = enable_; return *this; } + +protected: + const char * getName() const override { return "CREATE QUOTA or ALTER QUOTA query"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + +private: + bool attach_mode = false; +}; +} diff --git a/dbms/src/Parsers/ParserCreateRoleQuery.cpp b/src/Parsers/ParserCreateRoleQuery.cpp similarity index 87% rename from dbms/src/Parsers/ParserCreateRoleQuery.cpp rename to src/Parsers/ParserCreateRoleQuery.cpp index e2b42c976b4..2a6f2dd2c90 100644 --- a/dbms/src/Parsers/ParserCreateRoleQuery.cpp +++ b/src/Parsers/ParserCreateRoleQuery.cpp @@ -41,6 +41,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -82,6 +90,8 @@ bool ParserCreateRoleQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec String new_name; std::shared_ptr<ASTSettingsProfileElements> settings; + String cluster; + while (true) { if (alter && parseRenameTo(pos, expected, new_name)) @@ -90,6 +100,9 @@ bool ParserCreateRoleQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (parseSettings(pos, expected, attach_mode, settings)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + break; } @@ -101,6 +114,7 @@ query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = std::move(new_name);
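/// Illustrative note, not part of the patch: since parseOnCluster() is accepted anywhere
/// in the property loop above, a query such as
///     CREATE ROLE admin ON CLUSTER test_cluster SETTINGS max_memory_usage = 100000000
/// (role and cluster names are made up) parses with query->cluster == "test_cluster";
/// the ON CLUSTER machinery can then replicate the DDL and strip the clause on each host
/// via getRewrittenASTWithoutOnCluster().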
query->settings = std::move(settings); diff --git a/dbms/src/Parsers/ParserCreateRoleQuery.h b/src/Parsers/ParserCreateRoleQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateRoleQuery.h rename to src/Parsers/ParserCreateRoleQuery.h diff --git a/dbms/src/Parsers/ParserCreateRowPolicyQuery.cpp b/src/Parsers/ParserCreateRowPolicyQuery.cpp similarity index 92% rename from dbms/src/Parsers/ParserCreateRowPolicyQuery.cpp rename to src/Parsers/ParserCreateRowPolicyQuery.cpp index ab0fbc87e12..b6840f0ed6a 100644 --- a/dbms/src/Parsers/ParserCreateRowPolicyQuery.cpp +++ b/src/Parsers/ParserCreateRowPolicyQuery.cpp @@ -83,14 +83,13 @@ namespace static constexpr char delete_op[] = "DELETE"; std::vector ops; - bool keyword_for = false; if (ParserKeyword{"FOR"}.ignore(pos, expected)) { - keyword_for = true; do { if (ParserKeyword{"SELECT"}.ignore(pos, expected)) ops.push_back(select_op); +#if 0 /// INSERT, UPDATE, DELETE are not supported yet else if (ParserKeyword{"INSERT"}.ignore(pos, expected)) ops.push_back(insert_op); else if (ParserKeyword{"UPDATE"}.ignore(pos, expected)) @@ -100,6 +99,7 @@ namespace else if (ParserKeyword{"ALL"}.ignore(pos, expected)) { } +#endif else return false; } @@ -109,9 +109,11 @@ namespace if (ops.empty()) { ops.push_back(select_op); +#if 0 /// INSERT, UPDATE, DELETE are not supported yet ops.push_back(insert_op); ops.push_back(update_op); ops.push_back(delete_op); +#endif } std::optional filter; @@ -123,14 +125,15 @@ namespace if (!parseConditionalExpression(pos, expected, filter)) return false; } +#if 0 /// INSERT, UPDATE, DELETE are not supported yet if (ParserKeyword{"WITH CHECK"}.ignore(pos, expected)) { keyword_with_check = true; if (!parseConditionalExpression(pos, expected, check)) return false; } - - if (!keyword_for && !keyword_using && !keyword_with_check) +#endif + if (!keyword_using && !keyword_with_check) return false; if (filter && !check && !alter) @@ -200,6 +203,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -246,6 +257,7 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & String new_policy_name; std::optional is_restrictive; std::vector> conditions; + String cluster; while (true) { @@ -258,12 +270,18 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & if (parseMultipleConditions(pos, expected, alter, conditions)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + break; } std::shared_ptr roles; parseToRoles(pos, expected, attach_mode, roles); + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + auto query = std::make_shared(); node = query; @@ -272,6 +290,7 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name_parts = std::move(name_parts); query->new_policy_name = std::move(new_policy_name); query->is_restrictive = is_restrictive; diff --git a/dbms/src/Parsers/ParserCreateRowPolicyQuery.h b/src/Parsers/ParserCreateRowPolicyQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateRowPolicyQuery.h rename to src/Parsers/ParserCreateRowPolicyQuery.h diff --git 
a/dbms/src/Parsers/ParserCreateSettingsProfileQuery.cpp b/src/Parsers/ParserCreateSettingsProfileQuery.cpp similarity index 87% rename from dbms/src/Parsers/ParserCreateSettingsProfileQuery.cpp rename to src/Parsers/ParserCreateSettingsProfileQuery.cpp index c7c9e064f6c..83d0f0c1d91 100644 --- a/dbms/src/Parsers/ParserCreateSettingsProfileQuery.cpp +++ b/src/Parsers/ParserCreateSettingsProfileQuery.cpp @@ -33,7 +33,7 @@ namespace return false; ASTPtr new_settings_ast; - if (!ParserSettingsProfileElements{}.useIDMode(id_mode).parse(pos, new_settings_ast, expected)) + if (!ParserSettingsProfileElements{}.useIDMode(id_mode).enableInheritKeyword(true).parse(pos, new_settings_ast, expected)) return false; if (!settings) @@ -57,6 +57,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -98,6 +106,8 @@ bool ParserCreateSettingsProfileQuery::parseImpl(Pos & pos, ASTPtr & node, Expec String new_name; std::shared_ptr settings; + String cluster; + while (true) { if (alter && parseRenameTo(pos, expected, new_name)) @@ -106,12 +116,18 @@ bool ParserCreateSettingsProfileQuery::parseImpl(Pos & pos, ASTPtr & node, Expec if (parseSettings(pos, expected, attach_mode, settings)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + break; } std::shared_ptr to_roles; parseToRoles(pos, expected, attach_mode, to_roles); + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + auto query = std::make_shared(); node = query; @@ -120,6 +136,7 @@ bool ParserCreateSettingsProfileQuery::parseImpl(Pos & pos, ASTPtr & node, Expec query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = std::move(new_name); query->settings = std::move(settings); diff --git a/dbms/src/Parsers/ParserCreateSettingsProfileQuery.h b/src/Parsers/ParserCreateSettingsProfileQuery.h similarity index 86% rename from dbms/src/Parsers/ParserCreateSettingsProfileQuery.h rename to src/Parsers/ParserCreateSettingsProfileQuery.h index 6797fc884fa..073a8ca75ae 100644 --- a/dbms/src/Parsers/ParserCreateSettingsProfileQuery.h +++ b/src/Parsers/ParserCreateSettingsProfileQuery.h @@ -7,11 +7,11 @@ namespace DB { /** Parses queries like * CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name - * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...] * * ALTER SETTINGS PROFILE [IF EXISTS] name * [RENAME TO new_name] - * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...] 
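 * Example (illustrative, not from the original header; `restricted` is a made-up name):
 *   CREATE SETTINGS PROFILE IF NOT EXISTS restricted
 *     SETTINGS max_memory_usage = 10000000 MIN 1000000 MAX 20000000 READONLY, INHERIT 'default'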
*/ class ParserCreateSettingsProfileQuery : public IParserBase { diff --git a/dbms/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp similarity index 92% rename from dbms/src/Parsers/ParserCreateUserQuery.cpp rename to src/Parsers/ParserCreateUserQuery.cpp index 57f50c34116..76a06a0282f 100644 --- a/dbms/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes namespace { - bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name, String & new_host_pattern) + bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name, std::optional & new_host_pattern) { return IParserBase::wrapParseImpl(pos, [&] { @@ -166,7 +166,7 @@ namespace { new_hosts.addLocalHost(); } - else if (ParserKeyword{"NAME REGEXP"}.ignore(pos, expected)) + else if (ParserKeyword{"REGEXP"}.ignore(pos, expected)) { ASTPtr ast; if (!ParserList{std::make_unique(), std::make_unique(TokenType::Comma), false}.parse(pos, ast, expected)) @@ -250,6 +250,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -286,18 +294,19 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec } String name; - String host_pattern; + std::optional host_pattern; if (!parseUserName(pos, expected, name, host_pattern)) return false; String new_name; - String new_host_pattern; + std::optional new_host_pattern; std::optional authentication; std::optional hosts; std::optional add_hosts; std::optional remove_hosts; std::shared_ptr default_roles; std::shared_ptr settings; + String cluster; while (true) { @@ -313,12 +322,15 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!default_roles && parseDefaultRoles(pos, expected, attach_mode, default_roles)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + if (alter) { if (new_name.empty() && parseRenameTo(pos, expected, new_name, new_host_pattern)) continue; - if (parseHosts(pos, expected, "ADD", add_hosts) || parseHosts(pos, expected, "REMOVE", remove_hosts)) + if (parseHosts(pos, expected, "ADD", add_hosts) || parseHosts(pos, expected, "DROP", remove_hosts)) continue; } @@ -327,10 +339,10 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!hosts) { - if (!alter) - hosts.emplace().addLikePattern(host_pattern); - else if (alter && !new_name.empty()) - hosts.emplace().addLikePattern(new_host_pattern); + if (!alter && host_pattern) + hosts.emplace().addLikePattern(*host_pattern); + else if (alter && new_host_pattern) + hosts.emplace().addLikePattern(*new_host_pattern); } auto query = std::make_shared(); @@ -341,6 +353,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = std::move(new_name); query->authentication = std::move(authentication); diff --git a/dbms/src/Parsers/ParserCreateUserQuery.h b/src/Parsers/ParserCreateUserQuery.h similarity index 81% rename from dbms/src/Parsers/ParserCreateUserQuery.h rename to src/Parsers/ParserCreateUserQuery.h index bd6ab74d53f..d609894a7ec 100644 
--- a/dbms/src/Parsers/ParserCreateUserQuery.h +++ b/src/Parsers/ParserCreateUserQuery.h @@ -8,13 +8,13 @@ namespace DB /** Parses queries like * CREATE USER [IF NOT EXISTS | OR REPLACE] name * [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}] - * [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] * * ALTER USER [IF EXISTS] name * [RENAME TO new_name] * [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] - * [[ADD|REMOVE] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ class ParserCreateUserQuery : public IParserBase diff --git a/dbms/src/Parsers/ParserDescribeTableQuery.cpp b/src/Parsers/ParserDescribeTableQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserDescribeTableQuery.cpp rename to src/Parsers/ParserDescribeTableQuery.cpp diff --git a/dbms/src/Parsers/ParserDescribeTableQuery.h b/src/Parsers/ParserDescribeTableQuery.h similarity index 100% rename from dbms/src/Parsers/ParserDescribeTableQuery.h rename to src/Parsers/ParserDescribeTableQuery.h diff --git a/dbms/src/Parsers/ParserDictionary.cpp b/src/Parsers/ParserDictionary.cpp similarity index 96% rename from dbms/src/Parsers/ParserDictionary.cpp rename to src/Parsers/ParserDictionary.cpp index ca9c2ad031a..2680c700296 100644 --- a/dbms/src/Parsers/ParserDictionary.cpp +++ b/src/Parsers/ParserDictionary.cpp @@ -109,7 +109,7 @@ bool ParserDictionaryRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expec bool ParserDictionaryLayout::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - ParserFunctionWithKeyValueArguments key_value_func_p; + ParserFunctionWithKeyValueArguments key_value_func_p(/* brackets_can_be_omitted = */ true); ASTPtr ast_func; if (!key_value_func_p.parse(pos, ast_func, expected)) return false; @@ -121,12 +121,17 @@ bool ParserDictionaryLayout::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; res->layout_type = func.name; + res->has_brackets = func.has_brackets; const ASTExpressionList & type_expr_list = func.elements->as(); /// there are no layout with more than 1 parameter if (type_expr_list.children.size() > 1) return false; + /// if layout has params than brackets must be specified + if (!type_expr_list.children.empty() && !res->has_brackets) + return false; + if (type_expr_list.children.size() == 1) { const ASTPair * pair = dynamic_cast(type_expr_list.children.at(0).get()); diff --git a/dbms/src/Parsers/ParserDictionary.h b/src/Parsers/ParserDictionary.h similarity index 100% rename from dbms/src/Parsers/ParserDictionary.h rename to src/Parsers/ParserDictionary.h diff --git a/dbms/src/Parsers/ParserDictionaryAttributeDeclaration.cpp b/src/Parsers/ParserDictionaryAttributeDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ParserDictionaryAttributeDeclaration.cpp rename to src/Parsers/ParserDictionaryAttributeDeclaration.cpp diff 
--git a/dbms/src/Parsers/ParserDictionaryAttributeDeclaration.h b/src/Parsers/ParserDictionaryAttributeDeclaration.h similarity index 100% rename from dbms/src/Parsers/ParserDictionaryAttributeDeclaration.h rename to src/Parsers/ParserDictionaryAttributeDeclaration.h diff --git a/dbms/src/Parsers/ParserDropAccessEntityQuery.cpp b/src/Parsers/ParserDropAccessEntityQuery.cpp similarity index 94% rename from dbms/src/Parsers/ParserDropAccessEntityQuery.cpp rename to src/Parsers/ParserDropAccessEntityQuery.cpp index 23e18d7d32c..ecda1691240 100644 --- a/dbms/src/Parsers/ParserDropAccessEntityQuery.cpp +++ b/src/Parsers/ParserDropAccessEntityQuery.cpp @@ -117,10 +117,18 @@ bool ParserDropAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & return false; } + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + auto query = std::make_shared(kind); node = query; query->if_exists = if_exists; + query->cluster = std::move(cluster); query->names = std::move(names); query->row_policies_names = std::move(row_policies_names); diff --git a/dbms/src/Parsers/ParserDropAccessEntityQuery.h b/src/Parsers/ParserDropAccessEntityQuery.h similarity index 100% rename from dbms/src/Parsers/ParserDropAccessEntityQuery.h rename to src/Parsers/ParserDropAccessEntityQuery.h diff --git a/dbms/src/Parsers/ParserDropQuery.cpp b/src/Parsers/ParserDropQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserDropQuery.cpp rename to src/Parsers/ParserDropQuery.cpp diff --git a/dbms/src/Parsers/ParserDropQuery.h b/src/Parsers/ParserDropQuery.h similarity index 100% rename from dbms/src/Parsers/ParserDropQuery.h rename to src/Parsers/ParserDropQuery.h diff --git a/dbms/src/Parsers/ParserExtendedRoleSet.cpp b/src/Parsers/ParserExtendedRoleSet.cpp similarity index 100% rename from dbms/src/Parsers/ParserExtendedRoleSet.cpp rename to src/Parsers/ParserExtendedRoleSet.cpp diff --git a/dbms/src/Parsers/ParserExtendedRoleSet.h b/src/Parsers/ParserExtendedRoleSet.h similarity index 100% rename from dbms/src/Parsers/ParserExtendedRoleSet.h rename to src/Parsers/ParserExtendedRoleSet.h diff --git a/dbms/src/Parsers/ParserGrantQuery.cpp b/src/Parsers/ParserGrantQuery.cpp similarity index 94% rename from dbms/src/Parsers/ParserGrantQuery.cpp rename to src/Parsers/ParserGrantQuery.cpp index f8533c27d88..64dde8f6524 100644 --- a/dbms/src/Parsers/ParserGrantQuery.cpp +++ b/src/Parsers/ParserGrantQuery.cpp @@ -17,15 +17,6 @@ namespace ErrorCodes namespace { - bool parseRoundBrackets(IParser::Pos & pos, Expected & expected) - { - return IParserBase::wrapParseImpl(pos, [&] - { - return ParserToken{TokenType::OpeningRoundBracket}.ignore(pos, expected) - && ParserToken{TokenType::ClosingRoundBracket}.ignore(pos, expected); - }); - } - bool parseAccessFlags(IParser::Pos & pos, Expected & expected, AccessFlags & access_flags) { static constexpr auto is_one_of_access_type_words = [](IParser::Pos & pos_) @@ -63,7 +54,6 @@ namespace return false; } - parseRoundBrackets(pos, expected); return true; }); } @@ -247,6 +237,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -269,6 +267,10 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) else return false; + 
String cluster; + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + bool grant_option = false; bool admin_option = false; if (kind == Kind::REVOKE) @@ -284,10 +286,16 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!parseAccessRightsElements(pos, expected, elements) && !parseRoles(pos, expected, attach, roles)) return false; + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + std::shared_ptr to_roles; if (!parseToRoles(pos, expected, kind, to_roles)) return false; + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + if (kind == Kind::GRANT) { if (ParserKeyword{"WITH GRANT OPTION"}.ignore(pos, expected)) @@ -296,6 +304,9 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) admin_option = true; } + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + if (grant_option && roles) throw Exception("GRANT OPTION should be specified for access types", ErrorCodes::SYNTAX_ERROR); if (admin_option && !elements.empty()) @@ -306,6 +317,7 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) query->kind = kind; query->attach = attach; + query->cluster = std::move(cluster); query->access_rights_elements = std::move(elements); query->roles = std::move(roles); query->to_roles = std::move(to_roles); diff --git a/dbms/src/Parsers/ParserGrantQuery.h b/src/Parsers/ParserGrantQuery.h similarity index 100% rename from dbms/src/Parsers/ParserGrantQuery.h rename to src/Parsers/ParserGrantQuery.h diff --git a/dbms/src/Parsers/ParserInsertQuery.cpp b/src/Parsers/ParserInsertQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserInsertQuery.cpp rename to src/Parsers/ParserInsertQuery.cpp diff --git a/dbms/src/Parsers/ParserInsertQuery.h b/src/Parsers/ParserInsertQuery.h similarity index 100% rename from dbms/src/Parsers/ParserInsertQuery.h rename to src/Parsers/ParserInsertQuery.h diff --git a/dbms/src/Parsers/ParserKillQueryQuery.cpp b/src/Parsers/ParserKillQueryQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserKillQueryQuery.cpp rename to src/Parsers/ParserKillQueryQuery.cpp diff --git a/dbms/src/Parsers/ParserKillQueryQuery.h b/src/Parsers/ParserKillQueryQuery.h similarity index 100% rename from dbms/src/Parsers/ParserKillQueryQuery.h rename to src/Parsers/ParserKillQueryQuery.h diff --git a/dbms/src/Parsers/ParserOptimizeQuery.cpp b/src/Parsers/ParserOptimizeQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserOptimizeQuery.cpp rename to src/Parsers/ParserOptimizeQuery.cpp diff --git a/dbms/src/Parsers/ParserOptimizeQuery.h b/src/Parsers/ParserOptimizeQuery.h similarity index 100% rename from dbms/src/Parsers/ParserOptimizeQuery.h rename to src/Parsers/ParserOptimizeQuery.h diff --git a/dbms/src/Parsers/ParserPartition.cpp b/src/Parsers/ParserPartition.cpp similarity index 100% rename from dbms/src/Parsers/ParserPartition.cpp rename to src/Parsers/ParserPartition.cpp diff --git a/dbms/src/Parsers/ParserPartition.h b/src/Parsers/ParserPartition.h similarity index 100% rename from dbms/src/Parsers/ParserPartition.h rename to src/Parsers/ParserPartition.h diff --git a/dbms/src/Parsers/ParserQuery.cpp b/src/Parsers/ParserQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserQuery.cpp rename to src/Parsers/ParserQuery.cpp diff --git a/dbms/src/Parsers/ParserQuery.h b/src/Parsers/ParserQuery.h similarity index 100% rename from dbms/src/Parsers/ParserQuery.h rename to src/Parsers/ParserQuery.h diff --git 
a/dbms/src/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp similarity index 100% rename from dbms/src/Parsers/ParserQueryWithOutput.cpp rename to src/Parsers/ParserQueryWithOutput.cpp diff --git a/dbms/src/Parsers/ParserQueryWithOutput.h b/src/Parsers/ParserQueryWithOutput.h similarity index 100% rename from dbms/src/Parsers/ParserQueryWithOutput.h rename to src/Parsers/ParserQueryWithOutput.h diff --git a/dbms/src/Parsers/ParserRenameQuery.cpp b/src/Parsers/ParserRenameQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserRenameQuery.cpp rename to src/Parsers/ParserRenameQuery.cpp diff --git a/dbms/src/Parsers/ParserRenameQuery.h b/src/Parsers/ParserRenameQuery.h similarity index 100% rename from dbms/src/Parsers/ParserRenameQuery.h rename to src/Parsers/ParserRenameQuery.h diff --git a/dbms/src/Parsers/ParserSampleRatio.cpp b/src/Parsers/ParserSampleRatio.cpp similarity index 100% rename from dbms/src/Parsers/ParserSampleRatio.cpp rename to src/Parsers/ParserSampleRatio.cpp diff --git a/dbms/src/Parsers/ParserSampleRatio.h b/src/Parsers/ParserSampleRatio.h similarity index 100% rename from dbms/src/Parsers/ParserSampleRatio.h rename to src/Parsers/ParserSampleRatio.h diff --git a/dbms/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSelectQuery.cpp rename to src/Parsers/ParserSelectQuery.cpp diff --git a/dbms/src/Parsers/ParserSelectQuery.h b/src/Parsers/ParserSelectQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSelectQuery.h rename to src/Parsers/ParserSelectQuery.h diff --git a/dbms/src/Parsers/ParserSelectWithUnionQuery.cpp b/src/Parsers/ParserSelectWithUnionQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSelectWithUnionQuery.cpp rename to src/Parsers/ParserSelectWithUnionQuery.cpp diff --git a/dbms/src/Parsers/ParserSelectWithUnionQuery.h b/src/Parsers/ParserSelectWithUnionQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSelectWithUnionQuery.h rename to src/Parsers/ParserSelectWithUnionQuery.h diff --git a/dbms/src/Parsers/ParserSetQuery.cpp b/src/Parsers/ParserSetQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSetQuery.cpp rename to src/Parsers/ParserSetQuery.cpp diff --git a/dbms/src/Parsers/ParserSetQuery.h b/src/Parsers/ParserSetQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSetQuery.h rename to src/Parsers/ParserSetQuery.h diff --git a/dbms/src/Parsers/ParserSetRoleQuery.cpp b/src/Parsers/ParserSetRoleQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSetRoleQuery.cpp rename to src/Parsers/ParserSetRoleQuery.cpp diff --git a/dbms/src/Parsers/ParserSetRoleQuery.h b/src/Parsers/ParserSetRoleQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSetRoleQuery.h rename to src/Parsers/ParserSetRoleQuery.h diff --git a/dbms/src/Parsers/ParserSettingsProfileElement.cpp b/src/Parsers/ParserSettingsProfileElement.cpp similarity index 90% rename from dbms/src/Parsers/ParserSettingsProfileElement.cpp rename to src/Parsers/ParserSettingsProfileElement.cpp index 06fa58fde4e..31bc339f544 100644 --- a/dbms/src/Parsers/ParserSettingsProfileElement.cpp +++ b/src/Parsers/ParserSettingsProfileElement.cpp @@ -108,7 +108,8 @@ bool ParserSettingsProfileElement::parseImpl(Pos & pos, ASTPtr & node, Expected Field max_value; std::optional readonly; - if (ParserKeyword{"PROFILE"}.ignore(pos, expected)) + if (ParserKeyword{"PROFILE"}.ignore(pos, expected) || + 
(enable_inherit_keyword && ParserKeyword{"INHERIT"}.ignore(pos, expected))) { if (!parseProfileNameOrID(pos, expected, id_mode, parent_profile)) return false; @@ -120,9 +121,15 @@ bool ParserSettingsProfileElement::parseImpl(Pos & pos, ASTPtr & node, Expected return false; name = getIdentifierName(name_ast); + bool has_value_or_constraint = false; while (parseValue(pos, expected, value) || parseMinMaxValue(pos, expected, min_value, max_value) || parseReadonlyOrWritableKeyword(pos, expected, readonly)) - ; + { + has_value_or_constraint = true; + } + + if (!has_value_or_constraint) + return false; } auto result = std::make_shared(); @@ -133,6 +140,7 @@ bool ParserSettingsProfileElement::parseImpl(Pos & pos, ASTPtr & node, Expected result->max_value = std::move(max_value); result->readonly = readonly; result->id_mode = id_mode; + result->use_inherit_keyword = enable_inherit_keyword; node = result; return true; } @@ -142,12 +150,15 @@ bool ParserSettingsProfileElements::parseImpl(Pos & pos, ASTPtr & node, Expected { std::vector> elements; - if (!ParserKeyword{"NONE"}.ignore(pos, expected)) + if (ParserKeyword{"NONE"}.ignore(pos, expected)) + { + } + else { do { ASTPtr ast; - if (!ParserSettingsProfileElement{}.useIDMode(id_mode).parse(pos, ast, expected)) + if (!ParserSettingsProfileElement{}.useIDMode(id_mode).enableInheritKeyword(enable_inherit_keyword).parse(pos, ast, expected)) return false; auto element = typeid_cast>(ast); elements.push_back(std::move(element)); diff --git a/src/Parsers/ParserSettingsProfileElement.h b/src/Parsers/ParserSettingsProfileElement.h new file mode 100644 index 00000000000..309c797e645 --- /dev/null +++ b/src/Parsers/ParserSettingsProfileElement.h @@ -0,0 +1,42 @@ +#pragma once + +#include + + +namespace DB +{ +/** Parses a string like this: + * {variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE]} | PROFILE 'profile_name' + */ +class ParserSettingsProfileElement : public IParserBase +{ +public: + ParserSettingsProfileElement & useIDMode(bool enable_) { id_mode = enable_; return *this; } + ParserSettingsProfileElement & enableInheritKeyword(bool enable_) { enable_inherit_keyword = enable_; return *this; } + +protected: + const char * getName() const override { return "SettingsProfileElement"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + +private: + bool id_mode = false; + bool enable_inherit_keyword = false; +}; + + +class ParserSettingsProfileElements : public IParserBase +{ +public: + ParserSettingsProfileElements & useIDMode(bool enable_) { id_mode = enable_; return *this; } + ParserSettingsProfileElements & enableInheritKeyword(bool enable_) { enable_inherit_keyword = enable_; return *this; } + +protected: + const char * getName() const override { return "SettingsProfileElements"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + +private: + bool id_mode = false; + bool enable_inherit_keyword = false; +}; + +} diff --git a/dbms/src/Parsers/ParserShowCreateAccessEntityQuery.cpp b/src/Parsers/ParserShowCreateAccessEntityQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowCreateAccessEntityQuery.cpp rename to src/Parsers/ParserShowCreateAccessEntityQuery.cpp diff --git a/dbms/src/Parsers/ParserShowCreateAccessEntityQuery.h b/src/Parsers/ParserShowCreateAccessEntityQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowCreateAccessEntityQuery.h rename to src/Parsers/ParserShowCreateAccessEntityQuery.h diff --git 
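A sketch of the INHERIT spelling enabled by enableInheritKeyword in the hunk above (setting and profile names are hypothetical). INHERIT is accepted as a synonym of PROFILE only when the flag is set, and per the new has_value_or_constraint check, a bare setting name with no value or constraint is rejected:

    #include <Parsers/ParserSettingsProfileElement.h>
    #include <Parsers/parseQuery.h>

    using namespace DB;

    ASTPtr parseProfileElements()
    {
        std::string input = "max_memory_usage = 10000000 READONLY, INHERIT 'base_profile'";
        ParserSettingsProfileElements parser;
        parser.enableInheritKeyword(true);
        return parseQuery(parser, input, 0, 0);
    }
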
a/dbms/src/Parsers/ParserShowGrantsQuery.cpp b/src/Parsers/ParserShowGrantsQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowGrantsQuery.cpp rename to src/Parsers/ParserShowGrantsQuery.cpp diff --git a/dbms/src/Parsers/ParserShowGrantsQuery.h b/src/Parsers/ParserShowGrantsQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowGrantsQuery.h rename to src/Parsers/ParserShowGrantsQuery.h diff --git a/dbms/src/Parsers/ParserShowProcesslistQuery.h b/src/Parsers/ParserShowProcesslistQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowProcesslistQuery.h rename to src/Parsers/ParserShowProcesslistQuery.h diff --git a/dbms/src/Parsers/ParserShowQuotasQuery.cpp b/src/Parsers/ParserShowQuotasQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowQuotasQuery.cpp rename to src/Parsers/ParserShowQuotasQuery.cpp diff --git a/dbms/src/Parsers/ParserShowQuotasQuery.h b/src/Parsers/ParserShowQuotasQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowQuotasQuery.h rename to src/Parsers/ParserShowQuotasQuery.h diff --git a/dbms/src/Parsers/ParserShowRowPoliciesQuery.cpp b/src/Parsers/ParserShowRowPoliciesQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowRowPoliciesQuery.cpp rename to src/Parsers/ParserShowRowPoliciesQuery.cpp diff --git a/dbms/src/Parsers/ParserShowRowPoliciesQuery.h b/src/Parsers/ParserShowRowPoliciesQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowRowPoliciesQuery.h rename to src/Parsers/ParserShowRowPoliciesQuery.h diff --git a/dbms/src/Parsers/ParserShowTablesQuery.cpp b/src/Parsers/ParserShowTablesQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowTablesQuery.cpp rename to src/Parsers/ParserShowTablesQuery.cpp diff --git a/dbms/src/Parsers/ParserShowTablesQuery.h b/src/Parsers/ParserShowTablesQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowTablesQuery.h rename to src/Parsers/ParserShowTablesQuery.h diff --git a/dbms/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSystemQuery.cpp rename to src/Parsers/ParserSystemQuery.cpp diff --git a/dbms/src/Parsers/ParserSystemQuery.h b/src/Parsers/ParserSystemQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSystemQuery.h rename to src/Parsers/ParserSystemQuery.h diff --git a/dbms/src/Parsers/ParserTablePropertiesQuery.cpp b/src/Parsers/ParserTablePropertiesQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserTablePropertiesQuery.cpp rename to src/Parsers/ParserTablePropertiesQuery.cpp diff --git a/dbms/src/Parsers/ParserTablePropertiesQuery.h b/src/Parsers/ParserTablePropertiesQuery.h similarity index 100% rename from dbms/src/Parsers/ParserTablePropertiesQuery.h rename to src/Parsers/ParserTablePropertiesQuery.h diff --git a/dbms/src/Parsers/ParserTablesInSelectQuery.cpp b/src/Parsers/ParserTablesInSelectQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserTablesInSelectQuery.cpp rename to src/Parsers/ParserTablesInSelectQuery.cpp diff --git a/dbms/src/Parsers/ParserTablesInSelectQuery.h b/src/Parsers/ParserTablesInSelectQuery.h similarity index 100% rename from dbms/src/Parsers/ParserTablesInSelectQuery.h rename to src/Parsers/ParserTablesInSelectQuery.h diff --git a/dbms/src/Parsers/ParserUnionQueryElement.cpp b/src/Parsers/ParserUnionQueryElement.cpp similarity index 100% rename from dbms/src/Parsers/ParserUnionQueryElement.cpp rename to 
src/Parsers/ParserUnionQueryElement.cpp diff --git a/dbms/src/Parsers/ParserUnionQueryElement.h b/src/Parsers/ParserUnionQueryElement.h similarity index 100% rename from dbms/src/Parsers/ParserUnionQueryElement.h rename to src/Parsers/ParserUnionQueryElement.h diff --git a/dbms/src/Parsers/ParserUseQuery.cpp b/src/Parsers/ParserUseQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserUseQuery.cpp rename to src/Parsers/ParserUseQuery.cpp diff --git a/dbms/src/Parsers/ParserUseQuery.h b/src/Parsers/ParserUseQuery.h similarity index 100% rename from dbms/src/Parsers/ParserUseQuery.h rename to src/Parsers/ParserUseQuery.h diff --git a/dbms/src/Parsers/ParserWatchQuery.cpp b/src/Parsers/ParserWatchQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserWatchQuery.cpp rename to src/Parsers/ParserWatchQuery.cpp diff --git a/dbms/src/Parsers/ParserWatchQuery.h b/src/Parsers/ParserWatchQuery.h similarity index 100% rename from dbms/src/Parsers/ParserWatchQuery.h rename to src/Parsers/ParserWatchQuery.h diff --git a/dbms/src/Parsers/StringRange.h b/src/Parsers/StringRange.h similarity index 100% rename from dbms/src/Parsers/StringRange.h rename to src/Parsers/StringRange.h diff --git a/dbms/src/Parsers/TablePropertiesQueriesASTs.h b/src/Parsers/TablePropertiesQueriesASTs.h similarity index 100% rename from dbms/src/Parsers/TablePropertiesQueriesASTs.h rename to src/Parsers/TablePropertiesQueriesASTs.h diff --git a/dbms/src/Parsers/TokenIterator.cpp b/src/Parsers/TokenIterator.cpp similarity index 100% rename from dbms/src/Parsers/TokenIterator.cpp rename to src/Parsers/TokenIterator.cpp diff --git a/dbms/src/Parsers/TokenIterator.h b/src/Parsers/TokenIterator.h similarity index 100% rename from dbms/src/Parsers/TokenIterator.h rename to src/Parsers/TokenIterator.h diff --git a/dbms/src/Parsers/formatAST.cpp b/src/Parsers/formatAST.cpp similarity index 100% rename from dbms/src/Parsers/formatAST.cpp rename to src/Parsers/formatAST.cpp diff --git a/dbms/src/Parsers/formatAST.h b/src/Parsers/formatAST.h similarity index 100% rename from dbms/src/Parsers/formatAST.h rename to src/Parsers/formatAST.h diff --git a/dbms/src/Parsers/iostream_debug_helpers.cpp b/src/Parsers/iostream_debug_helpers.cpp similarity index 100% rename from dbms/src/Parsers/iostream_debug_helpers.cpp rename to src/Parsers/iostream_debug_helpers.cpp diff --git a/dbms/src/Parsers/iostream_debug_helpers.h b/src/Parsers/iostream_debug_helpers.h similarity index 100% rename from dbms/src/Parsers/iostream_debug_helpers.h rename to src/Parsers/iostream_debug_helpers.h diff --git a/src/Parsers/makeASTForLogicalFunction.cpp b/src/Parsers/makeASTForLogicalFunction.cpp new file mode 100644 index 00000000000..eaae38740aa --- /dev/null +++ b/src/Parsers/makeASTForLogicalFunction.cpp @@ -0,0 +1,103 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ +ASTPtr makeASTForLogicalNot(ASTPtr argument) +{ + bool b; + if (tryGetLiteralBool(argument.get(), b)) + return std::make_shared(Field{UInt8(!b)}); + + auto function = std::make_shared(); + auto exp_list = std::make_shared(); + function->name = "not"; + function->arguments = exp_list; + function->children.push_back(exp_list); + exp_list->children.push_back(argument); + return function; +} + + +ASTPtr makeASTForLogicalAnd(ASTs && arguments) +{ + bool partial_result = true; + boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool + { + bool b; + if (!tryGetLiteralBool(argument.get(), b)) + return false; + partial_result &= b; + return 
true; + }); + + if (!partial_result) + return std::make_shared(Field{UInt8(0)}); + if (arguments.empty()) + return std::make_shared(Field{UInt8(1)}); + if (arguments.size() == 1) + return arguments[0]; + + auto function = std::make_shared(); + auto exp_list = std::make_shared(); + function->name = "and"; + function->arguments = exp_list; + function->children.push_back(exp_list); + exp_list->children = std::move(arguments); + return function; +} + + +ASTPtr makeASTForLogicalOr(ASTs && arguments) +{ + bool partial_result = false; + boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool + { + bool b; + if (!tryGetLiteralBool(argument.get(), b)) + return false; + partial_result |= b; + return true; + }); + + if (partial_result) + return std::make_shared(Field{UInt8(1)}); + if (arguments.empty()) + return std::make_shared(Field{UInt8(0)}); + if (arguments.size() == 1) + return arguments[0]; + + auto function = std::make_shared(); + auto exp_list = std::make_shared(); + function->name = "or"; + function->arguments = exp_list; + function->children.push_back(exp_list); + exp_list->children = std::move(arguments); + return function; +} + + +bool tryGetLiteralBool(const IAST * ast, bool & value) +{ + if (!ast) + return false; + + try + { + if (const ASTLiteral * literal = ast->as()) + { + value = !literal->value.isNull() && applyVisitor(FieldVisitorConvertToNumber(), literal->value); + return true; + } + return false; + } + catch (...) + { + return false; + } +} +} diff --git a/src/Parsers/makeASTForLogicalFunction.h b/src/Parsers/makeASTForLogicalFunction.h new file mode 100644 index 00000000000..5c1096cab6e --- /dev/null +++ b/src/Parsers/makeASTForLogicalFunction.h @@ -0,0 +1,19 @@ +#pragma once + +#include + + +namespace DB +{ +/// Makes an AST calculating NOT argument. +ASTPtr makeASTForLogicalNot(ASTPtr argument); + +/// Makes an AST calculating argument1 AND argument2 AND ... AND argumentN. +ASTPtr makeASTForLogicalAnd(ASTs && arguments); + +/// Makes an AST calculating argument1 OR argument2 OR ... OR argumentN. +ASTPtr makeASTForLogicalOr(ASTs && arguments); + +/// Tries to extract a literal bool from AST. 
+bool tryGetLiteralBool(const IAST * ast, bool & value); +} diff --git a/dbms/src/Parsers/parseDatabaseAndTableName.cpp b/src/Parsers/parseDatabaseAndTableName.cpp similarity index 100% rename from dbms/src/Parsers/parseDatabaseAndTableName.cpp rename to src/Parsers/parseDatabaseAndTableName.cpp diff --git a/dbms/src/Parsers/parseDatabaseAndTableName.h b/src/Parsers/parseDatabaseAndTableName.h similarity index 100% rename from dbms/src/Parsers/parseDatabaseAndTableName.h rename to src/Parsers/parseDatabaseAndTableName.h diff --git a/dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp b/src/Parsers/parseIdentifierOrStringLiteral.cpp similarity index 100% rename from dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp rename to src/Parsers/parseIdentifierOrStringLiteral.cpp diff --git a/dbms/src/Parsers/parseIdentifierOrStringLiteral.h b/src/Parsers/parseIdentifierOrStringLiteral.h similarity index 100% rename from dbms/src/Parsers/parseIdentifierOrStringLiteral.h rename to src/Parsers/parseIdentifierOrStringLiteral.h diff --git a/dbms/src/Parsers/parseIntervalKind.cpp b/src/Parsers/parseIntervalKind.cpp similarity index 100% rename from dbms/src/Parsers/parseIntervalKind.cpp rename to src/Parsers/parseIntervalKind.cpp diff --git a/dbms/src/Parsers/parseIntervalKind.h b/src/Parsers/parseIntervalKind.h similarity index 100% rename from dbms/src/Parsers/parseIntervalKind.h rename to src/Parsers/parseIntervalKind.h diff --git a/dbms/src/Parsers/parseQuery.cpp b/src/Parsers/parseQuery.cpp similarity index 94% rename from dbms/src/Parsers/parseQuery.cpp rename to src/Parsers/parseQuery.cpp index b8ab940f2dd..ff537e0f70e 100644 --- a/dbms/src/Parsers/parseQuery.cpp +++ b/src/Parsers/parseQuery.cpp @@ -328,19 +328,28 @@ ASTPtr parseQuery( IParser & parser, const std::string & query, const std::string & query_description, - size_t max_query_size) + size_t max_query_size, + size_t max_parser_depth) { - return parseQuery(parser, query.data(), query.data() + query.size(), query_description, max_query_size); + return parseQuery(parser, query.data(), query.data() + query.size(), query_description, max_query_size, max_parser_depth); } -ASTPtr parseQuery(IParser & parser, const std::string & query, size_t max_query_size) +ASTPtr parseQuery( + IParser & parser, + const std::string & query, + size_t max_query_size, + size_t max_parser_depth) { - return parseQuery(parser, query.data(), query.data() + query.size(), parser.getName(), max_query_size); + return parseQuery(parser, query.data(), query.data() + query.size(), parser.getName(), max_query_size, max_parser_depth); } -std::pair splitMultipartQuery(const std::string & queries, std::vector & queries_list) +std::pair splitMultipartQuery( + const std::string & queries, + std::vector & queries_list, + size_t max_query_size, + size_t max_parser_depth) { ASTPtr ast; @@ -356,7 +365,7 @@ std::pair splitMultipartQuery(const std::string & queries, s { begin = pos; - ast = parseQueryAndMovePosition(parser, pos, end, "", true, 0); + ast = parseQueryAndMovePosition(parser, pos, end, "", true, max_query_size, max_parser_depth); auto * insert = ast->as(); diff --git a/dbms/src/Parsers/parseQuery.h b/src/Parsers/parseQuery.h similarity index 79% rename from dbms/src/Parsers/parseQuery.h rename to src/Parsers/parseQuery.h index feea204181e..14a9a85b22c 100644 --- a/dbms/src/Parsers/parseQuery.h +++ b/src/Parsers/parseQuery.h @@ -17,7 +17,7 @@ ASTPtr tryParseQuery( bool allow_multi_statements, /// If false, check for non-space characters after semicolon and set error 
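To make the constant folding in makeASTForLogicalAnd/Or above concrete, a small standalone sketch (assumes ASTLiteral/ASTIdentifier construction as elsewhere in this tree; "flag_column" is a made-up identifier):

    #include <Parsers/ASTIdentifier.h>
    #include <Parsers/ASTLiteral.h>
    #include <Parsers/makeASTForLogicalFunction.h>
    #include <Parsers/queryToString.h>
    #include <iostream>

    using namespace DB;

    int main()
    {
        ASTs args;
        args.push_back(std::make_shared<ASTLiteral>(Field{UInt8(1)}));   /// folded away: neutral for AND
        args.push_back(std::make_shared<ASTIdentifier>("flag_column"));
        /// Only one non-literal argument remains, so it is returned directly
        /// instead of being wrapped into an "and" function node.
        std::cout << queryToString(makeASTForLogicalAnd(std::move(args))) << std::endl;
    }
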
message if any. size_t max_query_size, /// If (end - pos) > max_query_size and query is longer than max_query_size then throws "Max query size exceeded". /// Disabled if zero. Is used in order to check query size if buffer can contains data for INSERT query. - size_t max_parser_depth = 0); + size_t max_parser_depth); /// Parse query or throw an exception with error message. @@ -27,8 +27,8 @@ ASTPtr parseQueryAndMovePosition( const char * end, const std::string & description, bool allow_multi_statements, - size_t max_query_size = 0, - size_t max_parser_depth = 0); + size_t max_query_size, + size_t max_parser_depth); ASTPtr parseQuery( IParser & parser, @@ -36,24 +36,30 @@ ASTPtr parseQuery( const char * end, const std::string & description, size_t max_query_size, - size_t max_parser_depth = 0); + size_t max_parser_depth); ASTPtr parseQuery( IParser & parser, const std::string & query, const std::string & query_description, - size_t max_query_size); + size_t max_query_size, + size_t max_parser_depth); ASTPtr parseQuery( IParser & parser, const std::string & query, - size_t max_query_size); + size_t max_query_size, + size_t max_parser_depth); /** Split queries separated by ; on to list of single queries * Returns pointer to the end of last successfully parsed query (first), and true if all queries are successfully parsed (second) * NOTE: INSERT's data should be placed in single line. */ -std::pair splitMultipartQuery(const std::string & queries, std::vector & queries_list); +std::pair splitMultipartQuery( + const std::string & queries, + std::vector & queries_list, + size_t max_query_size, + size_t max_parser_depth); } diff --git a/src/Parsers/parseUserName.cpp b/src/Parsers/parseUserName.cpp new file mode 100644 index 00000000000..e6b91ba4af3 --- /dev/null +++ b/src/Parsers/parseUserName.cpp @@ -0,0 +1,64 @@ +#include +#include +#include +#include + + +namespace DB +{ +bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name, std::optional & host_like_pattern) +{ + String name; + if (!parseIdentifierOrStringLiteral(pos, expected, name)) + return false; + + boost::algorithm::trim(name); + + std::optional pattern; + if (ParserToken{TokenType::At}.ignore(pos, expected)) + { + if (!parseIdentifierOrStringLiteral(pos, expected, pattern.emplace())) + return false; + + boost::algorithm::trim(*pattern); + } + + if (pattern && (pattern != "%")) + name += '@' + *pattern; + + user_name = std::move(name); + host_like_pattern = std::move(pattern); + return true; +} + + +bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name) +{ + std::optional unused_pattern; + return parseUserName(pos, expected, user_name, unused_pattern); +} + + +bool parseUserNameOrCurrentUserTag(IParser::Pos & pos, Expected & expected, String & user_name, bool & current_user) +{ + if (ParserKeyword{"CURRENT_USER"}.ignore(pos, expected) || ParserKeyword{"currentUser"}.ignore(pos, expected)) + { + if (ParserToken{TokenType::OpeningRoundBracket}.ignore(pos, expected)) + { + if (!ParserToken{TokenType::ClosingRoundBracket}.ignore(pos, expected)) + return false; + } + current_user = true; + return true; + } + + if (parseUserName(pos, expected, user_name)) + { + current_user = false; + return true; + } + + return false; +} + +} diff --git a/dbms/src/Parsers/parseUserName.h b/src/Parsers/parseUserName.h similarity index 94% rename from dbms/src/Parsers/parseUserName.h rename to src/Parsers/parseUserName.h index c3556f4dc59..641aa09d1f3 100644 --- a/dbms/src/Parsers/parseUserName.h +++ 
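Given the parseUserName implementation above, a sketch of driving it directly (assumes the Tokens / IParser::Pos plumbing of this tree, including the two-argument Pos constructor introduced with max_parser_depth; the user and host strings are made up). Note the catch-all '%' pattern is deliberately not appended to the name:

    #include <Parsers/TokenIterator.h>
    #include <Parsers/parseUserName.h>

    using namespace DB;

    bool tryParseUser(const String & input, String & user_name, std::optional<String> & host_pattern)
    {
        Tokens tokens(input.data(), input.data() + input.size());
        IParser::Pos pos(tokens, /* max_parser_depth = */ 0);
        Expected expected;
        /// "mira @ '%'"          -> user_name == "mira",             host_pattern == "%"
        /// "mira @ '192.%.%.%'"  -> user_name == "mira@192.%.%.%",   host_pattern == "192.%.%.%"
        return parseUserName(pos, expected, user_name, host_pattern);
    }
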
b/src/Parsers/parseUserName.h @@ -10,7 +10,7 @@ namespace DB /// The `host` can be an ip address, ip subnet, or a host name. /// The % and _ wildcard characters are permitted in `host`. /// These have the same meaning as for pattern-matching operations performed with the LIKE operator. -bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name, String & host_like_pattern); +bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name, std::optional & host_like_pattern); bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name); /// Parses either a user name or the 'CURRENT_USER' keyword (or some of the aliases). diff --git a/dbms/src/Parsers/queryToString.cpp b/src/Parsers/queryToString.cpp similarity index 100% rename from dbms/src/Parsers/queryToString.cpp rename to src/Parsers/queryToString.cpp diff --git a/dbms/src/Parsers/queryToString.h b/src/Parsers/queryToString.h similarity index 100% rename from dbms/src/Parsers/queryToString.h rename to src/Parsers/queryToString.h diff --git a/dbms/src/Parsers/tests/CMakeLists.txt b/src/Parsers/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Parsers/tests/CMakeLists.txt rename to src/Parsers/tests/CMakeLists.txt diff --git a/dbms/src/Parsers/tests/create_parser.cpp b/src/Parsers/tests/create_parser.cpp similarity index 95% rename from dbms/src/Parsers/tests/create_parser.cpp rename to src/Parsers/tests/create_parser.cpp index 6137d4d18da..fbdc967fa2a 100644 --- a/dbms/src/Parsers/tests/create_parser.cpp +++ b/src/Parsers/tests/create_parser.cpp @@ -12,7 +12,7 @@ int main(int, char **) std::string input = "CREATE TABLE hits (URL String, UserAgentMinor2 FixedString(2), EventTime DateTime) ENGINE = Log"; ParserCreateQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); formatAST(*ast, std::cerr); std::cerr << std::endl; diff --git a/dbms/src/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp similarity index 98% rename from dbms/src/Parsers/tests/gtest_dictionary_parser.cpp rename to src/Parsers/tests/gtest_dictionary_parser.cpp index 25dcbb326ad..60eeab5f615 100644 --- a/dbms/src/Parsers/tests/gtest_dictionary_parser.cpp +++ b/src/Parsers/tests/gtest_dictionary_parser.cpp @@ -43,7 +43,7 @@ TEST(ParserDictionaryDDL, SimpleDictionary) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as(); EXPECT_EQ(create->table, "dict1"); EXPECT_EQ(create->database, "test"); @@ -139,7 +139,7 @@ TEST(ParserDictionaryDDL, AttributesWithMultipleProperties) " SOURCE(CLICKHOUSE(HOST 'localhost'))"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as(); EXPECT_EQ(create->table, "dict2"); EXPECT_EQ(create->database, ""); @@ -186,7 +186,7 @@ TEST(ParserDictionaryDDL, CustomAttributePropertiesOrder) " LIFETIME(300)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + 
input.size(), "", 0, 0); ASTCreateQuery * create = ast->as(); /// test attributes @@ -241,7 +241,7 @@ TEST(ParserDictionaryDDL, NestedSource) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as(); EXPECT_EQ(create->table, "dict4"); EXPECT_EQ(create->database, ""); @@ -289,7 +289,7 @@ TEST(ParserDictionaryDDL, Formatting) " RANGE(MIN second_column MAX third_column)"; ParserCreateDictionaryQuery parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as(); auto str = serializeAST(*create, true); EXPECT_EQ(str, "CREATE DICTIONARY test.dict5 (`key_column1` UInt64 DEFAULT 1 HIERARCHICAL INJECTIVE, `key_column2` String DEFAULT '', `second_column` UInt8 EXPRESSION intDiv(50, rand() % 1000), `third_column` UInt8) PRIMARY KEY key_column1, key_column2 SOURCE(MYSQL(HOST 'localhost' PORT 9000 USER 'default' REPLICA (HOST '127.0.0.1' PRIORITY 1) PASSWORD '')) LIFETIME(MIN 1 MAX 10) LAYOUT(CACHE(SIZE_IN_CELLS 50)) RANGE(MIN second_column MAX third_column)"); @@ -300,7 +300,7 @@ TEST(ParserDictionaryDDL, ParseDropQuery) String input1 = "DROP DICTIONARY test.dict1"; ParserDropQuery parser; - ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0); + ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0); ASTDropQuery * drop1 = ast1->as(); EXPECT_TRUE(drop1->is_dictionary); @@ -311,7 +311,7 @@ TEST(ParserDictionaryDDL, ParseDropQuery) String input2 = "DROP DICTIONARY IF EXISTS dict2"; - ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0); + ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0); ASTDropQuery * drop2 = ast2->as(); EXPECT_TRUE(drop2->is_dictionary); @@ -326,7 +326,7 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries) String input1 = "SHOW CREATE DICTIONARY test.dict1"; ParserTablePropertiesQuery parser; - ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0); + ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0); ASTShowCreateDictionaryQuery * show1 = ast1->as(); EXPECT_EQ(show1->table, "dict1"); @@ -335,7 +335,7 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries) String input2 = "EXISTS DICTIONARY dict2"; - ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0); + ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0); ASTExistsDictionaryQuery * show2 = ast2->as(); EXPECT_EQ(show2->table, "dict2"); diff --git a/dbms/src/Parsers/tests/lexer.cpp b/src/Parsers/tests/lexer.cpp similarity index 96% rename from dbms/src/Parsers/tests/lexer.cpp rename to src/Parsers/tests/lexer.cpp index d9135b08c28..074338d15b9 100644 --- a/dbms/src/Parsers/tests/lexer.cpp +++ b/src/Parsers/tests/lexer.cpp @@ -9,7 +9,7 @@ /// How to test: -/// for i in ~/work/ClickHouse/dbms/tests/queries/0_stateless/*.sql; do echo $i; grep -q 'FORMAT' $i || ./lexer < $i || break; done +/// for i in ~/work/ClickHouse/tests/queries/0_stateless/*.sql; do echo $i; grep -q 'FORMAT' $i || ./lexer < $i || break; done /// diff --git a/dbms/src/Parsers/tests/select_parser.cpp 
b/src/Parsers/tests/select_parser.cpp similarity index 97% rename from dbms/src/Parsers/tests/select_parser.cpp rename to src/Parsers/tests/select_parser.cpp index f5d94746aa1..7711f0d2527 100644 --- a/dbms/src/Parsers/tests/select_parser.cpp +++ b/src/Parsers/tests/select_parser.cpp @@ -22,7 +22,7 @@ try " FORMAT TabSeparated"; ParserQueryWithOutput parser; - ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); + ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); std::cout << "Success." << std::endl; formatAST(*ast, std::cerr); diff --git a/dbms/src/Processors/CMakeLists.txt b/src/Processors/CMakeLists.txt similarity index 100% rename from dbms/src/Processors/CMakeLists.txt rename to src/Processors/CMakeLists.txt diff --git a/dbms/src/Processors/Chunk.cpp b/src/Processors/Chunk.cpp similarity index 100% rename from dbms/src/Processors/Chunk.cpp rename to src/Processors/Chunk.cpp diff --git a/dbms/src/Processors/Chunk.h b/src/Processors/Chunk.h similarity index 100% rename from dbms/src/Processors/Chunk.h rename to src/Processors/Chunk.h diff --git a/dbms/src/Processors/ConcatProcessor.cpp b/src/Processors/ConcatProcessor.cpp similarity index 100% rename from dbms/src/Processors/ConcatProcessor.cpp rename to src/Processors/ConcatProcessor.cpp diff --git a/dbms/src/Processors/ConcatProcessor.h b/src/Processors/ConcatProcessor.h similarity index 100% rename from dbms/src/Processors/ConcatProcessor.h rename to src/Processors/ConcatProcessor.h diff --git a/dbms/src/Processors/DelayedPortsProcessor.cpp b/src/Processors/DelayedPortsProcessor.cpp similarity index 100% rename from dbms/src/Processors/DelayedPortsProcessor.cpp rename to src/Processors/DelayedPortsProcessor.cpp diff --git a/dbms/src/Processors/DelayedPortsProcessor.h b/src/Processors/DelayedPortsProcessor.h similarity index 100% rename from dbms/src/Processors/DelayedPortsProcessor.h rename to src/Processors/DelayedPortsProcessor.h diff --git a/dbms/src/Processors/Executors/ParallelPipelineExecutor.cpp b/src/Processors/Executors/ParallelPipelineExecutor.cpp similarity index 100% rename from dbms/src/Processors/Executors/ParallelPipelineExecutor.cpp rename to src/Processors/Executors/ParallelPipelineExecutor.cpp diff --git a/dbms/src/Processors/Executors/ParallelPipelineExecutor.h b/src/Processors/Executors/ParallelPipelineExecutor.h similarity index 100% rename from dbms/src/Processors/Executors/ParallelPipelineExecutor.h rename to src/Processors/Executors/ParallelPipelineExecutor.h diff --git a/dbms/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp similarity index 97% rename from dbms/src/Processors/Executors/PipelineExecutor.cpp rename to src/Processors/Executors/PipelineExecutor.cpp index 9108f5ac2a1..78229e4d379 100644 --- a/dbms/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -96,7 +96,7 @@ bool PipelineExecutor::addEdges(UInt64 node) { const IProcessor * proc = &it->getOutputPort().getProcessor(); auto output_port_number = proc->getOutputPortNumber(&it->getOutputPort()); - add_edge(*it, proc, graph[node].backEdges, true, from_input, output_port_number, &graph[node].post_updated_input_ports); + add_edge(*it, proc, graph[node].backEdges, true, from_input, output_port_number, graph[node].post_updated_input_ports.get()); } } @@ -111,7 +111,7 @@ bool PipelineExecutor::addEdges(UInt64 node) { const IProcessor * proc = &it->getInputPort().getProcessor(); auto 
input_port_number = proc->getInputPortNumber(&it->getInputPort()); - add_edge(*it, proc, graph[node].directEdges, false, input_port_number, from_output, &graph[node].post_updated_output_ports); + add_edge(*it, proc, graph[node].directEdges, false, input_port_number, from_output, graph[node].post_updated_output_ports.get()); } } @@ -221,7 +221,7 @@ bool PipelineExecutor::expandPipeline(Stack & stack, UInt64 pid) if (addEdges(node)) { - std::lock_guard guard(graph[node].status_mutex); + std::lock_guard guard(*graph[node].status_mutex); for (; num_back_edges < graph[node].backEdges.size(); ++num_back_edges) graph[node].updated_input_ports.emplace_back(num_back_edges); @@ -246,7 +246,7 @@ bool PipelineExecutor::tryAddProcessorToStackIfUpdated(Edge & edge, Queue & queu auto & node = graph[edge.to]; - std::unique_lock lock(node.status_mutex); + std::unique_lock lock(*node.status_mutex); ExecStatus status = node.status; @@ -263,6 +263,8 @@ bool PipelineExecutor::tryAddProcessorToStackIfUpdated(Edge & edge, Queue & queu node.status = ExecStatus::Preparing; return prepareProcessor(edge.to, thread_number, queue, std::move(lock)); } + else + graph[edge.to].processor->onUpdatePorts(); return true; } @@ -340,22 +342,22 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue } { - for (auto & edge_id : node.post_updated_input_ports) + for (auto & edge_id : *node.post_updated_input_ports) { auto edge = static_cast(edge_id); updated_back_edges.emplace_back(edge); edge->update_info.trigger(); } - for (auto & edge_id : node.post_updated_output_ports) + for (auto & edge_id : *node.post_updated_output_ports) { auto edge = static_cast(edge_id); updated_direct_edges.emplace_back(edge); edge->update_info.trigger(); } - node.post_updated_input_ports.clear(); - node.post_updated_output_ports.clear(); + node.post_updated_input_ports->clear(); + node.post_updated_output_ports->clear(); } } @@ -402,7 +404,7 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue while (!stack.empty()) { auto item = stack.top(); - if (!prepareProcessor(item, thread_number, queue, std::unique_lock(graph[item].status_mutex))) + if (!prepareProcessor(item, thread_number, queue, std::unique_lock(*graph[item].status_mutex))) return false; stack.pop(); @@ -519,7 +521,7 @@ void PipelineExecutor::executeSingleThread(size_t thread_num, size_t num_threads auto prepare_processor = [&](UInt64 pid, Queue & queue) { - if (!prepareProcessor(pid, thread_num, queue, std::unique_lock(graph[pid].status_mutex))) + if (!prepareProcessor(pid, thread_num, queue, std::unique_lock(*graph[pid].status_mutex))) finish(); }; @@ -729,7 +731,7 @@ void PipelineExecutor::executeImpl(size_t num_threads) UInt64 proc = stack.top(); stack.pop(); - prepareProcessor(proc, 0, queue, std::unique_lock(graph[proc].status_mutex)); + prepareProcessor(proc, 0, queue, std::unique_lock(*graph[proc].status_mutex)); while (!queue.empty()) { diff --git a/dbms/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h similarity index 95% rename from dbms/src/Processors/Executors/PipelineExecutor.h rename to src/Processors/Executors/PipelineExecutor.h index 673151bd5eb..c0ce9053e5a 100644 --- a/dbms/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -104,10 +104,10 @@ private: Edges backEdges; ExecStatus status; - std::mutex status_mutex; + std::unique_ptr status_mutex; - std::vector post_updated_input_ports; - std::vector post_updated_output_ports; + 
std::unique_ptr post_updated_input_ports; + std::unique_ptr post_updated_output_ports; /// Last state for profiling. IProcessor::Status last_processor_status = IProcessor::Status::NeedData; @@ -124,12 +124,10 @@ private: execution_state->processor = processor; execution_state->processors_id = processor_id; execution_state->has_quota = processor->hasQuota(); - } - Node(Node && other) noexcept - : processor(other.processor), status(other.status) - , execution_state(std::move(other.execution_state)) - { + status_mutex = std::make_unique(); + post_updated_input_ports = std::make_unique(); + post_updated_output_ports = std::make_unique(); } }; diff --git a/dbms/src/Processors/Executors/SequentialPipelineExecutor.cpp b/src/Processors/Executors/SequentialPipelineExecutor.cpp similarity index 100% rename from dbms/src/Processors/Executors/SequentialPipelineExecutor.cpp rename to src/Processors/Executors/SequentialPipelineExecutor.cpp diff --git a/dbms/src/Processors/Executors/SequentialPipelineExecutor.h b/src/Processors/Executors/SequentialPipelineExecutor.h similarity index 100% rename from dbms/src/Processors/Executors/SequentialPipelineExecutor.h rename to src/Processors/Executors/SequentialPipelineExecutor.h diff --git a/dbms/src/Processors/Executors/ThreadsQueue.h b/src/Processors/Executors/ThreadsQueue.h similarity index 100% rename from dbms/src/Processors/Executors/ThreadsQueue.h rename to src/Processors/Executors/ThreadsQueue.h diff --git a/dbms/src/Processors/Executors/TreeExecutorBlockInputStream.cpp b/src/Processors/Executors/TreeExecutorBlockInputStream.cpp similarity index 88% rename from dbms/src/Processors/Executors/TreeExecutorBlockInputStream.cpp rename to src/Processors/Executors/TreeExecutorBlockInputStream.cpp index ee5b254ccf9..f797fee3ab5 100644 --- a/dbms/src/Processors/Executors/TreeExecutorBlockInputStream.cpp +++ b/src/Processors/Executors/TreeExecutorBlockInputStream.cpp @@ -30,7 +30,10 @@ static void checkProcessorHasSingleOutput(IProcessor * processor) /// Check tree invariants (described in TreeExecutor.h). /// Collect sources with progress. 
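The PipelineExecutor::Node change above replaces the direct std::mutex and port-list members with unique_ptr so the hand-written move constructor can be dropped. A self-contained illustration of the idiom (toy struct, not ClickHouse code):

    #include <memory>
    #include <mutex>
    #include <vector>

    struct Node
    {
        /// A plain std::mutex member makes Node neither movable nor copyable,
        /// so std::vector<Node> could not grow; owning the mutex through
        /// unique_ptr keeps the implicitly generated move operations, and the
        /// mutex address stays stable when the vector reallocates.
        std::unique_ptr<std::mutex> status_mutex = std::make_unique<std::mutex>();
    };

    int main()
    {
        std::vector<Node> graph;
        graph.emplace_back();
        graph.emplace_back();   /// reallocation move-constructs the existing Node
    }
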
-static void validateTree(const Processors & processors, IProcessor * root, IProcessor * totals_root, std::vector & sources) +static void validateTree( + const Processors & processors, + IProcessor * root, IProcessor * totals_root, IProcessor * extremes_root, + std::vector & sources) { std::unordered_map index; @@ -49,6 +52,8 @@ static void validateTree(const Processors & processors, IProcessor * root, IProc stack.push(root); if (totals_root) stack.push(totals_root); + if (extremes_root) + stack.push(extremes_root); while (!stack.empty()) { @@ -104,11 +109,15 @@ void TreeExecutorBlockInputStream::init() root = &output_port.getProcessor(); IProcessor * totals_root = nullptr; + IProcessor * extremes_root = nullptr; if (totals_port) totals_root = &totals_port->getProcessor(); - validateTree(processors, root, totals_root, sources_with_progress); + if (extremes_port) + extremes_root = &extremes_port->getProcessor(); + + validateTree(processors, root, totals_root, extremes_root, sources_with_progress); input_port = std::make_unique(getHeader(), root); connect(output_port, *input_port); @@ -121,15 +130,24 @@ void TreeExecutorBlockInputStream::init() input_totals_port->setNeeded(); } + if (extremes_port) + { + input_extremes_port = std::make_unique(extremes_port->getHeader(), root); + connect(*extremes_port, *input_extremes_port); + input_extremes_port->setNeeded(); + } + initRowsBeforeLimit(); } -void TreeExecutorBlockInputStream::execute(bool on_totals) +void TreeExecutorBlockInputStream::execute(bool on_totals, bool on_extremes) { std::stack stack; if (on_totals) stack.push(&totals_port->getProcessor()); + else if (on_extremes) + stack.push(&extremes_port->getProcessor()); else stack.push(root); @@ -146,7 +164,7 @@ void TreeExecutorBlockInputStream::execute(bool on_totals) } }; - while (!stack.empty()) + while (!stack.empty() && !is_cancelled) { IProcessor * node = stack.top(); @@ -277,17 +295,24 @@ void TreeExecutorBlockInputStream::initRowsBeforeLimit() Block TreeExecutorBlockInputStream::readImpl() { - while (true) + while (!is_cancelled) { if (input_port->isFinished()) { if (totals_port && !input_totals_port->isFinished()) { - execute(true); + execute(true, false); if (input_totals_port->hasData()) totals = getHeader().cloneWithColumns(input_totals_port->pull().detachColumns()); } + if (extremes_port && !input_extremes_port->isFinished()) + { + execute(false, true); + if (input_extremes_port->hasData()) + extremes = getHeader().cloneWithColumns(input_extremes_port->pull().detachColumns()); + } + if (rows_before_limit_at_least && rows_before_limit_at_least->hasAppliedLimit()) info.setRowsBeforeLimit(rows_before_limit_at_least->get()); @@ -311,8 +336,10 @@ Block TreeExecutorBlockInputStream::readImpl() return block; } - execute(false); + execute(false, false); } + + return {}; } void TreeExecutorBlockInputStream::setProgressCallback(const ProgressCallback & callback) @@ -348,4 +375,12 @@ void TreeExecutorBlockInputStream::addTotalRowsApprox(size_t value) sources_with_progress.front()->addTotalRowsApprox(value); } +void TreeExecutorBlockInputStream::cancel(bool kill) +{ + IBlockInputStream::cancel(kill); + + for (auto & processor : processors) + processor->cancel(); +} + } diff --git a/dbms/src/Processors/Executors/TreeExecutorBlockInputStream.h b/src/Processors/Executors/TreeExecutorBlockInputStream.h similarity index 91% rename from dbms/src/Processors/Executors/TreeExecutorBlockInputStream.h rename to src/Processors/Executors/TreeExecutorBlockInputStream.h index 24cab387eb8..d96492b3fb8 
100644 --- a/dbms/src/Processors/Executors/TreeExecutorBlockInputStream.h +++ b/src/Processors/Executors/TreeExecutorBlockInputStream.h @@ -31,6 +31,7 @@ public: interpreter_context.emplace_back(context); totals_port = pipe.getTotalsPort(); + extremes_port = pipe.getExtremesPort(); processors = std::move(pipe).detachProcessors(); init(); } @@ -38,6 +39,8 @@ public: String getName() const override { return "TreeExecutor"; } Block getHeader() const override { return root->getOutputs().front().getHeader(); } + void cancel(bool kill) override; + /// This methods does not affect TreeExecutor as IBlockInputStream itself. /// They just passed to all SourceWithProgress processors. void setProgressCallback(const ProgressCallback & callback) final; @@ -52,10 +55,12 @@ protected: private: OutputPort & output_port; OutputPort * totals_port = nullptr; + OutputPort * extremes_port = nullptr; Processors processors; IProcessor * root = nullptr; std::unique_ptr input_port; std::unique_ptr input_totals_port; + std::unique_ptr input_extremes_port; RowsBeforeLimitCounterPtr rows_before_limit_at_least; /// Remember sources that support progress. @@ -65,7 +70,7 @@ private: void init(); /// Execute tree step-by-step until root returns next chunk or execution is finished. - void execute(bool on_totals); + void execute(bool on_totals, bool on_extremes); void initRowsBeforeLimit(); diff --git a/dbms/src/Processors/Executors/traverse.h b/src/Processors/Executors/traverse.h similarity index 100% rename from dbms/src/Processors/Executors/traverse.h rename to src/Processors/Executors/traverse.h diff --git a/dbms/src/Processors/ForkProcessor.cpp b/src/Processors/ForkProcessor.cpp similarity index 100% rename from dbms/src/Processors/ForkProcessor.cpp rename to src/Processors/ForkProcessor.cpp diff --git a/dbms/src/Processors/ForkProcessor.h b/src/Processors/ForkProcessor.h similarity index 100% rename from dbms/src/Processors/ForkProcessor.h rename to src/Processors/ForkProcessor.h diff --git a/dbms/src/Processors/Formats/IInputFormat.cpp b/src/Processors/Formats/IInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/IInputFormat.cpp rename to src/Processors/Formats/IInputFormat.cpp diff --git a/dbms/src/Processors/Formats/IInputFormat.h b/src/Processors/Formats/IInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IInputFormat.h rename to src/Processors/Formats/IInputFormat.h diff --git a/dbms/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/IOutputFormat.cpp rename to src/Processors/Formats/IOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IOutputFormat.h rename to src/Processors/Formats/IOutputFormat.h diff --git a/dbms/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/IRowInputFormat.cpp rename to src/Processors/Formats/IRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/IRowInputFormat.h b/src/Processors/Formats/IRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IRowInputFormat.h rename to src/Processors/Formats/IRowInputFormat.h diff --git a/dbms/src/Processors/Formats/IRowOutputFormat.cpp b/src/Processors/Formats/IRowOutputFormat.cpp similarity index 100% rename from 
dbms/src/Processors/Formats/IRowOutputFormat.cpp rename to src/Processors/Formats/IRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/IRowOutputFormat.h b/src/Processors/Formats/IRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IRowOutputFormat.h rename to src/Processors/Formats/IRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp rename to src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp diff --git a/dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h rename to src/Processors/Formats/Impl/ArrowColumnToCHColumn.h diff --git a/dbms/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowInputFormat.cpp rename to src/Processors/Formats/Impl/AvroRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/AvroRowInputFormat.h b/src/Processors/Formats/Impl/AvroRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowInputFormat.h rename to src/Processors/Formats/Impl/AvroRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp rename to src/Processors/Formats/Impl/AvroRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.h b/src/Processors/Formats/Impl/AvroRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.h rename to src/Processors/Formats/Impl/AvroRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp b/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp rename to src/Processors/Formats/Impl/BinaryRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h b/src/Processors/Formats/Impl/BinaryRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h rename to src/Processors/Formats/Impl/BinaryRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp b/src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp rename to src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.h b/src/Processors/Formats/Impl/BinaryRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.h rename to src/Processors/Formats/Impl/BinaryRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/CMakeLists.txt b/src/Processors/Formats/Impl/CMakeLists.txt similarity index 100% rename from dbms/src/Processors/Formats/Impl/CMakeLists.txt rename to src/Processors/Formats/Impl/CMakeLists.txt diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp 
rename to src/Processors/Formats/Impl/CSVRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h rename to src/Processors/Formats/Impl/CSVRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp b/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp rename to src/Processors/Formats/Impl/CSVRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h b/src/Processors/Formats/Impl/CSVRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h rename to src/Processors/Formats/Impl/CSVRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp rename to src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h rename to src/Processors/Formats/Impl/CapnProtoRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp rename to src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp diff --git a/dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.h b/src/Processors/Formats/Impl/ConstantExpressionTemplate.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.h rename to src/Processors/Formats/Impl/ConstantExpressionTemplate.h diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp rename to src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h rename to src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp rename to src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h rename to src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp similarity index 100% rename from 
dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
rename to src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
rename to src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.h b/src/Processors/Formats/Impl/JSONRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONRowOutputFormat.h
diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
new file mode 100644
index 00000000000..b7da335f0c5
--- /dev/null
+++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
@@ -0,0 +1,194 @@
+#include <Processors/Formats/Impl/MsgPackRowInputFormat.h>
+#include <Formats/FormatFactory.h>
+#include <Common/assert_cast.h>
+#include <IO/ReadHelpers.h>
+
+#include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypeDateTime64.h>
+#include <DataTypes/DataTypeNullable.h>
+#include <DataTypes/DataTypesNumber.h>
+
+#include <Columns/ColumnArray.h>
+#include <Columns/ColumnFixedString.h>
+#include <Columns/ColumnNullable.h>
+#include <Columns/ColumnString.h>
+#include <Columns/ColumnsNumber.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+    extern const int INCORRECT_DATA;
+}
+
+MsgPackRowInputFormat::MsgPackRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_)
+    : IRowInputFormat(header_, in_, std::move(params_)),
+    buf(in), ctx(&reference_func, nullptr, msgpack::unpack_limit()), data_types(header_.getDataTypes()) {}
+
+int MsgPackRowInputFormat::unpack(msgpack::zone & zone, size_t & offset)
+{
+    offset = 0;
+    ctx.init();
+    ctx.user().set_zone(zone);
+    return ctx.execute(buf.position(), buf.buffer().end() - buf.position(), offset);
+}
+
+bool MsgPackRowInputFormat::readObject()
+{
+    if (buf.eof())
+        return false;
+
+    PeekableReadBufferCheckpoint checkpoint{buf};
+    std::unique_ptr<msgpack::zone> zone(new msgpack::zone);
+    size_t offset;
+    while (!unpack(*zone, offset))
+    {
+        buf.position() = buf.buffer().end();
+        if (buf.eof())
+            throw Exception("Unexpected end of file while parsing msgpack object.", ErrorCodes::INCORRECT_DATA);
+        buf.position() = buf.buffer().end();
+        buf.makeContinuousMemoryFromCheckpointToPos();
+        buf.rollbackToCheckpoint();
+    }
+    buf.position() += offset;
+    object_handle = msgpack::object_handle(ctx.data(), std::move(zone));
+    return true;
+}
+
+void MsgPackRowInputFormat::insertObject(IColumn & column, DataTypePtr data_type, const msgpack::object & object)
+{
+    switch (data_type->getTypeId())
+    {
+        case TypeIndex::UInt8:
+        {
+            assert_cast<ColumnUInt8 &>(column).insertValue(object.as<uint8_t>());
+            return;
+        }
+        case TypeIndex::Date: [[fallthrough]];
+        case TypeIndex::UInt16:
+        {
+            assert_cast<ColumnUInt16 &>(column).insertValue(object.as<uint16_t>());
+            return;
+        }
+        case TypeIndex::DateTime: [[fallthrough]];
+        case TypeIndex::UInt32:
+        {
+            assert_cast<ColumnUInt32 &>(column).insertValue(object.as<uint32_t>());
+            return;
+        }
+        case TypeIndex::UInt64:
+        {
+            assert_cast<ColumnUInt64 &>(column).insertValue(object.as<uint64_t>());
+            return;
+        }
+        case TypeIndex::Int8:
+        {
+            assert_cast<ColumnInt8 &>(column).insertValue(object.as<int8_t>());
+            return;
+        }
+        case TypeIndex::Int16:
+        {
+            assert_cast<ColumnInt16 &>(column).insertValue(object.as<int16_t>());
+            return;
+        }
+        case TypeIndex::Int32:
+        {
+            assert_cast<ColumnInt32 &>(column).insertValue(object.as<int32_t>());
+            return;
+        }
+        case TypeIndex::Int64:
+        {
+            assert_cast<ColumnInt64 &>(column).insertValue(object.as<int64_t>());
+            return;
+        }
+        case TypeIndex::Float32:
+        {
+            assert_cast<ColumnFloat32 &>(column).insertValue(object.as<float>());
+            return;
+        }
+        case TypeIndex::Float64:
+        {
+            assert_cast<ColumnFloat64 &>(column).insertValue(object.as<double>());
+            return;
+        }
+        case TypeIndex::DateTime64:
+        {
+            assert_cast<DataTypeDateTime64::ColumnType &>(column).insertValue(object.as<UInt64>());
+            return;
+        }
+        case TypeIndex::FixedString: [[fallthrough]];
+        case TypeIndex::String:
+        {
+            msgpack::object_str obj_str = object.via.str;
+            column.insertData(obj_str.ptr, obj_str.size);
+            return;
+        }
+        case TypeIndex::Array:
+        {
+            msgpack::object_array object_array = object.via.array;
+            auto nested_type = assert_cast<const DataTypeArray &>(*data_type).getNestedType();
+            ColumnArray & column_array = assert_cast<ColumnArray &>(column);
+            ColumnArray::Offsets & offsets = column_array.getOffsets();
+            IColumn & nested_column = column_array.getData();
+            for (size_t i = 0; i != object_array.size; ++i)
+            {
+                insertObject(nested_column, nested_type, object_array.ptr[i]);
+            }
+            offsets.push_back(offsets.back() + object_array.size);
+            return;
+        }
+        case TypeIndex::Nullable:
+        {
+            auto nested_type = removeNullable(data_type);
+            ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(column);
+            if (object.type == msgpack::type::NIL)
+                column_nullable.insertDefault();
+            else
+                insertObject(column_nullable.getNestedColumn(), nested_type, object);
+            return;
+        }
+        case TypeIndex::Nothing:
+        {
+            // Nothing to insert, MsgPack object is nil.
+            return;
+        }
+        default:
+            break;
+    }
+    throw Exception("Type " + data_type->getName() + " is not supported for MsgPack input format", ErrorCodes::ILLEGAL_COLUMN);
+}
+
+bool MsgPackRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &)
+{
+    size_t column_index = 0;
+    bool has_more_data = true;
+    for (; column_index != columns.size(); ++column_index)
+    {
+        has_more_data = readObject();
+        if (!has_more_data)
+            break;
+        insertObject(*columns[column_index], data_types[column_index], object_handle.get());
+    }
+    if (!has_more_data)
+    {
+        if (column_index != 0)
+            throw Exception("Not enough values to complete the row.", ErrorCodes::INCORRECT_DATA);
+        return false;
+    }
+    return true;
+}
+
+void registerInputFormatProcessorMsgPack(FormatFactory & factory)
+{
+    factory.registerInputFormatProcessor("MsgPack", [](
+            ReadBuffer & buf,
+            const Block & sample,
+            const RowInputFormatParams & params,
+            const FormatSettings &)
+    {
+        return std::make_shared<MsgPackRowInputFormat>(sample, buf, params);
+    });
+}
+
+}
diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.h b/src/Processors/Formats/Impl/MsgPackRowInputFormat.h
new file mode 100644
index 00000000000..a426dc4950c
--- /dev/null
+++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.h
@@ -0,0 +1,34 @@
+#pragma once
+
+#include <Processors/Formats/IRowInputFormat.h>
+#include <IO/PeekableReadBuffer.h>
+#include <DataTypes/IDataType.h>
+#include <msgpack.hpp>
+
+namespace DB
+{
+
+class ReadBuffer;
+
+class MsgPackRowInputFormat : public IRowInputFormat
+{
+public:
+    MsgPackRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_);
+
+    bool readRow(MutableColumns & columns, RowReadExtension & ext) override;
+    String getName() const override { return "MsgPackRowInputFormat"; }
+private:
+    bool readObject();
+    void insertObject(IColumn & column, DataTypePtr type, const msgpack::object & object);
+    int unpack(msgpack::zone & zone, size_t & offset);
+
+    // msgpack makes a copy of the object by default; this function tells the unpacker not to copy.
+    static bool reference_func(msgpack::type::object_type, size_t, void *) { return true; }
+
+    PeekableReadBuffer buf;
+    msgpack::object_handle object_handle;
+    msgpack::v1::detail::context ctx;
+    DataTypes data_types;
+};
+
+}
diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp
new file mode 100644
index 00000000000..cef7b001505
--- /dev/null
+++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp
@@ -0,0 +1,159 @@
+#include <Processors/Formats/Impl/MsgPackRowOutputFormat.h>
+#include <Formats/FormatFactory.h>
+#include <Common/assert_cast.h>
+
+#include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypeDateTime64.h>
+#include <DataTypes/DataTypeNullable.h>
+#include <DataTypes/DataTypesNumber.h>
+
+#include <Columns/ColumnArray.h>
+#include <Columns/ColumnFixedString.h>
+#include <Columns/ColumnNullable.h>
+#include <Columns/ColumnString.h>
+#include <Columns/ColumnsNumber.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+}
+
+MsgPackRowOutputFormat::MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback)
+    : IRowOutputFormat(header_, out_, callback), packer(out_) {}
+
+void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr data_type, size_t row_num)
+{
+    switch (data_type->getTypeId())
+    {
+        case TypeIndex::UInt8:
+        {
+            packer.pack_uint8(assert_cast<const ColumnUInt8 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::Date: [[fallthrough]];
+        case TypeIndex::UInt16:
+        {
+            packer.pack_uint16(assert_cast<const ColumnUInt16 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::DateTime: [[fallthrough]];
+        case TypeIndex::UInt32:
+        {
+            packer.pack_uint32(assert_cast<const ColumnUInt32 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::UInt64:
+        {
+            packer.pack_uint64(assert_cast<const ColumnUInt64 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::Int8:
+        {
+            packer.pack_int8(assert_cast<const ColumnInt8 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::Int16:
+        {
+            packer.pack_int16(assert_cast<const ColumnInt16 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::Int32:
+        {
+            packer.pack_int32(assert_cast<const ColumnInt32 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::Int64:
+        {
+            packer.pack_int64(assert_cast<const ColumnInt64 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::Float32:
+        {
+            packer.pack_float(assert_cast<const ColumnFloat32 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::Float64:
+        {
+            packer.pack_double(assert_cast<const ColumnFloat64 &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::DateTime64:
+        {
+            packer.pack_uint64(assert_cast<const DataTypeDateTime64::ColumnType &>(column).getElement(row_num));
+            return;
+        }
+        case TypeIndex::String:
+        {
+            const StringRef & string = assert_cast<const ColumnString &>(column).getDataAt(row_num);
+            packer.pack_str(string.size);
+            packer.pack_str_body(string.data, string.size);
+            return;
+        }
+        case TypeIndex::FixedString:
+        {
+            const StringRef & string = assert_cast<const ColumnFixedString &>(column).getDataAt(row_num);
+            packer.pack_str(string.size);
+            packer.pack_str_body(string.data, string.size);
+            return;
+        }
+        case TypeIndex::Array:
+        {
+            auto nested_type = assert_cast<const DataTypeArray &>(*data_type).getNestedType();
+            const ColumnArray & column_array = assert_cast<const ColumnArray &>(column);
+            const IColumn & nested_column = column_array.getData();
+            const ColumnArray::Offsets & offsets = column_array.getOffsets();
+            size_t offset = offsets[row_num - 1];
+            size_t size = offsets[row_num] - offset;
+            packer.pack_array(size);
+            for (size_t i = 0; i < size; ++i)
+            {
+                serializeField(nested_column, nested_type, offset + i);
+            }
+            return;
+        }
+        case TypeIndex::Nullable:
+        {
+            auto nested_type = removeNullable(data_type);
+            const ColumnNullable & column_nullable = assert_cast<const ColumnNullable &>(column);
+            if (!column_nullable.isNullAt(row_num))
+                serializeField(column_nullable.getNestedColumn(), nested_type, row_num);
+            else
+                packer.pack_nil();
+            return;
+        }
+        case TypeIndex::Nothing:
+        {
+            packer.pack_nil();
+            return;
+        }
+        default:
+            break;
+    }
+    throw Exception("Type " + data_type->getName() + " is not supported for MsgPack output format", ErrorCodes::ILLEGAL_COLUMN);
+}
+
+void MsgPackRowOutputFormat::write(const Columns & columns, size_t row_num)
+{
+    size_t num_columns = columns.size();
+    for (size_t i = 0; i < num_columns; ++i)
+    {
+        serializeField(*columns[i], types[i], row_num);
+    }
+}
+
+void registerOutputFormatProcessorMsgPack(FormatFactory & factory)
+{
+    factory.registerOutputFormatProcessor("MsgPack", [](
+            WriteBuffer & buf,
+            const Block & sample,
+            FormatFactory::WriteCallback callback,
+            const FormatSettings &)
+    {
+        return std::make_shared<MsgPackRowOutputFormat>(buf, sample, callback);
+    });
+}
+
+}
diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h
new file mode 100644
index 00000000000..351920eb7c8
--- /dev/null
+++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <Core/Block.h>
+#include <IO/WriteBuffer.h>
+#include <Formats/FormatFactory.h>
+#include <Processors/Formats/IRowOutputFormat.h>
+#include <msgpack.hpp>
+
+
+namespace DB
+{
+
+class MsgPackRowOutputFormat : public IRowOutputFormat
+{
+public:
+    MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback);
+
+    String getName() const override { return "MsgPackRowOutputFormat"; }
+
+    void write(const Columns & columns, size_t row_num) override;
+    void writeField(const IColumn &, const IDataType &, size_t) override {}
+    void serializeField(const IColumn & column, DataTypePtr data_type, size_t row_num);
+
+private:
+    msgpack::packer<DB::WriteBuffer> packer;
+};
+
+}
diff --git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp
rename to src/Processors/Formats/Impl/MySQLOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h b/src/Processors/Formats/Impl/MySQLOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h
rename to src/Processors/Formats/Impl/MySQLOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/NativeFormat.cpp b/src/Processors/Formats/Impl/NativeFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/NativeFormat.cpp
rename to src/Processors/Formats/Impl/NativeFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/NullFormat.cpp b/src/Processors/Formats/Impl/NullFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/NullFormat.cpp
rename to src/Processors/Formats/Impl/NullFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h
rename to src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp
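Note on the msgpack-c API the two formats above rely on: values are packed back to back with no framing, and unpacking advances an offset object by object, which is why `MsgPackRowInputFormat::readObject()` has to loop with a checkpoint until a whole object fits in the buffer. A minimal standalone sketch of that pack/unpack contract, assuming msgpack-c (`msgpack.hpp`) is available; the stream type and values are illustrative, not part of this diff:

```cpp
// Minimal sketch of the msgpack-c contract used above. Illustrative only.
#include <msgpack.hpp>
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    // Pack values back to back, one per column, as MsgPackRowOutputFormat does.
    std::stringstream buffer;
    msgpack::packer<std::stringstream> packer(buffer);
    packer.pack_uint32(42);
    packer.pack_str(5);
    packer.pack_str_body("hello", 5);

    // Unpack object by object, advancing an offset - the same incremental loop
    // MsgPackRowInputFormat::readObject() implements over a PeekableReadBuffer.
    const std::string data = buffer.str();
    std::size_t offset = 0;
    while (offset < data.size())
    {
        msgpack::object_handle handle = msgpack::unpack(data.data(), data.size(), offset);
        std::cout << handle.get() << '\n';  // prints 42, then "hello"
    }
    return 0;
}
```

diff --git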
a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h b/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h
rename to src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp
rename to src/Processors/Formats/Impl/ORCBlockInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.h b/src/Processors/Formats/Impl/ORCBlockInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.h
rename to src/Processors/Formats/Impl/ORCBlockInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp
rename to src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h
rename to src/Processors/Formats/Impl/ParquetBlockInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h
rename to src/Processors/Formats/Impl/ParquetBlockOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
index a816cdd5318..ae86a01a52a 100644
--- a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
+++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
@@ -225,7 +225,7 @@ void PrettyBlockOutputFormat::consumeTotals(Chunk chunk)
 {
     total_rows = 0;
     writeSuffixIfNot();
-    writeCString("\nExtremes:\n", out);
+    writeCString("\nTotals:\n", out);
     write(chunk, PortKind::Totals);
 }
 
@@ -233,7 +233,7 @@ void PrettyBlockOutputFormat::consumeExtremes(Chunk chunk)
 {
     total_rows = 0;
     writeSuffixIfNot();
-    writeCString("\nTotals:\n", out);
+    writeCString("\nExtremes:\n", out);
     write(chunk, PortKind::Extremes);
 }
 
diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h
rename to src/Processors/Formats/Impl/PrettyBlockOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h
rename to src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h
rename to src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp
rename to src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h b/src/Processors/Formats/Impl/ProtobufRowInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h
rename to src/Processors/Formats/Impl/ProtobufRowInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp b/src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h b/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h
rename to src/Processors/Formats/Impl/ProtobufRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp
rename to src/Processors/Formats/Impl/RegexpRowInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.h b/src/Processors/Formats/Impl/RegexpRowInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.h
rename to src/Processors/Formats/Impl/RegexpRowInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp
rename to src/Processors/Formats/Impl/TSKVRowInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h b/src/Processors/Formats/Impl/TSKVRowInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h
rename to src/Processors/Formats/Impl/TSKVRowInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp b/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.h b/src/Processors/Formats/Impl/TSKVRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.h
rename to src/Processors/Formats/Impl/TSKVRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h
rename to src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp
rename to src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h
rename to src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h
rename to src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h
rename to src/Processors/Formats/Impl/TemplateBlockOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp
rename to src/Processors/Formats/Impl/TemplateRowInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.h b/src/Processors/Formats/Impl/TemplateRowInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.h
rename to src/Processors/Formats/Impl/TemplateRowInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
rename to src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.h b/src/Processors/Formats/Impl/ValuesBlockInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.h
rename to src/Processors/Formats/Impl/ValuesBlockInputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp b/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h b/src/Processors/Formats/Impl/ValuesRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h
rename to src/Processors/Formats/Impl/ValuesRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp b/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h b/src/Processors/Formats/Impl/VerticalRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h
rename to src/Processors/Formats/Impl/VerticalRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp b/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/XMLRowOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h b/src/Processors/Formats/Impl/XMLRowOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h
rename to src/Processors/Formats/Impl/XMLRowOutputFormat.h
diff --git a/dbms/src/Processors/Formats/InputStreamFromInputFormat.h b/src/Processors/Formats/InputStreamFromInputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/InputStreamFromInputFormat.h
rename to src/Processors/Formats/InputStreamFromInputFormat.h
diff --git a/dbms/src/Processors/Formats/LazyOutputFormat.cpp b/src/Processors/Formats/LazyOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/LazyOutputFormat.cpp
rename to src/Processors/Formats/LazyOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/LazyOutputFormat.h
rename to src/Processors/Formats/LazyOutputFormat.h
diff --git a/dbms/src/Processors/Formats/OutputStreamToOutputFormat.cpp b/src/Processors/Formats/OutputStreamToOutputFormat.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/OutputStreamToOutputFormat.cpp
rename to src/Processors/Formats/OutputStreamToOutputFormat.cpp
diff --git a/dbms/src/Processors/Formats/OutputStreamToOutputFormat.h b/src/Processors/Formats/OutputStreamToOutputFormat.h
similarity index 100%
rename from dbms/src/Processors/Formats/OutputStreamToOutputFormat.h
rename to src/Processors/Formats/OutputStreamToOutputFormat.h
diff --git a/dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp b/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp
similarity index 100%
rename from dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp
rename to src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp
diff --git a/dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h b/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h
similarity index 100%
rename from dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h
rename to src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h
diff --git a/dbms/src/Processors/IAccumulatingTransform.cpp b/src/Processors/IAccumulatingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/IAccumulatingTransform.cpp
rename to src/Processors/IAccumulatingTransform.cpp
diff --git a/dbms/src/Processors/IAccumulatingTransform.h b/src/Processors/IAccumulatingTransform.h
similarity index 100%
rename from dbms/src/Processors/IAccumulatingTransform.h
rename to src/Processors/IAccumulatingTransform.h
diff --git a/dbms/src/Processors/IInflatingTransform.cpp b/src/Processors/IInflatingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/IInflatingTransform.cpp
rename to src/Processors/IInflatingTransform.cpp
diff --git a/dbms/src/Processors/IInflatingTransform.h b/src/Processors/IInflatingTransform.h
similarity index 100%
rename from dbms/src/Processors/IInflatingTransform.h
rename to src/Processors/IInflatingTransform.h
diff --git a/dbms/src/Processors/IProcessor.cpp b/src/Processors/IProcessor.cpp
similarity index 100%
rename from dbms/src/Processors/IProcessor.cpp
rename to src/Processors/IProcessor.cpp
diff --git a/dbms/src/Processors/IProcessor.h b/src/Processors/IProcessor.h
similarity index 98%
rename from dbms/src/Processors/IProcessor.h
rename to src/Processors/IProcessor.h
index a613e8008d0..b7c230cb6de 100644
--- a/dbms/src/Processors/IProcessor.h
+++ b/src/Processors/IProcessor.h
@@ -233,6 +233,10 @@ public:
         onCancel();
     }
 
+    /// Additional method which is called if ports were updated while work() was running.
+    /// May be used to stop execution in rare cases.
+    virtual void onUpdatePorts() {}
+
     virtual ~IProcessor() = default;
 
     auto & getInputs() { return inputs; }
diff --git a/dbms/src/Processors/ISimpleTransform.cpp b/src/Processors/ISimpleTransform.cpp
similarity index 100%
rename from dbms/src/Processors/ISimpleTransform.cpp
rename to src/Processors/ISimpleTransform.cpp
diff --git a/dbms/src/Processors/ISimpleTransform.h b/src/Processors/ISimpleTransform.h
similarity index 100%
rename from dbms/src/Processors/ISimpleTransform.h
rename to src/Processors/ISimpleTransform.h
diff --git a/dbms/src/Processors/ISink.cpp b/src/Processors/ISink.cpp
similarity index 100%
rename from dbms/src/Processors/ISink.cpp
rename to src/Processors/ISink.cpp
diff --git a/dbms/src/Processors/ISink.h b/src/Processors/ISink.h
similarity index 100%
rename from dbms/src/Processors/ISink.h
rename to src/Processors/ISink.h
diff --git a/dbms/src/Processors/ISource.cpp b/src/Processors/ISource.cpp
similarity index 100%
rename from dbms/src/Processors/ISource.cpp
rename to src/Processors/ISource.cpp
diff --git a/dbms/src/Processors/ISource.h b/src/Processors/ISource.h
similarity index 100%
rename from dbms/src/Processors/ISource.h
rename to src/Processors/ISource.h
diff --git a/dbms/src/Processors/LimitTransform.cpp b/src/Processors/LimitTransform.cpp
similarity index 100%
rename from dbms/src/Processors/LimitTransform.cpp
rename to src/Processors/LimitTransform.cpp
diff --git a/dbms/src/Processors/LimitTransform.h b/src/Processors/LimitTransform.h
similarity index 100%
rename from dbms/src/Processors/LimitTransform.h
rename to src/Processors/LimitTransform.h
diff --git a/src/Processors/NullSink.h b/src/Processors/NullSink.h
new file mode 100644
index 00000000000..5d304a0d68e
--- /dev/null
+++ b/src/Processors/NullSink.h
@@ -0,0 +1,35 @@
+#pragma once
+#include <Processors/IProcessor.h>
+#include <Processors/ISink.h>
+
+namespace DB
+{
+
+/// Sink which closes input port and reads nothing.
+class NullSink : public IProcessor
+{
+public:
+    explicit NullSink(Block header) : IProcessor({std::move(header)}, {}) {}
+    String getName() const override { return "NullSink"; }
+
+    Status prepare() override
+    {
+        inputs.front().close();
+        return Status::Finished;
+    }
+
+    InputPort & getPort() { return inputs.front(); }
+};
+
+/// Sink which reads everything and does nothing with it.
+class EmptySink : public ISink
+{
+public:
+    explicit EmptySink(Block header) : ISink(std::move(header)) {}
+    String getName() const override { return "EmptySink"; }
+
+protected:
+    void consume(Chunk) override {}
+};
+
+}
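The two sinks differ in intent: NullSink closes its input so the upstream stops producing, while EmptySink accepts and discards every chunk (it is what `uniteExtremes()` below uses to swallow the data output of the extra ExtremesTransform). A hedged sketch of draining a one-chunk source into EmptySink, assuming `connect()` and `PipelineExecutor` behave as they are used elsewhere in this PR; treat the exact signatures as illustrative:

```cpp
// Sketch only: wiring a single-chunk source into EmptySink to discard its data.
// Assumes SourceFromSingleChunk, connect() and PipelineExecutor as used in this
// repo; signatures here are illustrative, not authoritative.
#include <Processors/NullSink.h>
#include <Processors/Sources/SourceFromSingleChunk.h>
#include <Processors/Executors/PipelineExecutor.h>

namespace DB
{

void drainAndDiscard(Block block)
{
    auto source = std::make_shared<SourceFromSingleChunk>(
        block.cloneEmpty(), Chunk(block.getColumns(), block.rows()));
    auto sink = std::make_shared<EmptySink>(block.cloneEmpty());

    /// Output and input ports are connected pairwise, as in uniteExtremes() below.
    connect(source->getOutputs().front(), sink->getInputs().front());

    Processors processors{source, sink};
    PipelineExecutor executor(processors);
    executor.execute(1);  /// one thread is enough for this trivial graph
}

}
```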
diff --git a/dbms/src/Processors/Pipe.cpp b/src/Processors/Pipe.cpp
similarity index 89%
rename from dbms/src/Processors/Pipe.cpp
rename to src/Processors/Pipe.cpp
index 4461d714264..d9b21dbc854 100644
--- a/dbms/src/Processors/Pipe.cpp
+++ b/src/Processors/Pipe.cpp
@@ -1,5 +1,6 @@
 #include <Processors/Pipe.h>
 #include <IO/WriteHelpers.h>
+#include <Processors/Sources/SourceFromInputStream.h>
 
 namespace DB
 {
@@ -48,7 +49,7 @@ static void checkSource(const IProcessor & source)
         throw Exception("Source for pipe should have single output, but it doesn't have any",
                 ErrorCodes::LOGICAL_ERROR);
 
-    if (source.getOutputs().size() > 2)
+    if (source.getOutputs().size() > 1)
         throw Exception("Source for pipe should have single or two outputs, but " + source.getName() + " has " +
                         toString(source.getOutputs().size()) + " outputs.", ErrorCodes::LOGICAL_ERROR);
 }
@@ -56,18 +57,22 @@ static void checkSource(const IProcessor & source)
 
 Pipe::Pipe(ProcessorPtr source)
 {
-    checkSource(*source);
-    output_port = &source->getOutputs().front();
+    if (auto * source_from_input_stream = typeid_cast<SourceFromInputStream *>(source.get()))
+    {
+        totals = source_from_input_stream->getTotalsPort();
+        extremes = source_from_input_stream->getExtremesPort();
+    }
+    else if (source->getOutputs().size() != 1)
+        checkSource(*source);
 
-    if (source->getOutputs().size() > 1)
-        totals = &source->getOutputs().back();
+    output_port = &source->getOutputs().front();
 
     processors.emplace_back(std::move(source));
     max_parallel_streams = 1;
 }
 
-Pipe::Pipe(Processors processors_, OutputPort * output_port_, OutputPort * totals_)
-    : processors(std::move(processors_)), output_port(output_port_), totals(totals_)
+Pipe::Pipe(Processors processors_, OutputPort * output_port_, OutputPort * totals_, OutputPort * extremes_)
+    : processors(std::move(processors_)), output_port(output_port_), totals(totals_), extremes(extremes_)
 {
 }
 
diff --git a/dbms/src/Processors/Pipe.h b/src/Processors/Pipe.h
similarity index 94%
rename from dbms/src/Processors/Pipe.h
rename to src/Processors/Pipe.h
index 60715d986af..42bbd4e06d0 100644
--- a/dbms/src/Processors/Pipe.h
+++ b/src/Processors/Pipe.h
@@ -47,8 +47,11 @@ public:
 
     void enableQuota();
 
+    /// Totals and extremes port.
     void setTotalsPort(OutputPort * totals_) { totals = totals_; }
+    void setExtremesPort(OutputPort * extremes_) { extremes = extremes_; }
     OutputPort * getTotalsPort() const { return totals; }
+    OutputPort * getExtremesPort() const { return extremes; }
 
     size_t maxParallelStreams() const { return max_parallel_streams; }
 
@@ -67,6 +70,7 @@ private:
     Processors processors;
     OutputPort * output_port = nullptr;
     OutputPort * totals = nullptr;
+    OutputPort * extremes = nullptr;
 
     /// It is the max number of processors which can be executed in parallel for each step. See QueryPipeline::Streams.
     size_t max_parallel_streams = 0;
@@ -84,7 +88,7 @@ private:
     /// and therefore we can skip those checks.
    /// Note that Pipe represents a tree if it was created using public interface. But this constructor can't assert it.
    /// So, it's possible that TreeExecutorBlockInputStream could be unable to convert such Pipe to IBlockInputStream.
-    explicit Pipe(Processors processors_, OutputPort * output_port, OutputPort * totals);
+    explicit Pipe(Processors processors_, OutputPort * output_port, OutputPort * totals, OutputPort * extremes);
 
     friend class QueryPipeline;
 };
diff --git a/dbms/src/Processors/Port.cpp b/src/Processors/Port.cpp
similarity index 100%
rename from dbms/src/Processors/Port.cpp
rename to src/Processors/Port.cpp
diff --git a/dbms/src/Processors/Port.h b/src/Processors/Port.h
similarity index 99%
rename from dbms/src/Processors/Port.h
rename to src/Processors/Port.h
index e200b8c1ecb..63fef27e81a 100644
--- a/dbms/src/Processors/Port.h
+++ b/src/Processors/Port.h
@@ -30,7 +30,9 @@ class Port
 public:
     struct UpdateInfo
     {
-        std::vector<void *> * update_list = nullptr;
+        using UpdateList = std::vector<void *>;
+
+        UpdateList * update_list = nullptr;
         void * id = nullptr;
         UInt64 version = 0;
         UInt64 prev_version = 0;
diff --git a/dbms/src/Processors/QueryPipeline.cpp b/src/Processors/QueryPipeline.cpp
similarity index 83%
rename from dbms/src/Processors/QueryPipeline.cpp
rename to src/Processors/QueryPipeline.cpp
index ee6938a48a6..361334bfd80 100644
--- a/dbms/src/Processors/QueryPipeline.cpp
+++ b/src/Processors/QueryPipeline.cpp
@@ -60,6 +60,58 @@ void QueryPipeline::init(Pipe pipe)
     init(std::move(pipes));
 }
 
+static OutputPort * uniteExtremes(const std::vector<OutputPort *> & ports, const Block & header, Processors & processors)
+{
+    /// Here we calculate extremes for extremes in case we unite several pipelines.
+    /// Example: select number from numbers(2) union all select number from numbers(3)
+
+    /// ->> Resize -> Extremes --(output port)----> Null
+    ///                        --(extremes port)--> ...
+
+    auto resize = std::make_shared<ResizeProcessor>(header, ports.size(), 1);
+    auto extremes = std::make_shared<ExtremesTransform>(header);
+    auto sink = std::make_shared<EmptySink>(header);
+
+    auto * extremes_port = &extremes->getExtremesPort();
+
+    auto in = resize->getInputs().begin();
+    for (auto & port : ports)
+        connect(*port, *(in++));
+
+    connect(resize->getOutputs().front(), extremes->getInputPort());
+    connect(extremes->getOutputPort(), sink->getPort());
+
+    processors.emplace_back(std::move(resize));
+    processors.emplace_back(std::move(extremes));
+    processors.emplace_back(std::move(sink));
+
+    return extremes_port;
+}
+
+static OutputPort * uniteTotals(const std::vector<OutputPort *> & ports, const Block & header, Processors & processors)
+{
+    /// Calculate totals for several streams.
+    /// Take totals from the first source which has any; skip the others.
+
+    /// ->> Concat -> Limit
+
+    auto concat = std::make_shared<ConcatProcessor>(header, ports.size());
+    auto limit = std::make_shared<LimitTransform>(header, 1, 0);
+
+    auto * totals_port = &limit->getOutputPort();
+
+    auto in = concat->getInputs().begin();
+    for (auto & port : ports)
+        connect(*port, *(in++));
+
+    connect(concat->getOutputs().front(), limit->getInputPort());
+
+    processors.emplace_back(std::move(concat));
+    processors.emplace_back(std::move(limit));
+
+    return totals_port;
+}
+
 void QueryPipeline::init(Pipes pipes)
 {
     if (initialized())
@@ -82,6 +134,7 @@ void QueryPipeline::init(Pipes pipes)
     }
 
     std::vector<OutputPort *> totals;
+    std::vector<OutputPort *> extremes;
 
     for (auto & pipe : pipes)
     {
@@ -98,6 +151,12 @@ void QueryPipeline::init(Pipes pipes)
             totals.emplace_back(totals_port);
         }
 
+        if (auto * port = pipe.getExtremesPort())
+        {
+            assertBlocksHaveEqualStructure(current_header, port->getHeader(), "QueryPipeline");
+            extremes.emplace_back(port);
+        }
+
         streams.addStream(&pipe.getPort(), pipe.maxParallelStreams());
         auto cur_processors = std::move(pipe).detachProcessors();
         processors.insert(processors.end(), cur_processors.begin(), cur_processors.end());
@@ -108,15 +167,15 @@ void QueryPipeline::init(Pipes pipes)
         if (totals.size() == 1)
             totals_having_port = totals.back();
         else
-        {
-            auto resize = std::make_shared<ResizeProcessor>(current_header, totals.size(), 1);
-            auto in = resize->getInputs().begin();
-            for (auto & total : totals)
-                connect(*total, *(in++));
+            totals_having_port = uniteTotals(totals, current_header, processors);
+    }
 
-            totals_having_port = &resize->getOutputs().front();
-            processors.emplace_back(std::move(resize));
-        }
+    if (!extremes.empty())
+    {
+        if (extremes.size() == 1)
+            extremes_port = extremes.back();
+        else
+            extremes_port = uniteExtremes(extremes, current_header, processors);
     }
 }
 
@@ -356,29 +415,31 @@ void QueryPipeline::dropTotalsIfHas()
     }
 }
 
-void QueryPipeline::addExtremesTransform(ProcessorPtr transform)
+void QueryPipeline::addExtremesTransform()
 {
     checkInitialized();
 
-    if (!typeid_cast<const ExtremesTransform *>(transform.get()))
-        throw Exception("ExtremesTransform expected for QueryPipeline::addExtremesTransform.",
-                ErrorCodes::LOGICAL_ERROR);
-
     if (extremes_port)
         throw Exception("Extremes transform was already added to pipeline.", ErrorCodes::LOGICAL_ERROR);
 
-    if (getNumStreams() != 1)
-        throw Exception("Cant't add Extremes transform because pipeline is expected to have single stream, "
-                        "but it has " + toString(getNumStreams()) + " streams.", ErrorCodes::LOGICAL_ERROR);
+    std::vector<OutputPort *> extremes;
+    extremes.reserve(streams.size());
 
-    connect(*streams.front(), transform->getInputs().front());
+    for (auto & stream : streams)
+    {
+        auto transform = std::make_shared<ExtremesTransform>(current_header);
+        connect(*stream, transform->getInputPort());
 
-    auto & outputs = transform->getOutputs();
+        stream = &transform->getOutputPort();
+        extremes.push_back(&transform->getExtremesPort());
 
-    streams.assign({ &outputs.front() });
-    extremes_port = &outputs.back();
-    current_header = outputs.front().getHeader();
-    processors.emplace_back(std::move(transform));
+        processors.emplace_back(std::move(transform));
+    }
+
+    if (extremes.size() == 1)
+        extremes_port = extremes.front();
+    else
+        extremes_port = uniteExtremes(extremes, current_header, processors);
 }
 
 void QueryPipeline::addCreatingSetsTransform(ProcessorPtr transform)
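Why routing the per-pipeline extremes through one more ExtremesTransform is correct: the united extremes are simply the min of mins and the max of maxes, column by column. A plain-C++ sketch of that property; the types and names here are illustrative, not the repo's API:

```cpp
// Plain-C++ model of "extremes for extremes": uniting per-pipeline (min, max)
// pairs reduces to taking the min of mins and the max of maxes.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Extremes { int64_t min; int64_t max; };

Extremes uniteExtremesOf(const std::vector<Extremes> & parts)
{
    Extremes result = parts.front();
    for (const auto & part : parts)
    {
        result.min = std::min(result.min, part.min);
        result.max = std::max(result.max, part.max);
    }
    return result;
}

// For "select number from numbers(2) union all select number from numbers(3)":
// uniteExtremesOf({{0, 1}, {0, 2}}) == {0, 2}.
```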
@@ -444,17 +505,24 @@ void QueryPipeline::setOutput(ProcessorPtr output)
 }
 
 void QueryPipeline::unitePipelines(
-    std::vector<QueryPipeline> && pipelines, const Block & common_header, const Context & context)
+    std::vector<QueryPipeline> && pipelines, const Block & common_header)
 {
     checkInitialized();
 
     addSimpleTransform([&](const Block & header)
     {
         return std::make_shared<ConvertingTransform>(
-                header, common_header, ConvertingTransform::MatchColumnsMode::Position, context);
+                header, common_header, ConvertingTransform::MatchColumnsMode::Position);
     });
 
     std::vector<OutputPort *> extremes;
+    std::vector<OutputPort *> totals;
+
+    if (extremes_port)
+        extremes.push_back(extremes_port);
+
+    if (totals_having_port)
+        totals.push_back(totals_having_port);
 
     for (auto & pipeline : pipelines)
     {
@@ -463,13 +531,13 @@ void QueryPipeline::unitePipelines(
         pipeline.addSimpleTransform([&](const Block & header)
         {
             return std::make_shared<ConvertingTransform>(
-                    header, common_header, ConvertingTransform::MatchColumnsMode::Position, context);
+                    header, common_header, ConvertingTransform::MatchColumnsMode::Position);
         });
 
         if (pipeline.extremes_port)
         {
             auto converting = std::make_shared<ConvertingTransform>(
-                pipeline.current_header, common_header, ConvertingTransform::MatchColumnsMode::Position, context);
+                pipeline.current_header, common_header, ConvertingTransform::MatchColumnsMode::Position);
 
             connect(*pipeline.extremes_port, converting->getInputPort());
             extremes.push_back(&converting->getOutputPort());
@@ -479,17 +547,12 @@ void QueryPipeline::unitePipelines(
         /// Take totals only from first port.
         if (pipeline.totals_having_port)
         {
-            if (!totals_having_port)
-            {
-                auto converting = std::make_shared<ConvertingTransform>(
-                    pipeline.current_header, common_header, ConvertingTransform::MatchColumnsMode::Position, context);
+            auto converting = std::make_shared<ConvertingTransform>(
+                pipeline.current_header, common_header, ConvertingTransform::MatchColumnsMode::Position);
 
-                connect(*pipeline.totals_having_port, converting->getInputPort());
-                totals_having_port = &converting->getOutputPort();
-                processors.push_back(std::move(converting));
-            }
-            else
-                pipeline.dropTotalsIfHas();
+            connect(*pipeline.totals_having_port, converting->getInputPort());
+            totals.push_back(&converting->getOutputPort());
+            processors.push_back(std::move(converting));
         }
 
         processors.insert(processors.end(), pipeline.processors.begin(), pipeline.processors.end());
@@ -504,28 +567,18 @@ void QueryPipeline::unitePipelines(
 
     if (!extremes.empty())
     {
-        size_t num_inputs = extremes.size() + (extremes_port ? 1u : 0u);
-
-        if (num_inputs == 1)
-            extremes_port = extremes.front();
+        if (extremes.size() == 1)
+            extremes_port = extremes.back();
         else
-        {
-            /// Add extra processor for extremes.
-            auto resize = std::make_shared<ResizeProcessor>(current_header, num_inputs, 1);
-            auto input = resize->getInputs().begin();
+            extremes_port = uniteExtremes(extremes, current_header, processors);
+    }
 
-            if (extremes_port)
-                connect(*extremes_port, *(input++));
-
-            for (auto & output : extremes)
-                connect(*output, *(input++));
-
-            auto transform = std::make_shared<ExtremesTransform>(current_header);
-            extremes_port = &transform->getOutputPort();
-
-            connect(resize->getOutputs().front(), transform->getInputPort());
-            processors.emplace_back(std::move(transform));
-        }
+    if (!totals.empty())
+    {
+        if (totals.size() == 1)
+            totals_having_port = totals.back();
+        else
+            totals_having_port = uniteTotals(totals, current_header, processors);
     }
 }
 
@@ -644,7 +697,7 @@ void QueryPipeline::initRowsBeforeLimit()
 Pipe QueryPipeline::getPipe() &&
 {
     resize(1);
-    Pipe pipe(std::move(processors), streams.at(0), totals_having_port);
+    Pipe pipe(std::move(processors), streams.at(0), totals_having_port, extremes_port);
     pipe.max_parallel_streams = streams.maxParallelStreams();
 
     for (auto & lock : table_locks)
@@ -659,6 +712,9 @@ Pipe QueryPipeline::getPipe() &&
     if (totals_having_port)
         pipe.setTotalsPort(totals_having_port);
 
+    if (extremes_port)
+        pipe.setExtremesPort(extremes_port);
+
     return pipe;
 }
 
diff --git a/dbms/src/Processors/QueryPipeline.h b/src/Processors/QueryPipeline.h
similarity index 98%
rename from dbms/src/Processors/QueryPipeline.h
rename to src/Processors/QueryPipeline.h
index 9ce12e75b91..4c9c6e97bde 100644
--- a/dbms/src/Processors/QueryPipeline.h
+++ b/src/Processors/QueryPipeline.h
@@ -99,7 +99,7 @@ public:
     void addSimpleTransform(const ProcessorGetterWithStreamKind & getter);
     void addPipe(Processors pipe);
     void addTotalsHavingTransform(ProcessorPtr transform);
-    void addExtremesTransform(ProcessorPtr transform);
+    void addExtremesTransform();
     void addCreatingSetsTransform(ProcessorPtr transform);
     void setOutput(ProcessorPtr output);
 
@@ -121,7 +121,7 @@ public:
 
     void enableQuotaForCurrentStreams();
 
-    void unitePipelines(std::vector<QueryPipeline> && pipelines, const Block & common_header, const Context & context);
+    void unitePipelines(std::vector<QueryPipeline> && pipelines, const Block & common_header);
 
     PipelineExecutorPtr execute();
 
diff --git a/dbms/src/Processors/QueueBuffer.h b/src/Processors/QueueBuffer.h
similarity index 100%
rename from dbms/src/Processors/QueueBuffer.h
rename to src/Processors/QueueBuffer.h
diff --git a/dbms/src/Processors/ResizeProcessor.cpp b/src/Processors/ResizeProcessor.cpp
similarity index 100%
rename from dbms/src/Processors/ResizeProcessor.cpp
rename to src/Processors/ResizeProcessor.cpp
diff --git a/dbms/src/Processors/ResizeProcessor.h b/src/Processors/ResizeProcessor.h
similarity index 100%
rename from dbms/src/Processors/ResizeProcessor.h
rename to src/Processors/ResizeProcessor.h
diff --git a/dbms/src/Processors/RowsBeforeLimitCounter.h b/src/Processors/RowsBeforeLimitCounter.h
similarity index 96%
rename from dbms/src/Processors/RowsBeforeLimitCounter.h
rename to src/Processors/RowsBeforeLimitCounter.h
index abee5a09405..36ea4a557a8 100644
--- a/dbms/src/Processors/RowsBeforeLimitCounter.h
+++ b/src/Processors/RowsBeforeLimitCounter.h
@@ -17,7 +17,7 @@ public:
 
     uint64_t get() const { return rows_before_limit.load(std::memory_order_acquire); }
 
-    void setAppliedLimit() { has_applied_limit.store(true, std::memory_order::release); }
+    void setAppliedLimit() { has_applied_limit.store(true, std::memory_order_release); }
     bool hasAppliedLimit() const { return has_applied_limit.load(std::memory_order_acquire); }
 
 private:
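The RowsBeforeLimitCounter hunk swaps `std::memory_order::release` (the C++20 scoped-enum spelling) for the C++11 constant `std::memory_order_release`; the acquire/release pairing itself is unchanged. A minimal sketch of that publish/observe pattern:

```cpp
// Minimal release/acquire flag, mirroring RowsBeforeLimitCounter: the writer
// publishes with release, readers observe with acquire. The C++11 spelling
// std::memory_order_release compiles under any standard the codebase targets.
#include <atomic>

class AppliedLimitFlag
{
public:
    void set() { applied.store(true, std::memory_order_release); }
    bool get() const { return applied.load(std::memory_order_acquire); }

private:
    std::atomic<bool> applied{false};
};
```

diff --git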
a/dbms/src/Processors/Sources/NullSource.h b/src/Processors/Sources/NullSource.h
similarity index 100%
rename from dbms/src/Processors/Sources/NullSource.h
rename to src/Processors/Sources/NullSource.h
diff --git a/dbms/src/Processors/Sources/SinkToOutputStream.cpp b/src/Processors/Sources/SinkToOutputStream.cpp
similarity index 100%
rename from dbms/src/Processors/Sources/SinkToOutputStream.cpp
rename to src/Processors/Sources/SinkToOutputStream.cpp
diff --git a/dbms/src/Processors/Sources/SinkToOutputStream.h b/src/Processors/Sources/SinkToOutputStream.h
similarity index 100%
rename from dbms/src/Processors/Sources/SinkToOutputStream.h
rename to src/Processors/Sources/SinkToOutputStream.h
diff --git a/src/Processors/Sources/SourceFromInputStream.cpp b/src/Processors/Sources/SourceFromInputStream.cpp
new file mode 100644
index 00000000000..e34fbd359ae
--- /dev/null
+++ b/src/Processors/Sources/SourceFromInputStream.cpp
@@ -0,0 +1,200 @@
+#include <Processors/Sources/SourceFromInputStream.h>
+#include <Processors/Transforms/AggregatingTransform.h>
+#include <DataTypes/DataTypeAggregateFunction.h>
+#include <DataStreams/RemoteBlockInputStream.h>
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+SourceFromInputStream::SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info_)
+    : ISourceWithProgress(stream_->getHeader())
+    , force_add_aggregating_info(force_add_aggregating_info_)
+    , stream(std::move(stream_))
+{
+    init();
+}
+
+void SourceFromInputStream::init()
+{
+    auto & sample = getPort().getHeader();
+    for (auto & type : sample.getDataTypes())
+        if (typeid_cast<const DataTypeAggregateFunction *>(type.get()))
+            has_aggregate_functions = true;
+}
+
+void SourceFromInputStream::addTotalsPort()
+{
+    if (totals_port)
+        throw Exception("Totals port was already added for SourceFromInputStream.", ErrorCodes::LOGICAL_ERROR);
+
+    outputs.emplace_back(outputs.front().getHeader(), this);
+    totals_port = &outputs.back();
+}
+
+void SourceFromInputStream::addExtremesPort()
+{
+    if (extremes_port)
+        throw Exception("Extremes port was already added for SourceFromInputStream.", ErrorCodes::LOGICAL_ERROR);
+
+    outputs.emplace_back(outputs.front().getHeader(), this);
+    extremes_port = &outputs.back();
+}
+
+IProcessor::Status SourceFromInputStream::prepare()
+{
+    auto status = ISource::prepare();
+
+    if (status == Status::Finished)
+    {
+        is_generating_finished = true;
+
+        /// Read postfix and get totals if needed.
+        if (!is_stream_finished && !isCancelled())
+            return Status::Ready;
+
+        if (totals_port && !totals_port->isFinished())
+        {
+            if (has_totals)
+            {
+                if (!totals_port->canPush())
+                    return Status::PortFull;
+
+                totals_port->push(std::move(totals));
+                has_totals = false;
+            }
+
+            totals_port->finish();
+        }
+
+        if (extremes_port && !extremes_port->isFinished())
+        {
+            if (has_extremes)
+            {
+                if (!extremes_port->canPush())
+                    return Status::PortFull;
+
+                extremes_port->push(std::move(extremes));
+                has_extremes = false;
+            }
+
+            extremes_port->finish();
+        }
+    }
+
+    return status;
+}
+
+void SourceFromInputStream::work()
+{
+    if (!is_generating_finished)
+    {
+        try
+        {
+            ISource::work();
+        }
+        catch (...)
+        {
+            /// Won't read suffix in case of exception.
+            is_stream_finished = true;
+            throw;
+        }
+
+        return;
+    }
+
+    if (is_stream_finished)
+        return;
+
+    /// Don't cancel for RemoteBlockInputStream (otherwise readSuffix can get stuck).
+    if (!typeid_cast<RemoteBlockInputStream *>(stream.get()))
+        stream->cancel(false);
+
+    if (rows_before_limit)
+    {
+        auto & info = stream->getProfileInfo();
+        if (info.hasAppliedLimit())
+            rows_before_limit->add(info.getRowsBeforeLimit());
+    }
+
+    stream->readSuffix();
+
+    if (auto totals_block = stream->getTotals())
+    {
+        totals.setColumns(totals_block.getColumns(), 1);
+        has_totals = true;
+    }
+
+    is_stream_finished = true;
+}
+
+Chunk SourceFromInputStream::generate()
+{
+    if (is_stream_finished)
+        return {};
+
+    if (!is_stream_started)
+    {
+        stream->readPrefix();
+        is_stream_started = true;
+    }
+
+    auto block = stream->read();
+    if (!block && !isCancelled())
+    {
+        if (rows_before_limit)
+        {
+            auto & info = stream->getProfileInfo();
+            if (info.hasAppliedLimit())
+                rows_before_limit->add(info.getRowsBeforeLimit());
+        }
+
+        stream->readSuffix();
+
+        if (auto totals_block = stream->getTotals())
+        {
+            if (totals_block.rows() > 0) /// Sometimes we can get empty totals. Skip it.
+            {
+                totals.setColumns(totals_block.getColumns(), totals_block.rows());
+                has_totals = true;
+            }
+        }
+
+        if (auto extremes_block = stream->getExtremes())
+        {
+            if (extremes_block.rows() > 0) /// Sometimes we can get empty extremes. Skip it.
+            {
+                extremes.setColumns(extremes_block.getColumns(), extremes_block.rows());
+                has_extremes = true;
+            }
+        }
+
+        is_stream_finished = true;
+        return {};
+    }
+
+    if (isCancelled())
+        return {};
+
+#ifndef NDEBUG
+    assertBlocksHaveEqualStructure(getPort().getHeader(), block, "SourceFromInputStream");
+#endif
+
+    UInt64 num_rows = block.rows();
+    Chunk chunk(block.getColumns(), num_rows);
+
+    if (force_add_aggregating_info || has_aggregate_functions)
+    {
+        auto info = std::make_shared<AggregatedChunkInfo>();
+        info->bucket_num = block.info.bucket_num;
+        info->is_overflows = block.info.is_overflows;
+        chunk.setChunkInfo(std::move(info));
+    }
+
+    return chunk;
+}
+
+}
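SourceFromInputStream only learns its totals and extremes after the wrapped stream has been fully read, so it buffers them and flushes them from dedicated ports in prepare(). A toy model of that "drain, then harvest" contract; the types are illustrative, not the IBlockInputStream API:

```cpp
// Toy model: data rows come first; totals become available only once the
// stream is exhausted, so they are harvested after the read loop.
#include <optional>
#include <vector>

struct ToyStream
{
    std::vector<int> rows{1, 2, 3};
    size_t pos = 0;

    std::optional<int> read() { return pos < rows.size() ? std::optional<int>(rows[pos++]) : std::nullopt; }
    int totals() const { return 1 + 2 + 3; }  // meaningful only after read() returns nullopt
};

void drain(ToyStream & stream, std::vector<int> & data_out, std::optional<int> & totals_out)
{
    while (auto row = stream.read())
        data_out.push_back(*row);
    totals_out = stream.totals();  // harvest once the stream is exhausted
}
```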
diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.h b/src/Processors/Sources/SourceFromInputStream.h
similarity index 79%
rename from dbms/src/Processors/Sources/SourceFromInputStream.h
rename to src/Processors/Sources/SourceFromInputStream.h
index 0fc92164059..88a045e65a2 100644
--- a/dbms/src/Processors/Sources/SourceFromInputStream.h
+++ b/src/Processors/Sources/SourceFromInputStream.h
@@ -23,6 +23,10 @@ public:
     BlockInputStreamPtr & getStream() { return stream; }
 
     void addTotalsPort();
+    void addExtremesPort();
+
+    OutputPort * getTotalsPort() const { return totals_port; }
+    OutputPort * getExtremesPort() const { return extremes_port; }
 
     void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) { rows_before_limit.swap(counter); }
 
@@ -33,6 +37,13 @@ public:
     void setProgressCallback(const ProgressCallback & callback) final { stream->setProgressCallback(callback); }
     void addTotalRowsApprox(size_t value) final { stream->addTotalRowsApprox(value); }
 
+    /// Stop reading from stream if output port is finished.
+    void onUpdatePorts() override
+    {
+        if (getPort().isFinished())
+            cancel();
+    }
+
 protected:
     void onCancel() override { stream->cancel(false); }
 
@@ -44,9 +55,13 @@ private:
     RowsBeforeLimitCounterPtr rows_before_limit;
 
     Chunk totals;
-    bool has_totals_port = false;
+    OutputPort * totals_port = nullptr;
     bool has_totals = false;
 
+    Chunk extremes;
+    OutputPort * extremes_port = nullptr;
+    bool has_extremes = false;
+
     bool is_generating_finished = false;
     bool is_stream_finished = false;
     bool is_stream_started = false;
diff --git a/dbms/src/Processors/Sources/SourceFromSingleChunk.h b/src/Processors/Sources/SourceFromSingleChunk.h
similarity index 100%
rename from dbms/src/Processors/Sources/SourceFromSingleChunk.h
rename to src/Processors/Sources/SourceFromSingleChunk.h
diff --git a/dbms/src/Processors/Sources/SourceWithProgress.cpp b/src/Processors/Sources/SourceWithProgress.cpp
similarity index 100%
rename from dbms/src/Processors/Sources/SourceWithProgress.cpp
rename to src/Processors/Sources/SourceWithProgress.cpp
diff --git a/dbms/src/Processors/Sources/SourceWithProgress.h b/src/Processors/Sources/SourceWithProgress.h
similarity index 100%
rename from dbms/src/Processors/Sources/SourceWithProgress.h
rename to src/Processors/Sources/SourceWithProgress.h
diff --git a/dbms/src/Processors/Transforms/AddingConstColumnTransform.h b/src/Processors/Transforms/AddingConstColumnTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/AddingConstColumnTransform.h
rename to src/Processors/Transforms/AddingConstColumnTransform.h
diff --git a/dbms/src/Processors/Transforms/AddingMissedTransform.cpp b/src/Processors/Transforms/AddingMissedTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/AddingMissedTransform.cpp
rename to src/Processors/Transforms/AddingMissedTransform.cpp
diff --git a/dbms/src/Processors/Transforms/AddingMissedTransform.h b/src/Processors/Transforms/AddingMissedTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/AddingMissedTransform.h
rename to src/Processors/Transforms/AddingMissedTransform.h
diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp
similarity index 98%
rename from dbms/src/Processors/Transforms/AggregatingTransform.cpp
rename to src/Processors/Transforms/AggregatingTransform.cpp
index 4a76e0f2273..78607323996 100644
--- a/dbms/src/Processors/Transforms/AggregatingTransform.cpp
+++ b/src/Processors/Transforms/AggregatingTransform.cpp
@@ -413,6 +413,9 @@ AggregatingTransform::~AggregatingTransform() = default;
 
 IProcessor::Status AggregatingTransform::prepare()
 {
+    /// There are one or two input ports.
+    /// The first one is used at the aggregation step, the second one - while reading merged data from ConvertingAggregatedToChunksTransform.
+
     auto & output = outputs.front();
     /// Last output is current. All other outputs should already be closed.
     auto & input = inputs.back();
@@ -432,7 +435,12 @@ IProcessor::Status AggregatingTransform::prepare()
 
     /// Finish data processing, prepare to generating.
     if (is_consume_finished && !is_generate_initialized)
+    {
+        /// Close input port in case max_rows_to_group_by was reached but not all data was read.
+        inputs.front().close();
+
         return Status::Ready;
+    }
 
     if (is_generate_initialized && !is_pipeline_created && !processors.empty())
         return Status::ExpandPipeline;
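The prepare() change above reads more easily against the general IProcessor contract: prepare() is a cheap, non-blocking decision step, and work() does the heavy lifting. A toy processor illustrating that split; this is not the repo's API, just the shape of the state machine:

```cpp
// Toy state machine mirroring the prepare()/work() split used by IProcessor
// implementations such as AggregatingTransform. Illustrative only.
#include <optional>

enum class Status { NeedData, Ready, Finished };

struct ToyProcessor
{
    bool input_finished = false;
    std::optional<int> pending;

    Status prepare()  // cheap: only decides what happens next
    {
        if (pending)
            return Status::Ready;     // let the scheduler call work()
        if (input_finished)
            return Status::Finished;  // nothing left to do
        return Status::NeedData;      // ask upstream for more input
    }

    void work()  // heavy: consumes the pending value
    {
        pending.reset();
    }
};
```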
diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h
new file mode 100644
index 00000000000..9c1e9d4e2db
--- /dev/null
+++ b/src/Processors/Transforms/AggregatingTransform.h
@@ -0,0 +1,120 @@
+#pragma once
+#include <Processors/IProcessor.h>
+#include <Interpreters/Aggregator.h>
+#include <IO/ReadBufferFromFile.h>
+#include <Compression/CompressedReadBuffer.h>
+#include <Common/Stopwatch.h>
+
+namespace DB
+{
+
+class AggregatedChunkInfo : public ChunkInfo
+{
+public:
+    bool is_overflows = false;
+    Int32 bucket_num = -1;
+};
+
+class IBlockInputStream;
+using BlockInputStreamPtr = std::shared_ptr<IBlockInputStream>;
+
+struct AggregatingTransformParams
+{
+    Aggregator::Params params;
+    Aggregator aggregator;
+    bool final;
+
+    AggregatingTransformParams(const Aggregator::Params & params_, bool final_)
+        : params(params_), aggregator(params), final(final_) {}
+
+    Block getHeader() const { return aggregator.getHeader(final); }
+};
+
+struct ManyAggregatedData
+{
+    ManyAggregatedDataVariants variants;
+    std::vector<std::unique_ptr<std::mutex>> mutexes;
+    std::atomic<UInt32> num_finished = 0;
+
+    explicit ManyAggregatedData(size_t num_threads = 0) : variants(num_threads), mutexes(num_threads)
+    {
+        for (auto & elem : variants)
+            elem = std::make_shared<AggregatedDataVariants>();
+
+        for (auto & mut : mutexes)
+            mut = std::make_unique<std::mutex>();
+    }
+};
+
+using AggregatingTransformParamsPtr = std::shared_ptr<AggregatingTransformParams>;
+using ManyAggregatedDataPtr = std::shared_ptr<ManyAggregatedData>;
+
+/** Aggregates the stream of blocks using the specified key columns and aggregate functions.
+  * Columns with aggregate functions are added to the end of the block.
+  * If final = false, the aggregate functions are not finalized, that is, they are not replaced by their value, but contain an intermediate state of calculations.
+  * This is necessary so that aggregation can continue (for example, by combining streams of partially aggregated data).
+  *
+  * For every separate stream of data a separate AggregatingTransform is created.
+  * Every AggregatingTransform reads data from the first port until it runs out, or until max_rows_to_group_by is reached.
+  * When the last AggregatingTransform finishes reading, the result of aggregation needs to be merged together.
+  * This task is performed by ConvertingAggregatedToChunksTransform.
+  * The last AggregatingTransform expands the pipeline and adds a second input port, which reads from ConvertingAggregatedToChunksTransform.
+  *
+  * Aggregation data is passed via the ManyAggregatedData structure, which is shared between all aggregating transforms.
+  * At the aggregation step, every transform uses its own AggregatedDataVariants structure.
+  * At the merging step, all structures are passed to ConvertingAggregatedToChunksTransform.
+  */
+class AggregatingTransform : public IProcessor
+{
+public:
+    AggregatingTransform(Block header, AggregatingTransformParamsPtr params_);
+
+    /// For Parallel aggregating.
+    AggregatingTransform(Block header, AggregatingTransformParamsPtr params_,
+                         ManyAggregatedDataPtr many_data, size_t current_variant,
+                         size_t temporary_data_merge_threads, size_t max_threads);
+    ~AggregatingTransform() override;
+
+    String getName() const override { return "AggregatingTransform"; }
+    Status prepare() override;
+    void work() override;
+    Processors expandPipeline() override;
+
+protected:
+    void consume(Chunk chunk);
+
+private:
+    /// To read the data that was flushed into the temporary data file.
+    Processors processors;
+
+    AggregatingTransformParamsPtr params;
+    Logger * log = &Logger::get("AggregatingTransform");
+
+    ColumnRawPtrs key_columns;
+    Aggregator::AggregateColumns aggregate_columns;
+    bool no_more_keys = false;
+
+    ManyAggregatedDataPtr many_data;
+    AggregatedDataVariants & variants;
+    size_t max_threads = 1;
+    size_t temporary_data_merge_threads = 1;
+
+    /// TODO: calculate time only for aggregation.
+    Stopwatch watch;
+
+    UInt64 src_rows = 0;
+    UInt64 src_bytes = 0;
+
+    bool is_generate_initialized = false;
+    bool is_consume_finished = false;
+    bool is_pipeline_created = false;
+
+    Chunk current_chunk;
+    bool read_current_chunk = false;
+
+    bool is_consume_started = false;
+
+    void initGenerate();
+};
+
+}
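A plain-C++ analogue of the flow the header comment describes: each stream aggregates into its own partial state (the analogue of one AggregatedDataVariants), and a final step merges the intermediate states, which is exactly what final = false makes possible. Names here are illustrative, not the repo's API:

```cpp
// Each "stream" fills its own partial state; a final pass merges the
// intermediate states (in the real pipeline this merge is performed by
// ConvertingAggregatedToChunksTransform). Illustrative only.
#include <cstdint>
#include <unordered_map>
#include <vector>

using PartialState = std::unordered_map<uint64_t, uint64_t>;  // key -> count

PartialState mergePartialStates(const std::vector<PartialState> & partials)
{
    PartialState merged;
    for (const auto & state : partials)
        for (const auto & [key, count] : state)
            merged[key] += count;  // combine intermediate states
    return merged;
}
```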
diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.cpp b/src/Processors/Transforms/ConvertingTransform.cpp
similarity index 93%
rename from dbms/src/Processors/Transforms/ConvertingTransform.cpp
rename to src/Processors/Transforms/ConvertingTransform.cpp
index e801fe7cb26..e059606ee92 100644
--- a/dbms/src/Processors/Transforms/ConvertingTransform.cpp
+++ b/src/Processors/Transforms/ConvertingTransform.cpp
@@ -18,12 +18,11 @@ namespace ErrorCodes
 
 static ColumnPtr castColumnWithDiagnostic(
     const ColumnWithTypeAndName & src_elem,
-    const ColumnWithTypeAndName & res_elem,
-    const Context & context)
+    const ColumnWithTypeAndName & res_elem)
 {
     try
     {
-        return castColumn(src_elem, res_elem.type, context);
+        return castColumn(src_elem, res_elem.type);
     }
     catch (Exception & e)
     {
@@ -36,10 +35,8 @@ static ColumnPtr castColumnWithDiagnostic(
 ConvertingTransform::ConvertingTransform(
     Block source_header_,
     Block result_header_,
-    MatchColumnsMode mode_,
-    const Context & context_)
+    MatchColumnsMode mode_)
     : ISimpleTransform(std::move(source_header_), std::move(result_header_), false)
-    , context(context_)
     , conversion(getOutputPort().getHeader().columns())
 {
     auto & source = getInputPort().getHeader();
@@ -91,7 +88,7 @@ ConvertingTransform::ConvertingTransform(
 
         /// Check conversion by dry run CAST function.
-        castColumnWithDiagnostic(src_elem, res_elem, context);
+        castColumnWithDiagnostic(src_elem, res_elem);
     }
 }
 
@@ -114,7 +111,7 @@ void ConvertingTransform::transform(Chunk & chunk)
         src_elem.column = src_columns[conversion[res_pos]];
         auto res_elem = result.getByPosition(res_pos);
 
-        ColumnPtr converted = castColumnWithDiagnostic(src_elem, res_elem, context);
+        ColumnPtr converted = castColumnWithDiagnostic(src_elem, res_elem);
 
         if (!isColumnConst(*res_elem.column))
             converted = converted->convertToFullColumnIfConst();
 
diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.h b/src/Processors/Transforms/ConvertingTransform.h
similarity index 92%
rename from dbms/src/Processors/Transforms/ConvertingTransform.h
rename to src/Processors/Transforms/ConvertingTransform.h
index b2412802ed6..45a6688c07a 100644
--- a/dbms/src/Processors/Transforms/ConvertingTransform.h
+++ b/src/Processors/Transforms/ConvertingTransform.h
@@ -31,8 +31,7 @@ public:
     ConvertingTransform(
         Block source_header_,
         Block result_header_,
-        MatchColumnsMode mode_,
-        const Context & context_);
+        MatchColumnsMode mode_);
 
     String getName() const override { return "Converting"; }
 
@@ -40,8 +39,6 @@ protected:
     void transform(Chunk & chunk) override;
 
 private:
-    const Context & context;
-
     /// How to construct result block. Position in source block, where to get each column.
ColumnNumbers conversion; }; diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp b/src/Processors/Transforms/CreatingSetsTransform.cpp similarity index 99% rename from dbms/src/Processors/Transforms/CreatingSetsTransform.cpp rename to src/Processors/Transforms/CreatingSetsTransform.cpp index 79ceae64d92..f5637a21ede 100644 --- a/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp +++ b/src/Processors/Transforms/CreatingSetsTransform.cpp @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.h b/src/Processors/Transforms/CreatingSetsTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/CreatingSetsTransform.h rename to src/Processors/Transforms/CreatingSetsTransform.h diff --git a/dbms/src/Processors/Transforms/CubeTransform.cpp b/src/Processors/Transforms/CubeTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/CubeTransform.cpp rename to src/Processors/Transforms/CubeTransform.cpp diff --git a/dbms/src/Processors/Transforms/CubeTransform.h b/src/Processors/Transforms/CubeTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/CubeTransform.h rename to src/Processors/Transforms/CubeTransform.h diff --git a/dbms/src/Processors/Transforms/DistinctTransform.cpp b/src/Processors/Transforms/DistinctTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/DistinctTransform.cpp rename to src/Processors/Transforms/DistinctTransform.cpp diff --git a/dbms/src/Processors/Transforms/DistinctTransform.h b/src/Processors/Transforms/DistinctTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/DistinctTransform.h rename to src/Processors/Transforms/DistinctTransform.h diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.cpp b/src/Processors/Transforms/ExpressionTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/ExpressionTransform.cpp rename to src/Processors/Transforms/ExpressionTransform.cpp diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.h b/src/Processors/Transforms/ExpressionTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/ExpressionTransform.h rename to src/Processors/Transforms/ExpressionTransform.h diff --git a/dbms/src/Processors/Transforms/ExtremesTransform.cpp b/src/Processors/Transforms/ExtremesTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/ExtremesTransform.cpp rename to src/Processors/Transforms/ExtremesTransform.cpp diff --git a/dbms/src/Processors/Transforms/ExtremesTransform.h b/src/Processors/Transforms/ExtremesTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/ExtremesTransform.h rename to src/Processors/Transforms/ExtremesTransform.h diff --git a/dbms/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/FillingTransform.cpp rename to src/Processors/Transforms/FillingTransform.cpp diff --git a/dbms/src/Processors/Transforms/FillingTransform.h b/src/Processors/Transforms/FillingTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/FillingTransform.h rename to src/Processors/Transforms/FillingTransform.h diff --git a/dbms/src/Processors/Transforms/FilterTransform.cpp b/src/Processors/Transforms/FilterTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/FilterTransform.cpp rename 
to src/Processors/Transforms/FilterTransform.cpp diff --git a/dbms/src/Processors/Transforms/FilterTransform.h b/src/Processors/Transforms/FilterTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/FilterTransform.h rename to src/Processors/Transforms/FilterTransform.h diff --git a/dbms/src/Processors/Transforms/FinishSortingTransform.cpp b/src/Processors/Transforms/FinishSortingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/FinishSortingTransform.cpp rename to src/Processors/Transforms/FinishSortingTransform.cpp diff --git a/dbms/src/Processors/Transforms/FinishSortingTransform.h b/src/Processors/Transforms/FinishSortingTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/FinishSortingTransform.h rename to src/Processors/Transforms/FinishSortingTransform.h diff --git a/dbms/src/Processors/Transforms/InflatingExpressionTransform.cpp b/src/Processors/Transforms/InflatingExpressionTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/InflatingExpressionTransform.cpp rename to src/Processors/Transforms/InflatingExpressionTransform.cpp diff --git a/dbms/src/Processors/Transforms/InflatingExpressionTransform.h b/src/Processors/Transforms/InflatingExpressionTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/InflatingExpressionTransform.h rename to src/Processors/Transforms/InflatingExpressionTransform.h diff --git a/dbms/src/Processors/Transforms/LimitByTransform.cpp b/src/Processors/Transforms/LimitByTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/LimitByTransform.cpp rename to src/Processors/Transforms/LimitByTransform.cpp diff --git a/dbms/src/Processors/Transforms/LimitByTransform.h b/src/Processors/Transforms/LimitByTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/LimitByTransform.h rename to src/Processors/Transforms/LimitByTransform.h diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp b/src/Processors/Transforms/LimitsCheckingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp rename to src/Processors/Transforms/LimitsCheckingTransform.cpp diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.h b/src/Processors/Transforms/LimitsCheckingTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/LimitsCheckingTransform.h rename to src/Processors/Transforms/LimitsCheckingTransform.h diff --git a/dbms/src/Processors/Transforms/MaterializingTransform.cpp b/src/Processors/Transforms/MaterializingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/MaterializingTransform.cpp rename to src/Processors/Transforms/MaterializingTransform.cpp diff --git a/dbms/src/Processors/Transforms/MaterializingTransform.h b/src/Processors/Transforms/MaterializingTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/MaterializingTransform.h rename to src/Processors/Transforms/MaterializingTransform.h diff --git a/dbms/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/MergeSortingTransform.cpp rename to src/Processors/Transforms/MergeSortingTransform.cpp diff --git a/dbms/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h similarity index 100% rename from 
dbms/src/Processors/Transforms/MergeSortingTransform.h
rename to src/Processors/Transforms/MergeSortingTransform.h
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
similarity index 97%
rename from dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
rename to src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
index 4c0323fcf6a..12d289deaed 100644
--- a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
+++ b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
@@ -275,15 +275,20 @@ void GroupingAggregatedTransform::work()
 {
     if (!single_level_chunks.empty())
     {
-        auto & header = getOutputs().front().getHeader();
+        auto & header = getInputs().front().getHeader(); /// Take header from input port. Output header is empty.
         auto block = header.cloneWithColumns(single_level_chunks.back().detachColumns());
         single_level_chunks.pop_back();
         auto blocks = params->aggregator.convertBlockToTwoLevel(block);
 
         for (auto & cur_block : blocks)
         {
+            if (!cur_block)
+                continue;
+
             Int32 bucket = cur_block.info.bucket_num;
-            chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows()));
+            auto chunk_info = std::make_shared<AggregatedChunkInfo>();
+            chunk_info->bucket_num = bucket;
+            chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows(), std::move(chunk_info)));
         }
     }
 }
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h
rename to src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp b/src/Processors/Transforms/MergingAggregatedTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp
rename to src/Processors/Transforms/MergingAggregatedTransform.cpp
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.h b/src/Processors/Transforms/MergingAggregatedTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingAggregatedTransform.h
rename to src/Processors/Transforms/MergingAggregatedTransform.h
diff --git a/dbms/src/Processors/Transforms/MergingSortedTransform.cpp b/src/Processors/Transforms/MergingSortedTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingSortedTransform.cpp
rename to src/Processors/Transforms/MergingSortedTransform.cpp
diff --git a/dbms/src/Processors/Transforms/MergingSortedTransform.h b/src/Processors/Transforms/MergingSortedTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingSortedTransform.h
rename to src/Processors/Transforms/MergingSortedTransform.h
diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.cpp b/src/Processors/Transforms/PartialSortingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/PartialSortingTransform.cpp
rename to src/Processors/Transforms/PartialSortingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.h b/src/Processors/Transforms/PartialSortingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/PartialSortingTransform.h
rename to src/Processors/Transforms/PartialSortingTransform.h
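[Editor's note] Each two-level chunk produced in the hunk above now carries its bucket number in an AggregatedChunkInfo. A downstream consumer can read it back roughly as sketched here; this is a fragment, and the accessor name getChunkInfo() on Chunk is an assumption based on the Processors API, not code from this patch.

    // Fragment: recovering the bucket number attached above.
    if (const auto * agg_info = typeid_cast<const AggregatedChunkInfo *>(chunk.getChunkInfo().get()))
    {
        Int32 bucket = agg_info->bucket_num;         /// -1 means a single-level chunk
        bool is_overflows = agg_info->is_overflows;  /// overflow rows past max_rows_to_group_by
        if (!is_overflows)
            chunks_map[bucket].emplace_back(std::move(chunk));
    }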
diff --git a/dbms/src/Processors/Transforms/ReverseTransform.cpp b/src/Processors/Transforms/ReverseTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/ReverseTransform.cpp
rename to src/Processors/Transforms/ReverseTransform.cpp
diff --git a/dbms/src/Processors/Transforms/ReverseTransform.h b/src/Processors/Transforms/ReverseTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/ReverseTransform.h
rename to src/Processors/Transforms/ReverseTransform.h
diff --git a/dbms/src/Processors/Transforms/RollupTransform.cpp b/src/Processors/Transforms/RollupTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/RollupTransform.cpp
rename to src/Processors/Transforms/RollupTransform.cpp
diff --git a/dbms/src/Processors/Transforms/RollupTransform.h b/src/Processors/Transforms/RollupTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/RollupTransform.h
rename to src/Processors/Transforms/RollupTransform.h
diff --git a/dbms/src/Processors/Transforms/SortingTransform.cpp b/src/Processors/Transforms/SortingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/SortingTransform.cpp
rename to src/Processors/Transforms/SortingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/SortingTransform.h b/src/Processors/Transforms/SortingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/SortingTransform.h
rename to src/Processors/Transforms/SortingTransform.h
diff --git a/dbms/src/Processors/Transforms/TotalsHavingTransform.cpp b/src/Processors/Transforms/TotalsHavingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/TotalsHavingTransform.cpp
rename to src/Processors/Transforms/TotalsHavingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/TotalsHavingTransform.h b/src/Processors/Transforms/TotalsHavingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/TotalsHavingTransform.h
rename to src/Processors/Transforms/TotalsHavingTransform.h
diff --git a/dbms/src/Processors/printPipeline.h b/src/Processors/printPipeline.h
similarity index 100%
rename from dbms/src/Processors/printPipeline.h
rename to src/Processors/printPipeline.h
diff --git a/dbms/src/Processors/tests/CMakeLists.txt b/src/Processors/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Processors/tests/CMakeLists.txt
rename to src/Processors/tests/CMakeLists.txt
diff --git a/dbms/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp b/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp
similarity index 100%
rename from dbms/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp
rename to src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp
diff --git a/dbms/src/Processors/tests/processors_test.cpp b/src/Processors/tests/processors_test.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test.cpp
rename to src/Processors/tests/processors_test.cpp
diff --git a/dbms/src/Processors/tests/processors_test_aggregation.cpp b/src/Processors/tests/processors_test_aggregation.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_aggregation.cpp
rename to src/Processors/tests/processors_test_aggregation.cpp
diff --git a/dbms/src/Processors/tests/processors_test_chain.cpp b/src/Processors/tests/processors_test_chain.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_chain.cpp
rename to src/Processors/tests/processors_test_chain.cpp
diff --git a/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp b/src/Processors/tests/processors_test_expand_pipeline.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_expand_pipeline.cpp
rename to src/Processors/tests/processors_test_expand_pipeline.cpp
diff --git a/dbms/src/Processors/tests/processors_test_merge.cpp b/src/Processors/tests/processors_test_merge.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_merge.cpp
rename to src/Processors/tests/processors_test_merge.cpp
diff --git a/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp b/src/Processors/tests/processors_test_merge_sorting_transform.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp
rename to src/Processors/tests/processors_test_merge_sorting_transform.cpp
diff --git a/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp b/src/Processors/tests/processors_test_merging_sorted_transform.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp
rename to src/Processors/tests/processors_test_merging_sorted_transform.cpp
diff --git a/dbms/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp
similarity index 87%
rename from dbms/src/Storages/AlterCommands.cpp
rename to src/Storages/AlterCommands.cpp
index a02e5b5a879..8eebaabf342 100644
--- a/dbms/src/Storages/AlterCommands.cpp
+++ b/src/Storages/AlterCommands.cpp
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -39,6 +40,7 @@ namespace ErrorCodes
     extern const int NOT_FOUND_COLUMN_IN_BLOCK;
     extern const int LOGICAL_ERROR;
     extern const int DUPLICATE_COLUMN;
+    extern const int NOT_IMPLEMENTED;
 }
 
 
@@ -231,10 +233,21 @@ std::optional<AlterCommand> AlterCommand::parse(const ASTAlterCommand * command_
     else if (command_ast->type == ASTAlterCommand::MODIFY_QUERY)
     {
         AlterCommand command;
+        command.ast = command_ast->clone();
         command.type = AlterCommand::MODIFY_QUERY;
         command.select = command_ast->select;
         return command;
     }
+    else if (command_ast->type == ASTAlterCommand::RENAME_COLUMN)
+    {
+        AlterCommand command;
+        command.ast = command_ast->clone();
+        command.type = AlterCommand::RENAME_COLUMN;
+        command.column_name = command_ast->column->as<ASTIdentifier &>().name;
+        command.rename_to = command_ast->rename_to->as<ASTIdentifier &>().name;
+        command.if_exists = command_ast->if_exists;
+        return command;
+    }
     else
         return {};
 }
@@ -437,6 +450,24 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata) const
             settings_from_storage.push_back(change);
         }
     }
+    else if (type == RENAME_COLUMN)
+    {
+        metadata.columns.rename(column_name, rename_to);
+        RenameColumnData rename_data{column_name, rename_to};
+        RenameColumnVisitor rename_visitor(rename_data);
+        for (auto & column : metadata.columns)
+        {
+            metadata.columns.modify(column.name, [&](ColumnDescription & column_to_modify)
+            {
+                if (column_to_modify.default_desc.expression)
+                    rename_visitor.visit(column_to_modify.default_desc.expression);
+                if (column_to_modify.ttl)
+                    rename_visitor.visit(column_to_modify.ttl);
+            });
+        }
+        if (metadata.ttl_for_table_ast)
+            rename_visitor.visit(metadata.ttl_for_table_ast);
+    }
     else
         throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR);
 }
@@ -519,7 +550,7 @@ bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metada
     if (ignore)
         return false;
 
-    if (type == DROP_COLUMN || type == DROP_INDEX)
+    if (type == DROP_COLUMN || type == DROP_INDEX || type == RENAME_COLUMN)
         return true;
 
     if (type != MODIFY_COLUMN || data_type == nullptr)
@@ -585,6 +616,12 @@ std::optional<MutationCommand> AlterCommand::tryConvertToMutationCommand(const S
         result.predicate = nullptr;
     }
+    else if (type == RENAME_COLUMN)
+    {
+        result.type = MutationCommand::Type::RENAME_COLUMN;
+        result.column_name = column_name;
+        result.rename_to = rename_to;
+    }
 
     result.ast = ast->clone();
     return result;
@@ -619,6 +656,8 @@ String alterTypeToString(const AlterCommand::Type type)
         return "MODIFY SETTING";
     case AlterCommand::Type::MODIFY_QUERY:
         return "MODIFY QUERY";
+    case AlterCommand::Type::RENAME_COLUMN:
+        return "RENAME COLUMN";
     }
     __builtin_unreachable();
 }
@@ -666,7 +705,8 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
             command.ignore = true;
     }
     else if (command.type == AlterCommand::DROP_COLUMN
-        || command.type == AlterCommand::COMMENT_COLUMN)
+        || command.type == AlterCommand::COMMENT_COLUMN
+        || command.type == AlterCommand::RENAME_COLUMN)
     {
         if (!has_column && command.if_exists)
             command.ignore = true;
@@ -680,6 +720,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con
     auto all_columns = metadata.columns;
     /// Default expression for all added/modified columns
     ASTPtr default_expr_list = std::make_shared<ASTExpressionList>();
+    NameToNameMap renames_map;
     for (size_t i = 0; i < size(); ++i)
     {
         auto & command = (*this)[i];
@@ -753,6 +794,52 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con
             if (metadata.settings_ast == nullptr)
                 throw Exception{"Cannot alter settings, because table engine doesn't support settings changes", ErrorCodes::BAD_ARGUMENTS};
         }
+        else if (command.type == AlterCommand::RENAME_COLUMN)
+        {
+            /// TODO Implement nested rename
+            if (metadata.columns.hasNested(command.column_name))
+            {
+                throw Exception{"Cannot rename whole Nested struct", ErrorCodes::NOT_IMPLEMENTED};
+            }
+
+            if (!metadata.columns.has(command.column_name))
+            {
+                if (!command.if_exists)
+                    throw Exception{"Wrong column name. Cannot find column " + backQuote(command.column_name) + " to rename",
+                                    ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK};
+            }
+
+            if (metadata.columns.has(command.rename_to))
+                throw Exception{"Cannot rename to " + backQuote(command.rename_to) + ": column with this name already exists",
+                                ErrorCodes::DUPLICATE_COLUMN};
+
+            if (renames_map.count(command.column_name))
+                throw Exception{"Cannot rename column " + backQuote(command.column_name) + " to two different names in a single ALTER query", ErrorCodes::BAD_ARGUMENTS};
+
+            if (renames_map.count(command.rename_to))
+                throw Exception{"Rename loop detected in ALTER query",
+                                ErrorCodes::BAD_ARGUMENTS};
+
+            String from_nested_table_name = Nested::extractTableName(command.column_name);
+            String to_nested_table_name = Nested::extractTableName(command.rename_to);
+            bool from_nested = from_nested_table_name != command.column_name;
+            bool to_nested = to_nested_table_name != command.rename_to;
+
+            if (from_nested && to_nested)
+            {
+                if (from_nested_table_name != to_nested_table_name)
+                    throw Exception{"Cannot rename column from one nested name to another", ErrorCodes::BAD_ARGUMENTS};
+            }
+            else if (!from_nested && !to_nested)
+            {
+                renames_map[command.column_name] = command.rename_to;
+            }
+            else
+            {
+                throw Exception{"Cannot rename column from nested struct to normal column and vice versa", ErrorCodes::BAD_ARGUMENTS};
+            }
+        }
 
         /// Collect default expressions for MODIFY and ADD commands
         if (command.type == AlterCommand::MODIFY_COLUMN || command.type == AlterCommand::ADD_COLUMN)
diff --git a/dbms/src/Storages/AlterCommands.h b/src/Storages/AlterCommands.h
similarity index 98%
rename from dbms/src/Storages/AlterCommands.h
rename to src/Storages/AlterCommands.h
index 886c8beaed9..be27ba6ac2b 100644
--- a/dbms/src/Storages/AlterCommands.h
+++ b/src/Storages/AlterCommands.h
@@ -35,6 +35,7 @@ struct AlterCommand
         MODIFY_TTL,
         MODIFY_SETTING,
         MODIFY_QUERY,
+        RENAME_COLUMN,
     };
 
     Type type;
@@ -96,6 +97,9 @@ struct AlterCommand
     /// For MODIFY_QUERY
     ASTPtr select = nullptr;
 
+    /// Target column name
+    String rename_to;
+
     static std::optional<AlterCommand> parse(const ASTAlterCommand * command);
 
     void apply(StorageInMemoryMetadata & metadata) const;
diff --git a/dbms/src/Storages/CMakeLists.txt b/src/Storages/CMakeLists.txt
similarity index 100%
rename from dbms/src/Storages/CMakeLists.txt
rename to src/Storages/CMakeLists.txt
diff --git a/dbms/src/Storages/CheckResults.h b/src/Storages/CheckResults.h
similarity index 100%
rename from dbms/src/Storages/CheckResults.h
rename to src/Storages/CheckResults.h
diff --git a/dbms/src/Storages/ColumnCodec.h b/src/Storages/ColumnCodec.h
similarity index 100%
rename from dbms/src/Storages/ColumnCodec.h
rename to src/Storages/ColumnCodec.h
diff --git a/dbms/src/Storages/ColumnDefault.cpp b/src/Storages/ColumnDefault.cpp
similarity index 100%
rename from dbms/src/Storages/ColumnDefault.cpp
rename to src/Storages/ColumnDefault.cpp
diff --git a/dbms/src/Storages/ColumnDefault.h b/src/Storages/ColumnDefault.h
similarity index 100%
rename from dbms/src/Storages/ColumnDefault.h
rename to src/Storages/ColumnDefault.h
diff --git a/dbms/src/Storages/ColumnDependency.h b/src/Storages/ColumnDependency.h
similarity index 100%
rename from dbms/src/Storages/ColumnDependency.h
rename to src/Storages/ColumnDependency.h
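[Editor's note] The renames_map bookkeeping above enforces two rules: a column may appear as a rename source only once per ALTER, and a rename target may not itself already be a rename source (which rejects swaps such as a to b plus b to a). A standalone model of just that rule, with invented names, for illustration:

    #include <map>
    #include <stdexcept>
    #include <string>

    // Standalone model of the renames_map checks above: each source column may
    // be renamed once, and no target may collide with an earlier rename source.
    void checkRename(std::map<std::string, std::string> & renames,
                     const std::string & from, const std::string & to)
    {
        if (renames.count(from))
            throw std::runtime_error("column renamed twice in one ALTER");
        if (renames.count(to))
            throw std::runtime_error("rename loop detected");
        renames[from] = to;
    }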
diff --git a/dbms/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp
similarity index 96%
rename from dbms/src/Storages/ColumnsDescription.cpp
rename to src/Storages/ColumnsDescription.cpp
index 2b2281c9663..c4f6d11605f 100644
--- a/dbms/src/Storages/ColumnsDescription.cpp
+++ b/src/Storages/ColumnsDescription.cpp
@@ -1,4 +1,5 @@
 #include
+
 #include
 #include
 #include
@@ -22,11 +23,13 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 
+
 namespace DB
 {
 
@@ -36,6 +39,7 @@ namespace ErrorCodes
     extern const int ILLEGAL_COLUMN;
     extern const int CANNOT_PARSE_TEXT;
     extern const int THERE_IS_NO_DEFAULT_VALUE;
+    extern const int LOGICAL_ERROR;
 }
 
 ColumnDescription::ColumnDescription(String name_, DataTypePtr type_, bool is_virtual_)
@@ -100,7 +104,7 @@ void ColumnDescription::readText(ReadBuffer & buf)
     ParserColumnDeclaration column_parser(/* require type */ true);
     String column_line;
     readEscapedStringUntilEOL(column_line, buf);
-    ASTPtr ast = parseQuery(column_parser, column_line, "column parser", 0);
+    ASTPtr ast = parseQuery(column_parser, column_line, "column parser", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
     if (const auto * col_ast = ast->as<ASTColumnDeclaration>())
     {
         name = col_ast->name;
@@ -195,6 +199,18 @@ void ColumnsDescription::remove(const String & column_name)
         list_it = columns.get<0>().erase(list_it);
 }
 
+void ColumnsDescription::rename(const String & column_from, const String & column_to)
+{
+    auto it = columns.get<1>().find(column_from);
+    if (it == columns.get<1>().end())
+        throw Exception("Cannot find column " + column_from + " in ColumnsDescription", ErrorCodes::LOGICAL_ERROR);
+
+    columns.get<1>().modify_key(it, [&column_to] (String & old_name)
+    {
+        old_name = column_to;
+    });
+}
+
 void ColumnsDescription::flattenNested()
 {
diff --git a/dbms/src/Storages/ColumnsDescription.h b/src/Storages/ColumnsDescription.h
similarity index 95%
rename from dbms/src/Storages/ColumnsDescription.h
rename to src/Storages/ColumnsDescription.h
index fc85566a8c0..72399a70128 100644
--- a/dbms/src/Storages/ColumnsDescription.h
+++ b/src/Storages/ColumnsDescription.h
@@ -57,6 +57,10 @@ public:
     /// `column_name` can be a Nested column name;
     void remove(const String & column_name);
 
+    /// Rename column. column_from and column_to cannot be nested columns.
+    /// TODO add ability to rename nested columns
+    void rename(const String & column_from, const String & column_to);
+
     void flattenNested(); /// TODO: remove, insert already flattened Nested columns.
 
     bool operator==(const ColumnsDescription & other) const { return columns == other.columns; }
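[Editor's note] ColumnsDescription::rename above relies on Boost.MultiIndex's modify_key to change the indexed name in place and re-index the element. A standalone illustration of that idiom (not ClickHouse code; Column and its index here are invented):

    #include <boost/multi_index_container.hpp>
    #include <boost/multi_index/ordered_index.hpp>
    #include <boost/multi_index/member.hpp>
    #include <cassert>
    #include <string>

    struct Column { std::string name; int type = 0; };

    using Columns = boost::multi_index_container<
        Column,
        boost::multi_index::indexed_by<
            boost::multi_index::ordered_unique<
                boost::multi_index::member<Column, std::string, &Column::name>>>>;

    int main()
    {
        Columns columns;
        columns.insert({"a", 1});

        // modify_key updates the key and re-indexes the element; per Boost
        // semantics it returns false (and erases) on a uniqueness collision.
        auto it = columns.find("a");
        bool ok = columns.modify_key(it, [](std::string & name) { name = "b"; });
        assert(ok && columns.count("b") == 1);
    }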
diff --git a/dbms/src/Storages/CompressionCodecSelector.h b/src/Storages/CompressionCodecSelector.h
similarity index 100%
rename from dbms/src/Storages/CompressionCodecSelector.h
rename to src/Storages/CompressionCodecSelector.h
diff --git a/dbms/src/Storages/ConstraintsDescription.cpp b/src/Storages/ConstraintsDescription.cpp
similarity index 93%
rename from dbms/src/Storages/ConstraintsDescription.cpp
rename to src/Storages/ConstraintsDescription.cpp
index ad0cd76733a..8c38af0cd5e 100644
--- a/dbms/src/Storages/ConstraintsDescription.cpp
+++ b/src/Storages/ConstraintsDescription.cpp
@@ -5,6 +5,8 @@
 #include
 #include
 
+#include
+
 namespace DB
 {
 
@@ -28,7 +30,7 @@ ConstraintsDescription ConstraintsDescription::parse(const String & str)
     ConstraintsDescription res;
     ParserConstraintDeclarationList parser;
-    ASTPtr list = parseQuery(parser, str, 0);
+    ASTPtr list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
 
     for (const auto & constraint : list->children)
         res.constraints.push_back(std::dynamic_pointer_cast<ASTConstraintDeclaration>(constraint));
diff --git a/dbms/src/Storages/ConstraintsDescription.h b/src/Storages/ConstraintsDescription.h
similarity index 100%
rename from dbms/src/Storages/ConstraintsDescription.h
rename to src/Storages/ConstraintsDescription.h
diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp
similarity index 100%
rename from dbms/src/Storages/Distributed/DirectoryMonitor.cpp
rename to src/Storages/Distributed/DirectoryMonitor.cpp
diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.h b/src/Storages/Distributed/DirectoryMonitor.h
similarity index 100%
rename from dbms/src/Storages/Distributed/DirectoryMonitor.h
rename to src/Storages/Distributed/DirectoryMonitor.h
diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
similarity index 95%
rename from dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp
rename to src/Storages/Distributed/DistributedBlockOutputStream.cpp
index 2aba27dfc67..b0695ccad1b 100644
--- a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp
+++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
@@ -12,6 +12,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -59,6 +61,26 @@ namespace ErrorCodes
     extern const int CANNOT_LINK;
 }
 
+static void writeBlockConvert(const BlockOutputStreamPtr & out, const Block & block, const size_t repeats)
+{
+    if (!blocksHaveEqualStructure(out->getHeader(), block))
+    {
+        ConvertingBlockInputStream convert(
+            std::make_shared<OneBlockInputStream>(block),
+            out->getHeader(),
+            ConvertingBlockInputStream::MatchColumnsMode::Name);
+        auto adopted_block = convert.read();
+
+        for (size_t i = 0; i < repeats; ++i)
+            out->write(adopted_block);
+    }
+    else
+    {
+        for (size_t i = 0; i < repeats; ++i)
+            out->write(block);
+    }
+}
+
 DistributedBlockOutputStream::DistributedBlockOutputStream(
     const Context & context_, StorageDistributed & storage_, const ASTPtr & query_ast_, const ClusterPtr & cluster_,
@@ -305,13 +327,13 @@ ThreadPool::Job DistributedBlockOutputStream::runWritingJob(DistributedBlockOutp
             job.local_context = std::make_unique<Context>(context);
 
             InterpreterInsertQuery interp(query_ast, *job.local_context);
-            job.stream = interp.execute().out;
+            auto block_io = interp.execute();
+
+            job.stream = block_io.out;
             job.stream->writePrefix();
         }
 
-        size_t num_repetitions =
shard_info.getLocalNodeCount(); - for (size_t i = 0; i < num_repetitions; ++i) - job.stream->write(shard_block); + writeBlockConvert(job.stream, shard_block, shard_info.getLocalNodeCount()); } job.blocks_written += 1; @@ -544,11 +566,9 @@ void DistributedBlockOutputStream::writeToLocal(const Block & block, const size_ InterpreterInsertQuery interp(query_ast, context); auto block_io = interp.execute(); + block_io.out->writePrefix(); - - for (size_t i = 0; i < repeats; ++i) - block_io.out->write(block); - + writeBlockConvert(block_io.out, block, repeats); block_io.out->writeSuffix(); } diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h b/src/Storages/Distributed/DistributedBlockOutputStream.h similarity index 100% rename from dbms/src/Storages/Distributed/DistributedBlockOutputStream.h rename to src/Storages/Distributed/DistributedBlockOutputStream.h diff --git a/dbms/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp similarity index 84% rename from dbms/src/Storages/IStorage.cpp rename to src/Storages/IStorage.cpp index c36a28b115f..5a792080370 100644 --- a/dbms/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -28,6 +28,7 @@ namespace ErrorCodes extern const int TYPE_MISMATCH; extern const int TABLE_IS_DROPPED; extern const int NOT_IMPLEMENTED; + extern const int DEADLOCK_AVOIDED; } IStorage::IStorage(StorageID storage_id_, ColumnsDescription virtuals_) : storage_id(std::move(storage_id_)), virtuals(std::move(virtuals_)) @@ -314,43 +315,64 @@ bool IStorage::isVirtualColumn(const String & column_name) const return getColumns().get(column_name).is_virtual; } -TableStructureReadLockHolder IStorage::lockStructureForShare(const String & query_id) +RWLockImpl::LockHolder IStorage::tryLockTimed( + const RWLock & rwlock, RWLockImpl::Type type, const String & query_id, const SettingSeconds & acquire_timeout) +{ + auto lock_holder = rwlock->getLock(type, query_id, std::chrono::milliseconds(acquire_timeout.totalMilliseconds())); + if (!lock_holder) + { + const String type_str = type == RWLockImpl::Type::Read ? "READ" : "WRITE"; + throw Exception( + type_str + " locking attempt on \"" + getStorageID().getFullTableName() + + "\" has timed out! (" + toString(acquire_timeout.totalMilliseconds()) + "ms) " + "Possible deadlock avoided. 
Client should retry.", + ErrorCodes::DEADLOCK_AVOIDED); + } + return lock_holder; +} + +TableStructureReadLockHolder IStorage::lockStructureForShare(bool will_add_new_data, const String & query_id, const SettingSeconds & acquire_timeout) { TableStructureReadLockHolder result; - result.structure_lock = structure_lock->getLock(RWLockImpl::Read, query_id); + if (will_add_new_data) + result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Read, query_id, acquire_timeout); + result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Read, query_id, acquire_timeout); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); return result; } -TableStructureWriteLockHolder IStorage::lockAlterIntention() +TableStructureWriteLockHolder IStorage::lockAlterIntention(const String & query_id, const SettingSeconds & acquire_timeout) { TableStructureWriteLockHolder result; - result.alter_lock = std::unique_lock(alter_lock); + result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id, acquire_timeout); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); return result; } -void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id) +void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id, const SettingSeconds & acquire_timeout) { - if (!lock_holder.alter_lock) + if (!lock_holder.alter_intention_lock) throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR); - lock_holder.structure_lock = structure_lock->getLock(RWLockImpl::Write, query_id); + if (!lock_holder.new_data_structure_lock) + lock_holder.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id, acquire_timeout); + lock_holder.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id, acquire_timeout); } -TableStructureWriteLockHolder IStorage::lockExclusively(const String & query_id) +TableStructureWriteLockHolder IStorage::lockExclusively(const String & query_id, const SettingSeconds & acquire_timeout) { TableStructureWriteLockHolder result; - result.alter_lock = std::unique_lock(alter_lock); + result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id, acquire_timeout); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); - result.structure_lock = structure_lock->getLock(RWLockImpl::Write, query_id); + result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id, acquire_timeout); + result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id, acquire_timeout); return result; } @@ -365,7 +387,7 @@ void IStorage::alter( const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata metadata = getInMemoryMetadata(); params.apply(metadata); diff --git a/dbms/src/Storages/IStorage.h b/src/Storages/IStorage.h similarity index 94% rename from dbms/src/Storages/IStorage.h rename to src/Storages/IStorage.h index 469f39d65df..1b55feb9d81 100644 --- a/dbms/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h 
@@ -195,22 +195,26 @@ private:
     IndicesDescription indices;
     ConstraintsDescription constraints;
 
+private:
+    RWLockImpl::LockHolder tryLockTimed(
+        const RWLock & rwlock, RWLockImpl::Type type, const String & query_id, const SettingSeconds & acquire_timeout);
+
 public:
     /// Acquire this lock if you need the table structure to remain constant during the execution of
     /// the query. If will_add_new_data is true, this means that the query will add new data to the table
     /// (INSERT or a parts merge).
-    TableStructureReadLockHolder lockStructureForShare(const String & query_id);
+    TableStructureReadLockHolder lockStructureForShare(bool will_add_new_data, const String & query_id, const SettingSeconds & acquire_timeout);
 
     /// Acquire this lock at the start of ALTER to lock out other ALTERs and make sure that only you
     /// can modify the table structure. It can later be upgraded to the exclusive lock.
-    TableStructureWriteLockHolder lockAlterIntention();
+    TableStructureWriteLockHolder lockAlterIntention(const String & query_id, const SettingSeconds & acquire_timeout);
 
     /// Upgrade alter intention lock to the full exclusive structure lock. This is done by ALTER queries
     /// to ensure that no other query uses the table structure and it can be safely changed.
-    void lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id);
+    void lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id, const SettingSeconds & acquire_timeout);
 
     /// Acquire the full exclusive lock immediately. No other queries can run concurrently.
-    TableStructureWriteLockHolder lockExclusively(const String & query_id);
+    TableStructureWriteLockHolder lockExclusively(const String & query_id, const SettingSeconds & acquire_timeout);
 
     /** Returns stage to which query is going to be processed in read() function.
       * (Normally, the function only reads the columns from the list, but in other cases,
@@ -218,9 +222,18 @@ public:
       *
       * SelectQueryInfo is required since the stage can depend on the query
       * (see Distributed() engine and optimize_skip_unused_shards).
+      *
+      * QueryProcessingStage::Enum is required for Distributed over Distributed,
+      * since it can never return Complete for intermediate queries.
       */
-    QueryProcessingStage::Enum getQueryProcessingStage(const Context & context) const { return getQueryProcessingStage(context, {}); }
-    virtual QueryProcessingStage::Enum getQueryProcessingStage(const Context &, const ASTPtr &) const { return QueryProcessingStage::FetchColumns; }
+    QueryProcessingStage::Enum getQueryProcessingStage(const Context & context) const
+    {
+        return getQueryProcessingStage(context, QueryProcessingStage::Complete, {});
+    }
+    virtual QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const
+    {
+        return QueryProcessingStage::FetchColumns;
+    }
 
     /** Watch live changes to the table.
       * Accepts a list of columns to read, as well as a description of the query,
@@ -490,7 +503,12 @@ private:
     /// If you hold this lock exclusively, you can be sure that no other structure modifying queries
     /// (e.g. ALTER, DROP) are concurrently executing. But queries that only read table structure
     /// (e.g. SELECT, INSERT) can continue to execute.
-    mutable std::mutex alter_lock;
+    mutable RWLock alter_intention_lock = RWLockImpl::create();
+
+    /// It is taken for share for the entire INSERT query and the entire merge of the parts (for MergeTree).
+    /// ALTER COLUMN queries acquire an exclusive lock to ensure that no new parts with the old structure
+    /// are added to the table and thus the set of parts to modify doesn't change.
+    mutable RWLock new_data_structure_lock = RWLockImpl::create();
 
     /// Lock for the table column structure (names, types, etc.) and data path.
     /// It is taken in exclusive mode by queries that modify them (e.g. RENAME, ALTER and DROP)
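[Editor's note] With the signatures above, structure locks are acquired with an explicit timeout and fail fast with DEADLOCK_AVOIDED instead of blocking forever; the client is expected to retry. The calling pattern, mirroring the DataPartsExchange hunk later in this diff (fragment; table is an IStorage pointer and context a query Context, both assumed):

    // Fragment: reading under the new timed share lock.
    auto lock = table->lockStructureForShare(
        /* will_add_new_data = */ false,
        context.getCurrentQueryId(),
        context.getSettingsRef().lock_acquire_timeout);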
diff --git a/dbms/src/Storages/IStorage_fwd.h b/src/Storages/IStorage_fwd.h
similarity index 100%
rename from dbms/src/Storages/IStorage_fwd.h
rename to src/Storages/IStorage_fwd.h
diff --git a/dbms/src/Storages/IndicesDescription.cpp b/src/Storages/IndicesDescription.cpp
similarity index 91%
rename from dbms/src/Storages/IndicesDescription.cpp
rename to src/Storages/IndicesDescription.cpp
index a5772a835bf..2363e7924ba 100644
--- a/dbms/src/Storages/IndicesDescription.cpp
+++ b/src/Storages/IndicesDescription.cpp
@@ -5,6 +5,9 @@
 #include
 #include
 
+#include
+
+
 namespace DB
 {
 
@@ -42,7 +45,7 @@ IndicesDescription IndicesDescription::parse(const String & str)
     IndicesDescription res;
     ParserIndexDeclarationList parser;
-    ASTPtr list = parseQuery(parser, str, 0);
+    ASTPtr list = parseQuery(parser, str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
 
     for (const auto & index : list->children)
         res.indices.push_back(std::dynamic_pointer_cast<ASTIndexDeclaration>(index));
diff --git a/dbms/src/Storages/IndicesDescription.h b/src/Storages/IndicesDescription.h
similarity index 100%
rename from dbms/src/Storages/IndicesDescription.h
rename to src/Storages/IndicesDescription.h
diff --git a/dbms/src/Storages/Kafka/Buffer_fwd.h b/src/Storages/Kafka/Buffer_fwd.h
similarity index 100%
rename from dbms/src/Storages/Kafka/Buffer_fwd.h
rename to src/Storages/Kafka/Buffer_fwd.h
diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp b/src/Storages/Kafka/KafkaBlockInputStream.cpp
similarity index 99%
rename from dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp
rename to src/Storages/Kafka/KafkaBlockInputStream.cpp
index 39f71912f8e..18f7e696419 100644
--- a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp
+++ b/src/Storages/Kafka/KafkaBlockInputStream.cpp
@@ -190,7 +190,6 @@ Block KafkaBlockInputStream::readImpl()
         result_block.insert(column);
 
     return ConvertingBlockInputStream(
-               context,
                std::make_shared<OneBlockInputStream>(result_block),
                getHeader(),
                ConvertingBlockInputStream::MatchColumnsMode::Name)
diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.h b/src/Storages/Kafka/KafkaBlockInputStream.h
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaBlockInputStream.h
rename to src/Storages/Kafka/KafkaBlockInputStream.h
diff --git a/dbms/src/Storages/Kafka/KafkaBlockOutputStream.cpp b/src/Storages/Kafka/KafkaBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaBlockOutputStream.cpp
rename to src/Storages/Kafka/KafkaBlockOutputStream.cpp
diff --git a/dbms/src/Storages/Kafka/KafkaBlockOutputStream.h b/src/Storages/Kafka/KafkaBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaBlockOutputStream.h
rename to src/Storages/Kafka/KafkaBlockOutputStream.h
diff --git a/dbms/src/Storages/Kafka/KafkaSettings.cpp b/src/Storages/Kafka/KafkaSettings.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaSettings.cpp
rename to src/Storages/Kafka/KafkaSettings.cpp
diff --git a/dbms/src/Storages/Kafka/KafkaSettings.h b/src/Storages/Kafka/KafkaSettings.h
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaSettings.h
rename to src/Storages/Kafka/KafkaSettings.h
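[Editor's note] Both the ConvertingTransform and the ConvertingBlockInputStream changes in this diff drop the Context parameter: conversion needs only the source, the target header, and a match mode. A sketch of standalone usage after this change (fragment; src_block and dst_header are assumed):

    // Fragment: adapting a block to a target header by column Name, without a Context.
    auto converting = std::make_shared<ConvertingBlockInputStream>(
        std::make_shared<OneBlockInputStream>(src_block),
        dst_header,
        ConvertingBlockInputStream::MatchColumnsMode::Name);
    Block adapted = converting->read();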
diff --git a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
rename to src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
diff --git a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
similarity index 100%
rename from dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
rename to src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
diff --git a/dbms/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/StorageKafka.cpp
rename to src/Storages/Kafka/StorageKafka.cpp
diff --git a/dbms/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h
similarity index 100%
rename from dbms/src/Storages/Kafka/StorageKafka.h
rename to src/Storages/Kafka/StorageKafka.h
diff --git a/dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.cpp b/src/Storages/Kafka/WriteBufferToKafkaProducer.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.cpp
rename to src/Storages/Kafka/WriteBufferToKafkaProducer.cpp
diff --git a/dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.h b/src/Storages/Kafka/WriteBufferToKafkaProducer.h
similarity index 100%
rename from dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.h
rename to src/Storages/Kafka/WriteBufferToKafkaProducer.h
diff --git a/dbms/src/Storages/LiveView/LiveViewBlockInputStream.h b/src/Storages/LiveView/LiveViewBlockInputStream.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewBlockInputStream.h
rename to src/Storages/LiveView/LiveViewBlockInputStream.h
diff --git a/dbms/src/Storages/LiveView/LiveViewBlockOutputStream.h b/src/Storages/LiveView/LiveViewBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewBlockOutputStream.h
rename to src/Storages/LiveView/LiveViewBlockOutputStream.h
diff --git a/dbms/src/Storages/LiveView/LiveViewCommands.h b/src/Storages/LiveView/LiveViewCommands.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewCommands.h
rename to src/Storages/LiveView/LiveViewCommands.h
diff --git a/dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h b/src/Storages/LiveView/LiveViewEventsBlockInputStream.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h
rename to src/Storages/LiveView/LiveViewEventsBlockInputStream.h
diff --git a/dbms/src/Storages/LiveView/StorageBlocks.h b/src/Storages/LiveView/StorageBlocks.h
similarity index 93%
rename from dbms/src/Storages/LiveView/StorageBlocks.h
rename to src/Storages/LiveView/StorageBlocks.h
index fd856e27718..a21a9374137 100644
--- a/dbms/src/Storages/LiveView/StorageBlocks.h
+++ b/src/Storages/LiveView/StorageBlocks.h
@@ -26,7 +26,7 @@ public:
         return std::make_shared<StorageBlocks>(table_id, columns, std::move(pipes), to_stage);
     }
     std::string getName() const override { return "Blocks"; }
-    QueryProcessingStage::Enum getQueryProcessingStage(const Context &, const ASTPtr &) const override { return to_stage; }
+    QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override { return to_stage; }
 
     Pipes read(
         const Names & /*column_names*/,
diff --git a/dbms/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp
similarity index 98%
rename from
dbms/src/Storages/LiveView/StorageLiveView.cpp rename to src/Storages/LiveView/StorageLiveView.cpp index 93d183a594f..569e5c24e1c 100644 --- a/dbms/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -517,11 +517,14 @@ void StorageLiveView::drop(TableStructureWriteLockHolder &) condition.notify_all(); } -void StorageLiveView::refresh() +void StorageLiveView::refresh(const Context & context) { - std::lock_guard lock(mutex); - if (getNewBlocks()) - condition.notify_all(); + auto alter_lock = lockAlterIntention(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + { + std::lock_guard lock(mutex); + if (getNewBlocks()) + condition.notify_all(); + } } Pipes StorageLiveView::read( diff --git a/dbms/src/Storages/LiveView/StorageLiveView.h b/src/Storages/LiveView/StorageLiveView.h similarity index 99% rename from dbms/src/Storages/LiveView/StorageLiveView.h rename to src/Storages/LiveView/StorageLiveView.h index b3ed89f8d10..9186132f99d 100644 --- a/dbms/src/Storages/LiveView/StorageLiveView.h +++ b/src/Storages/LiveView/StorageLiveView.h @@ -123,7 +123,7 @@ public: void startup() override; void shutdown() override; - void refresh(); + void refresh(const Context & context); Pipes read( const Names & column_names, diff --git a/dbms/src/Storages/MarkCache.h b/src/Storages/MarkCache.h similarity index 100% rename from dbms/src/Storages/MarkCache.h rename to src/Storages/MarkCache.h diff --git a/dbms/src/Storages/MergeTree/ActiveDataPartSet.cpp b/src/Storages/MergeTree/ActiveDataPartSet.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ActiveDataPartSet.cpp rename to src/Storages/MergeTree/ActiveDataPartSet.cpp diff --git a/dbms/src/Storages/MergeTree/ActiveDataPartSet.h b/src/Storages/MergeTree/ActiveDataPartSet.h similarity index 100% rename from dbms/src/Storages/MergeTree/ActiveDataPartSet.h rename to src/Storages/MergeTree/ActiveDataPartSet.h diff --git a/dbms/src/Storages/MergeTree/AllMergeSelector.cpp b/src/Storages/MergeTree/AllMergeSelector.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/AllMergeSelector.cpp rename to src/Storages/MergeTree/AllMergeSelector.cpp diff --git a/dbms/src/Storages/MergeTree/AllMergeSelector.h b/src/Storages/MergeTree/AllMergeSelector.h similarity index 100% rename from dbms/src/Storages/MergeTree/AllMergeSelector.h rename to src/Storages/MergeTree/AllMergeSelector.h diff --git a/dbms/src/Storages/MergeTree/BackgroundProcessingPool.cpp b/src/Storages/MergeTree/BackgroundProcessingPool.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/BackgroundProcessingPool.cpp rename to src/Storages/MergeTree/BackgroundProcessingPool.cpp diff --git a/dbms/src/Storages/MergeTree/BackgroundProcessingPool.h b/src/Storages/MergeTree/BackgroundProcessingPool.h similarity index 100% rename from dbms/src/Storages/MergeTree/BackgroundProcessingPool.h rename to src/Storages/MergeTree/BackgroundProcessingPool.h diff --git a/dbms/src/Storages/MergeTree/BoolMask.cpp b/src/Storages/MergeTree/BoolMask.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/BoolMask.cpp rename to src/Storages/MergeTree/BoolMask.cpp diff --git a/dbms/src/Storages/MergeTree/BoolMask.h b/src/Storages/MergeTree/BoolMask.h similarity index 100% rename from dbms/src/Storages/MergeTree/BoolMask.h rename to src/Storages/MergeTree/BoolMask.h diff --git a/dbms/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp similarity index 98% rename from 
dbms/src/Storages/MergeTree/DataPartsExchange.cpp
rename to src/Storages/MergeTree/DataPartsExchange.cpp
index 1b216e8bec3..c656fbf0c58 100644
--- a/dbms/src/Storages/MergeTree/DataPartsExchange.cpp
+++ b/src/Storages/MergeTree/DataPartsExchange.cpp
@@ -85,7 +85,8 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
 
     try
     {
-        auto storage_lock = data.lockStructureForShare(RWLockImpl::NO_QUERY);
+        auto storage_lock = data.lockStructureForShare(
+            false, RWLockImpl::NO_QUERY, data.getSettings()->lock_acquire_timeout_for_background_operations);
 
         MergeTreeData::DataPartPtr part = findPart(part_name);
diff --git a/dbms/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/DataPartsExchange.h
rename to src/Storages/MergeTree/DataPartsExchange.h
diff --git a/dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
rename to src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
diff --git a/dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.h b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.h
rename to src/Storages/MergeTree/EphemeralLockInZooKeeper.h
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
similarity index 99%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPart.cpp
rename to src/Storages/MergeTree/IMergeTreeDataPart.cpp
index 3e0caa67518..5d799d257bc 100644
--- a/dbms/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -98,8 +98,8 @@ void IMergeTreeDataPart::MinMaxIndex::update(const Block & block, const Names &
 
     for (size_t i = 0; i < column_names.size(); ++i)
     {
-        Field min_value;
-        Field max_value;
+        FieldRef min_value;
+        FieldRef max_value;
         const ColumnWithTypeAndName & column = block.getByName(column_names[i]);
         column.column->getExtremes(min_value, max_value);
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPart.h
rename to src/Storages/MergeTree/IMergeTreeDataPart.h
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp
rename to src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h
similarity index 97%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.h
rename to src/Storages/MergeTree/IMergeTreeDataPartWriter.h
index 4eb842f9279..d18b31edc72 100644
--- a/dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.h
+++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h
@@ -102,8 +102,7 @@ public:
         written_offset_columns = written_offset_columns_;
     }
 
-    using SkipIndices = std::vector<MergeTreeIndexPtr>;
-    const SkipIndices & getSkipIndices() { return skip_indices; }
+    const MergeTreeIndices & getSkipIndices() { return skip_indices; }
 
     void initSkipIndices();
     void initPrimaryIndex();
@@ -126,7 +125,7 @@ protected:
 
     CompressionCodecPtr default_codec;
 
-    std::vector<MergeTreeIndexPtr> skip_indices;
+    MergeTreeIndices skip_indices;
 
     MergeTreeWriterSettings settings;
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp
similarity index 90%
rename from dbms/src/Storages/MergeTree/IMergeTreeReader.cpp
rename to src/Storages/MergeTree/IMergeTreeReader.cpp
index 91d52cfa1fc..8243983d837 100644
--- a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp
+++ b/src/Storages/MergeTree/IMergeTreeReader.cpp
@@ -30,7 +30,10 @@ IMergeTreeReader::IMergeTreeReader(const MergeTreeData::DataPartPtr & data_part_
     , columns(columns_), uncompressed_cache(uncompressed_cache_), mark_cache(mark_cache_)
     , settings(settings_), storage(data_part_->storage)
     , all_mark_ranges(all_mark_ranges_)
+    , alter_conversions(storage.getAlterConversionsForPart(data_part))
 {
+    for (const NameAndTypePair & column_from_part : data_part->getColumns())
+        columns_from_part[column_from_part.name] = column_from_part.type;
 }
 
 IMergeTreeReader::~IMergeTreeReader() = default;
@@ -182,6 +185,23 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns
     }
 }
 
+NameAndTypePair IMergeTreeReader::getColumnFromPart(const NameAndTypePair & required_column) const
+{
+    auto it = columns_from_part.find(required_column.name);
+    if (it != columns_from_part.end())
+        return {it->first, it->second};
+
+    if (alter_conversions.isColumnRenamed(required_column.name))
+    {
+        String old_name = alter_conversions.getColumnOldName(required_column.name);
+        it = columns_from_part.find(old_name);
+        if (it != columns_from_part.end())
+            return {it->first, it->second};
+    }
+
+    return required_column;
+}
+
 void IMergeTreeReader::performRequiredConversions(Columns & res_columns)
 {
     try
@@ -208,10 +228,7 @@ void IMergeTreeReader::performRequiredConversions(Columns & res_columns)
             if (res_columns[pos] == nullptr)
                 continue;
 
-            if (columns_from_part.count(name_and_type->name))
-                copy_block.insert({res_columns[pos], columns_from_part[name_and_type->name], name_and_type->name});
-            else
-                copy_block.insert({res_columns[pos], name_and_type->type, name_and_type->name});
+            copy_block.insert({res_columns[pos], getColumnFromPart(*name_and_type).type, name_and_type->name});
         }
 
         DB::performRequiredConversions(copy_block, columns, storage.global_context);
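[Editor's note] getColumnFromPart above makes reads tolerant of RENAME COLUMN: a part written before the rename still stores the column under its old name, so the reader falls back through the recorded alter conversions. A standalone model of that lookup order (invented types, for illustration only):

    #include <map>
    #include <optional>
    #include <string>

    // Standalone model of the fallback above: prefer the part's own column,
    // otherwise try the pre-rename name recorded in the alter conversions.
    std::optional<std::string> resolveTypeInPart(
        const std::map<std::string, std::string> & columns_in_part,  // name -> type
        const std::map<std::string, std::string> & new_to_old_name,  // rename history
        const std::string & required)
    {
        if (auto it = columns_in_part.find(required); it != columns_in_part.end())
            return it->second;
        if (auto ren = new_to_old_name.find(required); ren != new_to_old_name.end())
            if (auto it = columns_in_part.find(ren->second); it != columns_in_part.end())
                return it->second;
        return std::nullopt;  // missing from this part; default values kick in
    }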
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.h b/src/Storages/MergeTree/IMergeTreeReader.h
similarity index 90%
rename from dbms/src/Storages/MergeTree/IMergeTreeReader.h
rename to src/Storages/MergeTree/IMergeTreeReader.h
index 75fefe235f8..02d8f67f9d0 100644
--- a/dbms/src/Storages/MergeTree/IMergeTreeReader.h
+++ b/src/Storages/MergeTree/IMergeTreeReader.h
@@ -4,7 +4,6 @@
 #include
 #include
 
-
 namespace DB
 {
 
@@ -59,6 +58,9 @@ public:
     MergeTreeData::DataPartPtr data_part;
 
 protected:
+    /// Returns actual column type in part, which can differ from table metadata.
+    NameAndTypePair getColumnFromPart(const NameAndTypePair & required_column) const;
+
     /// avg_value_size_hints are used to reduce the number of reallocations when creating columns of variable size.
     ValueSizeMap avg_value_size_hints;
     /// Stores states for IDataType::deserializeBinaryBulk
@@ -67,8 +69,6 @@ protected:
     /// Columns that are read.
     NamesAndTypesList columns;
 
-    std::unordered_map<String, DataTypePtr> columns_from_part;
-
     UncompressedCache * uncompressed_cache;
     MarkCache * mark_cache;
 
@@ -78,6 +78,13 @@ protected:
     MarkRanges all_mark_ranges;
 
     friend class MergeTreeRangeReader::DelayedStream;
+
+private:
+    /// Alter conversions, which must be applied on the fly if required.
+    MergeTreeData::AlterConversions alter_conversions;
+
+    /// Actual data type of columns in part.
+    std::unordered_map<String, DataTypePtr> columns_from_part;
 };
 
 }
diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp
rename to src/Storages/MergeTree/IMergedBlockOutputStream.cpp
diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h
rename to src/Storages/MergeTree/IMergedBlockOutputStream.h
diff --git a/dbms/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp
similarity index 95%
rename from dbms/src/Storages/MergeTree/KeyCondition.cpp
rename to src/Storages/MergeTree/KeyCondition.cpp
index e994d254958..e755c4942a1 100644
--- a/dbms/src/Storages/MergeTree/KeyCondition.cpp
+++ b/src/Storages/MergeTree/KeyCondition.cpp
@@ -338,44 +338,6 @@ inline bool Range::equals(const Field & lhs, const Field & rhs) { return applyVi
 inline bool Range::less(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateLess(), lhs, rhs); }
 
-FieldWithInfinity::FieldWithInfinity(const Field & field_)
-    : field(field_),
-    type(Type::NORMAL)
-{
-}
-
-FieldWithInfinity::FieldWithInfinity(Field && field_)
-    : field(std::move(field_)),
-    type(Type::NORMAL)
-{
-}
-
-FieldWithInfinity::FieldWithInfinity(const Type type_)
-    : type(type_)
-{
-}
-
-FieldWithInfinity FieldWithInfinity::getMinusInfinity()
-{
-    return FieldWithInfinity(Type::MINUS_INFINITY);
-}
-
-FieldWithInfinity FieldWithInfinity::getPlusInfinity()
-{
-    return FieldWithInfinity(Type::PLUS_INFINITY);
-}
-
-bool FieldWithInfinity::operator<(const FieldWithInfinity & other) const
-{
-    return type < other.type || (type == other.type && type == Type::NORMAL && field < other.field);
-}
-
-bool FieldWithInfinity::operator==(const FieldWithInfinity & other) const
-{
-    return type == other.type && (type != Type::NORMAL || field == other.field);
-}
-
-
 /** Calculate expressions, that depend only on constants.
   * For index to work when something like "WHERE Date = toDate(now())" is written.
   */
@@ -480,24 +442,41 @@ bool KeyCondition::getConstant(const ASTPtr & expr, Block & block_with_constants
 }
 
-static void applyFunction(
+static Field applyFunctionForField(
     const FunctionBasePtr & func,
-    const DataTypePtr & arg_type, const Field & arg_value,
-    DataTypePtr & res_type, Field & res_value)
+    const DataTypePtr & arg_type,
+    const Field & arg_value)
 {
-    res_type = func->getReturnType();
-
     Block block
     {
         { arg_type->createColumnConst(1, arg_value), arg_type, "x" },
-        { nullptr, res_type, "y" }
+        { nullptr, func->getReturnType(), "y" }
     };
 
     func->execute(block, {0}, 1, 1);
-
-    block.safeGetByPosition(1).column->get(0, res_value);
+    return (*block.safeGetByPosition(1).column)[0];
 }
 
+static FieldRef applyFunction(FunctionBasePtr & func, const DataTypePtr & current_type, const FieldRef & field)
+{
+    /// Fallback for fields without block reference.
+ if (field.isExplicit()) + return applyFunctionForField(func, current_type, field); + + String result_name = "_" + func->getName() + "_" + toString(field.column_idx); + size_t result_idx; + const auto & block = field.block; + if (!block->has(result_name)) + { + result_idx = block->columns(); + field.block->insert({nullptr, func->getReturnType(), result_name}); + func->execute(*block, {field.column_idx}, result_idx, block->rows()); + } + else + result_idx = block->getPositionByName(result_name); + + return {field.block, field.row_idx, result_idx}; +} void KeyCondition::traverseAST(const ASTPtr & node, const Context & context, Block & block_with_constants) { @@ -569,12 +548,8 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions( return false; // Apply the next transformation step - DataTypePtr new_type; - applyFunction(a.function_base, out_type, out_value, new_type, out_value); - if (!new_type) - return false; - - out_type.swap(new_type); + out_value = applyFunctionForField(a.function_base, out_type, out_value); + out_type = a.function_base->getReturnType(); expr_name = a.result_name; // Transformation results in a key expression, accept @@ -957,8 +932,8 @@ String KeyCondition::toString() const template static BoolMask forAnyHyperrectangle( size_t key_size, - const Field * key_left, - const Field * key_right, + const FieldRef * key_left, + const FieldRef * key_right, bool left_bounded, bool right_bounded, std::vector & hyperrectangle, @@ -1049,8 +1024,8 @@ static BoolMask forAnyHyperrectangle( BoolMask KeyCondition::checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types, bool right_bounded, BoolMask initial_mask) const @@ -1102,19 +1077,12 @@ std::optional KeyCondition::applyMonotonicFunctionsChainToRange( return {}; } - /// Apply the function. 
- DataTypePtr new_type; if (!key_range.left.isNull()) - applyFunction(func, current_type, key_range.left, new_type, key_range.left); + key_range.left = applyFunction(func, current_type, key_range.left); if (!key_range.right.isNull()) - applyFunction(func, current_type, key_range.right, new_type, key_range.right); + key_range.right = applyFunction(func, current_type, key_range.right); - if (!new_type) - { - return {}; - } - - current_type.swap(new_type); + current_type = func->getReturnType(); if (!monotonicity.is_positive) key_range.swapLeftAndRight(); @@ -1220,8 +1188,8 @@ BoolMask KeyCondition::checkInHyperrectangle( BoolMask KeyCondition::checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types, BoolMask initial_mask) const { @@ -1231,8 +1199,8 @@ bool KeyCondition::mayBeTrueInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types) const { return checkInRange(used_key_size, left_key, right_key, data_types, true, BoolMask::consider_only_can_be_true).can_be_true; @@ -1241,7 +1209,7 @@ BoolMask KeyCondition::checkAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types, BoolMask initial_mask) const { @@ -1251,7 +1219,7 @@ bool KeyCondition::mayBeTrueAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types) const { return checkInRange(used_key_size, left_key, nullptr, data_types, false, BoolMask::consider_only_can_be_true).can_be_true; @@ -1382,4 +1350,13 @@ size_t KeyCondition::getMaxKeyColumn() const return res; } +bool KeyCondition::hasMonotonicFunctionsChain() const +{ + for (const auto & element : rpn) + if (!element.monotonic_functions_chain.empty() + || (element.set_index && element.set_index->hasMonotonicFunctionsChain())) + return true; + return false; +} + } diff --git a/dbms/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h similarity index 86% rename from dbms/src/Storages/MergeTree/KeyCondition.h rename to src/Storages/MergeTree/KeyCondition.h index 8667e0aea27..3a3768f0e4c 100644 --- a/dbms/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -15,10 +15,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int BAD_TYPE_OF_FIELD; -} class IFunction; using FunctionBasePtr = std::shared_ptr; @@ -26,6 +22,33 @@ using FunctionBasePtr = std::shared_ptr; class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>; +/** A field that can be stored in two representations: + * - A standalone field. + * - A field with a reference to its position in a block. + * It is needed to execute functions on ranges during + * index analysis. Once a function has been executed for a field, + * its result is cached for the whole block that the field's reference points to. + */ +struct FieldRef : public Field +{ + FieldRef() = default; + + /// Create as explicit field without block. + template <typename T> + FieldRef(T && value) : Field(std::forward<T>(value)) {} + + /// Create as reference to field in block.
+ FieldRef(Block * block_, size_t row_idx_, size_t column_idx_) + : Field((*block_->getByPosition(column_idx_).column)[row_idx_]), + block(block_), row_idx(row_idx_), column_idx(column_idx_) {} + + bool isExplicit() const { return block == nullptr; } + + Block * block = nullptr; + size_t row_idx = 0; + size_t column_idx = 0; +}; + /** Range with open or closed ends; possibly unbounded. */ struct Range @@ -35,8 +58,8 @@ private: static bool less(const Field & lhs, const Field & rhs); public: - Field left; /// the left border, if any - Field right; /// the right border, if any + FieldRef left; /// the left border, if any + FieldRef right; /// the right border, if any bool left_bounded = false; /// bounded at the left bool right_bounded = false; /// bounded at the right bool left_included = false; /// includes the left border, if any @@ -46,11 +69,11 @@ public: Range() {} /// One point. - Range(const Field & point) + Range(const FieldRef & point) : left(point), right(point), left_bounded(true), right_bounded(true), left_included(true), right_included(true) {} /// A bounded two-sided range. - Range(const Field & left_, bool left_included_, const Field & right_, bool right_included_) + Range(const FieldRef & left_, bool left_included_, const FieldRef & right_, bool right_included_) : left(left_), right(right_), left_bounded(true), right_bounded(true), left_included(left_included_), right_included(right_included_) @@ -58,7 +81,7 @@ public: shrinkToIncludedIfPossible(); } - static Range createRightBounded(const Field & right_point, bool right_included) + static Range createRightBounded(const FieldRef & right_point, bool right_included) { Range r; r.right = right_point; @@ -68,7 +91,7 @@ public: return r; } - static Range createLeftBounded(const Field & left_point, bool left_included) + static Range createLeftBounded(const FieldRef & left_point, bool left_included) { Range r; r.left = left_point; @@ -84,7 +107,7 @@ public: */ void shrinkToIncludedIfPossible() { - if (left_bounded && !left_included) + if (left.isExplicit() && left_bounded && !left_included) { if (left.getType() == Field::Types::UInt64 && left.get() != std::numeric_limits::max()) { @@ -97,7 +120,7 @@ public: left_included = true; } } - if (right_bounded && !right_included) + if (right.isExplicit() && right_bounded && !right_included) { if (right.getType() == Field::Types::UInt64 && right.get() != std::numeric_limits::min()) { @@ -120,13 +143,13 @@ public: } /// x contained in the range - bool contains(const Field & x) const + bool contains(const FieldRef & x) const { return !leftThan(x) && !rightThan(x); } /// x is to the left - bool rightThan(const Field & x) const + bool rightThan(const FieldRef & x) const { return (left_bounded ? !(less(left, x) || (left_included && equals(x, left))) @@ -134,7 +157,7 @@ public: } /// x is to the right - bool leftThan(const Field & x) const + bool leftThan(const FieldRef & x) const { return (right_bounded ? 
!(less(x, right) || (right_included && equals(x, right))) @@ -195,42 +218,6 @@ public: String toString() const; }; - -/// Class that extends arbitrary objects with infinities, like +-inf for floats -class FieldWithInfinity -{ -public: - enum Type - { - MINUS_INFINITY = -1, - NORMAL = 0, - PLUS_INFINITY = 1 - }; - - explicit FieldWithInfinity(const Field & field_); - FieldWithInfinity(Field && field_); - - static FieldWithInfinity getMinusInfinity(); - static FieldWithInfinity getPlusInfinity(); - - bool operator<(const FieldWithInfinity & other) const; - bool operator==(const FieldWithInfinity & other) const; - - Field getFieldIfFinite() const - { - if (type != NORMAL) - throw Exception("Trying to get field of infinite type", ErrorCodes::BAD_TYPE_OF_FIELD); - return field; - } - -private: - Field field; - Type type; - - FieldWithInfinity(const Type type_); -}; - - /** Condition on the index. * * Consists of the conditions for the key belonging to all possible ranges or sets, @@ -261,8 +248,8 @@ public: /// one of the resulting mask components (see BoolMask::consider_only_can_be_XXX). BoolMask checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef* right_key, const DataTypes & data_types, BoolMask initial_mask = BoolMask(false, false)) const; @@ -270,7 +257,7 @@ public: /// left_key must contain all the fields in the sort_descr in the appropriate order. BoolMask checkAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types, BoolMask initial_mask = BoolMask(false, false)) const; @@ -278,15 +265,15 @@ public: /// This is more efficient than checkInRange(...).can_be_true. bool mayBeTrueInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types) const; /// Same as checkAfter, but calculate only may_be_true component of a result. /// This is more efficient than checkAfter(...).can_be_true. bool mayBeTrueAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types) const; /// Checks that the index can not be used. @@ -295,6 +282,8 @@ public: /// Get the maximum number of the key element used in the condition. size_t getMaxKeyColumn() const; + bool hasMonotonicFunctionsChain() const; + /// Impose an additional condition: the value in the column `column` must be in the range `range`. /// Returns whether there is such a column in the key. 
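Note the `isExplicit()` guard added to `shrinkToIncludedIfPossible` above: a referenced field mirrors a value held inside a shared index block, and tightening it in place would corrupt the cached column, so only standalone fields are shrunk. The tightening itself is plain integer arithmetic; a minimal sketch of the left-bound case:

```cpp
#include <cstdint>
#include <iostream>

// Open bounds on integers can be tightened to closed ones:
// (x > 5) is the same set as (x >= 6) for UInt64, as long as
// the bound is not already at the type's maximum.
struct SimpleRange
{
    uint64_t left = 0;
    bool left_included = false;

    void shrinkToIncludedIfPossible()
    {
        if (!left_included && left != UINT64_MAX)
        {
            ++left;               // (x > 5) becomes (x >= 6)
            left_included = true;
        }
    }
};

int main()
{
    SimpleRange r{5, false};
    r.shrinkToIncludedIfPossible();
    std::cout << "x >= " << r.left << '\n'; // x >= 6
}
```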
bool addCondition(const String & column, const Range & range); @@ -374,8 +363,8 @@ public: private: BoolMask checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types, bool right_bounded, BoolMask initial_mask) const; diff --git a/dbms/src/Storages/MergeTree/LevelMergeSelector.cpp b/src/Storages/MergeTree/LevelMergeSelector.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/LevelMergeSelector.cpp rename to src/Storages/MergeTree/LevelMergeSelector.cpp diff --git a/dbms/src/Storages/MergeTree/LevelMergeSelector.h b/src/Storages/MergeTree/LevelMergeSelector.h similarity index 100% rename from dbms/src/Storages/MergeTree/LevelMergeSelector.h rename to src/Storages/MergeTree/LevelMergeSelector.h diff --git a/dbms/src/Storages/MergeTree/MarkRange.h b/src/Storages/MergeTree/MarkRange.h similarity index 100% rename from dbms/src/Storages/MergeTree/MarkRange.h rename to src/Storages/MergeTree/MarkRange.h diff --git a/dbms/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeList.cpp rename to src/Storages/MergeTree/MergeList.cpp diff --git a/dbms/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeList.h rename to src/Storages/MergeTree/MergeList.h diff --git a/dbms/src/Storages/MergeTree/MergeSelector.h b/src/Storages/MergeTree/MergeSelector.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeSelector.h rename to src/Storages/MergeTree/MergeSelector.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp rename to src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h rename to src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp rename to src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h b/src/Storages/MergeTree/MergeTreeBlockOutputStream.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h rename to src/Storages/MergeTree/MergeTreeBlockOutputStream.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp rename to src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h b/src/Storages/MergeTree/MergeTreeBlockReadUtils.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h rename to src/Storages/MergeTree/MergeTreeBlockReadUtils.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp similarity index 98% rename from dbms/src/Storages/MergeTree/MergeTreeData.cpp 
rename to src/Storages/MergeTree/MergeTreeData.cpp index 232295bcd5e..aaacea6f1e7 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -192,7 +192,7 @@ MergeTreeData::MergeTreeData( min_format_version = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING; } - setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast); + setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast); /// format_file always contained on any data path PathWithDisk version_file; @@ -610,14 +610,17 @@ void checkTTLExpression(const ExpressionActionsPtr & ttl_expression, const Strin } -void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls, +void MergeTreeData::setTTLExpressions(const ColumnsDescription & new_columns, const ASTPtr & new_ttl_table_ast, bool only_check) { - auto create_ttl_entry = [this](ASTPtr ttl_ast) + + auto new_column_ttls = new_columns.getColumnTTLs(); + + auto create_ttl_entry = [this, &new_columns](ASTPtr ttl_ast) { TTLEntry result; - auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, getColumns().getAllPhysical()); + auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, new_columns.getAllPhysical()); result.expression = ExpressionAnalyzer(ttl_ast, syntax_result, global_context).getActions(false); result.destination_type = PartDestinationType::DELETE; result.result_column = ttl_ast->getColumnName(); @@ -1457,6 +1460,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S for (const auto & column : getColumns().getAllPhysical()) old_types.emplace(column.name, column.type.get()); + for (const AlterCommand & command : commands) { if (command.type == AlterCommand::MODIFY_ORDER_BY && !is_custom_partitioned) @@ -1471,6 +1475,15 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S "ALTER ADD INDEX is not supported for tables with the old syntax", ErrorCodes::BAD_ARGUMENTS); } + if (command.type == AlterCommand::RENAME_COLUMN) + { + if (columns_alter_type_forbidden.count(command.column_name) || columns_alter_type_metadata_only.count(command.column_name)) + { + throw Exception( + "Trying to ALTER RENAME key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression", + ErrorCodes::ILLEGAL_COLUMN); + } + } else if (command.isModifyingData()) { if (columns_alter_type_forbidden.count(command.column_name)) @@ -1490,7 +1503,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S setProperties(metadata, /* only_check = */ true); - setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast, /* only_check = */ true); + setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast, /* only_check = */ true); if (settings_ast) { @@ -3589,4 +3602,18 @@ bool MergeTreeData::canUsePolymorphicParts(const MergeTreeSettings & settings, S return true; } +MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const MergeTreeDataPartPtr part) const +{ + MutationCommands commands = getFirtsAlterMutationCommandsForPart(part); + + AlterConversions result{}; + for (const auto & command : commands) + /// Currently we need explicit conversions only for RENAME alter + /// all other conversions can be deduced from diff between part columns + /// and columns in storage. 
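`getAlterConversionsForPart` (continued just below) distills the pending mutation commands into a bare rename map; everything else is deducible from the part-vs-storage column diff, as the comment notes. A compilable restatement of that loop, where `Command` is a hypothetical stand-in for `MutationCommand`:

```cpp
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for MutationCommand, illustration only.
struct Command
{
    enum Type { READ_COLUMN, DROP_COLUMN, RENAME_COLUMN };
    Type type;
    std::string column_name; // the old name for RENAME_COLUMN
    std::string rename_to;   // the new name for RENAME_COLUMN
};

// Only RENAME needs an explicit conversion; all other alters are
// recoverable from the difference between part and storage columns.
std::unordered_map<std::string, std::string> buildRenameMap(const std::vector<Command> & commands)
{
    std::unordered_map<std::string, std::string> rename_map; // new_name -> old_name
    for (const auto & command : commands)
        if (command.type == Command::RENAME_COLUMN)
            rename_map[command.rename_to] = command.column_name;
    return rename_map;
}

int main()
{
    auto map = buildRenameMap({{Command::RENAME_COLUMN, "hits", "clicks"}});
    std::cout << "clicks was " << map.at("clicks") << '\n'; // clicks was hits
}
```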
+ if (command.type == MutationCommand::Type::RENAME_COLUMN) + result.rename_map[command.rename_to] = command.column_name; + + return result; +} } diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h similarity index 96% rename from dbms/src/Storages/MergeTree/MergeTreeData.h rename to src/Storages/MergeTree/MergeTreeData.h index fbc42de5517..d299d39726e 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -33,6 +33,7 @@ namespace DB class MergeListEntry; class AlterCommands; class MergeTreePartsMover; +class MutationCommands; class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>; @@ -124,6 +125,20 @@ public: STRONG_TYPEDEF(String, PartitionID) + /// Alter conversions that should be applied on the fly for a part. Built from + /// the most recent mutation commands for the part. For now we have only rename_map + /// here (from the ALTER RENAME command), because for all other types of alter + /// we can deduce conversions for the part from the difference between + /// part->getColumns() and storage->getColumns(). + struct AlterConversions + { + /// Rename map new_name -> old_name + std::unordered_map<String, String> rename_map; + + bool isColumnRenamed(const String & new_name) const { return rename_map.count(new_name) > 0; } + String getColumnOldName(const String & new_name) const { return rename_map.at(new_name); } + }; + struct LessDataPart { using is_transparent = void; @@ -418,11 +433,6 @@ public: DataPartPtr getPartIfExists(const String & part_name, const DataPartStates & valid_states); DataPartPtr getPartIfExists(const MergeTreePartInfo & part_info, const DataPartStates & valid_states); - std::vector getSkipIndices() const - { - return std::vector(std::begin(skip_indices), std::end(skip_indices)); - } - /// Total size of active parts in bytes. size_t getTotalActiveSizeInBytes() const; @@ -535,10 +545,9 @@ public: broken_part_callback(name); } - /** Get the key expression AST as an ASTExpressionList. - * It can be specified in the tuple: (CounterID, Date), - * or as one column: CounterID. - */ + /** Get the key expression AST as an ASTExpressionList. It can be specified + * in the tuple: (CounterID, Date), or as one column: CounterID. + */ static ASTPtr extractKeyExpressionList(const ASTPtr & node); bool hasSortingKey() const { return !sorting_key_columns.empty(); } @@ -647,6 +656,9 @@ public: /// Reserves 0 bytes ReservationPtr makeEmptyReservationOnLargestDisk() { return getStoragePolicy()->makeEmptyReservationOnLargestDisk(); } + /// Return alter conversions for the part, which must be applied on the fly. + AlterConversions getAlterConversionsForPart(const MergeTreeDataPartPtr part) const; + MergeTreeDataFormatVersion format_version; Context & global_context; @@ -856,14 +868,14 @@ protected: std::mutex grab_old_parts_mutex; /// The same for clearOldTemporaryDirectories.
std::mutex clear_old_temporary_directories_mutex; - /// Mutex for settings usage void setProperties(const StorageInMemoryMetadata & metadata, bool only_check = false); void initPartitionKey(); - void setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls, - const ASTPtr & new_ttl_table_ast, bool only_check = false); + void setTTLExpressions(const ColumnsDescription & columns, + const ASTPtr & new_ttl_table_ast, bool only_check = false); + void checkStoragePolicy(const StoragePolicyPtr & new_storage_policy); void setStoragePolicy(const String & new_storage_policy_name, bool only_check = false); @@ -908,6 +920,11 @@ protected: /// mechanisms for parts locking virtual bool partIsAssignedToBackgroundOperation(const DataPartPtr & part) const = 0; + /// Return the most recent mutation commands for the part which weren't applied. + /// Used to receive AlterConversions for the part and apply them on the fly. This + /// method has different implementations for replicated and non-replicated + /// MergeTree, because they store mutations in different ways. + virtual MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const = 0; /// Moves part to specified space, used in ALTER ... MOVE ... queries bool movePartsToSpace(const DataPartsVector & parts, SpacePtr space); diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataFormatVersion.h b/src/Storages/MergeTree/MergeTreeDataFormatVersion.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataFormatVersion.h rename to src/Storages/MergeTree/MergeTreeDataFormatVersion.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp similarity index 93% rename from dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp rename to src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 0c8c39b074c..c10a6c6dd59 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -26,11 +26,14 @@ #include #include #include +#include #include #include #include +#include + namespace ProfileEvents { extern const Event MergedRows; @@ -770,6 +773,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor MergedBlockOutputStream to{ new_data_part, merging_columns, + data.skip_indices, compression_codec, merged_column_to_size, data_settings->min_merge_bytes_to_use_direct_io, @@ -1039,8 +1043,10 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor /// All columns from part are changed and may be some more that were missing before in part if (isCompactPart(source_part) || source_part->getColumns().isSubsetOf(updated_header.getNamesAndTypesList())) { + auto part_indices = getIndicesForNewDataPart(data.skip_indices, for_file_renames); mutateAllPartColumns( new_data_part, + part_indices, in, time_of_mutation, compression_codec, @@ -1056,7 +1062,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor auto indices_to_recalc = getIndicesToRecalculate(in, storage_from_source_part, updated_header.getNamesAndTypesList(), context); NameSet files_to_skip = collectFilesToSkip(updated_header, indices_to_recalc, mrk_extension); - NameSet files_to_remove = collectFilesToRemove(source_part, for_file_renames, mrk_extension); + NameToNameMap files_to_rename = collectFilesForRenames(source_part, for_file_renames, mrk_extension); if (need_remove_expired_values) files_to_skip.insert("ttl.txt"); @@ -1064,10 +1070,21 @@
MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor /// Create hardlinks for unchanged files for (auto it = disk->iterateDirectory(source_part->getFullRelativePath()); it->isValid(); it->next()) { - if (files_to_skip.count(it->name()) || files_to_remove.count(it->name())) + if (files_to_skip.count(it->name())) continue; - String destination = new_part_tmp_path + "/" + it->name(); + String destination = new_part_tmp_path + "/"; + auto rename_it = files_to_rename.find(it->name()); + if (rename_it != files_to_rename.end()) + { + if (rename_it->second.empty()) + continue; + destination += rename_it->second; + } + else + { + destination += it->name(); + } disk->createHardLink(it->path(), destination); } @@ -1090,9 +1107,19 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor need_remove_expired_values); } - for (const String & removed_file : files_to_remove) - if (new_data_part->checksums.files.count(removed_file)) - new_data_part->checksums.files.erase(removed_file); + for (const auto & [rename_from, rename_to] : files_to_rename) + { + if (rename_to.empty() && new_data_part->checksums.files.count(rename_from)) + { + new_data_part->checksums.files.erase(rename_from); + } + else if (new_data_part->checksums.files.count(rename_from)) + { + new_data_part->checksums.files[rename_to] = new_data_part->checksums.files[rename_from]; + + new_data_part->checksums.files.erase(rename_from); + } + } finalizeMutatedPart(source_part, new_data_part, need_remove_expired_values); } @@ -1235,6 +1262,21 @@ void MergeTreeDataMergerMutator::splitMutationCommands( else if (is_compact_part && command.type == MutationCommand::Type::DROP_COLUMN) { removed_columns_from_compact_part.emplace(command.column_name); + for_file_renames.push_back(command); + } + else if (command.type == MutationCommand::Type::RENAME_COLUMN) + { + if (is_compact_part) + { + for_interpreter.push_back( + { + .type = MutationCommand::Type::READ_COLUMN, + .column_name = command.rename_to, + }); + already_changed_columns.emplace(command.column_name); + } + else + for_file_renames.push_back(command); } else { @@ -1248,7 +1290,8 @@ void MergeTreeDataMergerMutator::splitMutationCommands( /// we just don't read dropped columns for (const auto & column : part->getColumns()) { - if (!removed_columns_from_compact_part.count(column.name) && !already_changed_columns.count(column.name)) + if (!removed_columns_from_compact_part.count(column.name) + && !already_changed_columns.count(column.name)) { for_interpreter.emplace_back(MutationCommand { @@ -1262,7 +1305,7 @@ void MergeTreeDataMergerMutator::splitMutationCommands( } -NameSet MergeTreeDataMergerMutator::collectFilesToRemove( +NameToNameMap MergeTreeDataMergerMutator::collectFilesForRenames( MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension) { /// Collect counts for shared streams of different columns. As an example, Nested columns have shared stream with array sizes. 
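The hardlink loop above relies on a small convention in `files_to_rename`: a file absent from the map is linked under its own name, an entry with an empty destination is dropped from the new part, and a non-empty destination is a rename. A short sketch of that three-way decision (file names are made up):

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Mirrors the decision in mutatePartToTemporaryPart: absent -> keep name,
// empty destination -> drop, non-empty destination -> rename.
int main()
{
    const std::map<std::string, std::string> files_to_rename{
        {"old_col.bin", "new_col.bin"}, // ALTER RENAME
        {"dropped.bin", ""},            // ALTER DROP
    };

    const std::vector<std::string> part_files{"old_col.bin", "dropped.bin", "key.bin"};
    for (const auto & name : part_files)
    {
        auto it = files_to_rename.find(name);
        if (it != files_to_rename.end() && it->second.empty())
            std::cout << name << " -> (removed)\n";
        else if (it != files_to_rename.end())
            std::cout << name << " -> hardlink as " << it->second << '\n';
        else
            std::cout << name << " -> hardlink as " << name << '\n';
    }
}
```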
@@ -1277,14 +1320,14 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove( {}); } - NameSet remove_files; + NameToNameMap rename_map; /// Remove old indices for (const auto & command : commands_for_removes) { if (command.type == MutationCommand::Type::DROP_INDEX) { - remove_files.emplace("skp_idx_" + command.column_name + ".idx"); - remove_files.emplace("skp_idx_" + command.column_name + mrk_extension); + rename_map.emplace("skp_idx_" + command.column_name + ".idx", ""); + rename_map.emplace("skp_idx_" + command.column_name + mrk_extension, ""); } else if (command.type == MutationCommand::Type::DROP_COLUMN) { @@ -1294,8 +1337,8 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove( /// Delete files if they are no longer shared with another column. if (--stream_counts[stream_name] == 0) { - remove_files.emplace(stream_name + ".bin"); - remove_files.emplace(stream_name + mrk_extension); + rename_map.emplace(stream_name + ".bin", ""); + rename_map.emplace(stream_name + mrk_extension, ""); } }; @@ -1304,9 +1347,31 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove( if (column) column->type->enumerateStreams(callback, stream_path); } + else if (command.type == MutationCommand::Type::RENAME_COLUMN) + { + String escaped_name_from = escapeForFileName(command.column_name); + String escaped_name_to = escapeForFileName(command.rename_to); + + IDataType::StreamCallback callback = [&](const IDataType::SubstreamPath & substream_path) + { + String stream_from = IDataType::getFileNameForStream(command.column_name, substream_path); + + String stream_to = boost::replace_first_copy(stream_from, escaped_name_from, escaped_name_to); + + if (stream_from != stream_to) + { + rename_map.emplace(stream_from + ".bin", stream_to + ".bin"); + rename_map.emplace(stream_from + mrk_extension, stream_to + mrk_extension); + } + }; + IDataType::SubstreamPath stream_path; + auto column = source_part->getColumns().tryGetByName(command.column_name); + if (column) + column->type->enumerateStreams(callback, stream_path); + } } - return remove_files; + return rename_map; } NameSet MergeTreeDataMergerMutator::collectFilesToSkip( @@ -1344,10 +1409,13 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( const MutationCommands & commands_for_removes) { NameSet removed_columns; + NameToNameMap renamed_columns; for (const auto & command : commands_for_removes) { if (command.type == MutationCommand::DROP_COLUMN) removed_columns.insert(command.column_name); + if (command.type == MutationCommand::RENAME_COLUMN) + renamed_columns.emplace(command.rename_to, command.column_name); } Names source_column_names = source_part->getColumns().getNames(); NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end()); @@ -1364,12 +1432,32 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( { ++it; } + else if (renamed_columns.count(it->name) && source_columns_name_set.count(renamed_columns[it->name])) + { + ++it; + } else it = all_columns.erase(it); } return all_columns; } +MergeTreeIndices MergeTreeDataMergerMutator::getIndicesForNewDataPart( + const MergeTreeIndices & all_indices, + const MutationCommands & commands_for_removes) +{ + NameSet removed_indices; + for (const auto & command : commands_for_removes) + if (command.type == MutationCommand::DROP_INDEX) + removed_indices.insert(command.column_name); + + MergeTreeIndices new_indices; + for (const auto & index : all_indices) + if (!removed_indices.count(index->name)) + new_indices.push_back(index); + + 
return new_indices; +} std::set MergeTreeDataMergerMutator::getIndicesToRecalculate( BlockInputStreamPtr & input_stream, @@ -1434,6 +1522,7 @@ bool MergeTreeDataMergerMutator::shouldExecuteTTL(const Names & columns, const M void MergeTreeDataMergerMutator::mutateAllPartColumns( MergeTreeData::MutableDataPartPtr new_data_part, + const MergeTreeIndices & skip_indices, BlockInputStreamPtr mutating_stream, time_t time_of_mutation, const CompressionCodecPtr & compression_codec, @@ -1455,6 +1544,7 @@ void MergeTreeDataMergerMutator::mutateAllPartColumns( MergedBlockOutputStream out{ new_data_part, new_data_part->getColumns(), + skip_indices, compression_codec}; mutating_stream->readPrefix(); @@ -1491,7 +1581,6 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns( if (mutating_stream == nullptr) throw Exception("Cannot mutate part columns with uninitialized mutations stream. It's a bug", ErrorCodes::LOGICAL_ERROR); - if (need_remove_expired_values) mutating_stream = std::make_shared<TTLBlockInputStream>(mutating_stream, data, new_data_part, time_of_mutation, true); diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h similarity index 95% rename from dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h rename to src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 3d41ceee990..6f4f8a03e9a 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -147,7 +147,7 @@ private: /// Apply commands to source_part i.e. remove some columns in source_part /// and return set of files, that have to be removed from filesystem and checksums - static NameSet collectFilesToRemove(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension); + static NameToNameMap collectFilesForRenames(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension); /// Files, that we don't need to remove and don't need to hardlink, for example columns.txt and checksums.txt. /// Because we will generate new versions of them after we perform mutation. @@ -160,6 +160,11 @@ private: NamesAndTypesList all_columns, const MutationCommands & commands_for_removes); + /// Get skip indices that should exist in the resulting data part.
+ static MergeTreeIndices getIndicesForNewDataPart( + const MergeTreeIndices & all_indices, + const MutationCommands & commands_for_removes); + bool shouldExecuteTTL(const Names & columns, const MutationCommands & commands) const; /// Return set of indices which should be recalculated during mutation also @@ -173,6 +178,7 @@ private: /// Override all columns of new part using mutating_stream void mutateAllPartColumns( MergeTreeData::MutableDataPartPtr new_data_part, + const MergeTreeIndices & skip_indices, BlockInputStreamPtr mutating_stream, time_t time_of_mutation, const CompressionCodecPtr & codec, diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp rename to src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.h b/src/Storages/MergeTree/MergeTreeDataPartChecksum.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.h rename to src/Storages/MergeTree/MergeTreeDataPartChecksum.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp rename to src/Storages/MergeTree/MergeTreeDataPartCompact.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.h rename to src/Storages/MergeTree/MergeTreeDataPartCompact.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp rename to src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h rename to src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartType.cpp b/src/Storages/MergeTree/MergeTreeDataPartType.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartType.cpp rename to src/Storages/MergeTree/MergeTreeDataPartType.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartType.h b/src/Storages/MergeTree/MergeTreeDataPartType.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartType.h rename to src/Storages/MergeTree/MergeTreeDataPartType.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWide.cpp rename to src/Storages/MergeTree/MergeTreeDataPartWide.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWide.h rename to src/Storages/MergeTree/MergeTreeDataPartWide.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp rename to 
src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h rename to src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp rename to src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h rename to src/Storages/MergeTree/MergeTreeDataPartWriterWide.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp similarity index 96% rename from dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp rename to src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 629a2b2cc18..816af8db3e9 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -61,6 +61,7 @@ namespace std #include #include #include +#include namespace ProfileEvents { @@ -802,6 +803,16 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams( res.emplace_back(std::move(source)); } + + /// Use ConcatProcessor to concatenate the sources together. + /// It is needed to read in parts order (and hence in PK order) when a single thread is used. + if (res.size() > 1) + { + auto concat = std::make_shared<ConcatProcessor>(res.front().getHeader(), res.size()); + Pipe pipe(std::move(res), std::move(concat)); + res = Pipes(); + res.emplace_back(std::move(pipe)); + } } return res; @@ -1190,11 +1201,33 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( * If fits, split it into smaller ones and put them on the stack. If not, discard it. * If the segment is already of one mark length, add it to response and discard it. */ - std::vector<MarkRange> ranges_stack{ {0, marks_count} }; + std::vector<MarkRange> ranges_stack = { {0, marks_count} }; + + std::function<void(size_t, size_t, FieldRef &)> create_field_ref; + /// If there are no monotonic functions, there is no need to save the block reference. + /// Passing an explicit field to FieldRef allows ranges to be optimized and performs better. + if (key_condition.hasMonotonicFunctionsChain()) + { + auto index_block = std::make_shared<Block>(); + for (size_t i = 0; i < used_key_size; ++i) + index_block->insert({index[i], data.primary_key_data_types[i], data.primary_key_columns[i]}); + + create_field_ref = [index_block](size_t row, size_t column, FieldRef & field) + { + field = {index_block.get(), row, column}; + }; + } + else + { + create_field_ref = [&index](size_t row, size_t column, FieldRef & field) + { + index[column]->get(row, field); + }; + } /// NOTE Creating temporary Field objects to pass to KeyCondition.
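`create_field_ref` above is what makes the FieldRef machinery pay off: when a monotonic-function chain is present, the PK index columns are wrapped into a block, so `applyFunction` (see the KeyCondition.cpp hunk earlier) can materialize a function's result once per block and reuse it for every range endpoint. A toy version of that memoization; `MiniBlock` and `applyCached` are illustrative stand-ins, not ClickHouse API:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// A block is approximated by named integer columns.
struct MiniBlock
{
    std::map<std::string, std::vector<int>> columns; // name -> data
};

// Compute func over the whole input column once, store it under a
// derived name (same naming trick as applyFunction), then serve
// later lookups from the materialized result column.
int applyCached(MiniBlock & block, const std::string & input, size_t row,
                const std::string & func_name, const std::function<int(int)> & func)
{
    const std::string result_name = "_" + func_name + "_" + input;
    auto it = block.columns.find(result_name);
    if (it == block.columns.end())
    {
        std::vector<int> result;
        for (int v : block.columns.at(input))
            result.push_back(func(v)); // one pass over the whole index block
        it = block.columns.emplace(result_name, std::move(result)).first;
    }
    return it->second[row];
}

int main()
{
    MiniBlock index{{{"key", {1, 4, 9, 16}}}};
    auto doubled = [](int v) { return 2 * v; };
    std::cout << applyCached(index, "key", 2, "double", doubled) << '\n'; // computes once: 18
    std::cout << applyCached(index, "key", 3, "double", doubled) << '\n'; // cache hit: 32
}
```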
- Row index_left(used_key_size); - Row index_right(used_key_size); + std::vector index_left(used_key_size); + std::vector index_right(used_key_size); while (!ranges_stack.empty()) { @@ -1205,7 +1238,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( if (range.end == marks_count && !has_final_mark) { for (size_t i = 0; i < used_key_size; ++i) - index[i]->get(range.begin, index_left[i]); + create_field_ref(range.begin, i, index_left[i]); may_be_true = key_condition.mayBeTrueAfter( used_key_size, index_left.data(), data.primary_key_data_types); @@ -1217,8 +1250,8 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( for (size_t i = 0; i < used_key_size; ++i) { - index[i]->get(range.begin, index_left[i]); - index[i]->get(range.end, index_right[i]); + create_field_ref(range.begin, i, index_left[i]); + create_field_ref(range.end, i, index_right[i]); } may_be_true = key_condition.mayBeTrueInRange( @@ -1243,9 +1276,9 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( size_t end; for (end = range.end; end > range.begin + step; end -= step) - ranges_stack.push_back(MarkRange(end - step, end)); + ranges_stack.emplace_back(end - step, end); - ranges_stack.push_back(MarkRange(range.begin, end)); + ranges_stack.emplace_back(range.begin, end); } } } diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h rename to src/Storages/MergeTree/MergeTreeDataSelectExecutor.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp similarity index 98% rename from dbms/src/Storages/MergeTree/MergeTreeDataWriter.cpp rename to src/Storages/MergeTree/MergeTreeDataWriter.cpp index c560583259c..23a60ddab78 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -210,8 +210,8 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithPa const auto & date_lut = DateLUT::instance(); - DayNum min_month = date_lut.toFirstDayNumOfMonth(DayNum(min_date)); - DayNum max_month = date_lut.toFirstDayNumOfMonth(DayNum(max_date)); + auto min_month = date_lut.toNumYYYYMM(min_date); + auto max_month = date_lut.toNumYYYYMM(max_date); if (min_month != max_month) throw Exception("Logical error: part spans more than one month.", ErrorCodes::LOGICAL_ERROR); @@ -294,7 +294,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithPa /// either default lz4 or compression method with zero thresholds on absolute and relative part size. 
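The `writeTempPart` hunk above (and the matching MergeTreePartInfo.cpp hunk later in this diff) replaces first-day-of-month DayNum comparison with DateLUT's `toNumYYYYMM`, which returns the month as a packed YYYYMM integer, so the month-span check reduces to comparing two integers. A schematic of the idea only (real code derives the number from a DayNum via DateLUT, not by dividing a YYYYMMDD literal):

```cpp
#include <iostream>

int main()
{
    const unsigned min_yyyymmdd = 20200131;
    const unsigned max_yyyymmdd = 20200201;
    const unsigned min_month = min_yyyymmdd / 100; // 202001
    const unsigned max_month = max_yyyymmdd / 100; // 202002
    std::cout << (min_month != max_month ? "spans two months" : "single month") << '\n';
}
```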
auto compression_codec = data.global_context.chooseCompressionCodec(0, 0); - MergedBlockOutputStream out(new_data_part, columns, compression_codec); + MergedBlockOutputStream out(new_data_part, columns, data.skip_indices, compression_codec); out.writePrefix(); out.writeWithPermutation(block, perm_ptr); diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeDataWriter.h rename to src/Storages/MergeTree/MergeTreeDataWriter.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIOSettings.h rename to src/Storages/MergeTree/MergeTreeIOSettings.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp rename to src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h rename to src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp rename to src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h rename to src/Storages/MergeTree/MergeTreeIndexBloomFilter.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp similarity index 99% rename from dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp rename to src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp index 88e3fc6662a..44e45f0c337 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp @@ -259,7 +259,7 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTIn( size_t row_size = column->size(); size_t position = header.getPositionByName(key_ast->getColumnName()); const DataTypePtr & index_type = header.getByPosition(position).type; - const auto & converted_column = castColumn(ColumnWithTypeAndName{column, type, ""}, index_type, context); + const auto & converted_column = castColumn(ColumnWithTypeAndName{column, type, ""}, index_type); out.predicate.emplace_back(std::make_pair(position, BloomFilterHash::hashWithColumn(index_type, converted_column, 0, row_size))); if (function_name == "in" || function_name == "globalIn") diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h rename to src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp 
b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp similarity index 88% rename from dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp rename to src/Storages/MergeTree/MergeTreeIndexFullText.cpp index 4b3bd954496..e42ac942362 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -19,6 +19,15 @@ #include +#if defined(__SSE2__) +#include <emmintrin.h> + +#if defined(__SSE4_2__) +#include <nmmintrin.h> +#endif + +#endif + namespace DB { @@ -606,8 +615,67 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size { *token_start = *pos; *token_len = 0; + while (*pos < len) { +#if defined(__SSE2__) + // NOTE: we assume that `data` string is padded from the right with 15 bytes. + const __m128i haystack = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data + *pos)); + const size_t haystack_length = 16; + +#if defined(__SSE4_2__) + // With the help of https://www.strchr.com/strcmp_and_strlen_using_sse_4.2 + const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + '\xFF', '\x80', 'z', 'a', 'Z', 'A', '9', '0'); + // Every bit represents if `haystack` character is in the ranges (1) or not (0) + const int result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES)); +#else + // NOTE: -1 and +1 required since SSE2 has no `>=` and `<=` instructions on packed 8-bit integers (epi8). + const auto number_begin = _mm_set1_epi8('0' - 1); + const auto number_end = _mm_set1_epi8('9' + 1); + const auto alpha_lower_begin = _mm_set1_epi8('a' - 1); + const auto alpha_lower_end = _mm_set1_epi8('z' + 1); + const auto alpha_upper_begin = _mm_set1_epi8('A' - 1); + const auto alpha_upper_end = _mm_set1_epi8('Z' + 1); + const auto zero = _mm_set1_epi8(0); + + // every bit represents if `haystack` character `c` satisfies the condition: + // (c < 0) || (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1) + // < 0 since _mm_cmplt_epi8 treats chars as SIGNED, so all chars > 0x80 are negative.
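Either way, the vector compare collapses into a 16-bit mask (computed just below) in which bit i is set iff byte i of the haystack is a token character; the `getTrailingZeroBitsUnsafe` arithmetic that follows in this hunk consumes it. A small sketch of that consumption, with the GCC/Clang builtin `__builtin_ctz` standing in for `getTrailingZeroBitsUnsafe`:

```cpp
#include <cstdint>
#include <iostream>

// Trailing zeros of the mask locate the token start; trailing ones of
// the inverted, shifted mask measure the token's length in this haystack.
int main()
{
    const uint32_t mask = 0b0000000001111000; // bytes 3..6 are token chars
    const unsigned start = __builtin_ctz(mask);           // 3
    const unsigned len = __builtin_ctz(~(mask >> start)); // 4
    std::cout << "token at " << start << ", length " << len << '\n';
}
```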
+ const int result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(_mm_or_si128( + _mm_cmplt_epi8(haystack, zero), + _mm_and_si128(_mm_cmpgt_epi8(haystack, number_begin), _mm_cmplt_epi8(haystack, number_end))), + _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_lower_begin), _mm_cmplt_epi8(haystack, alpha_lower_end))), + _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_upper_begin), _mm_cmplt_epi8(haystack, alpha_upper_end)))); +#endif + if (result_bitmask == 0) + { + if (*token_len != 0) + // end of token started on previous haystack + return true; + + *pos += haystack_length; + continue; + } + + const auto token_start_pos_in_current_haystack = getTrailingZeroBitsUnsafe(result_bitmask); + if (*token_len == 0) + // new token + *token_start = *pos + token_start_pos_in_current_haystack; + else if (token_start_pos_in_current_haystack != 0) + // end of token starting in one of previous haystacks + return true; + + const auto token_bytes_in_current_haystack = getTrailingZeroBitsUnsafe(~(result_bitmask >> token_start_pos_in_current_haystack)); + *token_len += token_bytes_in_current_haystack; + + *pos += token_start_pos_in_current_haystack + token_bytes_in_current_haystack; + if (token_start_pos_in_current_haystack + token_bytes_in_current_haystack == haystack_length) + // check if there are leftovers in next `haystack` + continue; + + break; +#else if (isASCII(data[*pos]) && !isAlphaNumericASCII(data[*pos])) { /// Finish current token if any @@ -621,7 +689,16 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size ++*pos; ++*token_len; } +#endif } + +#if defined(__SSE2__) + // Could happen only if string is not padded with zeroes, and we accidentally hopped over end of data. + if (*token_start > len) + return false; + *token_len = std::min(len - *token_start, *token_len); +#endif + return *token_len > 0; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h b/src/Storages/MergeTree/MergeTreeIndexFullText.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h rename to src/Storages/MergeTree/MergeTreeIndexFullText.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp rename to src/Storages/MergeTree/MergeTreeIndexGranularity.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.h rename to src/Storages/MergeTree/MergeTreeIndexGranularity.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp rename to src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h rename to src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch rename to 
src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp rename to src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h rename to src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp similarity index 99% rename from dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp rename to src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index 122f038fee6..220fc70c549 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -100,8 +100,8 @@ void MergeTreeIndexAggregatorMinMax::update(const Block & block, size_t * pos, s size_t rows_read = std::min(limit, block.rows() - *pos); - Field field_min; - Field field_max; + FieldRef field_min; + FieldRef field_max; for (size_t i = 0; i < index.columns.size(); ++i) { const auto & column = block.getByName(index.columns[i]).column; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/src/Storages/MergeTree/MergeTreeIndexMinMax.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h rename to src/Storages/MergeTree/MergeTreeIndexMinMax.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp b/src/Storages/MergeTree/MergeTreeIndexReader.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp rename to src/Storages/MergeTree/MergeTreeIndexReader.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h b/src/Storages/MergeTree/MergeTreeIndexReader.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexReader.h rename to src/Storages/MergeTree/MergeTreeIndexReader.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp similarity index 99% rename from dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp rename to src/Storages/MergeTree/MergeTreeIndexSet.cpp index f68184e2691..8cc67adb097 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -239,7 +239,7 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( return; /// Replace logical functions with bit functions. - /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h). + /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h). 
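The BoolMask comment above packs the index verdict into one UInt8 — bit 0 for "can be true", bit 1 for "can be false" — so that replacing logical connectives with bit functions preserves both components. A sketch of AND under that encoding (`boolMaskAnd` is illustrative, not the actual helper used by the set index):

```cpp
#include <cstdint>
#include <iostream>

// bit 0: can_be_true, bit 1: can_be_false (as in BoolMask.h).
// AND can be true only if both sides can be true, and can be false
// if either side can be false.
uint8_t boolMaskAnd(uint8_t a, uint8_t b)
{
    const uint8_t can_be_true = a & b & 0b01;
    const uint8_t can_be_false = (a | b) & 0b10;
    return can_be_true | can_be_false;
}

int main()
{
    const uint8_t always_true = 0b01;
    const uint8_t unknown = 0b11; // may be true or false
    std::cout << int(boolMaskAnd(always_true, unknown)) << '\n'; // 3: still unknown
}
```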
traverseAST(expression_ast); auto syntax_analyzer_result = SyntaxAnalyzer(context).analyze( diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndexSet.h rename to src/Storages/MergeTree/MergeTreeIndexSet.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndices.cpp b/src/Storages/MergeTree/MergeTreeIndices.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeIndices.cpp rename to src/Storages/MergeTree/MergeTreeIndices.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h similarity index 98% rename from dbms/src/Storages/MergeTree/MergeTreeIndices.h rename to src/Storages/MergeTree/MergeTreeIndices.h index 007851f2912..d871a522e6c 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -125,7 +125,7 @@ public: size_t granularity; }; -using MergeTreeIndices = std::vector; +using MergeTreeIndices = std::vector; class MergeTreeIndexFactory : private boost::noncopyable diff --git a/dbms/src/Storages/MergeTree/MergeTreeMarksLoader.cpp b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeMarksLoader.cpp rename to src/Storages/MergeTree/MergeTreeMarksLoader.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeMarksLoader.h b/src/Storages/MergeTree/MergeTreeMarksLoader.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeMarksLoader.h rename to src/Storages/MergeTree/MergeTreeMarksLoader.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeMutationEntry.cpp rename to src/Storages/MergeTree/MergeTreeMutationEntry.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h rename to src/Storages/MergeTree/MergeTreeMutationEntry.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeMutationStatus.h b/src/Storages/MergeTree/MergeTreeMutationStatus.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeMutationStatus.h rename to src/Storages/MergeTree/MergeTreeMutationStatus.h diff --git a/dbms/src/Storages/MergeTree/MergeTreePartInfo.cpp b/src/Storages/MergeTree/MergeTreePartInfo.cpp similarity index 97% rename from dbms/src/Storages/MergeTree/MergeTreePartInfo.cpp rename to src/Storages/MergeTree/MergeTreePartInfo.cpp index 43bd9538e3e..d30f6470bb1 100644 --- a/dbms/src/Storages/MergeTree/MergeTreePartInfo.cpp +++ b/src/Storages/MergeTree/MergeTreePartInfo.cpp @@ -120,8 +120,8 @@ void MergeTreePartInfo::parseMinMaxDatesFromPartName(const String & part_name, D min_date = date_lut.YYYYMMDDToDayNum(min_yyyymmdd); max_date = date_lut.YYYYMMDDToDayNum(max_yyyymmdd); - DayNum min_month = date_lut.toFirstDayNumOfMonth(min_date); - DayNum max_month = date_lut.toFirstDayNumOfMonth(max_date); + auto min_month = date_lut.toNumYYYYMM(min_date); + auto max_month = date_lut.toNumYYYYMM(max_date); if (min_month != max_month) throw Exception("Part name " + part_name + " contains different months", ErrorCodes::BAD_DATA_PART_NAME); diff --git a/dbms/src/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h similarity index 100% rename from 
dbms/src/Storages/MergeTree/MergeTreePartInfo.h rename to src/Storages/MergeTree/MergeTreePartInfo.h diff --git a/dbms/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreePartition.cpp rename to src/Storages/MergeTree/MergeTreePartition.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreePartition.h b/src/Storages/MergeTree/MergeTreePartition.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreePartition.h rename to src/Storages/MergeTree/MergeTreePartition.h diff --git a/dbms/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreePartsMover.cpp rename to src/Storages/MergeTree/MergeTreePartsMover.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreePartsMover.h rename to src/Storages/MergeTree/MergeTreePartsMover.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp rename to src/Storages/MergeTree/MergeTreeRangeReader.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeRangeReader.h rename to src/Storages/MergeTree/MergeTreeRangeReader.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp rename to src/Storages/MergeTree/MergeTreeReadPool.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReadPool.h rename to src/Storages/MergeTree/MergeTreeReadPool.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp similarity index 95% rename from dbms/src/Storages/MergeTree/MergeTreeReaderCompact.cpp rename to src/Storages/MergeTree/MergeTreeReaderCompact.cpp index 496371b6e4b..a895149e12e 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -78,17 +78,18 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( auto name_and_type = columns.begin(); for (size_t i = 0; i < columns_num; ++i, ++name_and_type) { - const auto & [name, type] = *name_and_type; + const auto & [name, type] = getColumnFromPart(*name_and_type); auto position = data_part->getColumnPosition(name); - /// If array of Nested column is missing in part, - /// we have to read it's offsets if they exists. if (!position && typeid_cast<const DataTypeArray *>(type.get())) { + /// If array of Nested column is missing in part, + /// we have to read its offsets if they exist.
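For readers unfamiliar with Nested columns: `n.a` and `n.b` are stored as separate arrays that share one offsets stream, which is why a missing sibling can still contribute its offsets. A simplified sketch of the lookup idea (hypothetical helper and types, not the actual IMergeTreeReader code):

#include <map>
#include <optional>
#include <string>

/// For a missing "n.a", any present sibling "n.b" shares the same offsets stream,
/// so its stream position can be used to read offsets for a correctly sized column.
std::optional<size_t> findColumnForOffsetsSketch(
    const std::string & missing_column,
    const std::map<std::string, size_t> & column_positions_in_part)
{
    auto dot = missing_column.find('.');
    if (dot == std::string::npos)
        return std::nullopt;  /// Not part of a Nested structure.

    const std::string prefix = missing_column.substr(0, dot + 1);  /// e.g. "n."
    for (const auto & [name, position] : column_positions_in_part)
        if (name != missing_column && name.compare(0, prefix.size(), prefix) == 0)
            return position;  /// A sibling column with the same offsets.

    return std::nullopt;
}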
position = findColumnForOffsets(name); read_only_offsets[i] = (position != std::nullopt); } + column_positions[i] = std::move(position); } @@ -111,7 +112,7 @@ size_t MergeTreeReaderCompact::readRows(size_t from_mark, bool continue_reading, bool append = res_columns[i] != nullptr; if (!append) - res_columns[i] = column_it->type->createColumn(); + res_columns[i] = getColumnFromPart(*column_it).type->createColumn(); mutable_columns[i] = res_columns[i]->assumeMutable(); } @@ -125,7 +126,7 @@ size_t MergeTreeReaderCompact::readRows(size_t from_mark, bool continue_reading, if (!res_columns[pos]) continue; - const auto & [name, type] = *name_and_type; + auto [name, type] = getColumnFromPart(*name_and_type); auto & column = mutable_columns[pos]; try diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderCompact.h b/src/Storages/MergeTree/MergeTreeReaderCompact.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReaderCompact.h rename to src/Storages/MergeTree/MergeTreeReaderCompact.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp b/src/Storages/MergeTree/MergeTreeReaderStream.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp rename to src/Storages/MergeTree/MergeTreeReaderStream.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderStream.h b/src/Storages/MergeTree/MergeTreeReaderStream.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReaderStream.h rename to src/Storages/MergeTree/MergeTreeReaderStream.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp similarity index 92% rename from dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp rename to src/Storages/MergeTree/MergeTreeReaderWide.cpp index 1f87f229cc5..1a03acb5758 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -41,17 +41,10 @@ MergeTreeReaderWide::MergeTreeReaderWide( { try { - for (const NameAndTypePair & column_from_part : data_part->getColumns()) - { - columns_from_part[column_from_part.name] = column_from_part.type; - } - for (const NameAndTypePair & column : columns) { - if (columns_from_part.count(column.name)) - addStreams(column.name, *columns_from_part[column.name], profile_callback_, clock_type_); - else - addStreams(column.name, *column.type, profile_callback_, clock_type_); + auto column_from_part = getColumnFromPart(column); + addStreams(column_from_part.name, *column_from_part.type, profile_callback_, clock_type_); } } catch (...) @@ -82,12 +75,7 @@ size_t MergeTreeReaderWide::readRows(size_t from_mark, bool continue_reading, si auto name_and_type = columns.begin(); for (size_t pos = 0; pos < num_columns; ++pos, ++name_and_type) { - String & name = name_and_type->name; - DataTypePtr type; - if (columns_from_part.count(name)) - type = columns_from_part[name]; - else - type = name_and_type->type; + auto [name, type] = getColumnFromPart(*name_and_type); /// The column is already present in the block so we will append the values to the end. 
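The refactoring above replaces the readers' ad-hoc name-to-type maps with a single `getColumnFromPart` helper. Conceptually it does roughly this (simplified types; the real helper works on NamesAndTypesList and the part's own column list):

#include <string>
#include <unordered_map>

struct NameAndType { std::string name; std::string type; };

/// Prefer the type the part was actually written with; otherwise fall back
/// to the type requested by the query.
NameAndType getColumnFromPartSketch(
    const NameAndType & required_column,
    const std::unordered_map<std::string, std::string> & columns_in_part)
{
    auto it = columns_in_part.find(required_column.name);
    if (it != columns_in_part.end())
        return {required_column.name, it->second};
    return required_column;
}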
bool append = res_columns[pos] != nullptr; diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.h b/src/Storages/MergeTree/MergeTreeReaderWide.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReaderWide.h rename to src/Storages/MergeTree/MergeTreeReaderWide.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp rename to src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h rename to src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp rename to src/Storages/MergeTree/MergeTreeSelectProcessor.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.h rename to src/Storages/MergeTree/MergeTreeSelectProcessor.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp b/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp rename to src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h b/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h rename to src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeSettings.cpp rename to src/Storages/MergeTree/MergeTreeSettings.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h similarity index 98% rename from dbms/src/Storages/MergeTree/MergeTreeSettings.h rename to src/Storages/MergeTree/MergeTreeSettings.h index bbd1fd6cbeb..02c852b4f4b 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -42,6 +42,7 @@ struct MergeTreeSettings : public SettingsCollection M(SettingUInt64, number_of_free_entries_in_pool_to_execute_mutation, 10, "When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ M(SettingSeconds, old_parts_lifetime, 8 * 60, "How many seconds to keep obsolete parts.", 0) \ M(SettingSeconds, temporary_directories_lifetime, 86400, "How many seconds to keep tmp_-directories.", 0) \ + M(SettingSeconds, lock_acquire_timeout_for_background_operations, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "For background operations like merges, mutations etc. How many seconds before failing to acquire table locks.", 0) \ \ /** Inserts settings. 
*/ \ M(SettingUInt64, parts_to_delay_insert, 150, "If table contains at least that many active parts in single partition, artificially slow down insert into table.", 0) \ diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp rename to src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h rename to src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp similarity index 98% rename from dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp rename to src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index fa29494d1c9..749c0d64525 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -202,10 +202,10 @@ void MergeTreeWhereOptimizer::optimize(ASTSelectQuery & select) const prewhere_conditions.splice(prewhere_conditions.end(), where_conditions, cond_it); total_size_of_moved_conditions += cond_it->columns_size; - /// Move all other conditions that depend on the same set of columns. + /// Move all other viable conditions that depend on the same set of columns. for (auto jt = where_conditions.begin(); jt != where_conditions.end();) { - if (jt->columns_size == cond_it->columns_size && jt->identifiers == cond_it->identifiers) + if (jt->viable && jt->columns_size == cond_it->columns_size && jt->identifiers == cond_it->identifiers) prewhere_conditions.splice(prewhere_conditions.end(), where_conditions, jt++); else ++jt; diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h rename to src/Storages/MergeTree/MergeTreeWhereOptimizer.h diff --git a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp similarity index 96% rename from dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp rename to src/Storages/MergeTree/MergedBlockOutputStream.cpp index 221170b7a32..2b482ac7c29 100644 --- a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -15,10 +15,11 @@ namespace ErrorCodes MergedBlockOutputStream::MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, bool blocks_are_granules_size) : MergedBlockOutputStream( - data_part, columns_list_, default_codec, {}, + data_part, columns_list_, skip_indices, default_codec, {}, data_part->storage.global_context.getSettings().min_bytes_to_use_direct_io, blocks_are_granules_size) { @@ -27,6 +28,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( MergedBlockOutputStream::MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, const MergeTreeData::DataPart::ColumnToSize & 
merged_column_to_size, size_t aio_threshold, @@ -49,7 +51,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( disk->createDirectories(part_path); - writer = data_part->getWriter(columns_list, data_part->storage.getSkipIndices(), default_codec, writer_settings); + writer = data_part->getWriter(columns_list, skip_indices, default_codec, writer_settings); writer->initPrimaryIndex(); writer->initSkipIndices(); } diff --git a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h similarity index 95% rename from dbms/src/Storages/MergeTree/MergedBlockOutputStream.h rename to src/Storages/MergeTree/MergedBlockOutputStream.h index ee453f41a31..5a92977640e 100644 --- a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -16,12 +16,14 @@ public: MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, bool blocks_are_granules_size = false); MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, const MergeTreeData::DataPart::ColumnToSize & merged_column_to_size, size_t aio_threshold, diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp rename to src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h similarity index 100% rename from dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h rename to src/Storages/MergeTree/MergedColumnOnlyOutputStream.h diff --git a/dbms/src/Storages/MergeTree/PartDestinationType.h b/src/Storages/MergeTree/PartDestinationType.h similarity index 100% rename from dbms/src/Storages/MergeTree/PartDestinationType.h rename to src/Storages/MergeTree/PartDestinationType.h diff --git a/dbms/src/Storages/MergeTree/RPNBuilder.h b/src/Storages/MergeTree/RPNBuilder.h similarity index 100% rename from dbms/src/Storages/MergeTree/RPNBuilder.h rename to src/Storages/MergeTree/RPNBuilder.h diff --git a/dbms/src/Storages/MergeTree/RangesInDataPart.h b/src/Storages/MergeTree/RangesInDataPart.h similarity index 100% rename from dbms/src/Storages/MergeTree/RangesInDataPart.h rename to src/Storages/MergeTree/RangesInDataPart.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h b/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h rename to src/Storages/MergeTree/ReplicatedMergeTreeAddress.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp diff --git 
a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h rename to src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp similarity index 99% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index fda0a8eb5a8..72255081e6b 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -147,11 +147,11 @@ void ReplicatedMergeTreeBlockOutputStream::write(const Block & block) /// That is, do not insert the same data to the same partition twice. block_id = part->info.partition_id + "_" + toString(hash_value.words[0]) + "_" + toString(hash_value.words[1]); - LOG_DEBUG(log, "Wrote block with ID '" << block_id << "', " << block.rows() << " rows"); + LOG_DEBUG(log, "Wrote block with ID '" << block_id << "', " << current_block.block.rows() << " rows"); } else { - LOG_DEBUG(log, "Wrote block with " << block.rows() << " rows"); + LOG_DEBUG(log, "Wrote block with " << current_block.block.rows() << " rows"); } try diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h rename to src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp similarity index 99% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 27ad6871573..b1164f6621c 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -57,7 +57,8 @@ void ReplicatedMergeTreeCleanupThread::iterate() { /// TODO: Implement tryLockStructureForShare. 
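The timeout-taking lock calls added just below follow one pattern: a background operation takes a shared structure lock with an explicit acquire timeout instead of waiting forever. The shape of such a call can be illustrated with standard library primitives (this models the shape only; ClickHouse uses its own RWLock, not std::shared_timed_mutex):

#include <chrono>
#include <shared_mutex>
#include <stdexcept>

std::shared_lock<std::shared_timed_mutex> lockStructureForShareSketch(
    std::shared_timed_mutex & structure_mutex, std::chrono::seconds acquire_timeout)
{
    std::shared_lock<std::shared_timed_mutex> lock(structure_mutex, std::defer_lock);
    /// Fail loudly instead of blocking a background thread forever.
    if (!lock.try_lock_for(acquire_timeout))
        throw std::runtime_error("Locking attempt timed out");
    return lock;  /// Held for the duration of the background operation.
}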
- auto lock = storage.lockStructureForShare(""); + auto lock = storage.lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage.getSettings()->lock_acquire_timeout_for_background_operations); storage.clearOldTemporaryDirectories(); } diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h rename to src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h rename to src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h rename to src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp similarity index 98% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 5c8f878503a..b587b5f71c0 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -203,7 +203,9 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na else if (part->name == part_name) { auto zookeeper = storage.getZooKeeper(); - auto table_lock = storage.lockStructureForShare(RWLockImpl::NO_QUERY); + + auto table_lock = storage.lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage.getSettings()->lock_acquire_timeout_for_background_operations); auto local_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( part->getColumns(), part->checksums); diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h rename to src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h b/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h similarity index 100% rename from 
dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h rename to src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp similarity index 98% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index c0749818ebf..bebb6ee79c3 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1309,6 +1309,21 @@ ReplicatedMergeTreeMergePredicate ReplicatedMergeTreeQueue::getMergePredicate(zk } +MutationCommands ReplicatedMergeTreeQueue::getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const +{ + std::lock_guard lock(state_mutex); + auto in_partition = mutations_by_partition.find(part->info.partition_id); + if (in_partition == mutations_by_partition.end()) + return MutationCommands{}; + + Int64 part_version = part->info.getDataVersion(); + for (auto [mutation_version, mutation_status] : in_partition->second) + if (mutation_version > part_version && mutation_status->entry->alter_version != -1) + return mutation_status->entry->commands; + + return MutationCommands{}; +} + MutationCommands ReplicatedMergeTreeQueue::getMutationCommands( const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const { @@ -1743,7 +1758,7 @@ bool ReplicatedMergeTreeMergePredicate::operator()( { if (out_reason) *out_reason = "There are " + toString(covered.size()) + " parts (from " + covered.front() - + " to " + covered.back() + ") that are still not present or beeing processed by " + + " to " + covered.back() + ") that are still not present or being processed by " + " other background process on this replica between " + left->name + " and " + right->name; return false; } @@ -1776,7 +1791,7 @@ std::optional<std::pair<Int64, int>> ReplicatedMergeTreeMergePredicate::getDesiredMutationVersion( /// the part (checked by querying queue.virtual_parts), we can confidently assign a mutation to /// version X for this part. - /// We cannot mutate part if it's beeing inserted with quorum and it's not + /// We cannot mutate part if it's being inserted with quorum and it's not /// already reached. if (part->name == inprogress_quorum_part) return {}; diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h similarity index 98% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h rename to src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 534978873c2..fcb3dfb4b86 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -331,6 +331,11 @@ public: MutationCommands getMutationCommands(const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const; + /// Return mutation commands for the part with the smallest mutation version bigger + /// than the data part version. Used when we apply ALTER commands on the fly, + /// without actual data modification on disk. + MutationCommands getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const; + /// Mark finished mutations as done. If the function needs to be called again at some later time /// (because some mutations are probably done but we are not sure yet), returns true.
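A compact model of the lookup added in getFirstAlterMutationCommandsForPart above: mutations of a partition are keyed by mutation version, and the first ALTER-produced entry strictly above the part's data version wins (types simplified, ordered map instead of the real in-memory state):

#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct MutationEntry
{
    int alter_version = -1;                 /// -1 means "not produced by ALTER".
    std::vector<std::string> commands;
};

std::vector<std::string> firstAlterMutationCommands(
    const std::map<int64_t, MutationEntry> & mutations_in_partition, int64_t part_data_version)
{
    /// First entry strictly above the part's data version that came from ALTER.
    for (auto it = mutations_in_partition.upper_bound(part_data_version);
         it != mutations_in_partition.end(); ++it)
        if (it->second.alter_version != -1)
            return it->second.commands;
    return {};
}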
bool tryFinalizeMutations(zkutil::ZooKeeperPtr zookeeper); diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h b/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h rename to src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h rename to src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h rename to src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp rename to src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h similarity index 100% rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h rename to src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h diff --git a/dbms/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/SimpleMergeSelector.cpp rename to src/Storages/MergeTree/SimpleMergeSelector.cpp diff --git a/dbms/src/Storages/MergeTree/SimpleMergeSelector.h b/src/Storages/MergeTree/SimpleMergeSelector.h similarity index 100% rename from dbms/src/Storages/MergeTree/SimpleMergeSelector.h rename to src/Storages/MergeTree/SimpleMergeSelector.h diff --git a/dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h similarity index 99% rename from dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h rename to src/Storages/MergeTree/StorageFromMergeTreeDataPart.h index 0e93301e124..4d799522920 100644 --- a/dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h +++ b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -52,12 +52,10 @@ public: return part->storage.getInMemoryMetadata(); } - bool hasSortingKey() const { return part->storage.hasSortingKey(); } Names getSortingKeyColumns() const override { return part->storage.getSortingKeyColumns(); } - protected: StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_) : IStorage(getIDFromPart(part_), part_->storage.getVirtuals()) diff --git a/dbms/src/Storages/MergeTree/TTLMergeSelector.cpp b/src/Storages/MergeTree/TTLMergeSelector.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/TTLMergeSelector.cpp rename to src/Storages/MergeTree/TTLMergeSelector.cpp diff --git a/dbms/src/Storages/MergeTree/TTLMergeSelector.h 
b/src/Storages/MergeTree/TTLMergeSelector.h similarity index 100% rename from dbms/src/Storages/MergeTree/TTLMergeSelector.h rename to src/Storages/MergeTree/TTLMergeSelector.h diff --git a/dbms/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp similarity index 77% rename from dbms/src/Storages/MergeTree/checkDataPart.cpp rename to src/Storages/MergeTree/checkDataPart.cpp index 6da051d04ac..52d9a2750c7 100644 --- a/dbms/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -63,6 +63,7 @@ IMergeTreeDataPart::Checksums checkDataPart( /// Real checksums based on contents of data. Must correspond to checksums.txt. If not - it means the data is broken. IMergeTreeDataPart::Checksums checksums_data; + /// This function calculates checksum for both compressed and decompressed contents of compressed file. auto checksum_compressed_file = [](const DiskPtr & disk_, const String & file_path) { auto file_buf = disk_->readFile(file_path); @@ -78,6 +79,7 @@ IMergeTreeDataPart::Checksums checkDataPart( }; }; + /// First calculate checksums for columns data if (part_type == MergeTreeDataPartType::COMPACT) { const auto & file_name = MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION; @@ -99,20 +101,7 @@ IMergeTreeDataPart::Checksums checkDataPart( throw Exception("Unknown type in part " + path, ErrorCodes::UNKNOWN_PART_TYPE); } - for (auto it = disk->iterateDirectory(path); it->isValid(); it->next()) - { - const String & file_name = it->name(); - auto checksum_it = checksums_data.files.find(file_name); - if (checksum_it == checksums_data.files.end() && file_name != "checksums.txt" && file_name != "columns.txt") - { - auto file_buf = disk->readFile(it->path()); - HashingReadBuffer hashing_buf(*file_buf); - hashing_buf.tryIgnore(std::numeric_limits<size_t>::max()); - checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); - } - } - - /// Checksums from file checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums. + /// Checksums from the rest of the files listed in checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums. IMergeTreeDataPart::Checksums checksums_txt; if (require_checksums || disk->exists(path + "checksums.txt")) @@ -122,6 +111,31 @@ IMergeTreeDataPart::Checksums checkDataPart( assertEOF(*buf); } + const auto & checksum_files_txt = checksums_txt.files; + for (auto it = disk->iterateDirectory(path); it->isValid(); it->next()) + { + const String & file_name = it->name(); + auto checksum_it = checksums_data.files.find(file_name); + + /// Skip files that we already calculated. Also skip metadata files that are not checksummed. + if (checksum_it == checksums_data.files.end() && file_name != "checksums.txt" && file_name != "columns.txt") + { + auto txt_checksum_it = checksum_files_txt.find(file_name); + if (txt_checksum_it == checksum_files_txt.end() || txt_checksum_it->second.uncompressed_size == 0) + { + /// The file is not compressed.
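The new loop decides per file whether to hash the raw bytes or both compressed and decompressed contents. The dispatch rule boils down to this predicate (types simplified; `TxtChecksum` is a stand-in for the real checksum structure):

#include <cstdint>
#include <map>
#include <string>

struct TxtChecksum { uint64_t file_size = 0; uint64_t uncompressed_size = 0; };

/// A file is checked as compressed only when checksums.txt records a non-zero
/// uncompressed size for it; everything else gets a plain whole-file hash.
bool shouldCheckAsCompressed(
    const std::string & file_name, const std::map<std::string, TxtChecksum> & checksums_txt)
{
    auto it = checksums_txt.find(file_name);
    return it != checksums_txt.end() && it->second.uncompressed_size != 0;
}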
+ auto file_buf = disk->readFile(it->path()); + HashingReadBuffer hashing_buf(*file_buf); + hashing_buf.tryIgnore(std::numeric_limits<size_t>::max()); + checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); + } + else /// If we have both compressed and uncompressed in txt, then calculate them + { + checksums_data.files[file_name] = checksum_compressed_file(disk, it->path()); + } + } + } + if (is_cancelled()) return {}; diff --git a/dbms/src/Storages/MergeTree/checkDataPart.h b/src/Storages/MergeTree/checkDataPart.h similarity index 100% rename from dbms/src/Storages/MergeTree/checkDataPart.h rename to src/Storages/MergeTree/checkDataPart.h diff --git a/dbms/src/Storages/MergeTree/localBackup.cpp b/src/Storages/MergeTree/localBackup.cpp similarity index 100% rename from dbms/src/Storages/MergeTree/localBackup.cpp rename to src/Storages/MergeTree/localBackup.cpp diff --git a/dbms/src/Storages/MergeTree/localBackup.h b/src/Storages/MergeTree/localBackup.h similarity index 100% rename from dbms/src/Storages/MergeTree/localBackup.h rename to src/Storages/MergeTree/localBackup.h diff --git a/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp similarity index 99% rename from dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp rename to src/Storages/MergeTree/registerStorageMergeTree.cpp index e6a6beff57d..b5d6bb9e975 100644 --- a/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -280,7 +280,7 @@ Careful choice of the primary key is extremely important for processing short-ti Optional sampling expression can be specified in the SAMPLE BY clause. It is used to implement the SAMPLE clause in a SELECT query for approximate query execution. Sampling expression must be one of the elements of the primary key tuple. For example, if your primary key is (CounterID, EventDate, intHash64(UserID)), your sampling expression might be intHash64(UserID). -Engine settings can be specified in the SETTINGS clause. Full list is in the source code in the 'dbms/src/Storages/MergeTree/MergeTreeSettings.h' file. +Engine settings can be specified in the SETTINGS clause. Full list is in the source code in the 'src/Storages/MergeTree/MergeTreeSettings.h' file. E.g. you can specify the index (primary key) granularity with SETTINGS index_granularity = 8192.
Examples: diff --git a/dbms/src/Storages/MutationCommands.cpp b/src/Storages/MutationCommands.cpp similarity index 90% rename from dbms/src/Storages/MutationCommands.cpp rename to src/Storages/MutationCommands.cpp index 8c66646abed..f3569c344d9 100644 --- a/dbms/src/Storages/MutationCommands.cpp +++ b/src/Storages/MutationCommands.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -94,6 +95,15 @@ std::optional<MutationCommand> MutationCommand::parse(ASTAlterCommand * command, res.clear = true; return res; } + else if (parse_alter_commands && command->type == ASTAlterCommand::RENAME_COLUMN) + { + MutationCommand res; + res.ast = command->ptr(); + res.type = MutationCommand::Type::RENAME_COLUMN; + res.column_name = command->column->as<ASTIdentifier &>().name; + res.rename_to = command->rename_to->as<ASTIdentifier &>().name; + return res; + } else if (command->type == ASTAlterCommand::MATERIALIZE_TTL) { MutationCommand res; @@ -128,7 +138,7 @@ void MutationCommands::readText(ReadBuffer & in) ParserAlterCommandList p_alter_commands; auto commands_ast = parseQuery( - p_alter_commands, commands_str.data(), commands_str.data() + commands_str.length(), "mutation commands list", 0); + p_alter_commands, commands_str.data(), commands_str.data() + commands_str.length(), "mutation commands list", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); for (ASTAlterCommand * command_ast : commands_ast->as<const ASTAlterCommandList &>().commands) { auto command = MutationCommand::parse(command_ast, true); diff --git a/dbms/src/Storages/MutationCommands.h b/src/Storages/MutationCommands.h similarity index 94% rename from dbms/src/Storages/MutationCommands.h rename to src/Storages/MutationCommands.h index f006575a9b8..6fa4f7fb641 100644 --- a/dbms/src/Storages/MutationCommands.h +++ b/src/Storages/MutationCommands.h @@ -31,7 +31,8 @@ struct MutationCommand READ_COLUMN, DROP_COLUMN, DROP_INDEX, - MATERIALIZE_TTL + MATERIALIZE_TTL, + RENAME_COLUMN, }; Type type = EMPTY; @@ -53,6 +54,9 @@ struct MutationCommand /// We need just to clear the column, not drop it from metadata.
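The RENAME_COLUMN branch added above maps an ALTER command to a mutation command carrying both the old and the new name. Stripped of the AST machinery, the shape of that mapping is as follows (all types here are simplified stand-ins, not the real ClickHouse structures):

#include <optional>
#include <string>

enum class AlterType { RenameColumn /* , ... */ };

struct AlterCommandSketch { AlterType type; std::string column; std::string rename_to; };
struct MutationCommandSketch { std::string column_name; std::string rename_to; };

std::optional<MutationCommandSketch> parseRename(const AlterCommandSketch & command, bool parse_alter_commands)
{
    /// RENAME COLUMN becomes a mutation command only when ALTER commands are accepted here.
    if (parse_alter_commands && command.type == AlterType::RenameColumn)
        return MutationCommandSketch{command.column, command.rename_to};
    return std::nullopt;
}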
bool clear = false; + /// Column rename_to + String rename_to; + + /// If parse_alter_commands, then consider more Alter commands as mutation commands static std::optional<MutationCommand> parse(ASTAlterCommand * command, bool parse_alter_commands = false); }; diff --git a/dbms/src/Storages/PartitionCommands.cpp b/src/Storages/PartitionCommands.cpp similarity index 100% rename from dbms/src/Storages/PartitionCommands.cpp rename to src/Storages/PartitionCommands.cpp diff --git a/dbms/src/Storages/PartitionCommands.h b/src/Storages/PartitionCommands.h similarity index 100% rename from dbms/src/Storages/PartitionCommands.h rename to src/Storages/PartitionCommands.h diff --git a/dbms/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp similarity index 99% rename from dbms/src/Storages/ReadInOrderOptimizer.cpp rename to src/Storages/ReadInOrderOptimizer.cpp index c05acfa71ab..5bbe5be9928 100644 --- a/dbms/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include namespace DB diff --git a/dbms/src/Storages/ReadInOrderOptimizer.h b/src/Storages/ReadInOrderOptimizer.h similarity index 100% rename from dbms/src/Storages/ReadInOrderOptimizer.h rename to src/Storages/ReadInOrderOptimizer.h diff --git a/dbms/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h similarity index 100% rename from dbms/src/Storages/SelectQueryInfo.h rename to src/Storages/SelectQueryInfo.h diff --git a/dbms/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp similarity index 98% rename from dbms/src/Storages/StorageBuffer.cpp rename to src/Storages/StorageBuffer.cpp index 53fb257d58d..1765e663902 100644 --- a/dbms/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -135,7 +135,7 @@ private: }; -QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context & context, QueryProcessingStage::Enum to_stage, const ASTPtr & query_ptr) const { if (destination_id) { @@ -144,7 +144,7 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context if (destination.get() == this) throw Exception("Destination table is myself. Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - return destination->getQueryProcessingStage(context, query_ptr); + return destination->getQueryProcessingStage(context, to_stage, query_ptr); } return QueryProcessingStage::FetchColumns; @@ -168,7 +168,8 @@ Pipes StorageBuffer::read( if (destination.get() == this) throw Exception("Destination table is myself.
Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - auto destination_lock = destination->lockStructureForShare(context.getCurrentQueryId()); + auto destination_lock = destination->lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); const bool dst_has_same_structure = std::all_of(column_names.begin(), column_names.end(), [this, destination](const String& column_name) { @@ -224,7 +225,7 @@ Pipes StorageBuffer::read( pipe.getHeader(), header_after_adding_defaults, getColumns().getDefaults(), context)); pipe.addSimpleTransform(std::make_shared<ConvertingTransform>( - pipe.getHeader(), header, ConvertingTransform::MatchColumnsMode::Name, context)); + pipe.getHeader(), header, ConvertingTransform::MatchColumnsMode::Name)); } } } @@ -662,7 +663,7 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl << " have different type of column " << backQuoteIfNeed(column.name) << " (" << dst_col.type->getName() << " != " << column.type->getName() << "). Block of data is converted."); - column.column = castColumn(column, dst_col.type, global_context); + column.column = castColumn(column, dst_col.type); column.type = dst_col.type; } @@ -757,7 +758,7 @@ std::optional<UInt64> StorageBuffer::totalBytes() const void StorageBuffer::alter(const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); checkAlterIsPossible(params, context.getSettingsRef()); diff --git a/dbms/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h similarity index 98% rename from dbms/src/Storages/StorageBuffer.h rename to src/Storages/StorageBuffer.h index 7a3d907ae76..93f95692b18 100644 --- a/dbms/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -54,7 +54,7 @@ public: std::string getName() const override { return "Buffer"; } - QueryProcessingStage::Enum getQueryProcessingStage(const Context & context, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; Pipes read( const Names & column_names, diff --git a/dbms/src/Storages/StorageDictionary.cpp b/src/Storages/StorageDictionary.cpp similarity index 83% rename from dbms/src/Storages/StorageDictionary.cpp rename to src/Storages/StorageDictionary.cpp index 6f2d41d2b13..86831593d54 100644 --- a/dbms/src/Storages/StorageDictionary.cpp +++ b/src/Storages/StorageDictionary.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -22,6 +23,7 @@ namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int THERE_IS_NO_COLUMN; + extern const int UNKNOWN_TABLE; } @@ -45,6 +47,11 @@ StorageDictionary::StorageDictionary( } } +void StorageDictionary::checkTableCanBeDropped() const +{ + throw Exception("Cannot detach dictionary " + backQuoteIfNeed(dictionary_name) + " as table, use DETACH DICTIONARY query.", ErrorCodes::UNKNOWN_TABLE); +} + Pipes StorageDictionary::read( const Names & column_names, const SelectQueryInfo & /*query_info*/, @@ -68,16 +75,30 @@ NamesAndTypesList StorageDictionary::getNamesAndTypes(const DictionaryStructure if (dictionary_structure.id) dictionary_names_and_types.emplace_back(dictionary_structure.id->name, std::make_shared<DataTypeUInt64>()); + + /// In
old-style (XML) configuration we don't have these attributes in the + /// main attribute list, so we have to add them to the columns list explicitly. + /// In the new configuration (DDL) we have them both in range_* nodes and + /// main attribute list, but for compatibility we add them before the main + /// attributes list. if (dictionary_structure.range_min) dictionary_names_and_types.emplace_back(dictionary_structure.range_min->name, dictionary_structure.range_min->type); + if (dictionary_structure.range_max) dictionary_names_and_types.emplace_back(dictionary_structure.range_max->name, dictionary_structure.range_max->type); + if (dictionary_structure.key) + { for (const auto & attribute : *dictionary_structure.key) dictionary_names_and_types.emplace_back(attribute.name, attribute.type); + } for (const auto & attribute : dictionary_structure.attributes) - dictionary_names_and_types.emplace_back(attribute.name, attribute.type); + { + /// Some attributes may already have been added (range_min and range_max) + if (!dictionary_names_and_types.contains(attribute.name)) + dictionary_names_and_types.emplace_back(attribute.name, attribute.type); + } return dictionary_names_and_types; } diff --git a/dbms/src/Storages/StorageDictionary.h b/src/Storages/StorageDictionary.h similarity index 97% rename from dbms/src/Storages/StorageDictionary.h rename to src/Storages/StorageDictionary.h index fd6cb1902dc..87826304166 100644 --- a/dbms/src/Storages/StorageDictionary.h +++ b/src/Storages/StorageDictionary.h @@ -25,6 +25,8 @@ class StorageDictionary final : public ext::shared_ptr_helper<StorageDictionary> public: std::string getName() const override { return "Dictionary"; } + void checkTableCanBeDropped() const override; + Pipes read(const Names & column_names, const SelectQueryInfo & query_info, const Context & context, diff --git a/dbms/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp similarity index 95% rename from dbms/src/Storages/StorageDistributed.cpp rename to src/Storages/StorageDistributed.cpp index 6f98d282e8c..b453b73c4cb 100644 --- a/dbms/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -89,18 +90,23 @@ ASTPtr rewriteSelectQuery(const ASTPtr & query, const std::string & database, co auto modified_query_ast = query->clone(); ASTSelectQuery & select_query = modified_query_ast->as<ASTSelectQuery &>(); - - /// restore long column names in JOIN ON expressions - if (auto tables = select_query.tables()) - { - RestoreQualifiedNamesVisitor::Data data; - RestoreQualifiedNamesVisitor(data).visit(tables); - } - if (table_function_ptr) select_query.addTableFunction(table_function_ptr); else select_query.replaceDatabaseAndTable(database, table); + + /// Restore long column names (because our short names are ambiguous).
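Why restore qualified names at all: the table alias used on the initiator does not exist on the remote shard, so a short column reference must be rewritten against the remote database and table before the query text is shipped, as the change just below does with the visitor. A toy version of that rewrite (hypothetical types, not the real RestoreQualifiedNamesVisitor):

#include <string>

struct RemoteTableSketch { std::string database; std::string table; };

/// "x" with {"default", "hits"} becomes "default.hits.x": unambiguous on the
/// shard, where the initiator's alias no longer exists.
std::string restoreQualifiedName(const std::string & short_name, const RemoteTableSketch & remote)
{
    return remote.database + "." + remote.table + "." + short_name;
}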
+ /// TODO: aliased table functions & CREATE TABLE AS table function cases + if (!table_function_ptr) + { + RestoreQualifiedNamesVisitor::Data data; + data.distributed_table = DatabaseAndTableWithAlias(*getTableExpression(query->as<ASTSelectQuery &>(), 0)); + data.remote_table.database = database; + data.remote_table.table = table; + data.rename = true; + RestoreQualifiedNamesVisitor(data).visit(modified_query_ast); + } + return modified_query_ast; } @@ -236,6 +242,24 @@ void replaceConstantExpressions(ASTPtr & node, const Context & context, const Na visitor.visit(node); } +QueryProcessingStage::Enum getQueryProcessingStageImpl(const Context & context, QueryProcessingStage::Enum to_stage, const ClusterPtr & cluster) +{ + const Settings & settings = context.getSettingsRef(); + + size_t num_local_shards = cluster->getLocalShardCount(); + size_t num_remote_shards = cluster->getRemoteShardCount(); + size_t result_size = (num_remote_shards * settings.max_parallel_replicas) + num_local_shards; + + if (settings.distributed_group_by_no_merge) + return QueryProcessingStage::Complete; + /// Nested distributed query cannot return Complete stage, + /// since the parent query needs to aggregate the results after. + if (to_stage == QueryProcessingStage::WithMergeableState) + return QueryProcessingStage::WithMergeableState; + return result_size == 1 ? QueryProcessingStage::Complete + : QueryProcessingStage::WithMergeableState; +} + } @@ -354,25 +378,10 @@ StoragePtr StorageDistributed::createWithOwnCluster( } -static QueryProcessingStage::Enum getQueryProcessingStageImpl(const Context & context, const ClusterPtr & cluster) -{ - const Settings & settings = context.getSettingsRef(); - - size_t num_local_shards = cluster->getLocalShardCount(); - size_t num_remote_shards = cluster->getRemoteShardCount(); - size_t result_size = (num_remote_shards * settings.max_parallel_replicas) + num_local_shards; - - if (settings.distributed_group_by_no_merge) - return QueryProcessingStage::Complete; - else /// Normal mode. - return result_size == 1 ?
QueryProcessingStage::Complete - : QueryProcessingStage::WithMergeableState; -} - -QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const { auto cluster = getOptimizedCluster(context, query_ptr); - return getQueryProcessingStageImpl(context, cluster); + return getQueryProcessingStageImpl(context, to_stage, cluster); } Pipes StorageDistributed::read( @@ -454,7 +463,7 @@ void StorageDistributed::checkAlterIsPossible(const AlterCommands & commands, co void StorageDistributed::alter(const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); checkAlterIsPossible(params, context.getSettingsRef()); @@ -801,6 +810,9 @@ void registerStorageDistributed(StorageFactory & factory) storage_policy, args.relative_data_path, args.attach); + }, + { + .source_access_type = AccessType::REMOTE, }); } diff --git a/dbms/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h similarity index 98% rename from dbms/src/Storages/StorageDistributed.h rename to src/Storages/StorageDistributed.h index e12831709f7..81c6b54a63e 100644 --- a/dbms/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -67,7 +67,7 @@ public: bool isRemote() const override { return true; } - QueryProcessingStage::Enum getQueryProcessingStage(const Context & context, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; Pipes read( const Names & column_names, diff --git a/dbms/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp similarity index 96% rename from dbms/src/Storages/StorageFactory.cpp rename to src/Storages/StorageFactory.cpp index 9fb548c3893..0a8ceb4b8e5 100644 --- a/dbms/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -189,4 +189,13 @@ StorageFactory & StorageFactory::instance() return ret; } + +AccessType StorageFactory::getSourceAccessType(const String & table_engine) const +{ + auto it = storages.find(table_engine); + if (it == storages.end()) + return AccessType::NONE; + return it->second.features.source_access_type; +} + } diff --git a/dbms/src/Storages/StorageFactory.h b/src/Storages/StorageFactory.h similarity index 94% rename from dbms/src/Storages/StorageFactory.h rename to src/Storages/StorageFactory.h index e64d8647dd8..de9060769cb 100644 --- a/dbms/src/Storages/StorageFactory.h +++ b/src/Storages/StorageFactory.h @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -54,6 +55,7 @@ public: bool supports_ttl = false; bool supports_replication = false; bool supports_deduplication = false; + AccessType source_access_type = AccessType::NONE; }; using CreatorFn = std::function; @@ -83,6 +85,7 @@ public: .supports_ttl = false, .supports_replication = false, .supports_deduplication = false, + .source_access_type = AccessType::NONE, }); const Storages & getAllStorages() const @@ -108,6 +111,7 @@ public: return result; } + AccessType getSourceAccessType(const String & table_engine) const; private: Storages storages; 
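The source_access_type feature threaded through the hunks above and below lets each table engine declare the access right its external source needs (FILE for the File engine, REMOTE for Distributed, HDFS for StorageHDFS). A minimal sketch of the registration/lookup pair (simplified; the real factory stores creator functions and other feature flags too):

#include <map>
#include <string>

enum class AccessTypeSketch { NONE, FILE, REMOTE, HDFS };

struct StorageFeaturesSketch { AccessTypeSketch source_access_type = AccessTypeSketch::NONE; };

class StorageFactorySketch
{
public:
    void registerStorage(const std::string & name, StorageFeaturesSketch features) { storages[name] = features; }

    AccessTypeSketch getSourceAccessType(const std::string & table_engine) const
    {
        auto it = storages.find(table_engine);
        if (it == storages.end())
            return AccessTypeSketch::NONE;  /// Unknown engine: nothing extra to check.
        return it->second.source_access_type;
    }

private:
    std::map<std::string, StorageFeaturesSketch> storages;
};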
diff --git a/dbms/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp similarity index 86% rename from dbms/src/Storages/StorageFile.cpp rename to src/Storages/StorageFile.cpp index 48341ce918d..d1332016150 100644 --- a/dbms/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -541,67 +541,71 @@ void StorageFile::truncate(const ASTPtr & /*query*/, const Context & /* context void registerStorageFile(StorageFactory & factory) { - factory.registerStorage("File", [](const StorageFactory::Arguments & args) - { - ASTs & engine_args = args.engine_args; - - if (!(engine_args.size() >= 1 && engine_args.size() <= 3)) // NOLINT - throw Exception( - "Storage File requires from 1 to 3 arguments: name of used format, source and compression_method.", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - - engine_args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[0], args.local_context); - String format_name = engine_args[0]->as<ASTLiteral &>().value.safeGet<String>(); - - String compression_method; - StorageFile::CommonArguments common_args{args.table_id, format_name, compression_method, - args.columns, args.constraints, args.context}; - - if (engine_args.size() == 1) /// Table in database - return StorageFile::create(args.relative_data_path, common_args); - - /// Will use FD if engine_args[1] is int literal or identifier with std* name - int source_fd = -1; - String source_path; - - if (auto opt_name = tryGetIdentifierName(engine_args[1])) + factory.registerStorage( + "File", + [](const StorageFactory::Arguments & args) { - if (*opt_name == "stdin") - source_fd = STDIN_FILENO; - else if (*opt_name == "stdout") - source_fd = STDOUT_FILENO; - else if (*opt_name == "stderr") - source_fd = STDERR_FILENO; + ASTs & engine_args = args.engine_args; + + if (!(engine_args.size() >= 1 && engine_args.size() <= 3)) // NOLINT + throw Exception( + "Storage File requires from 1 to 3 arguments: name of used format, source and compression_method.", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + engine_args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[0], args.local_context); + String format_name = engine_args[0]->as<ASTLiteral &>().value.safeGet<String>(); + + String compression_method; + StorageFile::CommonArguments common_args{ + args.table_id, format_name, compression_method, args.columns, args.constraints, args.context}; + + if (engine_args.size() == 1) /// Table in database + return StorageFile::create(args.relative_data_path, common_args); + + /// Will use FD if engine_args[1] is int literal or identifier with std* name + int source_fd = -1; + String source_path; + + if (auto opt_name = tryGetIdentifierName(engine_args[1])) + { + if (*opt_name == "stdin") + source_fd = STDIN_FILENO; + else if (*opt_name == "stdout") + source_fd = STDOUT_FILENO; + else if (*opt_name == "stderr") + source_fd = STDERR_FILENO; + else + throw Exception( + "Unknown identifier '" + *opt_name + "' in second arg of File storage constructor", ErrorCodes::UNKNOWN_IDENTIFIER); + } + else if (const auto * literal = engine_args[1]->as<ASTLiteral>()) + { + auto type = literal->value.getType(); + if (type == Field::Types::Int64) + source_fd = static_cast<int>(literal->value.get<Int64>()); + else if (type == Field::Types::UInt64) + source_fd = static_cast<int>(literal->value.get<UInt64>()); + else if (type == Field::Types::String) + source_path = literal->value.get<String>(); + else + throw Exception("Second argument must be path or file descriptor", ErrorCodes::BAD_ARGUMENTS); + } + + if (engine_args.size() == 3) + { + engine_args[2] =
evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], args.local_context); + compression_method = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>(); + } else - throw Exception("Unknown identifier '" + *opt_name + "' in second arg of File storage constructor", - ErrorCodes::UNKNOWN_IDENTIFIER); - } - else if (const auto * literal = engine_args[1]->as<ASTLiteral>()) + compression_method = "auto"; + + if (0 <= source_fd) /// File descriptor + return StorageFile::create(source_fd, common_args); + else /// User's file + return StorageFile::create(source_path, args.context.getUserFilesPath(), common_args); + }, { - auto type = literal->value.getType(); - if (type == Field::Types::Int64) - source_fd = static_cast<int>(literal->value.get<Int64>()); - else if (type == Field::Types::UInt64) - source_fd = static_cast<int>(literal->value.get<UInt64>()); - else if (type == Field::Types::String) - source_path = literal->value.get<String>(); - else - throw Exception("Second argument must be path or file descriptor", ErrorCodes::BAD_ARGUMENTS); - } - - if (engine_args.size() == 3) - { - engine_args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], args.local_context); - compression_method = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>(); - } - else - compression_method = "auto"; - - if (0 <= source_fd) /// File descriptor - return StorageFile::create(source_fd, common_args); - else /// User's file - return StorageFile::create(source_path, args.context.getUserFilesPath(), common_args); - }); + .source_access_type = AccessType::FILE, + }); } - } diff --git a/dbms/src/Storages/StorageFile.h b/src/Storages/StorageFile.h similarity index 100% rename from dbms/src/Storages/StorageFile.h rename to src/Storages/StorageFile.h diff --git a/dbms/src/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp similarity index 94% rename from dbms/src/Storages/StorageGenerateRandom.cpp rename to src/Storages/StorageGenerateRandom.cpp index 1fd2d4ec2d8..70b84c076b7 100644 --- a/dbms/src/Storages/StorageGenerateRandom.cpp +++ b/src/Storages/StorageGenerateRandom.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +58,12 @@ void fillBufferWithRandomData(char * __restrict data, size_t size, pcg64 & rng) ColumnPtr fillColumnWithRandomData( - const DataTypePtr type, UInt64 limit, UInt64 max_array_length, UInt64 max_string_length, pcg64 & rng, const Context & context) + const DataTypePtr type, + UInt64 limit, + UInt64 max_array_length, + UInt64 max_string_length, + pcg64 & rng, + const Context & context) { TypeIndex idx = type->getTypeId(); @@ -205,7 +211,10 @@ ColumnPtr fillColumnWithRandomData( { auto column = ColumnUInt16::create(); column->getData().resize(limit); - fillBufferWithRandomData(reinterpret_cast<char *>(column->getData().data()), limit * sizeof(UInt16), rng); + + for (size_t i = 0; i < limit; ++i) + column->getData()[i] = rng() % (DATE_LUT_MAX_DAY_NUM + 1); /// Slow + return column; } case TypeIndex::UInt32: [[fallthrough]]; @@ -337,14 +346,24 @@ public: protected: Chunk generate() override { + /// To support Nested types, we will collect them into a single Array of Tuple.
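The Nested handling used just below relies on grouping sibling columns by their common prefix before generation, so that one Array-of-Tuple column yields consistent offsets for all siblings. The grouping step alone, with simplified types (not the real Nested::collect signature):

#include <map>
#include <string>
#include <vector>

/// {"n.a", "n.b", "x"} -> {"n": {"n.a", "n.b"}, "x": {"x"}}
std::map<std::string, std::vector<std::string>> groupNestedSketch(const std::vector<std::string> & names)
{
    std::map<std::string, std::vector<std::string>> groups;
    for (const auto & name : names)
    {
        auto dot = name.find('.');
        /// Top-level columns form their own single-element group.
        const std::string key = (dot == std::string::npos) ? name : name.substr(0, dot);
        groups[key].push_back(name);
    }
    return groups;
}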
+ auto names_and_types = Nested::collect(block_header.getNamesAndTypesList()); + Columns columns; - columns.reserve(block_header.columns()); - DataTypes types = block_header.getDataTypes(); + columns.reserve(names_and_types.size()); - for (const auto & type : types) - columns.emplace_back(fillColumnWithRandomData(type, block_size, max_array_length, max_string_length, rng, context)); + Block compact_block; + for (const auto & elem : names_and_types) + { + compact_block.insert( + { + fillColumnWithRandomData(elem.type, block_size, max_array_length, max_string_length, rng, context), + elem.type, + elem.name + }); + } - return {std::move(columns), block_size}; + return {Nested::flatten(compact_block).getColumns(), block_size}; } private: diff --git a/dbms/src/Storages/StorageGenerateRandom.h b/src/Storages/StorageGenerateRandom.h similarity index 100% rename from dbms/src/Storages/StorageGenerateRandom.h rename to src/Storages/StorageGenerateRandom.h diff --git a/dbms/src/Storages/StorageHDFS.cpp b/src/Storages/StorageHDFS.cpp similarity index 99% rename from dbms/src/Storages/StorageHDFS.cpp rename to src/Storages/StorageHDFS.cpp index 192fb658154..c9c41cf9d54 100644 --- a/dbms/src/Storages/StorageHDFS.cpp +++ b/src/Storages/StorageHDFS.cpp @@ -339,6 +339,9 @@ void registerStorageHDFS(StorageFactory & factory) } else compression_method = "auto"; return StorageHDFS::create(url, args.table_id, format_name, args.columns, args.constraints, args.context, compression_method); + }, + { + .source_access_type = AccessType::HDFS, }); } diff --git a/dbms/src/Storages/StorageHDFS.h b/src/Storages/StorageHDFS.h similarity index 100% rename from dbms/src/Storages/StorageHDFS.h rename to src/Storages/StorageHDFS.h diff --git a/dbms/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp similarity index 100% rename from dbms/src/Storages/StorageInMemoryMetadata.cpp rename to src/Storages/StorageInMemoryMetadata.cpp diff --git a/dbms/src/Storages/StorageInMemoryMetadata.h b/src/Storages/StorageInMemoryMetadata.h similarity index 100% rename from dbms/src/Storages/StorageInMemoryMetadata.h rename to src/Storages/StorageInMemoryMetadata.h diff --git a/dbms/src/Storages/StorageInput.cpp b/src/Storages/StorageInput.cpp similarity index 100% rename from dbms/src/Storages/StorageInput.cpp rename to src/Storages/StorageInput.cpp diff --git a/dbms/src/Storages/StorageInput.h b/src/Storages/StorageInput.h similarity index 100% rename from dbms/src/Storages/StorageInput.h rename to src/Storages/StorageInput.h diff --git a/dbms/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp similarity index 95% rename from dbms/src/Storages/StorageJoin.cpp rename to src/Storages/StorageJoin.cpp index f5e88b193cd..8912680b1dd 100644 --- a/dbms/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include @@ -57,8 +57,8 @@ StorageJoin::StorageJoin( if (!getColumns().hasPhysical(key)) throw Exception{"Key column (" + key + ") does not exist in table declaration.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE}; - table_join = std::make_shared<AnalyzedJoin>(limits, use_nulls, kind, strictness, key_names); - join = std::make_shared<Join>(table_join, getSampleBlock().sortColumns(), overwrite); + table_join = std::make_shared<TableJoin>(limits, use_nulls, kind, strictness, key_names); + join = std::make_shared<HashJoin>(table_join, getSampleBlock().sortColumns(), overwrite); restore(); } @@
-70,11 +70,11 @@ void StorageJoin::truncate(const ASTPtr &, const Context &, TableStructureWriteL Poco::File(path + "tmp/").createDirectories(); increment = 0; - join = std::make_shared<Join>(table_join, getSampleBlock().sortColumns(), overwrite); + join = std::make_shared<HashJoin>(table_join, getSampleBlock().sortColumns(), overwrite); } -HashJoinPtr StorageJoin::getJoin(std::shared_ptr<AnalyzedJoin> analyzed_join) const +HashJoinPtr StorageJoin::getJoin(std::shared_ptr<TableJoin> analyzed_join) const { if (!analyzed_join->sameStrictnessAndKind(strictness, kind)) throw Exception("Table " + getStorageID().getNameForLogs() + " has incompatible type of JOIN.", ErrorCodes::INCOMPATIBLE_TYPE_OF_JOIN); @@ -89,7 +89,7 @@ HashJoinPtr StorageJoin::getJoin(std::shared_ptr<AnalyzedJoin> analyzed_join) co /// Some HACK to remove wrong names qualifiers: table.column -> column. analyzed_join->setRightKeys(key_names); - HashJoinPtr join_clone = std::make_shared<Join>(analyzed_join, getSampleBlock().sortColumns()); + HashJoinPtr join_clone = std::make_shared<HashJoin>(analyzed_join, getSampleBlock().sortColumns()); join_clone->reuseJoinedData(*join); return join_clone; } @@ -244,7 +244,7 @@ size_t rawSize(const StringRef & t) class JoinSource : public SourceWithProgress { public: - JoinSource(const Join & parent_, UInt64 max_block_size_, Block sample_block_) + JoinSource(const HashJoin & parent_, UInt64 max_block_size_, Block sample_block_) : SourceWithProgress(sample_block_) , parent(parent_) , lock(parent.data->rwlock) @@ -287,7 +287,7 @@ protected: } private: - const Join & parent; + const HashJoin & parent; std::shared_lock<std::shared_mutex> lock; UInt64 max_block_size; Block sample_block; @@ -326,7 +326,7 @@ private: switch (parent.data->type) { #define M(TYPE) \ - case Join::Type::TYPE: \ + case HashJoin::Type::TYPE: \ rows_added = fillColumns(*maps.TYPE); \ break; APPLY_FOR_JOIN_VARIANTS_LIMITED(M) diff --git a/dbms/src/Storages/StorageJoin.h b/src/Storages/StorageJoin.h similarity index 91% rename from dbms/src/Storages/StorageJoin.h rename to src/Storages/StorageJoin.h index acfc8a8b4e7..f956abb4d3b 100644 --- a/dbms/src/Storages/StorageJoin.h +++ b/src/Storages/StorageJoin.h @@ -9,9 +9,9 @@ namespace DB { -class AnalyzedJoin; -class Join; -using HashJoinPtr = std::shared_ptr<Join>; +class TableJoin; +class HashJoin; +using HashJoinPtr = std::shared_ptr<HashJoin>; /** Allows you save the state for later use on the right side of the JOIN. @@ -31,7 +31,7 @@ public: /// Access the innards. HashJoinPtr & getJoin() { return join; } - HashJoinPtr getJoin(std::shared_ptr<AnalyzedJoin> analyzed_join) const; + HashJoinPtr getJoin(std::shared_ptr<TableJoin> analyzed_join) const; /// Verify that the data structure is suitable for implementing this type of JOIN.
void assertCompatible(ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_) const; @@ -53,7 +53,7 @@ private: ASTTableJoin::Strictness strictness; /// ANY | ALL bool overwrite; - std::shared_ptr<AnalyzedJoin> table_join; + std::shared_ptr<TableJoin> table_join; HashJoinPtr join; void insertBlock(const Block & block) override; diff --git a/dbms/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp similarity index 100% rename from dbms/src/Storages/StorageLog.cpp rename to src/Storages/StorageLog.cpp diff --git a/dbms/src/Storages/StorageLog.h b/src/Storages/StorageLog.h similarity index 100% rename from dbms/src/Storages/StorageLog.h rename to src/Storages/StorageLog.h diff --git a/dbms/src/Storages/StorageLogSettings.cpp b/src/Storages/StorageLogSettings.cpp similarity index 100% rename from dbms/src/Storages/StorageLogSettings.cpp rename to src/Storages/StorageLogSettings.cpp diff --git a/dbms/src/Storages/StorageLogSettings.h b/src/Storages/StorageLogSettings.h similarity index 100% rename from dbms/src/Storages/StorageLogSettings.h rename to src/Storages/StorageLogSettings.h diff --git a/dbms/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp similarity index 96% rename from dbms/src/Storages/StorageMaterializedView.cpp rename to src/Storages/StorageMaterializedView.cpp index 63031572cd6..a2f17d84e4e 100644 --- a/dbms/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -171,9 +171,9 @@ StorageInMemoryMetadata StorageMaterializedView::getInMemoryMetadata() const return result; } -QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const { - return getTargetTable()->getQueryProcessingStage(context, query_ptr); + return getTargetTable()->getQueryProcessingStage(context, to_stage, query_ptr); } Pipes StorageMaterializedView::read( @@ -185,7 +185,9 @@ Pipes StorageMaterializedView::read( const unsigned num_streams) { auto storage = getTargetTable(); - auto lock = storage->lockStructureForShare(context.getCurrentQueryId()); + auto lock = storage->lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + if (query_info.order_by_optimizer) query_info.input_sorting_info = query_info.order_by_optimizer->getInputOrder(storage); @@ -200,7 +202,8 @@ Pipes StorageMaterializedView::read( BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const Context & context) { auto storage = getTargetTable(); - auto lock = storage->lockStructureForShare(context.getCurrentQueryId()); + auto lock = storage->lockStructureForShare( + true, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto stream = storage->write(query, context); stream->addTableLock(lock); return stream; } @@ -258,7 +261,7 @@ void StorageMaterializedView::alter( const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata metadata = getInMemoryMetadata(); params.apply(metadata); diff --git a/dbms/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h
similarity index 95% rename from dbms/src/Storages/StorageMaterializedView.h rename to src/Storages/StorageMaterializedView.h index 6284f791f4f..20a0ee2dccb 100644 --- a/dbms/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -31,6 +31,7 @@ public: bool supportsPrewhere() const override { return getTargetTable()->supportsPrewhere(); } bool supportsFinal() const override { return getTargetTable()->supportsFinal(); } bool supportsIndexForIn() const override { return getTargetTable()->supportsIndexForIn(); } + bool supportsParallelInsert() const override { return getTargetTable()->supportsParallelInsert(); } bool mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, const Context & query_context) const override { return getTargetTable()->mayBenefitFromIndexForIn(left_in_operand, query_context); @@ -59,7 +60,7 @@ public: void checkTableCanBeDropped() const override; void checkPartitionCanBeDropped(const ASTPtr & partition) override; - QueryProcessingStage::Enum getQueryProcessingStage(const Context & context, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; StoragePtr getTargetTable() const; StoragePtr tryGetTargetTable() const; diff --git a/dbms/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp similarity index 100% rename from dbms/src/Storages/StorageMemory.cpp rename to src/Storages/StorageMemory.cpp diff --git a/dbms/src/Storages/StorageMemory.h b/src/Storages/StorageMemory.h similarity index 100% rename from dbms/src/Storages/StorageMemory.h rename to src/Storages/StorageMemory.h diff --git a/dbms/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp similarity index 94% rename from dbms/src/Storages/StorageMerge.cpp rename to src/Storages/StorageMerge.cpp index f102ee1c6f8..9be2aebccb5 100644 --- a/dbms/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include @@ -118,7 +117,8 @@ bool StorageMerge::isRemote() const bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, const Context & query_context) const { /// It's beneficial if it is true for at least one table. 
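A pattern worth pausing on, since it repeats in almost every storage touched below: each bare `lockStructureForShare(query_id)` becomes a call that also states write intent and a timeout, so a query queued behind a long DDL operation fails fast instead of hanging forever. A self-contained analogue of the timeout behaviour, using std::shared_timed_mutex as a stand-in for ClickHouse's RWLockImpl (the 120-second figure mirrors the usual lock_acquire_timeout default, an assumption here):

#include <chrono>
#include <iostream>
#include <shared_mutex>
#include <stdexcept>

std::shared_timed_mutex structure_lock; /// one "structure" lock per table

void readUnderStructureLock(std::chrono::seconds acquire_timeout)
{
    std::shared_lock<std::shared_timed_mutex> lock(structure_lock, std::defer_lock);
    if (!lock.try_lock_for(acquire_timeout))
        throw std::runtime_error("Locking attempt timed out"); /// fail fast, as the new code does
    std::cout << "reading table structure under a shared lock\n";
}

int main()
{
    readUnderStructureLock(std::chrono::seconds(120));
}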
- StorageListWithLocks selected_tables = getSelectedTables(query_context.getCurrentQueryId()); + StorageListWithLocks selected_tables = getSelectedTables( + query_context.getCurrentQueryId(), query_context.getSettingsRef()); size_t i = 0; for (const auto & table : selected_tables) @@ -136,7 +136,7 @@ bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, cons } -QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context & context, QueryProcessingStage::Enum to_stage, const ASTPtr & query_ptr) const { auto stage_in_source_tables = QueryProcessingStage::FetchColumns; @@ -150,7 +150,7 @@ QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context & if (table.get() != this) { ++selected_table_size; - stage_in_source_tables = std::max(stage_in_source_tables, table->getQueryProcessingStage(context, query_ptr)); + stage_in_source_tables = std::max(stage_in_source_tables, table->getQueryProcessingStage(context, to_stage, query_ptr)); } iterator->next(); @@ -195,7 +195,7 @@ Pipes StorageMerge::read( * This is necessary to correctly pass the recommended number of threads to each table. */ StorageListWithLocks selected_tables = getSelectedTables( - query_info.query, has_table_virtual_column, context.getCurrentQueryId()); + query_info.query, has_table_virtual_column, context.getCurrentQueryId(), context.getSettingsRef()); if (selected_tables.empty()) /// FIXME: do we support sampling in this case? @@ -287,7 +287,8 @@ Pipes StorageMerge::createSources(const SelectQueryInfo & query_info, const Quer return pipes; } - if (processed_stage <= storage->getQueryProcessingStage(*modified_context, query_info.query)) + auto storage_stage = storage->getQueryProcessingStage(*modified_context, QueryProcessingStage::Complete, query_info.query); + if (processed_stage <= storage_stage) { /// If there are only virtual columns in query, you must request at least one other column. 
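The getQueryProcessingStage change above deserves a gloss: each underlying table now reports, given the caller's target to_stage, how far it can process the query on its own, and StorageMerge must drive every source up to the maximum stage any of them claimed. A minimal model of that std::max fold (enumerator names shortened from QueryProcessingStage):

#include <algorithm>
#include <iostream>
#include <vector>

enum Stage { FetchColumns = 0, WithMergeableState = 1, Complete = 2 };

Stage mergedProcessingStage(const std::vector<Stage> & per_table_stages)
{
    Stage result = FetchColumns;
    for (Stage stage : per_table_stages)
        result = std::max(result, stage); /// same fold as in the hunk above
    return result;
}

int main()
{
    /// e.g. a plain local table vs. a table that can pre-aggregate remotely
    std::cout << mergedProcessingStage({FetchColumns, WithMergeableState}) << '\n'; /// prints 1
}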
if (real_column_names.empty()) @@ -295,7 +296,7 @@ Pipes StorageMerge::createSources(const SelectQueryInfo & query_info, const Quer pipes = storage->read(real_column_names, modified_query_info, *modified_context, processed_stage, max_block_size, UInt32(streams_num)); } - else if (processed_stage > storage->getQueryProcessingStage(*modified_context, query_info.query)) + else if (processed_stage > storage_stage) { modified_query_info.query->as<ASTSelectQuery>()->replaceDatabaseAndTable(source_database, table_name); @@ -355,7 +356,7 @@ Pipes StorageMerge::createSources(const SelectQueryInfo & query_info, const Quer } -StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String & query_id) const +StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String & query_id, const Settings & settings) const { StorageListWithLocks selected_tables; auto iterator = getDatabaseIterator(); @@ -364,7 +365,8 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String { auto & table = iterator->table(); if (table.get() != this) - selected_tables.emplace_back(table, table->lockStructureForShare(query_id), iterator->name()); + selected_tables.emplace_back( + table, table->lockStructureForShare(false, query_id, settings.lock_acquire_timeout), iterator->name()); iterator->next(); } @@ -373,7 +375,8 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String } -StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr & query, bool has_virtual_column, const String & query_id) const +StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( + const ASTPtr & query, bool has_virtual_column, const String & query_id, const Settings & settings) const { StorageListWithLocks selected_tables; DatabaseTablesIteratorPtr iterator = getDatabaseIterator(); @@ -389,7 +392,8 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr if (storage.get() != this) { - selected_tables.emplace_back(storage, storage->lockStructureForShare(query_id), iterator->name()); + selected_tables.emplace_back( + storage, storage->lockStructureForShare(false, query_id, settings.lock_acquire_timeout), iterator->name()); virtual_column->insert(iterator->name()); } @@ -434,7 +438,7 @@ void StorageMerge::checkAlterIsPossible(const AlterCommands & commands, const Se void StorageMerge::alter( const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata storage_metadata = getInMemoryMetadata(); @@ -473,7 +477,7 @@ void StorageMerge::convertingSourceStream(const Block & header, const Context & Pipe & pipe, QueryProcessingStage::Enum processed_stage) { Block before_block_header = pipe.getHeader(); - pipe.addSimpleTransform(std::make_shared<ConvertingTransform>(before_block_header, header, ConvertingTransform::MatchColumnsMode::Name, context)); + pipe.addSimpleTransform(std::make_shared<ConvertingTransform>(before_block_header, header, ConvertingTransform::MatchColumnsMode::Name)); auto where_expression = query->as<ASTSelectQuery>()->where(); diff --git a/dbms/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h similarity index 91% rename from dbms/src/Storages/StorageMerge.h rename to src/Storages/StorageMerge.h index 1d2df3cb9ce..fbc1906b411 100644 --- a/dbms/src/Storages/StorageMerge.h
+++ b/src/Storages/StorageMerge.h @@ -31,7 +31,7 @@ public: NameAndTypePair getColumn(const String & column_name) const override; bool hasColumn(const String & column_name) const override; - QueryProcessingStage::Enum getQueryProcessingStage(const Context &, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; Pipes read( const Names & column_names, @@ -57,9 +57,10 @@ private: using StorageWithLockAndName = std::tuple<StoragePtr, TableStructureReadLockHolder, String>; using StorageListWithLocks = std::list<StorageWithLockAndName>; - StorageListWithLocks getSelectedTables(const String & query_id) const; + StorageListWithLocks getSelectedTables(const String & query_id, const Settings & settings) const; - StorageMerge::StorageListWithLocks getSelectedTables(const ASTPtr & query, bool has_virtual_column, const String & query_id) const; + StorageMerge::StorageListWithLocks getSelectedTables( + const ASTPtr & query, bool has_virtual_column, const String & query_id, const Settings & settings) const; template <typename F> StoragePtr getFirstTable(F && predicate) const; diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp similarity index 91% rename from dbms/src/Storages/StorageMergeTree.cpp rename to src/Storages/StorageMergeTree.cpp index 64950a47437..1aac6717728 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -223,7 +223,7 @@ void StorageMergeTree::alter( /// This alter can be performed at metadata level only if (commands.isSettingsAlter()) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); changeSettings(metadata.settings_ast, table_lock_holder); @@ -231,24 +231,29 @@ void StorageMergeTree::alter( } else { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); changeSettings(metadata.settings_ast, table_lock_holder); /// Reinitialize primary key because primary key column types might have changed. setProperties(metadata); - setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast); + setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast); DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata); - /// We release all locks except alter_lock which allows + String mutation_file_name; + Int64 mutation_version = -1; + if (!maybe_mutation_commands.empty()) + mutation_version = startMutation(maybe_mutation_commands, mutation_file_name); + + /// We release all locks except alter_intention_lock which allows /// to execute alter queries sequentially table_lock_holder.releaseAllExceptAlterIntention(); /// Always execute required mutations synchronously, because alters /// should be executed in sequential order. if (!maybe_mutation_commands.empty()) - mutateImpl(maybe_mutation_commands, /* mutations_sync = */ 1); + waitForMutation(mutation_version, mutation_file_name); } } @@ -351,43 +356,42 @@ public: }; -void StorageMergeTree::mutateImpl(const MutationCommands & commands, size_t mutations_sync) +Int64 StorageMergeTree::startMutation(const MutationCommands & commands, String & mutation_file_name) { /// Choose any disk, because when we load mutations we search them at each disk /// where storage can be placed.
See loadMutations(). auto disk = getStoragePolicy()->getAnyDisk(); - String file_name; Int64 version; + std::lock_guard lock(currently_processing_in_background_mutex); - { - std::lock_guard lock(currently_processing_in_background_mutex); + MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get()); + version = increment.get(); + entry.commit(version); + mutation_file_name = entry.file_name; + auto insertion = current_mutations_by_id.emplace(mutation_file_name, std::move(entry)); + current_mutations_by_version.emplace(version, insertion.first->second); - MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get()); - version = increment.get(); - entry.commit(version); - file_name = entry.file_name; - auto insertion = current_mutations_by_id.emplace(file_name, std::move(entry)); - current_mutations_by_version.emplace(version, insertion.first->second); - - LOG_INFO(log, "Added mutation: " << file_name); - merging_mutating_task_handle->wake(); - } - - /// We have to wait mutation end - if (mutations_sync > 0) - { - LOG_INFO(log, "Waiting mutation: " << file_name); - auto check = [version, this]() { return shutdown_called || isMutationDone(version); }; - std::unique_lock lock(mutation_wait_mutex); - mutation_wait_event.wait(lock, check); - LOG_INFO(log, "Mutation " << file_name << " done"); - } + LOG_INFO(log, "Added mutation: " << mutation_file_name); + merging_mutating_task_handle->wake(); + return version; +} +void StorageMergeTree::waitForMutation(Int64 version, const String & file_name) +{ + LOG_INFO(log, "Waiting mutation: " << file_name); + auto check = [version, this]() { return shutdown_called || isMutationDone(version); }; + std::unique_lock lock(mutation_wait_mutex); + mutation_wait_event.wait(lock, check); + LOG_INFO(log, "Mutation " << file_name << " done"); } void StorageMergeTree::mutate(const MutationCommands & commands, const Context & query_context) { - mutateImpl(commands, query_context.getSettingsRef().mutations_sync); + String mutation_file_name; + Int64 version = startMutation(commands, mutation_file_name); + + if (query_context.getSettingsRef().mutations_sync > 0) + waitForMutation(version, mutation_file_name); } namespace @@ -537,7 +541,8 @@ bool StorageMergeTree::merge( bool deduplicate, String * out_disable_reason) { - auto table_lock_holder = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock_holder = lockStructureForShare( + true, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); FutureMergedMutatedPart future_part; @@ -655,7 +660,8 @@ BackgroundProcessingPoolTaskResult StorageMergeTree::movePartsTask() bool StorageMergeTree::tryMutatePart() { - auto table_lock_holder = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock_holder = lockStructureForShare( + true, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); size_t max_ast_elements = global_context.getSettingsRef().max_expanded_ast_elements; FutureMergedMutatedPart future_part; @@ -688,10 +694,16 @@ bool StorageMergeTree::tryMutatePart() MutationCommands commands_for_size_validation; for (const auto & command : it->second.commands) { - if (command.type != MutationCommand::Type::DROP_COLUMN && command.type != MutationCommand::Type::DROP_INDEX) + if (command.type != MutationCommand::Type::DROP_COLUMN + && command.type != MutationCommand::Type::DROP_INDEX + && command.type != MutationCommand::Type::RENAME_COLUMN) + { commands_for_size_validation.push_back(command); + } 
else + { commands_size += command.ast->size(); + } } if (!commands_for_size_validation.empty()) @@ -780,7 +792,8 @@ BackgroundProcessingPoolTaskResult StorageMergeTree::mergeMutateTask() { { /// TODO: Implement tryLockStructureForShare. - auto lock_structure = lockStructureForShare(""); + auto lock_structure = lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); clearOldPartsFromFilesystem(); clearOldTemporaryDirectories(); } @@ -973,14 +986,16 @@ void StorageMergeTree::alterPartition(const ASTPtr & query, const PartitionComma case PartitionCommand::FREEZE_PARTITION: { - auto lock = lockStructureForShare(context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); freezePartition(command.partition, command.with_name, context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { - auto lock = lockStructureForShare(context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); freezeAll(command.with_name, context, lock); } break; @@ -998,7 +1013,7 @@ void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, cons /// This protects against "revival" of data for a removed partition after completion of merge. auto merge_blocker = merger_mutator.merges_blocker.cancel(); /// Waits for completion of merge and does not start new ones. - auto lock = lockExclusively(context.getCurrentQueryId()); + auto lock = lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); String partition_id = getPartitionIDFromQuery(partition, context); @@ -1045,8 +1060,8 @@ void StorageMergeTree::attachPartition(const ASTPtr & partition, bool attach_par void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context) { - auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = source_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); Stopwatch watch; MergeTreeData & src_data = checkStructureAndGetMergeTreeData(source_table); @@ -1116,8 +1131,8 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & context) { - auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = dest_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto dest_table_storage = std::dynamic_pointer_cast<StorageMergeTree>(dest_table); if (!dest_table_storage) @@ -1254,4 +1269,15 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, const Context & c return results; } + +MutationCommands StorageMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const +{ + std::lock_guard lock(currently_processing_in_background_mutex); + + auto it =
current_mutations_by_version.upper_bound(part->info.getDataVersion()); + if (it == current_mutations_by_version.end()) + return {}; + return it->second.commands; +} + } diff --git a/dbms/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h similarity index 93% rename from dbms/src/Storages/StorageMergeTree.h rename to src/Storages/StorageMergeTree.h index 8161e0b5c4b..1762ba19824 100644 --- a/dbms/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -120,7 +120,11 @@ private: BackgroundProcessingPoolTaskResult movePartsTask(); - void mutateImpl(const MutationCommands & commands, size_t mutations_sync); + /// Allocate a block number for the new mutation, write the mutation to disk + /// and register it in in-memory structures. Wake up the merge-mutation task. + Int64 startMutation(const MutationCommands & commands, String & mutation_file_name); + /// Wait until the mutation with the given version finishes for all parts + void waitForMutation(Int64 version, const String & file_name); /// Try and find a single part to mutate and mutate it. If some part was successfully mutated, return true. bool tryMutatePart(); @@ -165,6 +169,8 @@ protected: const MergingParams & merging_params_, std::unique_ptr<MergeTreeSettings> settings_, bool has_force_restore_data_flag); + + MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override; }; } diff --git a/dbms/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp similarity index 99% rename from dbms/src/Storages/StorageMySQL.cpp rename to src/Storages/StorageMySQL.cpp index 6645b41376a..055e3f8f264 100644 --- a/dbms/src/Storages/StorageMySQL.cpp +++ b/src/Storages/StorageMySQL.cpp @@ -248,6 +248,9 @@ void registerStorageMySQL(StorageFactory & factory) args.columns, args.constraints, args.context); + }, + { + .source_access_type = AccessType::MYSQL, }); } diff --git a/dbms/src/Storages/StorageMySQL.h b/src/Storages/StorageMySQL.h similarity index 100% rename from dbms/src/Storages/StorageMySQL.h rename to src/Storages/StorageMySQL.h diff --git a/dbms/src/Storages/StorageNull.cpp b/src/Storages/StorageNull.cpp similarity index 96% rename from dbms/src/Storages/StorageNull.cpp rename to src/Storages/StorageNull.cpp index 878be5bbf2d..bafb3d9a9fb 100644 --- a/dbms/src/Storages/StorageNull.cpp +++ b/src/Storages/StorageNull.cpp @@ -48,7 +48,7 @@ void StorageNull::checkAlterIsPossible(const AlterCommands & commands, const Set void StorageNull::alter( const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata metadata = getInMemoryMetadata(); diff --git a/dbms/src/Storages/StorageNull.h b/src/Storages/StorageNull.h similarity index 100% rename from dbms/src/Storages/StorageNull.h rename to src/Storages/StorageNull.h diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp similarity index 98% rename from dbms/src/Storages/StorageReplicatedMergeTree.cpp rename to src/Storages/StorageReplicatedMergeTree.cpp index 336fef069d0..a19a424c643 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1,4 +1,5 @@ -#include +#include + #include #include #include @@ -25,6 +26,8 @@ #include #include +#include + #include #include @@ -246,6
+249,11 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( createTableIfNotExists(); + /// We have to check granularity on other replicas. If it's fixed we + /// must create our new replica with fixed granularity and store this + /// information in /replica/metadata. + other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper(); + checkTableStructure(zookeeper_path); Coordination::Stat metadata_stat; @@ -256,6 +264,18 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( } else { + + /// In old tables this node may be missing or empty + String replica_metadata; + bool replica_metadata_exists = current_zookeeper->tryGet(replica_path + "/metadata", replica_metadata); + if (!replica_metadata_exists || replica_metadata.empty()) + { + /// We have to check shared node granularity before we create ours. + other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper(); + ReplicatedMergeTreeTableMetadata current_metadata(*this); + current_zookeeper->createOrUpdate(replica_path + "/metadata", current_metadata.toString(), zkutil::CreateMode::Persistent); + } + checkTableStructure(replica_path); checkParts(skip_sanity_checks); @@ -263,8 +283,13 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( { metadata_version = parse<int>(current_zookeeper->get(replica_path + "/metadata_version")); } - else /// This replica was created on old version, so we have to take version of global node + else { + /// This replica was created with an old ClickHouse version, so we have + /// to take the version of the global node. If somebody alters our + /// table, we will fill the /metadata_version node in ZooKeeper. + /// Otherwise on the next restart we can again use the version from the + /// shared metadata node because it was not changed. Coordination::Stat metadata_stat; current_zookeeper->get(zookeeper_path + "/metadata", &metadata_stat); metadata_version = metadata_stat.version; @@ -277,7 +302,6 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( createNewZooKeeperNodes(); - other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper(); } @@ -447,7 +471,6 @@ void StorageReplicatedMergeTree::checkTableStructure(const String & zookeeper_pr } } - void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & metadata_diff) { StorageInMemoryMetadata metadata = getInMemoryMetadata(); @@ -459,7 +482,7 @@ void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_column if (metadata_diff.sorting_key_changed) { ParserNotEmptyExpressionList parser(false); - auto new_sorting_key_expr_list = parseQuery(parser, metadata_diff.new_sorting_key, 0); + auto new_sorting_key_expr_list = parseQuery(parser, metadata_diff.new_sorting_key, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); if (new_sorting_key_expr_list->children.size() == 1) metadata.order_by_ast = new_sorting_key_expr_list->children[0]; @@ -487,7 +510,7 @@ void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_column if (metadata_diff.ttl_table_changed) { ParserTTLExpressionList parser; - metadata.ttl_for_table_ast = parseQuery(parser, metadata_diff.new_ttl_table, 0); + metadata.ttl_for_table_ast = parseQuery(parser, metadata_diff.new_ttl_table, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); } } @@ -497,7 +520,7 @@ void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_column /// Even if the primary/sorting keys didn't change we must reinitialize it /// because primary key column types might have changed.
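To make the metadata_version dance above concrete: replicas created by new servers read their own /metadata_version node, while replicas created by old servers fall back to the ZooKeeper version counter of the shared /metadata node until their first ALTER fills the per-replica node. A sketch of that selection logic with a stubbed ZooKeeper client (the real code uses zkutil::ZooKeeper; FakeZooKeeper and its members are invented for illustration):

#include <iostream>
#include <map>
#include <optional>
#include <string>

struct FakeZooKeeper
{
    std::map<std::string, std::pair<std::string, int>> nodes; /// path -> (data, node version)

    std::optional<std::string> tryGet(const std::string & path) const
    {
        auto it = nodes.find(path);
        if (it == nodes.end())
            return std::nullopt;
        return it->second.first;
    }

    int getVersion(const std::string & path) const { return nodes.at(path).second; }
};

int metadataVersion(const FakeZooKeeper & zk, const std::string & replica_path, const std::string & table_path)
{
    if (auto stored = zk.tryGet(replica_path + "/metadata_version"))
        return std::stoi(*stored); /// replica created by a new server
    return zk.getVersion(table_path + "/metadata"); /// old replica: reuse the shared node's version
}

int main()
{
    FakeZooKeeper zk;
    zk.nodes["/table/metadata"] = {"...", 7}; /// shared node changed 7 times
    std::cout << metadataVersion(zk, "/table/replicas/r1", "/table") << '\n'; /// prints 7
}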
setProperties(metadata); - setTTLExpressions(new_columns.getColumnTTLs(), metadata.ttl_for_table_ast); + setTTLExpressions(new_columns, metadata.ttl_for_table_ast); } @@ -1025,7 +1048,8 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) ReservationPtr reserved_space = reserveSpacePreferringTTLRules(estimated_space_for_merge, ttl_infos, time(nullptr), max_volume_index); - auto table_lock = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations); FutureMergedMutatedPart future_merged_part(parts, entry.new_part_type); if (future_merged_part.name != entry.new_part_name) @@ -1160,7 +1184,8 @@ bool StorageReplicatedMergeTree::tryExecutePartMutation(const StorageReplicatedM /// Can throw an exception. ReservationPtr reserved_space = reserveSpace(estimated_space_for_result, source_part->disk); - auto table_lock = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations); MutableDataPartPtr new_part; Transaction transaction(*this); @@ -1514,7 +1539,8 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) PartDescriptions parts_to_add; DataPartsVector parts_to_remove; - auto table_lock_holder_dst_table = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock_holder_dst_table = lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); for (size_t i = 0; i < entry_replace.new_part_names.size(); ++i) { @@ -1576,7 +1602,8 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) return 0; } - table_lock_holder_src_table = source_table->lockStructureForShare(RWLockImpl::NO_QUERY); + table_lock_holder_src_table = source_table->lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); DataPartStates valid_states{MergeTreeDataPartState::PreCommitted, MergeTreeDataPartState::Committed, MergeTreeDataPartState::Outdated}; @@ -2699,7 +2726,8 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Strin TableStructureReadLockHolder table_lock_holder; if (!to_detached) - table_lock_holder = lockStructureForShare(RWLockImpl::NO_QUERY); + table_lock_holder = lockStructureForShare( + true, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); /// Logging Stopwatch stopwatch; @@ -3166,7 +3194,7 @@ bool StorageReplicatedMergeTree::executeMetadataAlter(const StorageReplicatedMer { /// TODO (relax this lock) - auto table_lock = lockExclusively(RWLockImpl::NO_QUERY); + auto table_lock = lockExclusively(RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); LOG_INFO(log, "Metadata changed in ZooKeeper. Applying changes locally."); @@ -3193,7 +3221,8 @@ void StorageReplicatedMergeTree::alter( if (params.isSettingsAlter()) { - lockStructureExclusively(table_lock_holder, query_context.getCurrentQueryId()); + lockStructureExclusively( + table_lock_holder, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); /// We don't replicate storage_settings_ptr ALTER. It's local operation. /// Also we don't upgrade alter lock to table structure lock. 
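The hunks above keep reinforcing the split that ReplicatedMergeTree::alter makes between its two paths: a SETTINGS-only ALTER is a local, metadata-level change taken under the exclusive lock, while structural changes go through the replication log (and now carry an implied mutation to wait on). A toy outline of the branching, with invented names rather than the real interfaces:

#include <iostream>

struct AlterCommandsLite
{
    bool settings_only; /// stand-in for AlterCommands::isSettingsAlter()
};

void alterTable(const AlterCommandsLite & commands)
{
    if (commands.settings_only)
    {
        /// local path: exclusive metadata lock with a timeout, settings
        /// rewritten in place, nothing written to ZooKeeper
        std::cout << "apply settings locally\n";
        return;
    }
    /// structural path: emit an ALTER entry to the replicated log and wait
    /// for replicas (and for the accompanying mutation, after this diff)
    std::cout << "push ALTER entry to the replicated log\n";
}

int main()
{
    alterTable({true});
    alterTable({false});
}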
StorageInMemoryMetadata metadata = getInMemoryMetadata(); @@ -3223,7 +3252,7 @@ void StorageReplicatedMergeTree::alter( alter_entry.emplace(); mutation_znode.reset(); - /// We can safely read structure, because we guarded with alter_lock + /// We can safely read structure, because we guarded with alter_intention_lock if (is_readonly) throw Exception("Can't ALTER readonly table", ErrorCodes::TABLE_IS_READ_ONLY); @@ -3259,7 +3288,8 @@ void StorageReplicatedMergeTree::alter( if (ast_to_str(current_metadata.settings_ast) != ast_to_str(future_metadata.settings_ast)) { - lockStructureExclusively(table_lock_holder, query_context.getCurrentQueryId()); + lockStructureExclusively( + table_lock_holder, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); /// Just change settings current_metadata.settings_ast = future_metadata.settings_ast; changeSettings(current_metadata.settings_ast, table_lock_holder); @@ -3428,14 +3458,16 @@ void StorageReplicatedMergeTree::alterPartition(const ASTPtr & query, const Part case PartitionCommand::FREEZE_PARTITION: { - auto lock = lockStructureForShare(query_context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); freezePartition(command.partition, command.with_name, query_context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { - auto lock = lockStructureForShare(query_context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); freezeAll(command.with_name, query_context, lock); } break; @@ -4443,7 +4475,8 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() { /// Critical section is not required (since grabOldParts() returns unique part set on each call) - auto table_lock = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); auto zookeeper = getZooKeeper(); DataPartsVector parts = grabOldParts(); @@ -4738,8 +4771,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ const Context & context) { /// First argument is true, because we possibly will add new data to current table. 
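That comment is the key to reading the lock pairs below: the destination table takes the shared lock with write intent (parts will be attached to it), the source is locked for reading only, and both use the query-level timeout. A compact sketch of the convention with a hypothetical table type (the real signature lives in IStorage.h, outside this diff):

#include <iostream>

struct TableLite
{
    const char * name;

    /// stand-in for IStorage::lockStructureForShare(bool, query_id, timeout)
    void lockStructureForShare(bool will_add_new_data, int timeout_sec) const
    {
        std::cout << name << ": shared lock, write intent = " << will_add_new_data
                  << ", timeout = " << timeout_sec << "s\n";
    }
};

void replacePartitionFrom(const TableLite & dest, const TableLite & source, int lock_acquire_timeout)
{
    dest.lockStructureForShare(true, lock_acquire_timeout);    /// we will add data here
    source.lockStructureForShare(false, lock_acquire_timeout); /// only read from here
}

int main()
{
    replacePartitionFrom({"dest"}, {"src"}, 120);
}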
- auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = source_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(true, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); Stopwatch watch; MergeTreeData & src_data = checkStructureAndGetMergeTreeData(source_table); @@ -4917,8 +4950,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & context) { - auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = dest_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto dest_table_storage = std::dynamic_pointer_cast<StorageReplicatedMergeTree>(dest_table); if (!dest_table_storage) @@ -5293,32 +5326,8 @@ bool StorageReplicatedMergeTree::canUseAdaptiveGranularity() const } -StorageInMemoryMetadata -StorageReplicatedMergeTree::getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const +MutationCommands StorageReplicatedMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const { - auto replicated_metadata = ReplicatedMergeTreeTableMetadata::parse(metadata_str); - StorageInMemoryMetadata result = getInMemoryMetadata(); - result.columns = ColumnsDescription::parse(columns_str); - result.constraints = ConstraintsDescription::parse(replicated_metadata.constraints); - result.indices = IndicesDescription::parse(replicated_metadata.skip_indices); - - ParserExpression expression_p; - - /// The only thing, that can be changed is ttl expression - if (replicated_metadata.primary_key.empty()) - throw Exception("Primary key cannot be empty" , ErrorCodes::LOGICAL_ERROR); - - if (!replicated_metadata.sorting_key.empty()) - { - result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.sorting_key + ")", 0); - result.primary_key_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0); - } - else - { - result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0); - } - return result; - + return queue.getFirstAlterMutationCommandsForPart(part); } - } diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h similarity index 99% rename from dbms/src/Storages/StorageReplicatedMergeTree.h rename to src/Storages/StorageReplicatedMergeTree.h index b8132329ae4..01dd32614f9 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -526,7 +526,7 @@ private: void waitMutationToFinishOnReplicas( const Strings & replicas, const String & mutation_id) const; - StorageInMemoryMetadata getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const; + MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override; protected: /** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
@@ -543,6 +543,7 @@ protected: const MergingParams & merging_params_, std::unique_ptr<MergeTreeSettings> settings_, bool has_force_restore_data_flag); + }; diff --git a/dbms/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp similarity index 99% rename from dbms/src/Storages/StorageS3.cpp rename to src/Storages/StorageS3.cpp index 4c359cafda7..679f343d0da 100644 --- a/dbms/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -348,6 +348,9 @@ void registerStorageS3(StorageFactory & factory) compression_method = "auto"; return StorageS3::create(s3_uri, access_key_id, secret_access_key, args.table_id, format_name, min_upload_part_size, args.columns, args.constraints, args.context); + }, + { + .source_access_type = AccessType::S3, }); } diff --git a/dbms/src/Storages/StorageS3.h b/src/Storages/StorageS3.h similarity index 100% rename from dbms/src/Storages/StorageS3.h rename to src/Storages/StorageS3.h diff --git a/dbms/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp similarity index 98% rename from dbms/src/Storages/StorageSet.cpp rename to src/Storages/StorageSet.cpp index 72ae46787c8..79f5198b304 100644 --- a/dbms/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -112,7 +112,7 @@ StorageSet::StorageSet( const ConstraintsDescription & constraints_, const Context & context_) : StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_}, - set(std::make_shared<Set>(SizeLimits(), false)) + set(std::make_shared<Set>(SizeLimits(), false, true)) { Block header = getSampleBlock(); header = header.sortColumns(); @@ -137,7 +137,7 @@ void StorageSet::truncate(const ASTPtr &, const Context &, TableStructureWriteLo header = header.sortColumns(); increment = 0; - set = std::make_shared<Set>(SizeLimits(), false); + set = std::make_shared<Set>(SizeLimits(), false, true); set->setHeader(header); } diff --git a/dbms/src/Storages/StorageSet.h b/src/Storages/StorageSet.h similarity index 100% rename from dbms/src/Storages/StorageSet.h rename to src/Storages/StorageSet.h diff --git a/dbms/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp similarity index 100% rename from dbms/src/Storages/StorageStripeLog.cpp rename to src/Storages/StorageStripeLog.cpp diff --git a/dbms/src/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h similarity index 100% rename from dbms/src/Storages/StorageStripeLog.h rename to src/Storages/StorageStripeLog.h diff --git a/dbms/src/Storages/StorageTinyLog.cpp b/src/Storages/StorageTinyLog.cpp similarity index 99% rename from dbms/src/Storages/StorageTinyLog.cpp rename to src/Storages/StorageTinyLog.cpp index f171c694f21..7b89be93f8a 100644 --- a/dbms/src/Storages/StorageTinyLog.cpp +++ b/src/Storages/StorageTinyLog.cpp @@ -402,8 +402,8 @@ Pipes StorageTinyLog::read( Pipes pipes; - // When reading, we lock the entire storage, because we only have one file - // per column and can't modify it concurrently. + // When reading, we lock the entire storage, because we only have one file + // per column and can't modify it concurrently.
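The repeated comment above states TinyLog's concurrency model: every column lives in exactly one append-only file, so reads must lock the whole storage rather than individual parts. A standalone miniature of that scheme, with std::shared_mutex standing in for the storage-wide lock:

#include <fstream>
#include <iostream>
#include <shared_mutex>
#include <string>

std::shared_mutex storage_lock; /// one lock for the whole table, as in TinyLog

void appendValue(const std::string & column_file, int value)
{
    std::unique_lock lock(storage_lock); /// exclusive: INSERT moves the end of the only file
    std::ofstream(column_file, std::ios::app) << value << '\n';
}

void readColumn(const std::string & column_file)
{
    std::shared_lock lock(storage_lock); /// shared: many readers may scan at once
    std::ifstream in(column_file);
    for (int value; in >> value;)
        std::cout << value << '\n';
}

int main()
{
    appendValue("x.bin", 1);
    appendValue("x.bin", 2);
    readColumn("x.bin");
}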
pipes.emplace_back(std::make_shared<TinyLogSource>( max_block_size, Nested::collect(getColumns().getAllPhysical().addTypes(column_names)), *this, context.getSettingsRef().max_read_buffer_size)); diff --git a/dbms/src/Storages/StorageTinyLog.h b/src/Storages/StorageTinyLog.h similarity index 100% rename from dbms/src/Storages/StorageTinyLog.h rename to src/Storages/StorageTinyLog.h diff --git a/dbms/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp similarity index 99% rename from dbms/src/Storages/StorageURL.cpp rename to src/Storages/StorageURL.cpp index aaccccebef3..6c6f79b50e7 100644 --- a/dbms/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -252,6 +252,9 @@ void registerStorageURL(StorageFactory & factory) format_name, args.columns, args.constraints, args.context, compression_method); + }, + { + .source_access_type = AccessType::URL, }); } } diff --git a/dbms/src/Storages/StorageURL.h b/src/Storages/StorageURL.h similarity index 100% rename from dbms/src/Storages/StorageURL.h rename to src/Storages/StorageURL.h diff --git a/dbms/src/Storages/StorageValues.cpp b/src/Storages/StorageValues.cpp similarity index 100% rename from dbms/src/Storages/StorageValues.cpp rename to src/Storages/StorageValues.cpp diff --git a/dbms/src/Storages/StorageValues.h b/src/Storages/StorageValues.h similarity index 100% rename from dbms/src/Storages/StorageValues.h rename to src/Storages/StorageValues.h diff --git a/dbms/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp similarity index 94% rename from dbms/src/Storages/StorageView.cpp rename to src/Storages/StorageView.cpp index 05feeb7d786..5937d0a62ab 100644 --- a/dbms/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -12,13 +12,12 @@ #include #include -#include - #include #include #include #include +#include namespace DB @@ -78,8 +77,15 @@ Pipes StorageView::read( /// It's expected that the columns read from storage are not constant. /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery. for (auto & pipe : pipes) + { pipe.addSimpleTransform(std::make_shared<MaterializingTransform>(pipe.getHeader())); + /// And also convert to expected structure.
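The added transform closing this hunk matters because a view's inner SELECT may produce columns in a different order (or a superset) of what the caller requested; ConvertingTransform with MatchColumnsMode::Name lines the block up with the expected header by column name. A toy version of that by-name conversion on a simplified Block:

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

using Block = std::vector<std::pair<std::string, std::vector<int>>>; /// (name, column) pairs

Block convertByName(const Block & input, const std::vector<std::string> & expected_names)
{
    std::map<std::string, const std::vector<int> *> by_name;
    for (const auto & [name, column] : input)
        by_name[name] = &column;

    Block result;
    for (const auto & name : expected_names)
    {
        auto it = by_name.find(name);
        if (it == by_name.end())
            throw std::runtime_error("Cannot find column " + name); /// the real transform throws too
        result.emplace_back(name, *it->second);
    }
    return result;
}

int main()
{
    Block inner = {{"b", {4, 5}}, {"a", {1, 2}}}; /// inner query order
    for (const auto & [name, column] : convertByName(inner, {"a", "b"}))
        std::cout << name << " has " << column.size() << " rows\n";
}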
+ pipe.addSimpleTransform(std::make_shared<ConvertingTransform>( + pipe.getHeader(), getSampleBlockForColumns(column_names), + ConvertingTransform::MatchColumnsMode::Name)); + } + return pipes; } diff --git a/dbms/src/Storages/StorageView.h b/src/Storages/StorageView.h similarity index 100% rename from dbms/src/Storages/StorageView.h rename to src/Storages/StorageView.h diff --git a/dbms/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp similarity index 98% rename from dbms/src/Storages/StorageXDBC.cpp rename to src/Storages/StorageXDBC.cpp index cedd2abf38f..dd449e490aa 100644 --- a/dbms/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -134,6 +134,9 @@ namespace args.context, bridge_helper); + }, + { + .source_access_type = BridgeHelperMixin::getSourceAccessType(), }); } } diff --git a/dbms/src/Storages/StorageXDBC.h b/src/Storages/StorageXDBC.h similarity index 100% rename from dbms/src/Storages/StorageXDBC.h rename to src/Storages/StorageXDBC.h diff --git a/dbms/src/Storages/System/CMakeLists.txt b/src/Storages/System/CMakeLists.txt similarity index 100% rename from dbms/src/Storages/System/CMakeLists.txt rename to src/Storages/System/CMakeLists.txt diff --git a/dbms/src/Storages/System/IStorageSystemOneBlock.h b/src/Storages/System/IStorageSystemOneBlock.h similarity index 100% rename from dbms/src/Storages/System/IStorageSystemOneBlock.h rename to src/Storages/System/IStorageSystemOneBlock.h diff --git a/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp b/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp rename to src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp diff --git a/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.h b/src/Storages/System/StorageSystemAggregateFunctionCombinators.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.h rename to src/Storages/System/StorageSystemAggregateFunctionCombinators.h diff --git a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp b/src/Storages/System/StorageSystemAsynchronousMetrics.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp rename to src/Storages/System/StorageSystemAsynchronousMetrics.cpp diff --git a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h b/src/Storages/System/StorageSystemAsynchronousMetrics.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h rename to src/Storages/System/StorageSystemAsynchronousMetrics.h diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.cpp b/src/Storages/System/StorageSystemBuildOptions.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemBuildOptions.cpp rename to src/Storages/System/StorageSystemBuildOptions.cpp diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in b/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in similarity index 100% rename from dbms/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in rename to src/Storages/System/StorageSystemBuildOptions.generated.cpp.in diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.h b/src/Storages/System/StorageSystemBuildOptions.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemBuildOptions.h rename to src/Storages/System/StorageSystemBuildOptions.h diff --git
a/dbms/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemClusters.cpp rename to src/Storages/System/StorageSystemClusters.cpp diff --git a/dbms/src/Storages/System/StorageSystemClusters.h b/src/Storages/System/StorageSystemClusters.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemClusters.h rename to src/Storages/System/StorageSystemClusters.h diff --git a/dbms/src/Storages/System/StorageSystemCollations.cpp b/src/Storages/System/StorageSystemCollations.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemCollations.cpp rename to src/Storages/System/StorageSystemCollations.cpp diff --git a/dbms/src/Storages/System/StorageSystemCollations.h b/src/Storages/System/StorageSystemCollations.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemCollations.h rename to src/Storages/System/StorageSystemCollations.h diff --git a/dbms/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp similarity index 97% rename from dbms/src/Storages/System/StorageSystemColumns.cpp rename to src/Storages/System/StorageSystemColumns.cpp index 9af8904ab26..26e2376c3f7 100644 --- a/dbms/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -62,12 +62,12 @@ public: ColumnPtr databases_, ColumnPtr tables_, Storages storages_, - const std::shared_ptr<const ContextAccess> & access_, - String query_id_) + const Context & context) : SourceWithProgress(header_) , columns_mask(std::move(columns_mask_)), max_block_size(max_block_size_) , databases(std::move(databases_)), tables(std::move(tables_)), storages(std::move(storages_)) - , query_id(std::move(query_id_)), total_tables(tables->size()), access(access_) + , total_tables(tables->size()), access(context.getAccess()) + , query_id(context.getCurrentQueryId()), lock_acquire_timeout(context.getSettingsRef().lock_acquire_timeout) { } @@ -103,7 +103,7 @@ protected: try { - table_lock = storage->lockStructureForShare(query_id); + table_lock = storage->lockStructureForShare(false, query_id, lock_acquire_timeout); } catch (const Exception & e) { @@ -227,10 +227,11 @@ private: ColumnPtr databases; ColumnPtr tables; Storages storages; - String query_id; size_t db_table_num = 0; size_t total_tables; std::shared_ptr<const ContextAccess> access; + String query_id; + SettingSeconds lock_acquire_timeout; }; @@ -331,8 +332,8 @@ Pipes StorageSystemColumns::read( pipes.emplace_back(std::make_shared<ColumnsSource>( std::move(columns_mask), std::move(header), max_block_size, - std::move(filtered_database_column), std::move(filtered_table_column), std::move(storages), - context.getAccess(), context.getCurrentQueryId())); + std::move(filtered_database_column), std::move(filtered_table_column), + std::move(storages), context)); return pipes; } diff --git a/dbms/src/Storages/System/StorageSystemColumns.h b/src/Storages/System/StorageSystemColumns.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemColumns.h rename to src/Storages/System/StorageSystemColumns.h diff --git a/dbms/src/Storages/System/StorageSystemContributors.cpp b/src/Storages/System/StorageSystemContributors.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.cpp rename to src/Storages/System/StorageSystemContributors.cpp diff --git a/dbms/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp
similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.generated.cpp rename to src/Storages/System/StorageSystemContributors.generated.cpp diff --git a/dbms/src/Storages/System/StorageSystemContributors.h b/src/Storages/System/StorageSystemContributors.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.h rename to src/Storages/System/StorageSystemContributors.h diff --git a/dbms/src/Storages/System/StorageSystemContributors.sh b/src/Storages/System/StorageSystemContributors.sh similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.sh rename to src/Storages/System/StorageSystemContributors.sh diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp b/src/Storages/System/StorageSystemDataTypeFamilies.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp rename to src/Storages/System/StorageSystemDataTypeFamilies.cpp diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.h b/src/Storages/System/StorageSystemDataTypeFamilies.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDataTypeFamilies.h rename to src/Storages/System/StorageSystemDataTypeFamilies.h diff --git a/dbms/src/Storages/System/StorageSystemDatabases.cpp b/src/Storages/System/StorageSystemDatabases.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDatabases.cpp rename to src/Storages/System/StorageSystemDatabases.cpp diff --git a/dbms/src/Storages/System/StorageSystemDatabases.h b/src/Storages/System/StorageSystemDatabases.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDatabases.h rename to src/Storages/System/StorageSystemDatabases.h diff --git a/dbms/src/Storages/System/StorageSystemDetachedParts.cpp b/src/Storages/System/StorageSystemDetachedParts.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDetachedParts.cpp rename to src/Storages/System/StorageSystemDetachedParts.cpp diff --git a/dbms/src/Storages/System/StorageSystemDetachedParts.h b/src/Storages/System/StorageSystemDetachedParts.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDetachedParts.h rename to src/Storages/System/StorageSystemDetachedParts.h diff --git a/dbms/src/Storages/System/StorageSystemDictionaries.cpp b/src/Storages/System/StorageSystemDictionaries.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDictionaries.cpp rename to src/Storages/System/StorageSystemDictionaries.cpp diff --git a/dbms/src/Storages/System/StorageSystemDictionaries.h b/src/Storages/System/StorageSystemDictionaries.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDictionaries.h rename to src/Storages/System/StorageSystemDictionaries.h diff --git a/dbms/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDisks.cpp rename to src/Storages/System/StorageSystemDisks.cpp diff --git a/dbms/src/Storages/System/StorageSystemDisks.h b/src/Storages/System/StorageSystemDisks.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDisks.h rename to src/Storages/System/StorageSystemDisks.h diff --git a/dbms/src/Storages/System/StorageSystemEvents.cpp b/src/Storages/System/StorageSystemEvents.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemEvents.cpp rename to src/Storages/System/StorageSystemEvents.cpp diff 
--git a/dbms/src/Storages/System/StorageSystemEvents.h b/src/Storages/System/StorageSystemEvents.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemEvents.h rename to src/Storages/System/StorageSystemEvents.h diff --git a/dbms/src/Storages/System/StorageSystemFormats.cpp b/src/Storages/System/StorageSystemFormats.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemFormats.cpp rename to src/Storages/System/StorageSystemFormats.cpp diff --git a/dbms/src/Storages/System/StorageSystemFormats.h b/src/Storages/System/StorageSystemFormats.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemFormats.h rename to src/Storages/System/StorageSystemFormats.h diff --git a/dbms/src/Storages/System/StorageSystemFunctions.cpp b/src/Storages/System/StorageSystemFunctions.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemFunctions.cpp rename to src/Storages/System/StorageSystemFunctions.cpp diff --git a/dbms/src/Storages/System/StorageSystemFunctions.h b/src/Storages/System/StorageSystemFunctions.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemFunctions.h rename to src/Storages/System/StorageSystemFunctions.h diff --git a/dbms/src/Storages/System/StorageSystemGraphite.cpp b/src/Storages/System/StorageSystemGraphite.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemGraphite.cpp rename to src/Storages/System/StorageSystemGraphite.cpp diff --git a/dbms/src/Storages/System/StorageSystemGraphite.h b/src/Storages/System/StorageSystemGraphite.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemGraphite.h rename to src/Storages/System/StorageSystemGraphite.h diff --git a/dbms/src/Storages/System/StorageSystemMacros.cpp b/src/Storages/System/StorageSystemMacros.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMacros.cpp rename to src/Storages/System/StorageSystemMacros.cpp diff --git a/dbms/src/Storages/System/StorageSystemMacros.h b/src/Storages/System/StorageSystemMacros.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMacros.h rename to src/Storages/System/StorageSystemMacros.h diff --git a/dbms/src/Storages/System/StorageSystemMergeTreeSettings.cpp b/src/Storages/System/StorageSystemMergeTreeSettings.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMergeTreeSettings.cpp rename to src/Storages/System/StorageSystemMergeTreeSettings.cpp diff --git a/dbms/src/Storages/System/StorageSystemMergeTreeSettings.h b/src/Storages/System/StorageSystemMergeTreeSettings.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMergeTreeSettings.h rename to src/Storages/System/StorageSystemMergeTreeSettings.h diff --git a/dbms/src/Storages/System/StorageSystemMerges.cpp b/src/Storages/System/StorageSystemMerges.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMerges.cpp rename to src/Storages/System/StorageSystemMerges.cpp diff --git a/dbms/src/Storages/System/StorageSystemMerges.h b/src/Storages/System/StorageSystemMerges.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMerges.h rename to src/Storages/System/StorageSystemMerges.h diff --git a/dbms/src/Storages/System/StorageSystemMetrics.cpp b/src/Storages/System/StorageSystemMetrics.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMetrics.cpp rename to src/Storages/System/StorageSystemMetrics.cpp diff --git 
a/dbms/src/Storages/System/StorageSystemMetrics.h b/src/Storages/System/StorageSystemMetrics.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMetrics.h rename to src/Storages/System/StorageSystemMetrics.h diff --git a/dbms/src/Storages/System/StorageSystemModels.cpp b/src/Storages/System/StorageSystemModels.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemModels.cpp rename to src/Storages/System/StorageSystemModels.cpp diff --git a/dbms/src/Storages/System/StorageSystemModels.h b/src/Storages/System/StorageSystemModels.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemModels.h rename to src/Storages/System/StorageSystemModels.h diff --git a/dbms/src/Storages/System/StorageSystemMutations.cpp b/src/Storages/System/StorageSystemMutations.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMutations.cpp rename to src/Storages/System/StorageSystemMutations.cpp diff --git a/dbms/src/Storages/System/StorageSystemMutations.h b/src/Storages/System/StorageSystemMutations.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMutations.h rename to src/Storages/System/StorageSystemMutations.h diff --git a/dbms/src/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemNumbers.cpp rename to src/Storages/System/StorageSystemNumbers.cpp diff --git a/dbms/src/Storages/System/StorageSystemNumbers.h b/src/Storages/System/StorageSystemNumbers.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemNumbers.h rename to src/Storages/System/StorageSystemNumbers.h diff --git a/dbms/src/Storages/System/StorageSystemOne.cpp b/src/Storages/System/StorageSystemOne.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemOne.cpp rename to src/Storages/System/StorageSystemOne.cpp diff --git a/dbms/src/Storages/System/StorageSystemOne.h b/src/Storages/System/StorageSystemOne.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemOne.h rename to src/Storages/System/StorageSystemOne.h diff --git a/dbms/src/Storages/System/StorageSystemParts.cpp b/src/Storages/System/StorageSystemParts.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemParts.cpp rename to src/Storages/System/StorageSystemParts.cpp diff --git a/dbms/src/Storages/System/StorageSystemParts.h b/src/Storages/System/StorageSystemParts.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemParts.h rename to src/Storages/System/StorageSystemParts.h diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp similarity index 98% rename from dbms/src/Storages/System/StorageSystemPartsBase.cpp rename to src/Storages/System/StorageSystemPartsBase.cpp index c212b30d268..19c6f6b3d03 100644 --- a/dbms/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -62,7 +62,7 @@ StoragesInfo::getParts(MergeTreeData::DataPartStateVector & state, bool has_stat } StoragesInfoStream::StoragesInfoStream(const SelectQueryInfo & query_info, const Context & context) - : query_id(context.getCurrentQueryId()) + : query_id(context.getCurrentQueryId()), settings(context.getSettings()) { /// Will apply WHERE to subset of columns and then add more columns. /// This is kind of complicated, but we use WHERE to do less work. 
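The recurring change in the system-table sources in this stretch of the diff is that `IStorage::lockStructureForShare` gains two arguments: an initial boolean flag (passed as `false` at all of these call sites) and an explicit lock timeout taken from the per-query settings, so a scan of `system.columns`, `system.parts`, or `system.tables` can no longer block indefinitely on a busy table. Below is a minimal sketch of the call pattern these hunks converge on; the helper name `tryLockForScan` is illustrative and not part of the diff, while the types, the call, and the TABLE_IS_DROPPED handling are taken directly from the hunks themselves.

// Sketch of the locking pattern introduced in the hunks above and below.
// `tryLockForScan` is a hypothetical wrapper; StoragePtr, Context, Exception
// and ErrorCodes::TABLE_IS_DROPPED are the ClickHouse names used in the diff.
static TableStructureReadLockHolder tryLockForScan(const StoragePtr & storage, const Context & context)
{
    try
    {
        /// Bounded wait: the timeout comes from the query's settings
        /// (lock_acquire_timeout) instead of waiting forever.
        return storage->lockStructureForShare(
            false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout);
    }
    catch (const Exception & e)
    {
        /// A table dropped concurrently with the scan is expected;
        /// callers simply skip such tables, as the loops in the diff do.
        if (e.code() == ErrorCodes::TABLE_IS_DROPPED)
            return {};
        throw;
    }
}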
diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp
similarity index 98%
rename from dbms/src/Storages/System/StorageSystemPartsBase.cpp
rename to src/Storages/System/StorageSystemPartsBase.cpp
index c212b30d268..19c6f6b3d03 100644
--- a/dbms/src/Storages/System/StorageSystemPartsBase.cpp
+++ b/src/Storages/System/StorageSystemPartsBase.cpp
@@ -62,7 +62,7 @@ StoragesInfo::getParts(MergeTreeData::DataPartStateVector & state, bool has_stat
 }
 StoragesInfoStream::StoragesInfoStream(const SelectQueryInfo & query_info, const Context & context)
-    : query_id(context.getCurrentQueryId())
+    : query_id(context.getCurrentQueryId()), settings(context.getSettings())
 {
     /// Will apply WHERE to subset of columns and then add more columns.
     /// This is kind of complicated, but we use WHERE to do less work.
@@ -192,7 +192,7 @@ StoragesInfo StoragesInfoStream::next()
             try
             {
                 /// For table not to be dropped and set of columns to remain constant.
-                info.table_lock = info.storage->lockStructureForShare(query_id);
+                info.table_lock = info.storage->lockStructureForShare(false, query_id, settings.lock_acquire_timeout);
             }
             catch (const Exception & e)
             {
diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h
similarity index 98%
rename from dbms/src/Storages/System/StorageSystemPartsBase.h
rename to src/Storages/System/StorageSystemPartsBase.h
index b30f7c62914..be8e45146cb 100644
--- a/dbms/src/Storages/System/StorageSystemPartsBase.h
+++ b/src/Storages/System/StorageSystemPartsBase.h
@@ -36,6 +36,8 @@ public:
 private:
     String query_id;
+    Settings settings;
+
     ColumnPtr database_column;
     ColumnPtr table_column;
diff --git a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp b/src/Storages/System/StorageSystemPartsColumns.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemPartsColumns.cpp
rename to src/Storages/System/StorageSystemPartsColumns.cpp
diff --git a/dbms/src/Storages/System/StorageSystemPartsColumns.h b/src/Storages/System/StorageSystemPartsColumns.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemPartsColumns.h
rename to src/Storages/System/StorageSystemPartsColumns.h
diff --git a/dbms/src/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemProcesses.cpp
rename to src/Storages/System/StorageSystemProcesses.cpp
diff --git a/dbms/src/Storages/System/StorageSystemProcesses.h b/src/Storages/System/StorageSystemProcesses.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemProcesses.h
rename to src/Storages/System/StorageSystemProcesses.h
diff --git a/dbms/src/Storages/System/StorageSystemQuotaUsage.cpp b/src/Storages/System/StorageSystemQuotaUsage.cpp
similarity index 97%
rename from dbms/src/Storages/System/StorageSystemQuotaUsage.cpp
rename to src/Storages/System/StorageSystemQuotaUsage.cpp
index 53afb1d563a..1f943d02446 100644
--- a/dbms/src/Storages/System/StorageSystemQuotaUsage.cpp
+++ b/src/Storages/System/StorageSystemQuotaUsage.cpp
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -40,7 +41,9 @@ NamesAndTypesList StorageSystemQuotaUsage::getNamesAndTypes()
 void StorageSystemQuotaUsage::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const
 {
+    context.checkAccess(AccessType::SHOW_QUOTAS);
     const auto & access_control = context.getAccessControlManager();
+
     for (const auto & info : access_control.getQuotaUsageInfo())
     {
         for (const auto & interval : info.intervals)
diff --git a/dbms/src/Storages/System/StorageSystemQuotaUsage.h b/src/Storages/System/StorageSystemQuotaUsage.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemQuotaUsage.h
rename to src/Storages/System/StorageSystemQuotaUsage.h
diff --git a/dbms/src/Storages/System/StorageSystemQuotas.cpp b/src/Storages/System/StorageSystemQuotas.cpp
similarity index 98%
rename from dbms/src/Storages/System/StorageSystemQuotas.cpp
rename to src/Storages/System/StorageSystemQuotas.cpp
index 228339ea305..a22bb11bbc3 100644
--- a/dbms/src/Storages/System/StorageSystemQuotas.cpp
+++ b/src/Storages/System/StorageSystemQuotas.cpp
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -54,6 +55,8 @@ NamesAndTypesList StorageSystemQuotas::getNamesAndTypes()
 void StorageSystemQuotas::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const
 {
+    context.checkAccess(AccessType::SHOW_QUOTAS);
+
     size_t i = 0;
     auto & name_column = *res_columns[i++];
     auto & id_column = *res_columns[i++];
diff --git a/dbms/src/Storages/System/StorageSystemQuotas.h b/src/Storages/System/StorageSystemQuotas.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemQuotas.h
rename to src/Storages/System/StorageSystemQuotas.h
diff --git a/dbms/src/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemReplicas.cpp
rename to src/Storages/System/StorageSystemReplicas.cpp
diff --git a/dbms/src/Storages/System/StorageSystemReplicas.h b/src/Storages/System/StorageSystemReplicas.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemReplicas.h
rename to src/Storages/System/StorageSystemReplicas.h
diff --git a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp b/src/Storages/System/StorageSystemReplicationQueue.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemReplicationQueue.cpp
rename to src/Storages/System/StorageSystemReplicationQueue.cpp
diff --git a/dbms/src/Storages/System/StorageSystemReplicationQueue.h b/src/Storages/System/StorageSystemReplicationQueue.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemReplicationQueue.h
rename to src/Storages/System/StorageSystemReplicationQueue.h
diff --git a/dbms/src/Storages/System/StorageSystemRowPolicies.cpp b/src/Storages/System/StorageSystemRowPolicies.cpp
similarity index 96%
rename from dbms/src/Storages/System/StorageSystemRowPolicies.cpp
rename to src/Storages/System/StorageSystemRowPolicies.cpp
index bd302cba3cf..12221cc52de 100644
--- a/dbms/src/Storages/System/StorageSystemRowPolicies.cpp
+++ b/src/Storages/System/StorageSystemRowPolicies.cpp
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -33,6 +34,7 @@ NamesAndTypesList StorageSystemRowPolicies::getNamesAndTypes()
 void StorageSystemRowPolicies::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const
 {
+    context.checkAccess(AccessType::SHOW_ROW_POLICIES);
     const auto & access_control = context.getAccessControlManager();
     std::vector<UUID> ids = access_control.findAll<RowPolicy>();
diff --git a/dbms/src/Storages/System/StorageSystemRowPolicies.h b/src/Storages/System/StorageSystemRowPolicies.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemRowPolicies.h
rename to src/Storages/System/StorageSystemRowPolicies.h
diff --git a/dbms/src/Storages/System/StorageSystemSettings.cpp b/src/Storages/System/StorageSystemSettings.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemSettings.cpp
rename to src/Storages/System/StorageSystemSettings.cpp
diff --git a/dbms/src/Storages/System/StorageSystemSettings.h b/src/Storages/System/StorageSystemSettings.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemSettings.h
rename to src/Storages/System/StorageSystemSettings.h
diff --git a/dbms/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemStackTrace.cpp
rename to src/Storages/System/StorageSystemStackTrace.cpp
diff --git a/dbms/src/Storages/System/StorageSystemStackTrace.h b/src/Storages/System/StorageSystemStackTrace.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemStackTrace.h
rename to src/Storages/System/StorageSystemStackTrace.h
diff --git a/dbms/src/Storages/System/StorageSystemStoragePolicies.cpp b/src/Storages/System/StorageSystemStoragePolicies.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemStoragePolicies.cpp
rename to src/Storages/System/StorageSystemStoragePolicies.cpp
diff --git a/dbms/src/Storages/System/StorageSystemStoragePolicies.h b/src/Storages/System/StorageSystemStoragePolicies.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemStoragePolicies.h
rename to src/Storages/System/StorageSystemStoragePolicies.h
diff --git a/dbms/src/Storages/System/StorageSystemTableEngines.cpp b/src/Storages/System/StorageSystemTableEngines.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemTableEngines.cpp
rename to src/Storages/System/StorageSystemTableEngines.cpp
diff --git a/dbms/src/Storages/System/StorageSystemTableEngines.h b/src/Storages/System/StorageSystemTableEngines.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemTableEngines.h
rename to src/Storages/System/StorageSystemTableEngines.h
diff --git a/dbms/src/Storages/System/StorageSystemTableFunctions.cpp b/src/Storages/System/StorageSystemTableFunctions.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemTableFunctions.cpp
rename to src/Storages/System/StorageSystemTableFunctions.cpp
diff --git a/dbms/src/Storages/System/StorageSystemTableFunctions.h b/src/Storages/System/StorageSystemTableFunctions.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemTableFunctions.h
rename to src/Storages/System/StorageSystemTableFunctions.h
diff --git a/dbms/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp
similarity index 96%
rename from dbms/src/Storages/System/StorageSystemTables.cpp
rename to src/Storages/System/StorageSystemTables.cpp
index 5d0aec921de..81ff6a03e12 100644
--- a/dbms/src/Storages/System/StorageSystemTables.cpp
+++ b/src/Storages/System/StorageSystemTables.cpp
@@ -239,19 +239,25 @@ protected:
             StoragePtr table = nullptr;
             TableStructureReadLockHolder lock;
-            try
+            if (need_lock_structure)
             {
-                if (need_lock_structure)
+                table = tables_it->table();
+                if (table == nullptr)
                 {
-                    table = tables_it->table();
-                    lock = table->lockStructureForShare(context.getCurrentQueryId());
-                }
-            }
-            catch (const Exception & e)
-            {
-                if (e.code() == ErrorCodes::TABLE_IS_DROPPED)
+                    // Table might have just been removed or detached for Lazy engine (see DatabaseLazy::tryGetTable())
                     continue;
-                throw;
+                }
+                try
+                {
+                    lock = table->lockStructureForShare(
+                        false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout);
+                }
+                catch (const Exception & e)
+                {
+                    if (e.code() == ErrorCodes::TABLE_IS_DROPPED)
+                        continue;
+                    throw;
+                }
             }
             ++rows_count;
diff --git a/dbms/src/Storages/System/StorageSystemTables.h b/src/Storages/System/StorageSystemTables.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemTables.h
rename to src/Storages/System/StorageSystemTables.h
diff --git a/dbms/src/Storages/System/StorageSystemZeros.cpp b/src/Storages/System/StorageSystemZeros.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemZeros.cpp
rename to src/Storages/System/StorageSystemZeros.cpp
diff --git a/dbms/src/Storages/System/StorageSystemZeros.h b/src/Storages/System/StorageSystemZeros.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemZeros.h
rename to src/Storages/System/StorageSystemZeros.h
diff --git a/dbms/src/Storages/System/StorageSystemZooKeeper.cpp b/src/Storages/System/StorageSystemZooKeeper.cpp
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemZooKeeper.cpp
rename to src/Storages/System/StorageSystemZooKeeper.cpp
diff --git a/dbms/src/Storages/System/StorageSystemZooKeeper.h b/src/Storages/System/StorageSystemZooKeeper.h
similarity index 100%
rename from dbms/src/Storages/System/StorageSystemZooKeeper.h
rename to src/Storages/System/StorageSystemZooKeeper.h
diff --git a/dbms/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp
similarity index 100%
rename from dbms/src/Storages/System/attachSystemTables.cpp
rename to src/Storages/System/attachSystemTables.cpp
diff --git a/dbms/src/Storages/System/attachSystemTables.h b/src/Storages/System/attachSystemTables.h
similarity index 100%
rename from dbms/src/Storages/System/attachSystemTables.h
rename to src/Storages/System/attachSystemTables.h
diff --git a/src/Storages/TableStructureLockHolder.h b/src/Storages/TableStructureLockHolder.h
new file mode 100644
index 00000000000..b5fc0c620ad
--- /dev/null
+++ b/src/Storages/TableStructureLockHolder.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#include
+
+namespace DB
+{
+
+/// Structs that hold table structure (columns, their types, default values etc.) locks when executing queries.
+/// See IStorage::lock* methods for comments.
+
+struct TableStructureWriteLockHolder
+{
+    void release()
+    {
+        *this = TableStructureWriteLockHolder();
+    }
+
+    void releaseAllExceptAlterIntention()
+    {
+        new_data_structure_lock.reset();
+        structure_lock.reset();
+    }
+
+private:
+    friend class IStorage;
+
+    /// Order is important.
+    RWLockImpl::LockHolder alter_intention_lock;
+    RWLockImpl::LockHolder new_data_structure_lock;
+    RWLockImpl::LockHolder structure_lock;
+};
+
+struct TableStructureReadLockHolder
+{
+    void release()
+    {
+        *this = TableStructureReadLockHolder();
+    }
+
+private:
+    friend class IStorage;
+
+    /// Order is important.
+    RWLockImpl::LockHolder new_data_structure_lock;
+    RWLockImpl::LockHolder structure_lock;
+};
+
+}
diff --git a/dbms/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp
similarity index 100%
rename from dbms/src/Storages/VirtualColumnUtils.cpp
rename to src/Storages/VirtualColumnUtils.cpp
diff --git a/dbms/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h
similarity index 100%
rename from dbms/src/Storages/VirtualColumnUtils.h
rename to src/Storages/VirtualColumnUtils.h
diff --git a/dbms/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp
similarity index 98%
rename from dbms/src/Storages/getStructureOfRemoteTable.cpp
rename to src/Storages/getStructureOfRemoteTable.cpp
index eb386c66d60..3b4ee489b46 100644
--- a/dbms/src/Storages/getStructureOfRemoteTable.cpp
+++ b/src/Storages/getStructureOfRemoteTable.cpp
@@ -137,7 +137,7 @@ ColumnsDescription getStructureOfRemoteTableInShard(
             column.default_desc.kind = columnDefaultKindFromString(kind_name);
             String expr_str = (*default_expr)[i].get();
             column.default_desc.expression = parseQuery(
-                expr_parser, expr_str.data(), expr_str.data() + expr_str.size(), "default expression", 0);
+                expr_parser, expr_str.data(), expr_str.data() + expr_str.size(), "default expression", 0, context.getSettingsRef().max_parser_depth);
         }
         res.add(column);
diff --git a/dbms/src/Storages/getStructureOfRemoteTable.h b/src/Storages/getStructureOfRemoteTable.h
similarity index 100%
rename from dbms/src/Storages/getStructureOfRemoteTable.h
rename to src/Storages/getStructureOfRemoteTable.h
diff --git a/dbms/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp
similarity index 100%
rename from dbms/src/Storages/registerStorages.cpp
rename to src/Storages/registerStorages.cpp
diff --git a/dbms/src/Storages/registerStorages.h b/src/Storages/registerStorages.h
similarity index 100%
rename from dbms/src/Storages/registerStorages.h
rename to src/Storages/registerStorages.h
diff --git a/dbms/src/Storages/tests/CMakeLists.txt b/src/Storages/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Storages/tests/CMakeLists.txt
rename to src/Storages/tests/CMakeLists.txt
diff --git a/dbms/src/Storages/tests/active_parts.py b/src/Storages/tests/active_parts.py
similarity index 100%
rename from dbms/src/Storages/tests/active_parts.py
rename to src/Storages/tests/active_parts.py
diff --git a/dbms/src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp b/src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp
similarity index 100%
rename from dbms/src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp
rename to src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp
diff --git a/dbms/src/Storages/tests/get_current_inserts_in_replicated.cpp b/src/Storages/tests/get_current_inserts_in_replicated.cpp
similarity index 100%
rename from dbms/src/Storages/tests/get_current_inserts_in_replicated.cpp
rename to src/Storages/tests/get_current_inserts_in_replicated.cpp
diff --git a/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/src/Storages/tests/gtest_SplitTokenExtractor.cpp
new file mode 100644
index 00000000000..9255e5ca817
--- /dev/null
+++ b/src/Storages/tests/gtest_SplitTokenExtractor.cpp
@@ -0,0 +1,196 @@
+#include
+
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+namespace
+{
+using namespace DB;
+using namespace std::literals::string_literals;
+}
+
+struct SplitTokenExtractorTestCase
+{
+    const std::string_view description;
+    const std::string source;
+    const std::vector<std::string> tokens;
+};
+
+std::ostream & operator<<(std::ostream & ostr, const SplitTokenExtractorTestCase & test_case)
+{
+    return ostr << test_case.description;
+}
+
+class SplitTokenExtractorTest : public ::testing::TestWithParam<SplitTokenExtractorTestCase>
+{
+public:
+    void SetUp() override
+    {
+        const auto & param = GetParam();
+        const auto & source = param.source;
+        data = std::make_unique<std::vector<char>>(source.data(), source.data() + source.size());
+
+        // add predefined padding that forms tokens to ensure no reads past end of buffer.
+        const char extra_padding[] = "this is the end \xd1\x8d\xd1\x82\xd0\xbe\xd0\xba\xd0\xbe \xd0\xbd\xd0\xb5\xd1\x86";
+        data->insert(data->end(), std::begin(extra_padding), std::end(extra_padding));
+
+        data->resize(data->size() - sizeof(extra_padding));
+    }
+
+    std::unique_ptr<std::vector<char>> data;
+};
+
+TEST_P(SplitTokenExtractorTest, next)
+{
+    const auto & param = GetParam();
+
+    SplitTokenExtractor token_extractor;
+
+    size_t i = 0;
+
+    size_t pos = 0;
+    size_t token_start = 0;
+    size_t token_len = 0;
+
+    for (const auto & expected_token : param.tokens)
+    {
+        SCOPED_TRACE(++i);
+        ASSERT_TRUE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len));
+
+        EXPECT_EQ(expected_token, std::string_view(data->data() + token_start, token_len))
+            << " token_start:" << token_start << " token_len: " << token_len;
+    }
+    ASSERT_FALSE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len))
+        << "\n\t=> \"" << param.source.substr(token_start, token_len) << "\""
+        << "\n\t" << token_start << ", " << token_len << ", " << pos << ", " << data->size();
+}
+
+INSTANTIATE_TEST_SUITE_P(NoTokens,
+    SplitTokenExtractorTest,
+    ::testing::ValuesIn(std::initializer_list<SplitTokenExtractorTestCase>{
+        {
+            "Empty input sequence produces no tokens.",
+            "",
+            {}
+        },
+        {
+            "Whitespace only",
+            " ",
+            {}
+        },
+        {
+            "Whitespace only large string",
+            " \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r",
+            {}
+        }
+    })
+);
+
+INSTANTIATE_TEST_SUITE_P(ShortSingleToken,
+    SplitTokenExtractorTest,
+    ::testing::ValuesIn(std::initializer_list<SplitTokenExtractorTestCase>{
+        {
+            "Short single token",
+            "foo",
+            {"foo"}
+        },
+        {
+            "Short single token surrounded by whitespace",
+            "\t\vfoo\n\r",
+            {"foo"}
+        }
+    })
+);
+
+INSTANTIATE_TEST_SUITE_P(UTF8,
+    SplitTokenExtractorTest,
+    ::testing::ValuesIn(std::initializer_list<SplitTokenExtractorTestCase>{
+        {
+            "Single token with mixed ASCII and UTF-8 chars",
+            "abc\u0442" "123\u0447XYZ\u043A",
+            {"abc\u0442" "123\u0447XYZ\u043A"}
+        },
+        {
+            "Multiple UTF-8 tokens",
+            "\u043F\u0440\u0438\u0432\u0435\u0442, \u043C\u0438\u0440!",
+            {"\u043F\u0440\u0438\u0432\u0435\u0442", "\u043C\u0438\u0440"}
+        },
+    })
+);
+
+INSTANTIATE_TEST_SUITE_P(MultipleTokens,
+    SplitTokenExtractorTest,
+    ::testing::ValuesIn(std::initializer_list<SplitTokenExtractorTestCase>{
+        {
+            "Multiple tokens separated by whitespace",
+            "\nabc 123\tXYZ\r",
+            {
+                "abc", "123", "XYZ"
+            }
+        },
+        {
+            "Multiple tokens separated by non-printable chars",
+            "\0abc\1" "123\2XYZ\4"s,
+            {
+                "abc", "123", "XYZ"
+            }
+        },
+        {
+            "ASCII table is split into numeric, upper case and lower case letters",
+
+            "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16"
+            "\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNO"
+            "PQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87"
+            "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c"
+            "\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1"
+            "\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6"
+            "\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb"
+            "\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0"
+            "\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"s,
+            {
+                "0123456789", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz",
+                "\x80\x81\x82\x83\x84\x85\x86\x87"
+                "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c"
+                "\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1"
+                "\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6"
+                "\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb"
+                "\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0"
+                "\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+            }
+        }
+    })
+);
+
+
+INSTANTIATE_TEST_SUITE_P(SIMD_Cases,
+    SplitTokenExtractorTest,
+    ::testing::ValuesIn(std::initializer_list<SplitTokenExtractorTestCase>{
+        {
+            "First 16 bytes are empty, then a short token",
+            "                " "abcdef",
+            {"abcdef"}
+        },
+        {
+            "Token crosses boundary of 16-byte chunk",
+            "            " "abcdef",
+            {"abcdef"}
+        },
+        {
+            "Token ends at the end of 16-byte chunk",
+            "          " "abcdef",
+            {"abcdef"}
+        },
+        {
+            "Token crosses boundaries of multiple 16-byte chunks",
+            "abcdefghijklmnopqrstuvwxyz",
+            {"abcdefghijklmnopqrstuvwxyz"}
+        },
+    })
+);
diff --git a/dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp b/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp
similarity index 100%
rename from dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp
rename to src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp
diff --git a/dbms/src/Storages/tests/gtest_row_source_bits_test.cpp b/src/Storages/tests/gtest_row_source_bits_test.cpp
similarity index 100%
rename from dbms/src/Storages/tests/gtest_row_source_bits_test.cpp
rename to src/Storages/tests/gtest_row_source_bits_test.cpp
diff --git a/dbms/src/Storages/tests/gtest_storage_log.cpp b/src/Storages/tests/gtest_storage_log.cpp
similarity index 100%
rename from dbms/src/Storages/tests/gtest_storage_log.cpp
rename to src/Storages/tests/gtest_storage_log.cpp
diff --git a/dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp b/src/Storages/tests/gtest_transform_query_for_external_database.cpp
similarity index 98%
rename from dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp
rename to src/Storages/tests/gtest_transform_query_for_external_database.cpp
index 385e47201a5..22407ca1bd9 100644
--- a/dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp
+++ b/src/Storages/tests/gtest_transform_query_for_external_database.cpp
@@ -48,7 +48,7 @@ static State & state()
 static void check(const std::string & query, const std::string & expected, const Context & context, const NamesAndTypesList & columns)
 {
     ParserSelectQuery parser;
-    ASTPtr ast = parseQuery(parser, query, 1000);
+    ASTPtr ast = parseQuery(parser, query, 1000, 1000);
     SelectQueryInfo query_info;
     query_info.syntax_analyzer_result = SyntaxAnalyzer(context).analyzeSelect(ast, columns);
     query_info.query = ast;
diff --git a/dbms/src/Storages/tests/merge_selector.cpp b/src/Storages/tests/merge_selector.cpp
similarity index 100%
rename from dbms/src/Storages/tests/merge_selector.cpp
rename to src/Storages/tests/merge_selector.cpp
diff --git a/dbms/src/Storages/tests/merge_selector2.cpp b/src/Storages/tests/merge_selector2.cpp
similarity index 100%
rename from dbms/src/Storages/tests/merge_selector2.cpp
rename to src/Storages/tests/merge_selector2.cpp
diff --git a/dbms/src/Storages/tests/part_name.cpp b/src/Storages/tests/part_name.cpp
similarity index 100%
rename from dbms/src/Storages/tests/part_name.cpp
rename to src/Storages/tests/part_name.cpp
diff --git a/dbms/src/Storages/tests/remove_symlink_directory.cpp b/src/Storages/tests/remove_symlink_directory.cpp
similarity index 100%
rename from dbms/src/Storages/tests/remove_symlink_directory.cpp
rename to src/Storages/tests/remove_symlink_directory.cpp
diff --git a/dbms/src/Storages/tests/storage_log.cpp b/src/Storages/tests/storage_log.cpp
similarity index 100%
rename from dbms/src/Storages/tests/storage_log.cpp
rename to src/Storages/tests/storage_log.cpp
diff --git a/dbms/src/Storages/tests/system_numbers.cpp b/src/Storages/tests/system_numbers.cpp
similarity index 100%
rename from dbms/src/Storages/tests/system_numbers.cpp
rename to src/Storages/tests/system_numbers.cpp
diff --git a/dbms/src/Storages/tests/test_alter_distributed.sql b/src/Storages/tests/test_alter_distributed.sql
similarity index 100%
rename from dbms/src/Storages/tests/test_alter_distributed.sql
rename to src/Storages/tests/test_alter_distributed.sql
diff --git a/dbms/src/Storages/tests/test_alter_merge.sql b/src/Storages/tests/test_alter_merge.sql
similarity index 100%
rename from dbms/src/Storages/tests/test_alter_merge.sql
rename to src/Storages/tests/test_alter_merge.sql
diff --git a/dbms/src/Storages/tests/test_alter_merge_tree.sql b/src/Storages/tests/test_alter_merge_tree.sql
similarity index 100%
rename from dbms/src/Storages/tests/test_alter_merge_tree.sql
rename to src/Storages/tests/test_alter_merge_tree.sql
diff --git a/dbms/src/Storages/tests/transform_part_zk_nodes.cpp b/src/Storages/tests/transform_part_zk_nodes.cpp
similarity index 100%
rename from dbms/src/Storages/tests/transform_part_zk_nodes.cpp
rename to src/Storages/tests/transform_part_zk_nodes.cpp
diff --git a/dbms/src/Storages/transformQueryForExternalDatabase.cpp b/src/Storages/transformQueryForExternalDatabase.cpp
similarity index 100%
rename from dbms/src/Storages/transformQueryForExternalDatabase.cpp
rename to src/Storages/transformQueryForExternalDatabase.cpp
diff --git a/dbms/src/Storages/transformQueryForExternalDatabase.h b/src/Storages/transformQueryForExternalDatabase.h
similarity index 100%
rename from dbms/src/Storages/transformQueryForExternalDatabase.h
rename to src/Storages/transformQueryForExternalDatabase.h
diff --git a/dbms/src/TableFunctions/CMakeLists.txt b/src/TableFunctions/CMakeLists.txt
similarity index 100%
rename from dbms/src/TableFunctions/CMakeLists.txt
rename to src/TableFunctions/CMakeLists.txt
diff --git a/src/TableFunctions/ITableFunction.cpp b/src/TableFunctions/ITableFunction.cpp
new file mode 100644
index 00000000000..6a784c062da
--- /dev/null
+++ b/src/TableFunctions/ITableFunction.cpp
@@ -0,0 +1,23 @@
+#include
+#include
+#include
+#include
+#include
+
+
+namespace ProfileEvents
+{
+    extern const Event TableFunctionExecute;
+}
+
+namespace DB
+{
+
+StoragePtr ITableFunction::execute(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const
+{
+    ProfileEvents::increment(ProfileEvents::TableFunctionExecute);
+    context.checkAccess(AccessType::CREATE_TEMPORARY_TABLE | StorageFactory::instance().getSourceAccessType(getStorageTypeName()));
+    return executeImpl(ast_function, context, table_name);
+}
+
+}
diff --git a/dbms/src/TableFunctions/ITableFunction.h b/src/TableFunctions/ITableFunction.h
similarity index 95%
rename from dbms/src/TableFunctions/ITableFunction.h
rename to src/TableFunctions/ITableFunction.h
index 9a9525d5887..0bbd7e787a5 100644
--- a/dbms/src/TableFunctions/ITableFunction.h
+++ b/src/TableFunctions/ITableFunction.h
@@ -38,6 +38,7 @@ public:
 private:
     virtual StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const = 0;
+    virtual const char * getStorageTypeName() const = 0;
 };
 using TableFunctionPtr = std::shared_ptr<ITableFunction>;
diff --git a/dbms/src/TableFunctions/ITableFunctionFileLike.cpp b/src/TableFunctions/ITableFunctionFileLike.cpp
similarity index 96%
rename from dbms/src/TableFunctions/ITableFunctionFileLike.cpp
rename to src/TableFunctions/ITableFunctionFileLike.cpp
index eca507a4003..46a64cef785 100644
--- a/dbms/src/TableFunctions/ITableFunctionFileLike.cpp
+++ b/src/TableFunctions/ITableFunctionFileLike.cpp
@@ -10,8 +10,6 @@
 #include
-#include
-
 #include
 #include
@@ -65,8 +63,6 @@ StoragePtr ITableFunctionFileLike::executeImpl(const ASTPtr & ast_function, cons
     if (args.size() == 4)
         compression_method = args[3]->as().value.safeGet();
-    context.checkAccess(getRequiredAccessType());
-
     /// Create table
     StoragePtr storage = getStorage(filename, format, columns, const_cast<Context &>(context), table_name, compression_method);
diff --git a/dbms/src/TableFunctions/ITableFunctionFileLike.h b/src/TableFunctions/ITableFunctionFileLike.h
similarity index 89%
rename from dbms/src/TableFunctions/ITableFunctionFileLike.h
rename to src/TableFunctions/ITableFunctionFileLike.h
index e80bf158f8e..a18ca8ea4c8 100644
--- a/dbms/src/TableFunctions/ITableFunctionFileLike.h
+++ b/src/TableFunctions/ITableFunctionFileLike.h
@@ -5,7 +5,6 @@
 namespace DB
 {
-enum class AccessType;
 class ColumnsDescription;
 /*
@@ -17,6 +16,5 @@ private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
     virtual StoragePtr getStorage(
         const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const = 0;
-    virtual AccessType getRequiredAccessType() const = 0;
 };
 }
diff --git a/dbms/src/TableFunctions/ITableFunctionXDBC.cpp b/src/TableFunctions/ITableFunctionXDBC.cpp
similarity index 98%
rename from dbms/src/TableFunctions/ITableFunctionXDBC.cpp
rename to src/TableFunctions/ITableFunctionXDBC.cpp
index 50236b65445..adf0c9240bc 100644
--- a/dbms/src/TableFunctions/ITableFunctionXDBC.cpp
+++ b/src/TableFunctions/ITableFunctionXDBC.cpp
@@ -60,8 +60,6 @@ StoragePtr ITableFunctionXDBC::executeImpl(const ASTPtr & ast_function, const Co
         remote_table_name = args[1]->as().value.safeGet();
     }
-    context.checkAccess(getRequiredAccessType());
-
     /* Infer external table structure */
     /// Have to const_cast, because bridges store their commands inside context
     BridgeHelperPtr helper = createBridgeHelper(const_cast<Context &>(context), context.getSettingsRef().http_receive_timeout.value, connection_string);
diff --git a/dbms/src/TableFunctions/ITableFunctionXDBC.h b/src/TableFunctions/ITableFunctionXDBC.h
similarity index 87%
rename from dbms/src/TableFunctions/ITableFunctionXDBC.h
rename to src/TableFunctions/ITableFunctionXDBC.h
index 211bac281c8..262c237bac2 100644
--- a/dbms/src/TableFunctions/ITableFunctionXDBC.h
+++ b/src/TableFunctions/ITableFunctionXDBC.h
@@ -2,7 +2,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -22,8 +21,6 @@ private:
     virtual BridgeHelperPtr createBridgeHelper(Context & context,
         const Poco::Timespan & http_timeout_,
         const std::string & connection_string_) const = 0;
-
-    virtual AccessType getRequiredAccessType() const = 0;
 };
 class TableFunctionJDBC : public ITableFunctionXDBC
@@ -43,7 +40,7 @@ private:
         return std::make_shared<XDBCBridgeHelper<JDBCBridgeMixin>>(context, http_timeout_, connection_string_);
     }
-    AccessType getRequiredAccessType() const override { return AccessType::jdbc; }
+    const char * getStorageTypeName() const override { return "JDBC"; }
 };
 class TableFunctionODBC : public ITableFunctionXDBC
@@ -63,6 +60,6 @@ private:
         return std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(context, http_timeout_, connection_string_);
     }
-    AccessType getRequiredAccessType() const override { return AccessType::odbc; }
+    const char * getStorageTypeName() const override { return "ODBC"; }
 };
 }
diff --git a/dbms/src/TableFunctions/TableFunctionFactory.cpp b/src/TableFunctions/TableFunctionFactory.cpp
similarity index 100%
rename from dbms/src/TableFunctions/TableFunctionFactory.cpp
rename to src/TableFunctions/TableFunctionFactory.cpp
diff --git a/dbms/src/TableFunctions/TableFunctionFactory.h b/src/TableFunctions/TableFunctionFactory.h
similarity index 100%
rename from dbms/src/TableFunctions/TableFunctionFactory.h
rename to src/TableFunctions/TableFunctionFactory.h
diff --git a/dbms/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp
similarity index 90%
rename from dbms/src/TableFunctions/TableFunctionFile.cpp
rename to src/TableFunctions/TableFunctionFile.cpp
index c27999e6199..0a68ed59aa2 100644
--- a/dbms/src/TableFunctions/TableFunctionFile.cpp
+++ b/src/TableFunctions/TableFunctionFile.cpp
@@ -15,11 +15,6 @@
     return StorageFile::create(source, global_context.getUserFilesPath(), args);
 }
-AccessType TableFunctionFile::getRequiredAccessType() const
-{
-    return AccessType::file;
-}
-
 void registerTableFunctionFile(TableFunctionFactory & factory)
 {
     factory.registerFunction<TableFunctionFile>();
diff --git a/dbms/src/TableFunctions/TableFunctionFile.h b/src/TableFunctions/TableFunctionFile.h
similarity index 90%
rename from dbms/src/TableFunctions/TableFunctionFile.h
rename to src/TableFunctions/TableFunctionFile.h
index 558d5305674..ead924f6828 100644
--- a/dbms/src/TableFunctions/TableFunctionFile.h
+++ b/src/TableFunctions/TableFunctionFile.h
@@ -24,6 +24,5 @@ public:
 private:
     StoragePtr getStorage(
         const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const std::string & compression_method) const override;
-    AccessType getRequiredAccessType() const override;
-};
-}
+    const char * getStorageTypeName() const override { return "File"; }
+};}
diff --git a/dbms/src/TableFunctions/TableFunctionGenerateRandom.cpp b/src/TableFunctions/TableFunctionGenerateRandom.cpp
similarity index 100%
rename from dbms/src/TableFunctions/TableFunctionGenerateRandom.cpp
rename to src/TableFunctions/TableFunctionGenerateRandom.cpp
diff --git a/dbms/src/TableFunctions/TableFunctionGenerateRandom.h b/src/TableFunctions/TableFunctionGenerateRandom.h
similarity index 87%
rename from dbms/src/TableFunctions/TableFunctionGenerateRandom.h
rename to src/TableFunctions/TableFunctionGenerateRandom.h
index 042a5c59dbe..b0919608737 100644
--- a/dbms/src/TableFunctions/TableFunctionGenerateRandom.h
+++ b/src/TableFunctions/TableFunctionGenerateRandom.h
@@ -15,6 +15,7 @@ public:
     std::string getName() const override { return name; }
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "GenerateRandom"; }
 };
diff --git a/dbms/src/TableFunctions/TableFunctionHDFS.cpp b/src/TableFunctions/TableFunctionHDFS.cpp
similarity index 87%
rename from dbms/src/TableFunctions/TableFunctionHDFS.cpp
rename to src/TableFunctions/TableFunctionHDFS.cpp
index 8af41b2e772..3bd6cd3ed76 100644
--- a/dbms/src/TableFunctions/TableFunctionHDFS.cpp
+++ b/src/TableFunctions/TableFunctionHDFS.cpp
@@ -4,7 +4,6 @@
 #if USE_HDFS
 #include
 #include
-#include
 #include
 #include
@@ -22,10 +21,6 @@
         compression_method);
 }
-AccessType TableFunctionHDFS::getRequiredAccessType() const
-{
-    return AccessType::hdfs;
-}
 #if USE_HDFS
 void registerTableFunctionHDFS(TableFunctionFactory & factory)
diff --git a/dbms/src/TableFunctions/TableFunctionHDFS.h b/src/TableFunctions/TableFunctionHDFS.h
similarity index 90%
rename from dbms/src/TableFunctions/TableFunctionHDFS.h
rename to src/TableFunctions/TableFunctionHDFS.h
index 4bdb6703d31..443ce0aa93b 100644
--- a/dbms/src/TableFunctions/TableFunctionHDFS.h
+++ b/src/TableFunctions/TableFunctionHDFS.h
@@ -25,7 +25,7 @@ public:
 private:
     StoragePtr getStorage(
         const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const override;
-    AccessType getRequiredAccessType() const override;
+    const char * getStorageTypeName() const override { return "HDFS"; }
 };
 }
diff --git a/dbms/src/TableFunctions/TableFunctionInput.cpp b/src/TableFunctions/TableFunctionInput.cpp
similarity index 95%
rename from dbms/src/TableFunctions/TableFunctionInput.cpp
rename to src/TableFunctions/TableFunctionInput.cpp
index 41bb292c2b2..e8f3453da06 100644
--- a/dbms/src/TableFunctions/TableFunctionInput.cpp
+++ b/src/TableFunctions/TableFunctionInput.cpp
@@ -10,7 +10,6 @@
 #include
 #include
 #include
-#include
 #include
 #include "registerTableFunctions.h"
@@ -37,8 +36,6 @@ StoragePtr TableFunctionInput::executeImpl(const ASTPtr & ast_function, const Co
         throw Exception("Table function '" + getName() + "' requires exactly 1 argument: structure",
             ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-    context.checkAccess(AccessType::input);
-
     String structure = evaluateConstantExpressionOrIdentifierAsLiteral(args[0], context)->as().value.safeGet();
     auto columns = parseColumnsListFromString(structure, context);
     StoragePtr storage = StorageInput::create(StorageID(getDatabaseName(), table_name), columns);
diff --git a/dbms/src/TableFunctions/TableFunctionInput.h b/src/TableFunctions/TableFunctionInput.h
similarity index 87%
rename from dbms/src/TableFunctions/TableFunctionInput.h
rename to src/TableFunctions/TableFunctionInput.h
index 24e5c5b2118..92c2e3a6e54 100644
--- a/dbms/src/TableFunctions/TableFunctionInput.h
+++ b/src/TableFunctions/TableFunctionInput.h
@@ -16,5 +16,6 @@ public:
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "Input"; }
 };
 }
diff --git a/dbms/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp
similarity index 96%
rename from dbms/src/TableFunctions/TableFunctionMerge.cpp
rename to src/TableFunctions/TableFunctionMerge.cpp
index 1ced074761b..cd924270f7c 100644
--- a/dbms/src/TableFunctions/TableFunctionMerge.cpp
+++ b/src/TableFunctions/TableFunctionMerge.cpp
@@ -6,7 +6,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -68,8 +67,6 @@ StoragePtr TableFunctionMerge::executeImpl(const ASTPtr & ast_function, const Co
     String source_database = args[0]->as().value.safeGet();
     String table_name_regexp = args[1]->as().value.safeGet();
-    context.checkAccess(AccessType::merge, source_database);
-
     auto res = StorageMerge::create(
         StorageID(getDatabaseName(), table_name),
         ColumnsDescription{chooseColumns(source_database, table_name_regexp, context)},
diff --git a/dbms/src/TableFunctions/TableFunctionMerge.h b/src/TableFunctions/TableFunctionMerge.h
similarity index 89%
rename from dbms/src/TableFunctions/TableFunctionMerge.h
rename to src/TableFunctions/TableFunctionMerge.h
index 43d4b692bc8..b11a9551d34 100644
--- a/dbms/src/TableFunctions/TableFunctionMerge.h
+++ b/src/TableFunctions/TableFunctionMerge.h
@@ -17,6 +17,7 @@ public:
     std::string getName() const override { return name; }
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "Merge"; }
 };
diff --git a/dbms/src/TableFunctions/TableFunctionMySQL.cpp b/src/TableFunctions/TableFunctionMySQL.cpp
similarity index 98%
rename from dbms/src/TableFunctions/TableFunctionMySQL.cpp
rename to src/TableFunctions/TableFunctionMySQL.cpp
index 11f797d4ecf..be707c3520d 100644
--- a/dbms/src/TableFunctions/TableFunctionMySQL.cpp
+++ b/src/TableFunctions/TableFunctionMySQL.cpp
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -57,8 +56,6 @@ StoragePtr TableFunctionMySQL::executeImpl(const ASTPtr & ast_function, const Co
     std::string user_name = args[3]->as().value.safeGet();
     std::string password = args[4]->as().value.safeGet();
-    context.checkAccess(AccessType::mysql);
-
     bool replace_query = false;
     std::string on_duplicate_clause;
     if (args.size() >= 6)
diff --git a/dbms/src/TableFunctions/TableFunctionMySQL.h b/src/TableFunctions/TableFunctionMySQL.h
similarity index 89%
rename from dbms/src/TableFunctions/TableFunctionMySQL.h
rename to src/TableFunctions/TableFunctionMySQL.h
index fd5b0219df6..850affc5887 100644
--- a/dbms/src/TableFunctions/TableFunctionMySQL.h
+++ b/src/TableFunctions/TableFunctionMySQL.h
@@ -20,6 +20,7 @@ public:
     }
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "MySQL"; }
 };
 }
diff --git a/dbms/src/TableFunctions/TableFunctionNumbers.cpp b/src/TableFunctions/TableFunctionNumbers.cpp
similarity index 96%
rename from dbms/src/TableFunctions/TableFunctionNumbers.cpp
rename to src/TableFunctions/TableFunctionNumbers.cpp
index bb414f4783f..c8c0fe96092 100644
--- a/dbms/src/TableFunctions/TableFunctionNumbers.cpp
+++ b/src/TableFunctions/TableFunctionNumbers.cpp
@@ -5,7 +5,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include "registerTableFunctions.h"
@@ -33,8 +32,6 @@
     UInt64 offset = arguments.size() == 2 ? evaluateArgument(context, arguments[0]) : 0;
     UInt64 length = arguments.size() == 2 ? evaluateArgument(context, arguments[1]) : evaluateArgument(context, arguments[0]);
-    context.checkAccess(AccessType::numbers);
-
     auto res = StorageSystemNumbers::create(StorageID(getDatabaseName(), table_name), multithreaded, length, offset, false);
     res->startup();
     return res;
diff --git a/dbms/src/TableFunctions/TableFunctionNumbers.h b/src/TableFunctions/TableFunctionNumbers.h
similarity index 89%
rename from dbms/src/TableFunctions/TableFunctionNumbers.h
rename to src/TableFunctions/TableFunctionNumbers.h
index e5ab38ccad8..c3efbc426ef 100644
--- a/dbms/src/TableFunctions/TableFunctionNumbers.h
+++ b/src/TableFunctions/TableFunctionNumbers.h
@@ -19,6 +19,7 @@ public:
     std::string getName() const override { return name; }
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "SystemNumbers"; }
     UInt64 evaluateArgument(const Context & context, ASTPtr & argument) const;
 };
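Every TableFunction* hunk in this run deletes a per-function `context.checkAccess(AccessType::...)` call (and, where present, a `getRequiredAccessType()` override) and replaces it with a `getStorageTypeName()` override; the access check itself now happens exactly once, in the new ITableFunction.cpp earlier in this diff, as `AccessType::CREATE_TEMPORARY_TABLE` combined with the source access type that StorageFactory has registered for that storage. The sketch below shows what a table function is left having to provide after this change; the class and strings are illustrative placeholders, not code from the diff.

// Illustrative subclass showing the post-diff contract: no access check of its
// own, only the storage type name that ITableFunction::execute resolves via
// StorageFactory::instance().getSourceAccessType(getStorageTypeName()).
class TableFunctionExample : public ITableFunction  // hypothetical name
{
public:
    std::string getName() const override { return "example"; }

private:
    StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context,
                           const std::string & table_name) const override;

    /// Must match the source access type registered for the storage in StorageFactory,
    /// just like "File", "MySQL", or "Distributed" in the hunks above.
    const char * getStorageTypeName() const override { return "Example"; }
};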
diff --git a/dbms/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp
similarity index 99%
rename from dbms/src/TableFunctions/TableFunctionRemote.cpp
rename to src/TableFunctions/TableFunctionRemote.cpp
index 202f8be4703..cfeb3907136 100644
--- a/dbms/src/TableFunctions/TableFunctionRemote.cpp
+++ b/src/TableFunctions/TableFunctionRemote.cpp
@@ -6,7 +6,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -132,8 +131,6 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C
     if (arg_num < args.size())
         throw Exception(help_message, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-    context.checkAccess(AccessType::remote);
-
     /// ExpressionAnalyzer will be created in InterpreterSelectQuery that will meet these `Identifier` when processing the request.
     /// We need to mark them as the name of the database or table, because the default value is column.
     for (auto ast : args)
diff --git a/dbms/src/TableFunctions/TableFunctionRemote.h b/src/TableFunctions/TableFunctionRemote.h
similarity index 92%
rename from dbms/src/TableFunctions/TableFunctionRemote.h
rename to src/TableFunctions/TableFunctionRemote.h
index ef2e5cf190c..2dd58a8a6a7 100644
--- a/dbms/src/TableFunctions/TableFunctionRemote.h
+++ b/src/TableFunctions/TableFunctionRemote.h
@@ -22,6 +22,7 @@ public:
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "Distributed"; }
     std::string name;
     bool is_cluster_function;
diff --git a/dbms/src/TableFunctions/TableFunctionS3.cpp b/src/TableFunctions/TableFunctionS3.cpp
similarity index 97%
rename from dbms/src/TableFunctions/TableFunctionS3.cpp
rename to src/TableFunctions/TableFunctionS3.cpp
index 73121d342e2..0cf9914ed2b 100644
--- a/dbms/src/TableFunctions/TableFunctionS3.cpp
+++ b/src/TableFunctions/TableFunctionS3.cpp
@@ -4,7 +4,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -64,8 +63,6 @@ StoragePtr TableFunctionS3::executeImpl(const ASTPtr & ast_function, const Conte
     else
         compression_method = "auto";
-    context.checkAccess(AccessType::s3);
-
     ColumnsDescription columns = parseColumnsListFromString(structure, context);
     /// Create table
diff --git a/dbms/src/TableFunctions/TableFunctionS3.h b/src/TableFunctions/TableFunctionS3.h
similarity index 92%
rename from dbms/src/TableFunctions/TableFunctionS3.h
rename to src/TableFunctions/TableFunctionS3.h
index a49033da1b4..aef4e28ca76 100644
--- a/dbms/src/TableFunctions/TableFunctionS3.h
+++ b/src/TableFunctions/TableFunctionS3.h
@@ -38,6 +38,8 @@ private:
         Context & global_context,
         const std::string & table_name,
         const String & compression_method);
+
+    const char * getStorageTypeName() const override { return "S3"; }
 };
 }
diff --git a/dbms/src/TableFunctions/TableFunctionURL.cpp b/src/TableFunctions/TableFunctionURL.cpp
similarity index 89%
rename from dbms/src/TableFunctions/TableFunctionURL.cpp
rename to src/TableFunctions/TableFunctionURL.cpp
index a78ac2f2114..59978ae08b1 100644
--- a/dbms/src/TableFunctions/TableFunctionURL.cpp
+++ b/src/TableFunctions/TableFunctionURL.cpp
@@ -16,11 +16,6 @@
     return StorageURL::create(uri, StorageID(getDatabaseName(), table_name), format, columns, ConstraintsDescription{}, global_context, compression_method);
 }
-AccessType TableFunctionURL::getRequiredAccessType() const
-{
-    return AccessType::url;
-}
-
 void registerTableFunctionURL(TableFunctionFactory & factory)
 {
     factory.registerFunction<TableFunctionURL>();
diff --git a/dbms/src/TableFunctions/TableFunctionURL.h b/src/TableFunctions/TableFunctionURL.h
similarity index 89%
rename from dbms/src/TableFunctions/TableFunctionURL.h
rename to src/TableFunctions/TableFunctionURL.h
index ea0ca842b48..61dca561f0c 100644
--- a/dbms/src/TableFunctions/TableFunctionURL.h
+++ b/src/TableFunctions/TableFunctionURL.h
@@ -20,6 +20,6 @@ public:
 private:
     StoragePtr getStorage(
         const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const override;
-    AccessType getRequiredAccessType() const override;
+    const char * getStorageTypeName() const override { return "URL"; }
 };
 }
diff --git a/dbms/src/TableFunctions/TableFunctionValues.cpp b/src/TableFunctions/TableFunctionValues.cpp
similarity index 97%
rename from dbms/src/TableFunctions/TableFunctionValues.cpp
rename to src/TableFunctions/TableFunctionValues.cpp
index 6f568fbea60..4e166b10d8f 100644
--- a/dbms/src/TableFunctions/TableFunctionValues.cpp
+++ b/src/TableFunctions/TableFunctionValues.cpp
@@ -14,7 +14,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -75,8 +74,6 @@ StoragePtr TableFunctionValues::executeImpl(const ASTPtr & ast_function, const C
         throw Exception("Table function '" + getName() + "' requires 2 or more arguments: structure and values.",
             ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-    context.checkAccess(AccessType::values);
-
     /// Parsing first argument as table structure and creating a sample block
     std::string structure = args[0]->as().value.safeGet();
diff --git a/dbms/src/TableFunctions/TableFunctionValues.h b/src/TableFunctions/TableFunctionValues.h
similarity index 87%
rename from dbms/src/TableFunctions/TableFunctionValues.h
rename to src/TableFunctions/TableFunctionValues.h
index f02dc69162f..3cc3687dab5 100644
--- a/dbms/src/TableFunctions/TableFunctionValues.h
+++ b/src/TableFunctions/TableFunctionValues.h
@@ -14,6 +14,7 @@ public:
     std::string getName() const override { return name; }
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "Values"; }
 };
diff --git a/dbms/src/TableFunctions/TableFunctionZeros.cpp b/src/TableFunctions/TableFunctionZeros.cpp
similarity index 95%
rename from dbms/src/TableFunctions/TableFunctionZeros.cpp
rename to src/TableFunctions/TableFunctionZeros.cpp
index d69e533c0d9..13436f04e1c 100644
--- a/dbms/src/TableFunctions/TableFunctionZeros.cpp
+++ b/src/TableFunctions/TableFunctionZeros.cpp
@@ -5,7 +5,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include "registerTableFunctions.h"
@@ -32,8 +31,6 @@
     UInt64 length = evaluateArgument(context, arguments[0]);
-    context.checkAccess(AccessType::zeros);
-
     auto res = StorageSystemZeros::create(StorageID(getDatabaseName(), table_name), multithreaded, length);
     res->startup();
     return res;
diff --git a/dbms/src/TableFunctions/TableFunctionZeros.h b/src/TableFunctions/TableFunctionZeros.h
similarity index 89%
rename from dbms/src/TableFunctions/TableFunctionZeros.h
rename to src/TableFunctions/TableFunctionZeros.h
index c8f3cbabc0e..71570c23a89 100644
--- a/dbms/src/TableFunctions/TableFunctionZeros.h
+++ b/src/TableFunctions/TableFunctionZeros.h
@@ -19,6 +19,7 @@ public:
     std::string getName() const override { return name; }
 private:
     StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override;
+    const char * getStorageTypeName() const override { return "SystemZeros"; }
     UInt64 evaluateArgument(const Context & context, ASTPtr & argument) const;
 };
diff --git a/dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp b/src/TableFunctions/parseColumnsListForTableFunction.cpp
similarity index 87%
rename from dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp
rename to src/TableFunctions/parseColumnsListForTableFunction.cpp
index 9b775b70492..c419164ac38 100644
--- a/dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp
+++ b/src/TableFunctions/parseColumnsListForTableFunction.cpp
@@ -19,9 +19,7 @@ ColumnsDescription parseColumnsListFromString(const std::string & structure, con
     Expected expected;
     Tokens tokens(structure.c_str(), structure.c_str() + structure.size());
-    IParser::Pos token_iterator(tokens);
-    const Settings & settings = context.getSettingsRef();
-    token_iterator.max_depth = settings.max_parser_depth;
+    IParser::Pos token_iterator(tokens, context.getSettingsRef().max_parser_depth);
     ParserColumnDeclarationList parser;
     ASTPtr columns_list_raw;
diff --git a/dbms/src/TableFunctions/parseColumnsListForTableFunction.h b/src/TableFunctions/parseColumnsListForTableFunction.h
similarity index 100%
rename from dbms/src/TableFunctions/parseColumnsListForTableFunction.h
rename to src/TableFunctions/parseColumnsListForTableFunction.h
diff --git a/dbms/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp
similarity index 100%
rename from dbms/src/TableFunctions/registerTableFunctions.cpp
rename to src/TableFunctions/registerTableFunctions.cpp
diff --git a/dbms/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h
similarity index 100%
rename from dbms/src/TableFunctions/registerTableFunctions.h
rename to src/TableFunctions/registerTableFunctions.h
diff --git a/dbms/tests/.gitignore b/tests/.gitignore
similarity index 100%
rename from dbms/tests/.gitignore
rename to tests/.gitignore
diff --git a/dbms/tests/CMakeLists.txt b/tests/CMakeLists.txt
similarity index 100%
rename from dbms/tests/CMakeLists.txt
rename to tests/CMakeLists.txt
diff --git a/dbms/tests/CTestCustom.cmake b/tests/CTestCustom.cmake
similarity index 100%
rename from dbms/tests/CTestCustom.cmake
rename to tests/CTestCustom.cmake
diff --git a/dbms/tests/clickhouse-client.xml b/tests/clickhouse-client.xml
similarity index 100%
rename from dbms/tests/clickhouse-client.xml
rename to tests/clickhouse-client.xml
diff --git a/dbms/tests/clickhouse-test b/tests/clickhouse-test
similarity index 100%
rename from dbms/tests/clickhouse-test
rename to tests/clickhouse-test
diff --git a/dbms/tests/clickhouse-test-server b/tests/clickhouse-test-server
similarity index 93%
rename from dbms/tests/clickhouse-test-server
rename to tests/clickhouse-test-server
index 831fd05fd82..7195abbfde8 100755
--- a/dbms/tests/clickhouse-test-server
+++ b/tests/clickhouse-test-server
@@ -10,11 +10,11 @@ DATA_DIR=${DATA_DIR:=`mktemp -d /tmp/clickhouse.test..XXXXX`}
 DATA_DIR_PATTERN=${DATA_DIR_PATTERN:=/tmp/clickhouse} # path from config file, will be replaced to temporary
 LOG_DIR=${LOG_DIR:=$DATA_DIR/log}
 export CLICKHOUSE_BINARY_NAME=${CLICKHOUSE_BINARY_NAME:="clickhouse"}
-( [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}-server" ] || [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}" ] ) && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR} # Build without separate build dir
+( [ -x "$ROOT_DIR/programs/${CLICKHOUSE_BINARY_NAME}-server" ] || [ -x "$ROOT_DIR/programs/${CLICKHOUSE_BINARY_NAME}" ] ) && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR} # Build without separate build dir
 [ -d "$ROOT_DIR/build${BUILD_TYPE}" ] && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR/build${BUILD_TYPE}}
 BUILD_DIR=${BUILD_DIR:=$ROOT_DIR}
 [ -x ${CLICKHOUSE_BINARY_NAME}-server" ] && [ -x ${CLICKHOUSE_BINARY_NAME}-client" ] && BIN_DIR= # Allow run in /usr/bin
-( [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}" ] || [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}-server" ] ) && BIN_DIR=${BIN_DIR:=$BUILD_DIR/dbms/programs/}
+( [ -x "$BUILD_DIR/programs/${CLICKHOUSE_BINARY_NAME}" ] || [ -x "$BUILD_DIR/programs/${CLICKHOUSE_BINARY_NAME}-server" ] ) && BIN_DIR=${BIN_DIR:=$BUILD_DIR/programs/}
 [ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-server" ]
&& CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-server} [ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}" ] && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME} server} [ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-client" ] && CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-client} @@ -91,7 +91,7 @@ fi VERSION=`$CLICKHOUSE_CLIENT --version-clean` # If run from compile dir - use in-place compile binary and headers -[ -n "$BIN_DIR" ] && INTERNAL_COMPILER_PARAMS="--compiler_executable_root=${INTERNAL_COMPILER_BIN_ROOT:=$BUILD_DIR/dbms/programs/} --compiler_headers=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/ --compiler_headers_root=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/" +[ -n "$BIN_DIR" ] && INTERNAL_COMPILER_PARAMS="--compiler_executable_root=${INTERNAL_COMPILER_BIN_ROOT:=$BUILD_DIR/programs/} --compiler_headers=$BUILD_DIR/programs/clang/headers/$VERSION/ --compiler_headers_root=$BUILD_DIR/programs/clang/headers/$VERSION/" $GDB $CLICKHOUSE_SERVER --config-file=$CLICKHOUSE_CONFIG --log=$CLICKHOUSE_LOG $TEST_SERVER_PARAMS -- \ --http_port=$CLICKHOUSE_PORT_HTTP \ diff --git a/dbms/tests/client-test.xml b/tests/client-test.xml similarity index 100% rename from dbms/tests/client-test.xml rename to tests/client-test.xml diff --git a/tests/config/access_management.xml b/tests/config/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/config/access_management.xml @@ -0,0 +1,7 @@
+<yandex>
+    <users>
+        <default>
+            <access_management>1</access_management>
+        </default>
+    </users>
+</yandex>
diff --git a/dbms/tests/config/client_config.xml b/tests/config/client_config.xml similarity index 100% rename from dbms/tests/config/client_config.xml rename to tests/config/client_config.xml diff --git a/tests/config/clusters.xml b/tests/config/clusters.xml new file mode 100644 index 00000000000..c0babf0ff89 --- /dev/null +++ b/tests/config/clusters.xml @@ -0,0 +1,20 @@
+<yandex>
+    <remote_servers>
+        <test_cluster_two_shards_different_databases>
+            <shard>
+                <replica>
+                    <default_database>shard_0</default_database>
+                    <host>localhost</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <replica>
+                    <default_database>shard_1</default_database>
+                    <host>localhost</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </test_cluster_two_shards_different_databases>
+    </remote_servers>
+</yandex>
\ No newline at end of file
diff --git a/tests/config/decimals_dictionary.xml b/tests/config/decimals_dictionary.xml new file mode 100644 index 00000000000..f728fa774a7 --- /dev/null +++ b/tests/config/decimals_dictionary.xml @@ -0,0 +1,197 @@
+<dictionaries>
+    <dictionary>
+        <name>flat_decimals</name>
+        <source>
+            <clickhouse>
+                <host>localhost</host>
+                <port>9000</port>
+                <user>default</user>
+                <password></password>
+                <db>system</db>
+                <table>decimals</table>
+            </clickhouse>
+        </source>
+        <lifetime>0</lifetime>
+        <layout><flat/></layout>
+        <structure>
+            <id><name>key</name></id>
+            <attribute><name>d32</name><type>Decimal32(4)</type><null_value>0</null_value></attribute>
+            <attribute><name>d64</name><type>Decimal64(6)</type><null_value>0</null_value></attribute>
+            <attribute><name>d128</name><type>Decimal128(1)</type><null_value>0</null_value></attribute>
+        </structure>
+    </dictionary>
+    <!-- hashed_decimals, cache_decimals, complex_hashed_decimals and complex_cache_decimals
+         repeat the same source, lifetime and attributes, varying only the layout:
+         <hashed/>, <cache><size_in_cells>1000</size_in_cells></cache>, <complex_key_hashed/>
+         and <complex_key_cache>; the complex_key_* variants declare the key as
+         <key><attribute><name>key</name><type>UInt64</type></attribute></key> instead of <id>. -->
+</dictionaries>
diff --git a/dbms/tests/config/dhparam.pem b/tests/config/dhparam.pem similarity index 100% rename from dbms/tests/config/dhparam.pem rename to tests/config/dhparam.pem diff --git a/dbms/tests/config/disks.xml b/tests/config/disks.xml similarity index 100% rename from dbms/tests/config/disks.xml rename to tests/config/disks.xml diff --git a/tests/config/graphite.xml b/tests/config/graphite.xml new file mode 100644 index 00000000000..9920c8e42cf --- /dev/null +++ b/tests/config/graphite.xml @@ -0,0 +1,28 @@
+<yandex>
+    <graphite_rollup>
+        <version_column_name>Version</version_column_name>
+        <pattern>
+            <regexp>sum</regexp>
+            <function>sum</function>
+            <retention>
+                <age>0</age>
+                <precision>600</precision>
+            </retention>
+            <retention>
+                <age>172800</age>
+                <precision>6000</precision>
+            </retention>
+        </pattern>
+        <default>
+            <function>max</function>
+            <retention>
+                <age>0</age>
+                <precision>600</precision>
+            </retention>
+            <retention>
+                <age>172800</age>
+                <precision>6000</precision>
+            </retention>
+        </default>
+    </graphite_rollup>
+</yandex>
diff --git a/tests/config/ints_dictionary.xml b/tests/config/ints_dictionary.xml new file mode 100644 index 00000000000..a22dab8933c --- /dev/null +++ b/tests/config/ints_dictionary.xml @@ -0,0 +1,514 @@
+<dictionaries>
+    <dictionary>
+        <name>flat_ints</name>
+        <source>
+            <clickhouse>
+                <host>localhost</host>
+                <port>9000</port>
+                <user>default</user>
+                <password></password>
+                <db>system</db>
+                <table>ints</table>
+            </clickhouse>
+        </source>
+        <lifetime>0</lifetime>
+        <layout><flat/></layout>
+        <structure>
+            <id><name>key</name></id>
+            <attribute><name>i8</name><type>Int8</type><null_value>0</null_value></attribute>
+            <attribute><name>i16</name><type>Int16</type><null_value>0</null_value></attribute>
+            <attribute><name>i32</name><type>Int32</type><null_value>0</null_value></attribute>
+            <attribute><name>i64</name><type>Int64</type><null_value>0</null_value></attribute>
+            <attribute><name>u8</name><type>UInt8</type><null_value>0</null_value></attribute>
+            <attribute><name>u16</name><type>UInt16</type><null_value>0</null_value></attribute>
+            <attribute><name>u32</name><type>UInt32</type><null_value>0</null_value></attribute>
+            <attribute><name>u64</name><type>UInt64</type><null_value>0</null_value></attribute>
+        </structure>
+    </dictionary>
+    <!-- hashed_ints, hashed_sparse_ints, cache_ints, complex_hashed_ints and complex_cache_ints
+         repeat the same source and attributes with <hashed/>, <sparse_hashed/>,
+         <cache><size_in_cells>1000</size_in_cells></cache>, <complex_key_hashed/> and
+         <complex_key_cache> layouts (the complex_key_* variants key on
+         <key><attribute><name>key</name><type>UInt64</type></attribute></key>);
+         one_cell_cache_ints and one_cell_cache_ints_overflow read the same attributes from
+         test_01054.ints and test_01054_overflow.ints through a <cache> layout with
+         <size_in_cells>1</size_in_cells>. -->
+</dictionaries>
\ No newline at end of file
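The dictionary files above are plain external-dictionary configs consumed by the stateless tests. As a rough, hypothetical sketch of how such a dictionary is read (the helper name is invented and not part of this diff; it assumes `clickhouse-client` on PATH and a server started with these test configs, with a populated `system.ints` source table):

```python
# Hypothetical sketch, not part of this diff: reading the flat_ints dictionary
# defined above through the standard dictGet function.
import subprocess

def dict_get_i8(key):
    # dictGet(dictionary, attribute, key) reads one attribute of an external
    # dictionary; toUInt64 matches the flat layout's numeric id.
    query = "SELECT dictGet('flat_ints', 'i8', toUInt64({}))".format(key)
    return subprocess.check_output(['clickhouse-client', '-q', query]).strip()
```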
diff --git a/dbms/tests/config/listen.xml b/tests/config/listen.xml similarity index 100% rename from dbms/tests/config/listen.xml rename to tests/config/listen.xml diff --git a/dbms/tests/config/log_queries.xml b/tests/config/log_queries.xml similarity index 100% rename from dbms/tests/config/log_queries.xml rename to tests/config/log_queries.xml diff --git a/dbms/tests/config/macros.xml b/tests/config/macros.xml similarity index 100% rename from dbms/tests/config/macros.xml rename to tests/config/macros.xml diff --git a/dbms/tests/config/metric_log.xml b/tests/config/metric_log.xml similarity index 100% rename from dbms/tests/config/metric_log.xml rename to tests/config/metric_log.xml diff --git a/dbms/tests/config/part_log.xml b/tests/config/part_log.xml similarity index 100% rename from dbms/tests/config/part_log.xml rename to tests/config/part_log.xml diff --git a/tests/config/polymorphic_parts.xml b/tests/config/polymorphic_parts.xml new file mode 100644 index 00000000000..2924aa5c69d --- /dev/null +++ b/tests/config/polymorphic_parts.xml @@ -0,0 +1,5 @@
+<yandex>
+    <merge_tree>
+        <min_bytes_for_wide_part>10485760</min_bytes_for_wide_part>
+    </merge_tree>
+</yandex>
diff --git a/dbms/tests/config/query_masking_rules.xml b/tests/config/query_masking_rules.xml similarity index 100% rename from dbms/tests/config/query_masking_rules.xml rename to tests/config/query_masking_rules.xml diff --git a/dbms/tests/config/readonly.xml b/tests/config/readonly.xml similarity index 100% rename from dbms/tests/config/readonly.xml rename to tests/config/readonly.xml diff --git a/dbms/tests/config/secure_ports.xml b/tests/config/secure_ports.xml similarity index 100% rename from dbms/tests/config/secure_ports.xml rename to tests/config/secure_ports.xml diff --git a/dbms/tests/config/server.crt b/tests/config/server.crt similarity index 100% rename from dbms/tests/config/server.crt rename to tests/config/server.crt diff --git a/dbms/tests/config/server.key b/tests/config/server.key similarity index 100% rename from dbms/tests/config/server.key rename to tests/config/server.key diff --git a/tests/config/strings_dictionary.xml b/tests/config/strings_dictionary.xml new file mode 100644 index 00000000000..c5643eecb68 --- /dev/null +++ b/tests/config/strings_dictionary.xml @@ -0,0 +1,209 @@
+<dictionaries>
+    <dictionary>
+        <name>flat_strings</name>
+        <source>
+            <clickhouse>
+                <host>localhost</host>
+                <port>9000</port>
+                <user>default</user>
+                <password></password>
+                <db>system</db>
+                <table>strings</table>
+            </clickhouse>
+        </source>
+        <lifetime>0</lifetime>
+        <layout><flat/></layout>
+        <structure>
+            <id><name>key</name></id>
+            <attribute><name>str</name><type>String</type><null_value></null_value></attribute>
+        </structure>
+    </dictionary>
+    <!-- hashed_strings, cache_strings, complex_hashed_strings and complex_cache_strings repeat
+         the same source and attribute with <hashed/>, <cache><size_in_cells>1000</size_in_cells></cache>,
+         <complex_key_hashed/> and <complex_key_cache> layouts; complex_hashed_strings_key and
+         complex_cache_strings_key invert the structure, keying on
+         <key><attribute><name>str</name><type>String</type></attribute></key> with a UInt64
+         attribute named key (null_value 0). -->
+</dictionaries>
diff --git a/dbms/tests/config/text_log.xml b/tests/config/text_log.xml similarity index 100% rename from dbms/tests/config/text_log.xml rename to tests/config/text_log.xml diff --git a/dbms/tests/config/zookeeper.xml b/tests/config/zookeeper.xml similarity index 100% rename from dbms/tests/config/zookeeper.xml rename to tests/config/zookeeper.xml diff --git a/tests/decimals_dictionary.xml b/tests/decimals_dictionary.xml new file mode 120000 index 00000000000..15f5b3800b3 --- /dev/null +++ b/tests/decimals_dictionary.xml @@ -0,0 +1 @@ +config/decimals_dictionary.xml \ No newline at end of file diff --git a/dbms/tests/external_models/catboost/data/build_catboost.sh b/tests/external_models/catboost/data/build_catboost.sh similarity index 100% rename from dbms/tests/external_models/catboost/data/build_catboost.sh rename to tests/external_models/catboost/data/build_catboost.sh diff --git a/dbms/tests/external_models/catboost/helpers/__init__.py b/tests/external_models/catboost/helpers/__init__.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/__init__.py rename to tests/external_models/catboost/helpers/__init__.py diff --git a/dbms/tests/external_models/catboost/helpers/client.py b/tests/external_models/catboost/helpers/client.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/client.py rename to tests/external_models/catboost/helpers/client.py diff --git a/dbms/tests/external_models/catboost/helpers/generate.py b/tests/external_models/catboost/helpers/generate.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/generate.py rename to tests/external_models/catboost/helpers/generate.py diff --git a/dbms/tests/external_models/catboost/helpers/server.py b/tests/external_models/catboost/helpers/server.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/server.py rename to tests/external_models/catboost/helpers/server.py diff --git a/dbms/tests/external_models/catboost/helpers/server_with_models.py b/tests/external_models/catboost/helpers/server_with_models.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/server_with_models.py rename to tests/external_models/catboost/helpers/server_with_models.py diff --git a/dbms/tests/external_models/catboost/helpers/table.py b/tests/external_models/catboost/helpers/table.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/table.py rename to tests/external_models/catboost/helpers/table.py diff --git a/dbms/tests/external_models/catboost/helpers/train.py b/tests/external_models/catboost/helpers/train.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/train.py rename to tests/external_models/catboost/helpers/train.py diff --git a/dbms/tests/external_models/catboost/pytest.ini b/tests/external_models/catboost/pytest.ini similarity index 100% rename from dbms/tests/external_models/catboost/pytest.ini rename to tests/external_models/catboost/pytest.ini diff --git a/dbms/tests/external_models/catboost/test_apply_catboost_model/test.py b/tests/external_models/catboost/test_apply_catboost_model/test.py similarity index 100% rename from dbms/tests/external_models/catboost/test_apply_catboost_model/test.py rename to tests/external_models/catboost/test_apply_catboost_model/test.py diff --git a/dbms/tests/instructions/clang-tidy.txt b/tests/instructions/clang-tidy.txt similarity index 100% rename from dbms/tests/instructions/clang-tidy.txt rename to
tests/instructions/clang-tidy.txt diff --git a/dbms/tests/instructions/coverity.txt b/tests/instructions/coverity.txt similarity index 100% rename from dbms/tests/instructions/coverity.txt rename to tests/instructions/coverity.txt diff --git a/dbms/tests/instructions/cppcheck.txt b/tests/instructions/cppcheck.txt similarity index 100% rename from dbms/tests/instructions/cppcheck.txt rename to tests/instructions/cppcheck.txt diff --git a/tests/instructions/developer_instruction_en.md b/tests/instructions/developer_instruction_en.md new file mode 120000 index 00000000000..7ce5ac9b690 --- /dev/null +++ b/tests/instructions/developer_instruction_en.md @@ -0,0 +1 @@ +../../docs/en/development/developer_instruction.md \ No newline at end of file diff --git a/tests/instructions/developer_instruction_ru.md b/tests/instructions/developer_instruction_ru.md new file mode 120000 index 00000000000..3beb31f0d28 --- /dev/null +++ b/tests/instructions/developer_instruction_ru.md @@ -0,0 +1 @@ +../../docs/ru/development/developer_instruction.md \ No newline at end of file diff --git a/dbms/tests/instructions/easy_tasks_sorted_ru.md b/tests/instructions/easy_tasks_sorted_ru.md similarity index 99% rename from dbms/tests/instructions/easy_tasks_sorted_ru.md rename to tests/instructions/easy_tasks_sorted_ru.md index cb94fa1885f..2dd60f97db3 100644 --- a/dbms/tests/instructions/easy_tasks_sorted_ru.md +++ b/tests/instructions/easy_tasks_sorted_ru.md @@ -2,7 +2,7 @@ ## Usability shortcomings when the user cannot read the client config. -`dbms/programs/client/Client.cpp` +`programs/client/Client.cpp` Run `chmod 000 /etc/clickhouse-client/config.xml` and see what happens. diff --git a/dbms/tests/instructions/heap-profiler.txt b/tests/instructions/heap-profiler.txt similarity index 92% rename from dbms/tests/instructions/heap-profiler.txt rename to tests/instructions/heap-profiler.txt index dd188f751a5..3c35e9cf518 100644 --- a/dbms/tests/instructions/heap-profiler.txt +++ b/tests/instructions/heap-profiler.txt @@ -1,7 +1,7 @@ Build clickhouse without tcmalloc. cmake -D ENABLE_TCMALLOC=0 Copy clickhouse binary to your server.
-scp dbms/programs/clickhouse server:~ +scp programs/clickhouse server:~ ssh to your server diff --git a/dbms/tests/instructions/jemalloc_memory_profile.txt b/tests/instructions/jemalloc_memory_profile.txt similarity index 100% rename from dbms/tests/instructions/jemalloc_memory_profile.txt rename to tests/instructions/jemalloc_memory_profile.txt diff --git a/dbms/tests/instructions/kafka.txt b/tests/instructions/kafka.txt similarity index 100% rename from dbms/tests/instructions/kafka.txt rename to tests/instructions/kafka.txt diff --git a/dbms/tests/instructions/ninja_trace.txt b/tests/instructions/ninja_trace.txt similarity index 100% rename from dbms/tests/instructions/ninja_trace.txt rename to tests/instructions/ninja_trace.txt diff --git a/dbms/tests/instructions/pvs-studio.txt b/tests/instructions/pvs-studio.txt similarity index 100% rename from dbms/tests/instructions/pvs-studio.txt rename to tests/instructions/pvs-studio.txt diff --git a/dbms/tests/instructions/sanitizers.md b/tests/instructions/sanitizers.md similarity index 96% rename from dbms/tests/instructions/sanitizers.md rename to tests/instructions/sanitizers.md index 45e1304e2a1..b501f946b46 100644 --- a/dbms/tests/instructions/sanitizers.md +++ b/tests/instructions/sanitizers.md @@ -16,7 +16,7 @@ ninja ## Copy binary to your server ``` -scp ./dbms/programs/clickhouse yourserver:~/clickhouse-asan +scp ./programs/clickhouse yourserver:~/clickhouse-asan ``` ## Start ClickHouse and run tests diff --git a/dbms/tests/instructions/syntax.txt b/tests/instructions/syntax.txt similarity index 100% rename from dbms/tests/instructions/syntax.txt rename to tests/instructions/syntax.txt diff --git a/dbms/tests/instructions/tscancode.txt b/tests/instructions/tscancode.txt similarity index 100% rename from dbms/tests/instructions/tscancode.txt rename to tests/instructions/tscancode.txt diff --git a/dbms/tests/integration/.dockerignore b/tests/integration/.dockerignore similarity index 100% rename from dbms/tests/integration/.dockerignore rename to tests/integration/.dockerignore diff --git a/dbms/tests/integration/.gitignore b/tests/integration/.gitignore similarity index 100% rename from dbms/tests/integration/.gitignore rename to tests/integration/.gitignore diff --git a/tests/integration/CMakeLists.txt b/tests/integration/CMakeLists.txt new file mode 100644 index 00000000000..8280464051f --- /dev/null +++ b/tests/integration/CMakeLists.txt @@ -0,0 +1,24 @@ +if(CLICKHOUSE_SPLIT_BINARY) + set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse-server CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse-client) +else() + set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse) +endif() + +find_program(DOCKER_CMD docker) +find_program(DOCKER_COMPOSE_CMD docker-compose) +find_program(PYTEST_CMD pytest) +find_program(SUDO_CMD sudo) + +# will mount only one binary into the docker container - a build with .so can't work +if(MAKE_STATIC_LIBRARIES AND DOCKER_CMD) + if(INTEGRATION_USE_RUNNER AND SUDO_CMD) + add_test(NAME integration-runner WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND ${SUDO_CMD} ${CMAKE_CURRENT_SOURCE_DIR}/runner --binary ${ClickHouse_BINARY_DIR}/programs/clickhouse --configs-dir ${ClickHouse_SOURCE_DIR}/programs/server/) + message(STATUS "Using tests in docker with runner SUDO=${SUDO_CMD}; DOCKER=${DOCKER_CMD};") + endif() + if(NOT
INTEGRATION_USE_RUNNER AND DOCKER_COMPOSE_CMD AND PYTEST_CMD) + # To run one test with debug: + # cmake . -DPYTEST_OPT="-ss;test_cluster_copier" + add_test(NAME integration-pytest WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND env ${TEST_USE_BINARIES} "CLICKHOUSE_TESTS_BASE_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/programs/server/" ${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}) + message(STATUS "Using tests in docker DOCKER=${DOCKER_CMD}; DOCKER_COMPOSE=${DOCKER_COMPOSE_CMD}; PYTEST=${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}") + endif() +endif() diff --git a/tests/integration/README.md b/tests/integration/README.md new file mode 100644 index 00000000000..e067b385577 --- /dev/null +++ b/tests/integration/README.md @@ -0,0 +1,111 @@
+## ClickHouse integration tests
+
+This directory contains tests that involve several ClickHouse instances, custom configs, ZooKeeper, etc.
+
+### Running natively
+
+Prerequisites:
+* Ubuntu 14.04 (Trusty) or higher.
+* [docker](https://www.docker.com/community-edition#/download). Minimum required API version: 1.25, check with `docker version`.
+
+You must install the latest Docker from
+https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#set-up-the-repository
+Don't use the Docker from your system repository.
+
+* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev`
+* [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
+* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio rpm-confluent-schemaregistry`
+
+(strongly discouraged) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python-pytest python-dicttoxml python-docker python-pymysql python-pymongo python-tzlocal python-kazoo python-psycopg2 python-kafka python-pytest-timeout python-minio`
+
+If you want to run the tests as a non-privileged user, add that user to the `docker` group (`sudo usermod -aG docker $USER`) and re-login;
+you must close all your sessions, for example by restarting your computer.
+To check that you have access to Docker, run `docker ps`.
+
+Run the tests with the `pytest` command. To select which tests to run, use `pytest -k <test name pattern>`.
+
+By default, tests are run with the system-wide client binary, server binary and base configs. To change that,
+set the following environment variables:
+* `CLICKHOUSE_TESTS_SERVER_BIN_PATH` to choose the server binary.
+* `CLICKHOUSE_TESTS_CLIENT_BIN_PATH` to choose the client binary.
+* `CLICKHOUSE_TESTS_BASE_CONFIG_DIR` to choose the directory from which the base configs (`config.xml` and
+  `users.xml`) are taken.
+
+
+### Running with runner script
+
+The only requirements are a freshly configured Docker and the runner image:
+`docker pull yandex/clickhouse-integration-tests-runner`
+
+Notes:
+* If you want to run integration tests without `sudo`, add your user to the docker group with `sudo usermod -aG docker $USER`. [More information](https://docs.docker.com/install/linux/linux-postinstall/) about docker configuration.
+* If you have already run these tests without the `./runner` script, you may have problems with the pytest cache. It can be removed with `rm -r __pycache__ .pytest_cache/`.
+* Some tests may require a lot of resources (CPU, RAM, etc.). It is better not to run large tests such as `test_cluster_copier` or `test_distributed_ddl*` on a laptop.
+
+You can run tests via the `./runner` script, passing pytest arguments as the last argument:
+```
+$ ./runner --binary $HOME/ClickHouse/programs/clickhouse --bridge-binary $HOME/ClickHouse/programs/clickhouse-odbc-bridge --configs-dir $HOME/ClickHouse/programs/server/ 'test_odbc_interaction -ss'
+Start tests
+============================= test session starts ==============================
+platform linux2 -- Python 2.7.15rc1, pytest-4.0.0, py-1.7.0, pluggy-0.8.0
+rootdir: /ClickHouse/tests/integration, inifile: pytest.ini
+collected 6 items
+
+test_odbc_interaction/test.py Removing network clickhouse_default
+...
+
+Killing roottestodbcinteraction_node1_1 ... done
+Killing roottestodbcinteraction_mysql1_1 ... done
+Killing roottestodbcinteraction_postgres1_1 ... done
+Removing roottestodbcinteraction_node1_1 ... done
+Removing roottestodbcinteraction_mysql1_1 ... done
+Removing roottestodbcinteraction_postgres1_1 ... done
+Removing network roottestodbcinteraction_default
+
+==================== 6 passed, 1 warnings in 95.21 seconds =====================
+
+```
+
+Paths to the binaries and configs may be specified via environment variables:
+```
+$ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/
+$ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse
+$ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge
+$ ./runner 'test_odbc_interaction'
+$ # or ./runner '-v -ss'
+Start tests
+============================= test session starts ==============================
+platform linux2 -- Python 2.7.15rc1, pytest-4.0.0, py-1.7.0, pluggy-0.8.0
+rootdir: /ClickHouse/tests/integration, inifile: pytest.ini
+collected 6 items
+
+test_odbc_interaction/test.py ...... [100%]
+==================== 6 passed, 1 warnings in 96.33 seconds =====================
+```
+
+You can also open a shell inside the container by overriding the command:
+`./runner --command=bash`
+
+### Rebuilding the docker containers
+
+The main container used for integration tests lives in `docker/test/integration/Dockerfile`. Rebuild it with
+```
+cd docker/test/integration
+docker build -t yandex/clickhouse-integration-test .
+```
+
+The helper container used by the `runner` script is in `tests/integration/image/Dockerfile`.
+
+### Adding new tests
+
+To add a new test named `foo`, create a directory `test_foo` with an empty `__init__.py` and a file
+named `test.py` containing the tests. Every function whose name starts with `test` becomes a test case
+(a minimal sketch follows this README).
+
+The `helpers` directory contains utilities for:
+* Launching a ClickHouse cluster, with or without ZooKeeper, in docker containers.
+* Sending queries to launched instances.
+* Introducing network failures, such as severing the network link between two instances.
+
+To assert that two TSV files are equal, wrap them in the `TSV` class and use the regular `assert`
+statement, e.g. `assert TSV(result) == TSV(reference)`. If the assertion fails, `pytest`
+will automagically detect the variable types and print only a small diff of the two files.
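To make the "Adding new tests" recipe above concrete, here is a minimal, hypothetical `tests/integration/test_foo/test.py` (the test name, instance name and query are invented for illustration, not part of this diff); it follows the same `ClickHouseCluster` pattern used by the tests added in this change:

```python
# tests/integration/test_foo/test.py -- hypothetical minimal integration test;
# an empty __init__.py must sit next to it, as the README above describes.
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node')  # one plain instance, no ZooKeeper

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()

def test_foo(started_cluster):
    # Any function whose name starts with `test` becomes a test case.
    assert node.query("SELECT 1") == "1\n"
```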
diff --git a/dbms/tests/integration/conftest.py b/tests/integration/conftest.py similarity index 100% rename from dbms/tests/integration/conftest.py rename to tests/integration/conftest.py diff --git a/dbms/tests/integration/helpers/0_common_instance_config.xml b/tests/integration/helpers/0_common_instance_config.xml similarity index 100% rename from dbms/tests/integration/helpers/0_common_instance_config.xml rename to tests/integration/helpers/0_common_instance_config.xml diff --git a/tests/integration/helpers/0_common_instance_users.xml b/tests/integration/helpers/0_common_instance_users.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/helpers/0_common_instance_users.xml @@ -0,0 +1,7 @@
+<yandex>
+    <users>
+        <default>
+            <access_management>1</access_management>
+        </default>
+    </users>
+</yandex>
diff --git a/dbms/tests/integration/helpers/__init__.py b/tests/integration/helpers/__init__.py similarity index 100% rename from dbms/tests/integration/helpers/__init__.py rename to tests/integration/helpers/__init__.py diff --git a/dbms/tests/integration/helpers/client.py b/tests/integration/helpers/client.py similarity index 100% rename from dbms/tests/integration/helpers/client.py rename to tests/integration/helpers/client.py diff --git a/dbms/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py similarity index 97% rename from dbms/tests/integration/helpers/cluster.py rename to tests/integration/helpers/cluster.py index 5dc93cb338a..a9fd572a8b0 100644 --- a/dbms/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -139,12 +139,13 @@ class ClickHouseCluster: cmd += " client" return cmd - def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macros={}, + def add_instance(self, name, config_dir=None, main_configs=None, user_configs=None, macros=None, with_zookeeper=False, with_mysql=False, with_kafka=False, clickhouse_path_dir=None, with_odbc_drivers=False, with_postgres=False, with_hdfs=False, with_mongo=False, with_redis=False, with_minio=False, - hostname=None, env_variables={}, image="yandex/clickhouse-integration-test", - stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=[]): + hostname=None, env_variables=None, image="yandex/clickhouse-integration-test", + stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None, + zookeeper_docker_compose_path=None): """Add an instance to the cluster. name - the name of the instance directory and the value of the 'instance' macro in ClickHouse. @@ -161,13 +162,14 @@ class ClickHouseCluster: raise Exception("Can\'t add instance `%s': there is already an instance with the same name!"
% name) instance = ClickHouseInstance( - self, self.base_dir, name, config_dir, main_configs, user_configs, macros, with_zookeeper, + self, self.base_dir, name, config_dir, main_configs or [], user_configs or [], macros or {}, + with_zookeeper, self.zookeeper_config_path, with_mysql, with_kafka, with_mongo, with_redis, with_minio, self.base_configs_dir, self.server_bin_path, self.odbc_bridge_bin_path, clickhouse_path_dir, with_odbc_drivers, hostname=hostname, - env_variables=env_variables, image=image, stay_alive=stay_alive, ipv4_address=ipv4_address, + env_variables=env_variables or {}, image=image, stay_alive=stay_alive, ipv4_address=ipv4_address, ipv6_address=ipv6_address, - with_installed_binary=with_installed_binary, tmpfs=tmpfs) + with_installed_binary=with_installed_binary, tmpfs=tmpfs or []) self.instances[name] = instance if ipv4_address is not None or ipv6_address is not None: @@ -178,10 +180,13 @@ class ClickHouseCluster: cmds = [] if with_zookeeper and not self.with_zookeeper: + if not zookeeper_docker_compose_path: + zookeeper_docker_compose_path = p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml') + self.with_zookeeper = True - self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')]) + self.base_cmd.extend(['--file', zookeeper_docker_compose_path]) self.base_zookeeper_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', - self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')] + self.project_name, '--file', zookeeper_docker_compose_path] cmds.append(self.base_zookeeper_cmd) if with_mysql and not self.with_mysql: @@ -580,17 +585,17 @@ class ClickHouseInstance: self, cluster, base_path, name, custom_config_dir, custom_main_configs, custom_user_configs, macros, with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, with_mongo, with_redis, with_minio, base_configs_dir, server_bin_path, odbc_bridge_bin_path, - clickhouse_path_dir, with_odbc_drivers, hostname=None, env_variables={}, + clickhouse_path_dir, with_odbc_drivers, hostname=None, env_variables=None, image="yandex/clickhouse-integration-test", - stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=[]): + stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None): self.name = name - self.base_cmd = cluster.base_cmd[:] + self.base_cmd = cluster.base_cmd self.docker_id = cluster.get_instance_docker_id(self.name) self.cluster = cluster self.hostname = hostname if hostname is not None else self.name - self.tmpfs = tmpfs[:] + self.tmpfs = tmpfs or [] self.custom_config_dir = p.abspath(p.join(base_path, custom_config_dir)) if custom_config_dir else None self.custom_main_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_main_configs] self.custom_user_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_user_configs] @@ -611,7 +616,7 @@ class ClickHouseInstance: self.path = p.join(self.cluster.instances_dir, name) self.docker_compose_path = p.join(self.path, 'docker_compose.yml') - self.env_variables = env_variables + self.env_variables = env_variables or {} if with_odbc_drivers: self.odbc_ini_path = os.path.dirname(self.docker_compose_path) + "/odbc.ini:/etc/odbc.ini" self.with_mysql = True @@ -923,6 +928,7 @@ class ClickHouseInstance: # The file is named with 0_ prefix to be processed before other configuration overloads. 
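# Descriptive note: 0_common_instance_users.xml (the new helper config above; it shares
# blob 7e799cb7b10 with tests/config/access_management.xml) enables access_management for
# the default user, so integration tests can run CREATE USER / GRANT statements.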
shutil.copy(p.join(HELPERS_DIR, '0_common_instance_config.xml'), self.config_d_dir) + shutil.copy(p.join(HELPERS_DIR, '0_common_instance_users.xml'), users_d_dir) # Generate and write macros file macros = self.macros.copy() @@ -1041,4 +1047,4 @@ class ClickHouseKiller(object): self.clickhouse_node.kill_clickhouse() def __exit__(self, exc_type, exc_val, exc_tb): - self.clickhouse_node.restore_clickhouse() \ No newline at end of file + self.clickhouse_node.restore_clickhouse() diff --git a/dbms/tests/integration/helpers/docker_compose_hdfs.yml b/tests/integration/helpers/docker_compose_hdfs.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_hdfs.yml rename to tests/integration/helpers/docker_compose_hdfs.yml diff --git a/dbms/tests/integration/helpers/docker_compose_kafka.yml b/tests/integration/helpers/docker_compose_kafka.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_kafka.yml rename to tests/integration/helpers/docker_compose_kafka.yml diff --git a/dbms/tests/integration/helpers/docker_compose_minio.yml b/tests/integration/helpers/docker_compose_minio.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_minio.yml rename to tests/integration/helpers/docker_compose_minio.yml diff --git a/dbms/tests/integration/helpers/docker_compose_mongo.yml b/tests/integration/helpers/docker_compose_mongo.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_mongo.yml rename to tests/integration/helpers/docker_compose_mongo.yml diff --git a/dbms/tests/integration/helpers/docker_compose_mysql.yml b/tests/integration/helpers/docker_compose_mysql.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_mysql.yml rename to tests/integration/helpers/docker_compose_mysql.yml diff --git a/dbms/tests/integration/helpers/docker_compose_net.yml b/tests/integration/helpers/docker_compose_net.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_net.yml rename to tests/integration/helpers/docker_compose_net.yml diff --git a/dbms/tests/integration/helpers/docker_compose_postgres.yml b/tests/integration/helpers/docker_compose_postgres.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_postgres.yml rename to tests/integration/helpers/docker_compose_postgres.yml diff --git a/dbms/tests/integration/helpers/docker_compose_redis.yml b/tests/integration/helpers/docker_compose_redis.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_redis.yml rename to tests/integration/helpers/docker_compose_redis.yml diff --git a/dbms/tests/integration/helpers/docker_compose_zookeeper.yml b/tests/integration/helpers/docker_compose_zookeeper.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_zookeeper.yml rename to tests/integration/helpers/docker_compose_zookeeper.yml diff --git a/dbms/tests/integration/helpers/hdfs_api.py b/tests/integration/helpers/hdfs_api.py similarity index 100% rename from dbms/tests/integration/helpers/hdfs_api.py rename to tests/integration/helpers/hdfs_api.py diff --git a/dbms/tests/integration/helpers/helper_container/Dockerfile b/tests/integration/helpers/helper_container/Dockerfile similarity index 100% rename from dbms/tests/integration/helpers/helper_container/Dockerfile rename to tests/integration/helpers/helper_container/Dockerfile diff --git a/dbms/tests/integration/helpers/network.py b/tests/integration/helpers/network.py 
similarity index 100% rename from dbms/tests/integration/helpers/network.py rename to tests/integration/helpers/network.py diff --git a/dbms/tests/integration/helpers/test_tools.py b/tests/integration/helpers/test_tools.py similarity index 100% rename from dbms/tests/integration/helpers/test_tools.py rename to tests/integration/helpers/test_tools.py diff --git a/tests/integration/helpers/uclient.py b/tests/integration/helpers/uclient.py new file mode 100644 index 00000000000..6318802d81a --- /dev/null +++ b/tests/integration/helpers/uclient.py @@ -0,0 +1,36 @@ +import os +import sys +import time + +CURDIR = os.path.dirname(os.path.realpath(__file__)) + +sys.path.insert(0, os.path.join(CURDIR)) + +import uexpect + +prompt = ':\) ' +end_of_block = r'.*\r\n.*\r\n' + +class client(object): + def __init__(self, command=None, name='', log=None): + self.client = uexpect.spawn(['/bin/bash','--noediting']) + if command is None: + command = '/usr/bin/clickhouse-client' + self.client.command = command + self.client.eol('\r') + self.client.logger(log, prefix=name) + self.client.timeout(20) + self.client.expect('[#\$] ', timeout=2) + self.client.send(command) + + def __enter__(self): + return self.client.__enter__() + + def __exit__(self, type, value, traceback): + self.client.reader['kill_event'].set() + # send Ctrl-C + self.client.send('\x03', eol='') + time.sleep(0.3) + self.client.send('quit', eol='\r') + self.client.send('\x03', eol='') + return self.client.__exit__(type, value, traceback) diff --git a/dbms/tests/queries/0_stateless/helpers/uexpect.py b/tests/integration/helpers/uexpect.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/uexpect.py rename to tests/integration/helpers/uexpect.py diff --git a/tests/integration/helpers/zookeeper-ssl-entrypoint.sh b/tests/integration/helpers/zookeeper-ssl-entrypoint.sh new file mode 100755 index 00000000000..3ddb21881d6 --- /dev/null +++ b/tests/integration/helpers/zookeeper-ssl-entrypoint.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +set -e + +export ZOO_SERVER_CNXN_FACTORY=org.apache.zookeeper.server.NettyServerCnxnFactory +export ZOO_SSL_KEYSTORE_LOCATION=/conf/certs/zookeeper.p12 +export ZOO_SSL_KEYSTORE_PASSWORD=password +export ZOO_SSL_TRUSTSTORE_LOCATION=/conf/certs/truststore.p12 +export ZOO_SSL_TRUSTSTORE_PASSWORD=password + + +# Allow the container to be started with `--user` +if [[ "$1" = 'zkServer.sh' && "$(id -u)" = '0' ]]; then + chown -R zookeeper "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR" "$ZOO_LOG_DIR" "$ZOO_CONF_DIR" + exec gosu zookeeper "$0" "$@" +fi + +# Generate the config only if it doesn't exist +if [[ ! 
-f "$ZOO_CONF_DIR/zoo.cfg" ]]; then + CONFIG="$ZOO_CONF_DIR/zoo.cfg" + { + echo "dataDir=$ZOO_DATA_DIR" + echo "dataLogDir=$ZOO_DATA_LOG_DIR" + + echo "tickTime=$ZOO_TICK_TIME" + echo "initLimit=$ZOO_INIT_LIMIT" + echo "syncLimit=$ZOO_SYNC_LIMIT" + + echo "autopurge.snapRetainCount=$ZOO_AUTOPURGE_SNAPRETAINCOUNT" + echo "autopurge.purgeInterval=$ZOO_AUTOPURGE_PURGEINTERVAL" + echo "maxClientCnxns=$ZOO_MAX_CLIENT_CNXNS" + echo "standaloneEnabled=$ZOO_STANDALONE_ENABLED" + echo "admin.enableServer=$ZOO_ADMINSERVER_ENABLED" + } >> "$CONFIG" + if [[ -z $ZOO_SERVERS ]]; then + ZOO_SERVERS="server.1=localhost:2888:3888;2181" + fi + + for server in $ZOO_SERVERS; do + echo "$server" >> "$CONFIG" + done + + if [[ -n $ZOO_4LW_COMMANDS_WHITELIST ]]; then + echo "4lw.commands.whitelist=$ZOO_4LW_COMMANDS_WHITELIST" >> "$CONFIG" + fi + + + if [[ -n $ZOO_SSL_QUORUM ]]; then + { + echo "sslQuorum=$ZOO_SSL_QUORUM" + echo "serverCnxnFactory=$ZOO_SERVER_CNXN_FACTORY" + echo "ssl.quorum.keyStore.location=$ZOO_SSL_QUORUM_KEYSTORE_LOCATION" + echo "ssl.quorum.keyStore.password=$ZOO_SSL_QUORUM_KEYSTORE_PASSWORD" + echo "ssl.quorum.trustStore.location=$ZOO_SSL_QUORUM_TRUSTSTORE_LOCATION" + echo "ssl.quorum.trustStore.password=$ZOO_SSL_QUORUM_TRUSTSTORE_PASSWORD" + } >> "$CONFIG" + fi + + if [[ -n $ZOO_PORT_UNIFICATION ]]; then + echo "portUnification=$ZOO_PORT_UNIFICATION" >> "$CONFIG" + fi + + if [[ -n $ZOO_SECURE_CLIENT_PORT ]]; then + { + echo "secureClientPort=$ZOO_SECURE_CLIENT_PORT" + echo "serverCnxnFactory=$ZOO_SERVER_CNXN_FACTORY" + echo "ssl.keyStore.location=$ZOO_SSL_KEYSTORE_LOCATION" + echo "ssl.keyStore.password=$ZOO_SSL_KEYSTORE_PASSWORD" + echo "ssl.trustStore.location=$ZOO_SSL_TRUSTSTORE_LOCATION" + echo "ssl.trustStore.password=$ZOO_SSL_TRUSTSTORE_PASSWORD" + } >> "$CONFIG" + fi + + if [[ -n $ZOO_CLIENT_PORT_UNIFICATION ]]; then + echo "client.portUnification=$ZOO_CLIENT_PORT_UNIFICATION" >> "$CONFIG" + fi +fi + +# Write myid only if it doesn't exist +if [[ ! -f "$ZOO_DATA_DIR/myid" ]]; then + echo "${ZOO_MY_ID:-1}" > "$ZOO_DATA_DIR/myid" +fi + +mkdir -p $(dirname $ZOO_SSL_KEYSTORE_LOCATION) +mkdir -p $(dirname $ZOO_SSL_TRUSTSTORE_LOCATION) + +if [[ ! -f "$ZOO_SSL_KEYSTORE_LOCATION" ]]; then + keytool -genkeypair -alias zookeeper -keyalg RSA -validity 365 -keysize 2048 -dname "cn=zookeeper" -keypass password -keystore $ZOO_SSL_KEYSTORE_LOCATION -storepass password -deststoretype pkcs12 +fi + +if [[ ! 
-f "$ZOO_SSL_TRUSTSTORE_LOCATION" ]]; then + keytool -importcert -alias zookeeper -file /clickhouse-config/client.crt -keystore $ZOO_SSL_TRUSTSTORE_LOCATION -storepass password -noprompt -deststoretype pkcs12 +fi + +exec "$@" diff --git a/dbms/tests/integration/helpers/zookeeper_config.xml b/tests/integration/helpers/zookeeper_config.xml similarity index 100% rename from dbms/tests/integration/helpers/zookeeper_config.xml rename to tests/integration/helpers/zookeeper_config.xml diff --git a/dbms/tests/integration/image/Dockerfile b/tests/integration/image/Dockerfile similarity index 100% rename from dbms/tests/integration/image/Dockerfile rename to tests/integration/image/Dockerfile diff --git a/dbms/tests/integration/image/dockerd-entrypoint.sh b/tests/integration/image/dockerd-entrypoint.sh similarity index 92% rename from dbms/tests/integration/image/dockerd-entrypoint.sh rename to tests/integration/image/dockerd-entrypoint.sh index 89ccd78823d..8b0682396f8 100755 --- a/dbms/tests/integration/image/dockerd-entrypoint.sh +++ b/tests/integration/image/dockerd-entrypoint.sh @@ -22,5 +22,5 @@ export CLICKHOUSE_TESTS_CLIENT_BIN_PATH=/clickhouse export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/clickhouse-config export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge -cd /ClickHouse/dbms/tests/integration -exec "$@" \ No newline at end of file +cd /ClickHouse/tests/integration +exec "$@" diff --git a/dbms/tests/integration/image/modprobe.sh b/tests/integration/image/modprobe.sh similarity index 100% rename from dbms/tests/integration/image/modprobe.sh rename to tests/integration/image/modprobe.sh diff --git a/tests/integration/pytest.ini b/tests/integration/pytest.ini new file mode 100644 index 00000000000..bff275e3188 --- /dev/null +++ b/tests/integration/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +python_files = test*.py +norecursedirs = _instances +timeout = 300 +junit_duration_report = call diff --git a/dbms/tests/integration/runner b/tests/integration/runner similarity index 98% rename from dbms/tests/integration/runner rename to tests/integration/runner index cd148d1fe72..399c87dcf06 100755 --- a/dbms/tests/integration/runner +++ b/tests/integration/runner @@ -10,7 +10,7 @@ import subprocess import sys CUR_FILE_DIR = os.path.dirname(os.path.realpath(__file__)) -DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../../")) +DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../")) CURRENT_WORK_DIR = os.getcwd() CONTAINER_NAME = "clickhouse_integration_tests" @@ -57,7 +57,7 @@ if __name__ == "__main__": parser.add_argument( "--configs-dir", - default=os.environ.get("CLICKHOUSE_TESTS_BASE_CONFIG_DIR", os.path.join(DEFAULT_CLICKHOUSE_ROOT, "dbms/programs/server")), + default=os.environ.get("CLICKHOUSE_TESTS_BASE_CONFIG_DIR", os.path.join(DEFAULT_CLICKHOUSE_ROOT, "programs/server")), help="Path to clickhouse configs directory") parser.add_argument( diff --git a/dbms/tests/integration/test_adaptive_granularity/__init__.py b/tests/integration/test_access_control_on_cluster/__init__.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/__init__.py rename to tests/integration/test_access_control_on_cluster/__init__.py diff --git a/tests/integration/test_access_control_on_cluster/configs/config.d/clusters.xml b/tests/integration/test_access_control_on_cluster/configs/config.d/clusters.xml new file mode 100644 index 00000000000..741f862d162 --- /dev/null +++ 
b/tests/integration/test_access_control_on_cluster/configs/config.d/clusters.xml @@ -0,0 +1,22 @@
+<yandex>
+    <remote_servers>
+        <cluster>
+            <shard>
+                <replica>
+                    <host>ch1</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>ch2</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <replica>
+                    <host>ch3</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster>
+    </remote_servers>
+</yandex>
diff --git a/tests/integration/test_access_control_on_cluster/test.py b/tests/integration/test_access_control_on_cluster/test.py new file mode 100644 index 00000000000..6ca4ac15398 --- /dev/null +++ b/tests/integration/test_access_control_on_cluster/test.py @@ -0,0 +1,41 @@ +import time +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance('ch1', config_dir="configs", with_zookeeper=True) +ch2 = cluster.add_instance('ch2', config_dir="configs", with_zookeeper=True) +ch3 = cluster.add_instance('ch3', config_dir="configs", with_zookeeper=True) + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_access_control_on_cluster(): + ch1.query("CREATE USER Alex ON CLUSTER 'cluster'") + assert ch1.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" + assert ch2.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" + assert ch3.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" + + ch2.query("GRANT ON CLUSTER 'cluster' SELECT ON *.* TO Alex") + assert ch1.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" + assert ch2.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" + assert ch3.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" + + ch3.query("REVOKE ON CLUSTER 'cluster' SELECT ON *.* FROM Alex") + assert ch1.query("SHOW GRANTS FOR Alex") == "" + assert ch2.query("SHOW GRANTS FOR Alex") == "" + assert ch3.query("SHOW GRANTS FOR Alex") == "" + + ch2.query("DROP USER Alex ON CLUSTER 'cluster'") + assert "User `Alex` not found" in ch1.query_and_get_error("SHOW CREATE USER Alex") + assert "User `Alex` not found" in ch2.query_and_get_error("SHOW CREATE USER Alex") + assert "User `Alex` not found" in ch3.query_and_get_error("SHOW CREATE USER Alex") + diff --git a/dbms/tests/integration/test_adaptive_granularity_replicated/__init__.py b/tests/integration/test_adaptive_granularity/__init__.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity_replicated/__init__.py rename to tests/integration/test_adaptive_granularity/__init__.py diff --git a/dbms/tests/integration/test_adaptive_granularity/configs/log_conf.xml b/tests/integration/test_adaptive_granularity/configs/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/configs/log_conf.xml rename to tests/integration/test_adaptive_granularity/configs/log_conf.xml diff --git a/dbms/tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml b/tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml rename to tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml diff --git a/dbms/tests/integration/test_adaptive_granularity/configs/remote_servers.xml b/tests/integration/test_adaptive_granularity/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/configs/remote_servers.xml rename to tests/integration/test_adaptive_granularity/configs/remote_servers.xml diff --git
a/dbms/tests/integration/test_adaptive_granularity/test.py b/tests/integration/test_adaptive_granularity/test.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/test.py rename to tests/integration/test_adaptive_granularity/test.py diff --git a/dbms/tests/integration/test_aggregation_memory_efficient/__init__.py b/tests/integration/test_adaptive_granularity_different_settings/__init__.py similarity index 100% rename from dbms/tests/integration/test_aggregation_memory_efficient/__init__.py rename to tests/integration/test_adaptive_granularity_different_settings/__init__.py diff --git a/tests/integration/test_adaptive_granularity_different_settings/test.py b/tests/integration/test_adaptive_granularity_different_settings/test.py new file mode 100644 index 00000000000..b066c437e06 --- /dev/null +++ b/tests/integration/test_adaptive_granularity_different_settings/test.py @@ -0,0 +1,49 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node1 = cluster.add_instance('node1', with_zookeeper=True) +node2 = cluster.add_instance('node2', with_zookeeper=True) + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + finally: + cluster.shutdown() + + +def test_attach_detach(start_cluster): + + node1.query(""" + CREATE TABLE test (key UInt64) + ENGINE = ReplicatedMergeTree('/clickhouse/test', '1') + ORDER BY tuple() + SETTINGS index_granularity_bytes = 0""") + + node1.query("INSERT INTO test VALUES (1), (2)") + + node2.query(""" + CREATE TABLE test (key UInt64) + ENGINE = ReplicatedMergeTree('/clickhouse/test', '2') + ORDER BY tuple()""") + + node2.query("INSERT INTO test VALUES (3), (4)") + + node1.query("SYSTEM SYNC REPLICA test") + node2.query("SYSTEM SYNC REPLICA test") + + assert node1.query("SELECT COUNT() FROM test") == "4\n" + assert node2.query("SELECT COUNT() FROM test") == "4\n" + + node1.query("DETACH TABLE test") + node2.query("DETACH TABLE test") + + node1.query("ATTACH TABLE test") + node2.query("ATTACH TABLE test") + + assert node1.query("SELECT COUNT() FROM test") == "4\n" + assert node2.query("SELECT COUNT() FROM test") == "4\n" diff --git a/dbms/tests/integration/test_allowed_client_hosts/__init__.py b/tests/integration/test_adaptive_granularity_replicated/__init__.py similarity index 100% rename from dbms/tests/integration/test_allowed_client_hosts/__init__.py rename to tests/integration/test_adaptive_granularity_replicated/__init__.py diff --git a/dbms/tests/integration/test_adaptive_granularity_replicated/test.py b/tests/integration/test_adaptive_granularity_replicated/test.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity_replicated/test.py rename to tests/integration/test_adaptive_granularity_replicated/test.py diff --git a/dbms/tests/integration/test_allowed_url_from_config/__init__.py b/tests/integration/test_aggregation_memory_efficient/__init__.py similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/__init__.py rename to tests/integration/test_aggregation_memory_efficient/__init__.py diff --git a/dbms/tests/integration/test_aggregation_memory_efficient/test.py b/tests/integration/test_aggregation_memory_efficient/test.py similarity index 100% rename from dbms/tests/integration/test_aggregation_memory_efficient/test.py rename to tests/integration/test_aggregation_memory_efficient/test.py diff --git a/dbms/tests/integration/test_atomic_drop_table/__init__.py 
b/tests/integration/test_allowed_client_hosts/__init__.py similarity index 100% rename from dbms/tests/integration/test_atomic_drop_table/__init__.py rename to tests/integration/test_allowed_client_hosts/__init__.py diff --git a/dbms/tests/integration/test_allowed_client_hosts/configs/users.d/network.xml b/tests/integration/test_allowed_client_hosts/configs/users.d/network.xml similarity index 100% rename from dbms/tests/integration/test_allowed_client_hosts/configs/users.d/network.xml rename to tests/integration/test_allowed_client_hosts/configs/users.d/network.xml diff --git a/dbms/tests/integration/test_allowed_client_hosts/test.py b/tests/integration/test_allowed_client_hosts/test.py similarity index 100% rename from dbms/tests/integration/test_allowed_client_hosts/test.py rename to tests/integration/test_allowed_client_hosts/test.py diff --git a/dbms/tests/integration/test_authentication/__init__.py b/tests/integration/test_allowed_url_from_config/__init__.py similarity index 100% rename from dbms/tests/integration/test_authentication/__init__.py rename to tests/integration/test_allowed_url_from_config/__init__.py diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml b/tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml rename to tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml b/tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml rename to tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml rename to tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml rename to tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml rename to tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml rename to tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml 
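The test_attach_detach case added earlier in this patch pairs an adaptive-granularity replica (default settings) with a non-adaptive one (index_granularity_bytes = 0) on the same replicated table and verifies that both survive a DETACH/ATTACH cycle. A hedged follow-up sketch under the same assumptions (node1/node2 as defined in that test, but created with stay_alive=True so restart_clickhouse() is available; the helper name is hypothetical):

```python
# Hypothetical extension of test_attach_detach: the mixed-granularity
# replicas should also come back intact after a full server restart, not
# only after DETACH/ATTACH. Requires instances created with stay_alive=True.
def check_survives_restart(node1, node2):
    for node in (node1, node2):
        node.restart_clickhouse()
    for node in (node1, node2):
        node.query("SYSTEM SYNC REPLICA test")
        assert node.query("SELECT COUNT() FROM test") == "4\n"
```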
diff --git a/dbms/tests/integration/test_allowed_url_from_config/test.py b/tests/integration/test_allowed_url_from_config/test.py similarity index 100% rename from dbms/tests/integration/test_allowed_url_from_config/test.py rename to tests/integration/test_allowed_url_from_config/test.py diff --git a/dbms/tests/integration/test_backup_restore/__init__.py b/tests/integration/test_atomic_drop_table/__init__.py similarity index 100% rename from dbms/tests/integration/test_backup_restore/__init__.py rename to tests/integration/test_atomic_drop_table/__init__.py diff --git a/dbms/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml b/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml similarity index 100% rename from dbms/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml rename to tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml diff --git a/dbms/tests/integration/test_atomic_drop_table/configs/remote_servers.xml b/tests/integration/test_atomic_drop_table/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_atomic_drop_table/configs/remote_servers.xml rename to tests/integration/test_atomic_drop_table/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_atomic_drop_table/test.py b/tests/integration/test_atomic_drop_table/test.py similarity index 100% rename from dbms/tests/integration/test_atomic_drop_table/test.py rename to tests/integration/test_atomic_drop_table/test.py diff --git a/dbms/tests/integration/test_backward_compatability/__init__.py b/tests/integration/test_authentication/__init__.py similarity index 100% rename from dbms/tests/integration/test_backward_compatability/__init__.py rename to tests/integration/test_authentication/__init__.py diff --git a/dbms/tests/integration/test_authentication/test.py b/tests/integration/test_authentication/test.py similarity index 100% rename from dbms/tests/integration/test_authentication/test.py rename to tests/integration/test_authentication/test.py diff --git a/dbms/tests/integration/test_block_structure_mismatch/__init__.py b/tests/integration/test_backup_restore/__init__.py similarity index 100% rename from dbms/tests/integration/test_block_structure_mismatch/__init__.py rename to tests/integration/test_backup_restore/__init__.py diff --git a/dbms/tests/integration/test_backup_restore/test.py b/tests/integration/test_backup_restore/test.py similarity index 100% rename from dbms/tests/integration/test_backup_restore/test.py rename to tests/integration/test_backup_restore/test.py diff --git a/dbms/tests/integration/test_check_table/__init__.py b/tests/integration/test_backward_compatability/__init__.py similarity index 100% rename from dbms/tests/integration/test_check_table/__init__.py rename to tests/integration/test_backward_compatability/__init__.py diff --git a/dbms/tests/integration/test_backward_compatability/test.py b/tests/integration/test_backward_compatability/test.py similarity index 100% rename from dbms/tests/integration/test_backward_compatability/test.py rename to tests/integration/test_backward_compatability/test.py diff --git a/dbms/tests/integration/test_cluster_all_replicas/__init__.py b/tests/integration/test_block_structure_mismatch/__init__.py similarity index 100% rename from dbms/tests/integration/test_cluster_all_replicas/__init__.py rename to tests/integration/test_block_structure_mismatch/__init__.py diff --git 
a/dbms/tests/integration/test_block_structure_mismatch/configs/remote_servers.xml b/tests/integration/test_block_structure_mismatch/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_block_structure_mismatch/configs/remote_servers.xml rename to tests/integration/test_block_structure_mismatch/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_block_structure_mismatch/test.py b/tests/integration/test_block_structure_mismatch/test.py similarity index 100% rename from dbms/tests/integration/test_block_structure_mismatch/test.py rename to tests/integration/test_block_structure_mismatch/test.py diff --git a/dbms/tests/integration/test_cluster_copier/__init__.py b/tests/integration/test_check_table/__init__.py similarity index 100% rename from dbms/tests/integration/test_cluster_copier/__init__.py rename to tests/integration/test_check_table/__init__.py diff --git a/dbms/tests/integration/test_check_table/test.py b/tests/integration/test_check_table/test.py similarity index 100% rename from dbms/tests/integration/test_check_table/test.py rename to tests/integration/test_check_table/test.py diff --git a/dbms/tests/integration/test_concurrent_queries_for_user_restriction/__init__.py b/tests/integration/test_cluster_all_replicas/__init__.py similarity index 100% rename from dbms/tests/integration/test_concurrent_queries_for_user_restriction/__init__.py rename to tests/integration/test_cluster_all_replicas/__init__.py diff --git a/dbms/tests/integration/test_cluster_all_replicas/configs/remote_servers.xml b/tests/integration/test_cluster_all_replicas/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_cluster_all_replicas/configs/remote_servers.xml rename to tests/integration/test_cluster_all_replicas/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_cluster_all_replicas/test.py b/tests/integration/test_cluster_all_replicas/test.py similarity index 100% rename from dbms/tests/integration/test_cluster_all_replicas/test.py rename to tests/integration/test_cluster_all_replicas/test.py diff --git a/dbms/tests/integration/test_config_corresponding_root/__init__.py b/tests/integration/test_cluster_copier/__init__.py similarity index 100% rename from dbms/tests/integration/test_config_corresponding_root/__init__.py rename to tests/integration/test_cluster_copier/__init__.py diff --git a/dbms/tests/integration/test_cluster_copier/configs/conf.d/clusters.xml b/tests/integration/test_cluster_copier/configs/conf.d/clusters.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/configs/conf.d/clusters.xml rename to tests/integration/test_cluster_copier/configs/conf.d/clusters.xml diff --git a/dbms/tests/integration/test_cluster_copier/configs/conf.d/ddl.xml b/tests/integration/test_cluster_copier/configs/conf.d/ddl.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/configs/conf.d/ddl.xml rename to tests/integration/test_cluster_copier/configs/conf.d/ddl.xml diff --git a/dbms/tests/integration/test_cluster_copier/configs/conf.d/query_log.xml b/tests/integration/test_cluster_copier/configs/conf.d/query_log.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/configs/conf.d/query_log.xml rename to tests/integration/test_cluster_copier/configs/conf.d/query_log.xml diff --git a/dbms/tests/integration/test_cluster_copier/configs/config-copier.xml b/tests/integration/test_cluster_copier/configs/config-copier.xml similarity index 
100% rename from dbms/tests/integration/test_cluster_copier/configs/config-copier.xml rename to tests/integration/test_cluster_copier/configs/config-copier.xml diff --git a/dbms/tests/integration/test_cluster_copier/configs/users.xml b/tests/integration/test_cluster_copier/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/configs/users.xml rename to tests/integration/test_cluster_copier/configs/users.xml diff --git a/dbms/tests/integration/test_cluster_copier/task0_description.xml b/tests/integration/test_cluster_copier/task0_description.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/task0_description.xml rename to tests/integration/test_cluster_copier/task0_description.xml diff --git a/dbms/tests/integration/test_cluster_copier/task_month_to_week_description.xml b/tests/integration/test_cluster_copier/task_month_to_week_description.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/task_month_to_week_description.xml rename to tests/integration/test_cluster_copier/task_month_to_week_description.xml diff --git a/dbms/tests/integration/test_cluster_copier/task_no_arg.xml b/tests/integration/test_cluster_copier/task_no_arg.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/task_no_arg.xml rename to tests/integration/test_cluster_copier/task_no_arg.xml diff --git a/dbms/tests/integration/test_cluster_copier/task_no_index.xml b/tests/integration/test_cluster_copier/task_no_index.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/task_no_index.xml rename to tests/integration/test_cluster_copier/task_no_index.xml diff --git a/dbms/tests/integration/test_cluster_copier/task_test_block_size.xml b/tests/integration/test_cluster_copier/task_test_block_size.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/task_test_block_size.xml rename to tests/integration/test_cluster_copier/task_test_block_size.xml diff --git a/dbms/tests/integration/test_cluster_copier/task_trivial.xml b/tests/integration/test_cluster_copier/task_trivial.xml similarity index 100% rename from dbms/tests/integration/test_cluster_copier/task_trivial.xml rename to tests/integration/test_cluster_copier/task_trivial.xml diff --git a/dbms/tests/integration/test_cluster_copier/test.py b/tests/integration/test_cluster_copier/test.py similarity index 100% rename from dbms/tests/integration/test_cluster_copier/test.py rename to tests/integration/test_cluster_copier/test.py diff --git a/dbms/tests/integration/test_cluster_copier/trivial_test.py b/tests/integration/test_cluster_copier/trivial_test.py similarity index 100% rename from dbms/tests/integration/test_cluster_copier/trivial_test.py rename to tests/integration/test_cluster_copier/trivial_test.py diff --git a/dbms/tests/integration/test_config_substitutions/__init__.py b/tests/integration/test_concurrent_queries_for_user_restriction/__init__.py similarity index 100% rename from dbms/tests/integration/test_config_substitutions/__init__.py rename to tests/integration/test_concurrent_queries_for_user_restriction/__init__.py diff --git a/dbms/tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml b/tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml similarity index 100% rename from dbms/tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml rename to 
tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml
diff --git a/dbms/tests/integration/test_concurrent_queries_for_user_restriction/test.py b/tests/integration/test_concurrent_queries_for_user_restriction/test.py similarity index 100% rename from dbms/tests/integration/test_concurrent_queries_for_user_restriction/test.py rename to tests/integration/test_concurrent_queries_for_user_restriction/test.py
diff --git a/dbms/tests/integration/test_consistant_parts_after_move_partition/__init__.py b/tests/integration/test_config_corresponding_root/__init__.py similarity index 100% rename from dbms/tests/integration/test_consistant_parts_after_move_partition/__init__.py rename to tests/integration/test_config_corresponding_root/__init__.py
diff --git a/dbms/tests/integration/test_config_corresponding_root/configs/config.d/bad.xml b/tests/integration/test_config_corresponding_root/configs/config.d/bad.xml similarity index 100% rename from dbms/tests/integration/test_config_corresponding_root/configs/config.d/bad.xml rename to tests/integration/test_config_corresponding_root/configs/config.d/bad.xml
diff --git a/tests/integration/test_config_corresponding_root/configs/config.xml b/tests/integration/test_config_corresponding_root/configs/config.xml new file mode 100644 index 00000000000..4e130afa84d --- /dev/null +++ b/tests/integration/test_config_corresponding_root/configs/config.xml @@ -0,0 +1,415 @@ + [415 lines: stock ClickHouse server config.xml — trace logger under /var/log/clickhouse-server/, http port 8123, tcp port 9000, interserver port 9009, openSSL server/client sections (server.crt, server.key, dhparam.pem), uncompressed and mark cache sizes, /var/lib/clickhouse/ data, tmp and user_files paths, users.xml reference, sample localhost remote_servers clusters, system.query_log and system.query_thread_log partitioned by toYYYYMM(event_date) with flush interval 7500, *_dictionary.xml dictionary config pattern, ZooKeeper DDL queue path /clickhouse/task_queue/ddl, graphite_rollup retention windows (click_cost, max), /var/lib/clickhouse/format_schemas/; the XML markup itself was lost in extraction]
    diff --git a/dbms/tests/integration/test_config_corresponding_root/configs/users.xml b/tests/integration/test_config_corresponding_root/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_config_corresponding_root/configs/users.xml rename to tests/integration/test_config_corresponding_root/configs/users.xml diff --git a/dbms/tests/integration/test_config_corresponding_root/test.py b/tests/integration/test_config_corresponding_root/test.py similarity index 100% rename from dbms/tests/integration/test_config_corresponding_root/test.py rename to tests/integration/test_config_corresponding_root/test.py diff --git a/dbms/tests/integration/test_consistent_parts_after_clone_replica/__init__.py b/tests/integration/test_config_substitutions/__init__.py similarity index 100% rename from dbms/tests/integration/test_consistent_parts_after_clone_replica/__init__.py rename to tests/integration/test_config_substitutions/__init__.py diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_allow_databases.xml b/tests/integration/test_config_substitutions/configs/config_allow_databases.xml similarity index 100% rename from dbms/tests/integration/test_config_substitutions/configs/config_allow_databases.xml rename to tests/integration/test_config_substitutions/configs/config_allow_databases.xml diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_env.xml b/tests/integration/test_config_substitutions/configs/config_env.xml similarity index 100% rename from dbms/tests/integration/test_config_substitutions/configs/config_env.xml rename to tests/integration/test_config_substitutions/configs/config_env.xml diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_incl.xml b/tests/integration/test_config_substitutions/configs/config_incl.xml similarity index 100% rename from dbms/tests/integration/test_config_substitutions/configs/config_incl.xml rename to tests/integration/test_config_substitutions/configs/config_incl.xml diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_include_from_env.xml b/tests/integration/test_config_substitutions/configs/config_include_from_env.xml similarity index 100% rename from dbms/tests/integration/test_config_substitutions/configs/config_include_from_env.xml rename to tests/integration/test_config_substitutions/configs/config_include_from_env.xml diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_no_substs.xml b/tests/integration/test_config_substitutions/configs/config_no_substs.xml similarity index 100% rename from dbms/tests/integration/test_config_substitutions/configs/config_no_substs.xml rename to tests/integration/test_config_substitutions/configs/config_no_substs.xml diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_zk.xml b/tests/integration/test_config_substitutions/configs/config_zk.xml similarity index 100% rename from dbms/tests/integration/test_config_substitutions/configs/config_zk.xml rename to tests/integration/test_config_substitutions/configs/config_zk.xml diff --git a/dbms/tests/integration/test_config_substitutions/configs/max_query_size.xml b/tests/integration/test_config_substitutions/configs/max_query_size.xml similarity index 100% rename from dbms/tests/integration/test_config_substitutions/configs/max_query_size.xml rename to tests/integration/test_config_substitutions/configs/max_query_size.xml diff --git a/dbms/tests/integration/test_config_substitutions/test.py 
b/tests/integration/test_config_substitutions/test.py similarity index 100% rename from dbms/tests/integration/test_config_substitutions/test.py rename to tests/integration/test_config_substitutions/test.py diff --git a/dbms/tests/integration/test_cross_replication/__init__.py b/tests/integration/test_consistant_parts_after_move_partition/__init__.py similarity index 100% rename from dbms/tests/integration/test_cross_replication/__init__.py rename to tests/integration/test_consistant_parts_after_move_partition/__init__.py diff --git a/dbms/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml b/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml rename to tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_consistant_parts_after_move_partition/test.py b/tests/integration/test_consistant_parts_after_move_partition/test.py similarity index 100% rename from dbms/tests/integration/test_consistant_parts_after_move_partition/test.py rename to tests/integration/test_consistant_parts_after_move_partition/test.py diff --git a/dbms/tests/integration/test_delayed_replica_failover/__init__.py b/tests/integration/test_consistent_parts_after_clone_replica/__init__.py similarity index 100% rename from dbms/tests/integration/test_delayed_replica_failover/__init__.py rename to tests/integration/test_consistent_parts_after_clone_replica/__init__.py diff --git a/dbms/tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml b/tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml rename to tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_consistent_parts_after_clone_replica/test.py b/tests/integration/test_consistent_parts_after_clone_replica/test.py similarity index 100% rename from dbms/tests/integration/test_consistent_parts_after_clone_replica/test.py rename to tests/integration/test_consistent_parts_after_clone_replica/test.py diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py b/tests/integration/test_cross_replication/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py rename to tests/integration/test_cross_replication/__init__.py diff --git a/dbms/tests/integration/test_cross_replication/configs/remote_servers.xml b/tests/integration/test_cross_replication/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_cross_replication/configs/remote_servers.xml rename to tests/integration/test_cross_replication/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_cross_replication/test.py b/tests/integration/test_cross_replication/test.py similarity index 100% rename from dbms/tests/integration/test_cross_replication/test.py rename to tests/integration/test_cross_replication/test.py diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/__init__.py b/tests/integration/test_delayed_replica_failover/__init__.py similarity index 100% rename from 
dbms/tests/integration/test_dictionaries_complex_key_cache_string/__init__.py rename to tests/integration/test_delayed_replica_failover/__init__.py diff --git a/dbms/tests/integration/test_delayed_replica_failover/configs/remote_servers.xml b/tests/integration/test_delayed_replica_failover/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_delayed_replica_failover/configs/remote_servers.xml rename to tests/integration/test_delayed_replica_failover/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py similarity index 100% rename from dbms/tests/integration/test_delayed_replica_failover/test.py rename to tests/integration/test_delayed_replica_failover/test.py diff --git a/dbms/tests/integration/test_dictionaries_ddl/__init__.py b/tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_ddl/__init__.py rename to tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml b/tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml rename to tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep b/tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep rename to tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml b/tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml rename to tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py b/tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py rename to tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py b/tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py rename to tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem b/tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem rename to tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py 
b/tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py rename to tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/test.py b/tests/integration/test_dictionaries_all_layouts_and_sources/test.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/test.py rename to tests/integration/test_dictionaries_all_layouts_and_sources/test.py diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py b/tests/integration/test_dictionaries_complex_key_cache_string/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py rename to tests/integration/test_dictionaries_complex_key_cache_string/__init__.py diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml b/tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml rename to tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml b/tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml rename to tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml b/tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml rename to tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/test.py b/tests/integration/test_dictionaries_complex_key_cache_string/test.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/test.py rename to tests/integration/test_dictionaries_complex_key_cache_string/test.py diff --git a/dbms/tests/integration/test_dictionaries_mysql/__init__.py b/tests/integration/test_dictionaries_ddl/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_mysql/__init__.py rename to tests/integration/test_dictionaries_ddl/__init__.py diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/config.xml b/tests/integration/test_dictionaries_ddl/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_ddl/configs/config.xml rename to tests/integration/test_dictionaries_ddl/configs/config.xml diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml b/tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml rename to 
tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml b/tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml rename to tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml b/tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml rename to tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/users.xml b/tests/integration/test_dictionaries_ddl/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_ddl/configs/users.xml rename to tests/integration/test_dictionaries_ddl/configs/users.xml diff --git a/dbms/tests/integration/test_dictionaries_ddl/test.py b/tests/integration/test_dictionaries_ddl/test.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_ddl/test.py rename to tests/integration/test_dictionaries_ddl/test.py diff --git a/dbms/tests/integration/test_dictionaries_null_value/__init__.py b/tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_null_value/__init__.py rename to tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml 
b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/test.py b/tests/integration/test_dictionaries_depend_on_dictionaries/test.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/test.py rename to tests/integration/test_dictionaries_depend_on_dictionaries/test.py diff --git a/dbms/tests/integration/test_dictionaries_select_all/__init__.py b/tests/integration/test_dictionaries_mysql/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_select_all/__init__.py rename to tests/integration/test_dictionaries_mysql/__init__.py diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/config.xml b/tests/integration/test_dictionaries_mysql/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_mysql/configs/config.xml rename to tests/integration/test_dictionaries_mysql/configs/config.xml diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml b/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml rename to tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml b/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml rename to tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/remote_servers.xml b/tests/integration/test_dictionaries_mysql/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_mysql/configs/remote_servers.xml rename to tests/integration/test_dictionaries_mysql/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/users.xml b/tests/integration/test_dictionaries_mysql/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_mysql/configs/users.xml rename to tests/integration/test_dictionaries_mysql/configs/users.xml diff --git a/tests/integration/test_dictionaries_mysql/test.py b/tests/integration/test_dictionaries_mysql/test.py new file mode 100644 index 00000000000..647e36c71b3 --- /dev/null +++ b/tests/integration/test_dictionaries_mysql/test.py @@ -0,0 +1,95 @@ +import pytest +import os +import time + +## sudo -H pip install PyMySQL +import pymysql.cursors + +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +CONFIG_FILES = ['configs/dictionaries/mysql_dict1.xml', 'configs/dictionaries/mysql_dict2.xml', 'configs/remote_servers.xml'] + +cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs')) +instance = cluster.add_instance('instance', main_configs=CONFIG_FILES, with_mysql = True) + +create_table_mysql_template = """ + CREATE TABLE IF NOT EXISTS `test`.`{}` 
( + `id` int(11) NOT NULL, + `value` varchar(50) NOT NULL, + PRIMARY KEY (`id`) + ) ENGINE=InnoDB; + """ + +create_clickhouse_dictionary_table_template = """ + CREATE TABLE IF NOT EXISTS `test`.`dict_table_{}` (`id` UInt64, `value` String) ENGINE = Dictionary({}) + """ + +@pytest.fixture(scope="module") +def started_cluster(): + try: + #time.sleep(30) + cluster.start() + + # Create a MySQL database + mysql_connection = get_mysql_conn() + create_mysql_db(mysql_connection, 'test') + mysql_connection.close() + + # Create database in ClickHouse + instance.query("CREATE DATABASE IF NOT EXISTS test") + + # Create database in ClickHouse using the MySQL protocol (will be used for data insertion) + instance.query("CREATE DATABASE clickhouse_mysql ENGINE = MySQL('mysql1:3306', 'test', 'root', 'clickhouse')") + + yield cluster + + finally: + cluster.shutdown() + + +def test_load_mysql_dictionaries(started_cluster): + # Load dictionaries + query = instance.query + query("SYSTEM RELOAD DICTIONARIES") + + for n in range(0, 5): + # Create MySQL tables, fill them and create CH dict tables + prepare_mysql_table('test', str(n)) + + # Check dictionaries are loaded and have the correct number of elements + for n in range(0, 100): + # Force reload of dictionaries (every 10th iteration) + if (n % 10) == 0: + query("SYSTEM RELOAD DICTIONARIES") + + # Check the number of rows + assert query("SELECT count() FROM `test`.`dict_table_{}`".format('test' + str(n % 5))).rstrip() == '10000' + +def create_mysql_db(mysql_connection, name): + with mysql_connection.cursor() as cursor: + cursor.execute("CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET 'utf8'".format(name)) + +def prepare_mysql_table(table_name, index): + mysql_connection = get_mysql_conn() + + # Create table + create_mysql_table(mysql_connection, table_name + str(index)) + + # Insert rows using CH + query = instance.query + query("INSERT INTO `clickhouse_mysql`.{}(id, value) select number, concat('{} value ', toString(number)) from numbers(10000) ".format(table_name + str(index), table_name + str(index))) + assert query("SELECT count() FROM `clickhouse_mysql`.{}".format(table_name + str(index))).rstrip() == '10000' + mysql_connection.close() + + # Create CH dictionary tables based on the MySQL tables + query(create_clickhouse_dictionary_table_template.format(table_name + str(index), 'dict' + str(index))) + +def get_mysql_conn(): + conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.10', port=3308) + return conn + +def create_mysql_table(conn, table_name): + with conn.cursor() as cursor: + cursor.execute(create_table_mysql_template.format(table_name))
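Beyond the row-count probe in test_load_mysql_dictionaries above, individual keys can be spot-checked with dictGetString. The sketch below assumes, as the matching row counts suggest, that dictionary dictN (defined in mysql_dict1.xml/mysql_dict2.xml) is backed by MySQL table testN; the helper name is hypothetical.

```python
# Hypothetical spot check for the dictionaries loaded by the test above.
# Rows were inserted as (number, concat('<table> value ', toString(number))),
# so for dictionary dict0, assumed to be backed by MySQL table test0,
# key 42 should map to the string 'test0 value 42'.
def check_single_key(instance, n=0, key=42):
    value = instance.query(
        "SELECT dictGetString('dict{}', 'value', toUInt64({}))".format(n, key))
    assert value.rstrip() == 'test{} value {}'.format(n, key)
```

diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/__init__.py b/tests/integration/test_dictionaries_null_value/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/__init__.py rename to tests/integration/test_dictionaries_null_value/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_null_value/configs/config.xml b/tests/integration/test_dictionaries_null_value/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_null_value/configs/config.xml rename to tests/integration/test_dictionaries_null_value/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml b/tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml similarity index 100% rename from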
dbms/tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml rename to tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml diff --git a/dbms/tests/integration/test_dictionaries_null_value/configs/users.xml b/tests/integration/test_dictionaries_null_value/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_null_value/configs/users.xml rename to tests/integration/test_dictionaries_null_value/configs/users.xml diff --git a/dbms/tests/integration/test_dictionaries_null_value/test.py b/tests/integration/test_dictionaries_null_value/test.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_null_value/test.py rename to tests/integration/test_dictionaries_null_value/test.py diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/__init__.py b/tests/integration/test_dictionaries_select_all/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/__init__.py rename to tests/integration/test_dictionaries_select_all/__init__.py diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/config.xml b/tests/integration/test_dictionaries_select_all/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_select_all/configs/config.xml rename to tests/integration/test_dictionaries_select_all/configs/config.xml diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore b/tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore similarity index 100% rename from dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore rename to tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv b/tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv similarity index 100% rename from dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv rename to tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/users.xml b/tests/integration/test_dictionaries_select_all/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_select_all/configs/users.xml rename to tests/integration/test_dictionaries_select_all/configs/users.xml diff --git a/dbms/tests/integration/test_dictionaries_select_all/generate_dictionaries.py b/tests/integration/test_dictionaries_select_all/generate_dictionaries.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_select_all/generate_dictionaries.py rename to tests/integration/test_dictionaries_select_all/generate_dictionaries.py diff --git a/dbms/tests/integration/test_dictionaries_select_all/test.py b/tests/integration/test_dictionaries_select_all/test.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_select_all/test.py rename to tests/integration/test_dictionaries_select_all/test.py diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/__init__.py b/tests/integration/test_dictionaries_update_and_reload/__init__.py similarity index 100% rename from dbms/tests/integration/test_dictionary_ddl_on_cluster/__init__.py rename to tests/integration/test_dictionaries_update_and_reload/__init__.py diff --git 
a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/config.xml b/tests/integration/test_dictionaries_update_and_reload/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/config.xml rename to tests/integration/test_dictionaries_update_and_reload/configs/config.xml diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/users.xml b/tests/integration/test_dictionaries_update_and_reload/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/users.xml rename to tests/integration/test_dictionaries_update_and_reload/configs/users.xml diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/test.py b/tests/integration/test_dictionaries_update_and_reload/test.py similarity index 100% rename from dbms/tests/integration/test_dictionaries_update_and_reload/test.py rename to tests/integration/test_dictionaries_update_and_reload/test.py diff --git a/dbms/tests/integration/test_disk_access_storage/__init__.py b/tests/integration/test_dictionary_allow_read_expired_keys/__init__.py similarity index 100% rename from dbms/tests/integration/test_disk_access_storage/__init__.py rename to tests/integration/test_dictionary_allow_read_expired_keys/__init__.py diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml 
b/tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml rename to tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml b/tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml rename to tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml b/tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml rename to tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py similarity index 100% rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py rename to tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py similarity index 100% rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py rename to tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py similarity index 100% rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py rename to tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py diff --git a/dbms/tests/integration/test_distributed_ddl/__init__.py b/tests/integration/test_dictionary_ddl_on_cluster/__init__.py similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/__init__.py rename to tests/integration/test_dictionary_ddl_on_cluster/__init__.py diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml b/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml similarity index 100% rename from dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml rename to tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml b/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml similarity index 100% rename from dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml rename to tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/test.py b/tests/integration/test_dictionary_ddl_on_cluster/test.py similarity index 100% rename from 
dbms/tests/integration/test_dictionary_ddl_on_cluster/test.py rename to tests/integration/test_dictionary_ddl_on_cluster/test.py diff --git a/dbms/tests/integration/test_distributed_ddl_password/__init__.py b/tests/integration/test_disk_access_storage/__init__.py similarity index 100% rename from dbms/tests/integration/test_distributed_ddl_password/__init__.py rename to tests/integration/test_disk_access_storage/__init__.py diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py new file mode 100644 index 00000000000..babceee7c76 --- /dev/null +++ b/tests/integration/test_disk_access_storage/test.py @@ -0,0 +1,106 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', stay_alive=True) + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def create_entities(): + instance.query("CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000") + instance.query("CREATE USER u1 SETTINGS PROFILE s1") + instance.query("CREATE ROLE rx SETTINGS PROFILE s1") + instance.query("CREATE USER u2 IDENTIFIED BY 'qwerty' HOST LOCAL DEFAULT ROLE rx") + instance.query("CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2") + instance.query("CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a<1000 TO u1, u2") + instance.query("CREATE QUOTA q FOR INTERVAL 1 HOUR MAX QUERIES 100 TO ALL EXCEPT rx") + + +@pytest.fixture(autouse=True) +def drop_entities(): + instance.query("DROP USER IF EXISTS u1, u2") + instance.query("DROP ROLE IF EXISTS rx, ry") + instance.query("DROP ROW POLICY IF EXISTS p ON mydb.mytable") + instance.query("DROP QUOTA IF EXISTS q") + instance.query("DROP SETTINGS PROFILE IF EXISTS s1, s2") + + +def test_create(): + create_entities() + + def check(): + assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" + assert instance.query("SHOW CREATE USER u2") == "CREATE USER u2 HOST LOCAL DEFAULT ROLE rx\n" + assert instance.query("SHOW CREATE ROW POLICY p ON mydb.mytable") == "CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a < 1000 TO u1, u2\n" + assert instance.query("SHOW CREATE QUOTA q") == "CREATE QUOTA q KEYED BY \\'none\\' FOR INTERVAL 1 HOUR MAX QUERIES 100 TO ALL EXCEPT rx\n" + assert instance.query("SHOW GRANTS FOR u1") == "" + assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx TO u2\n" + assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s1\n" + assert instance.query("SHOW GRANTS FOR rx") == "" + assert instance.query("SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000\n" + assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + + check() + instance.restart_clickhouse() # Check persistency + check() + + +def test_alter(): + create_entities() + instance.restart_clickhouse() + + instance.query("CREATE ROLE ry") + instance.query("GRANT ry TO u2") + instance.query("ALTER USER u2 DEFAULT ROLE ry") + instance.query("GRANT rx TO ry WITH ADMIN OPTION") + instance.query("ALTER ROLE rx SETTINGS PROFILE s2") + instance.query("GRANT SELECT ON mydb.mytable TO u1") + instance.query("GRANT SELECT ON mydb.* TO rx WITH GRANT OPTION") + instance.query("ALTER SETTINGS PROFILE s1 SETTINGS max_memory_usage = 
987654321 READONLY") + + def check(): + assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" + assert instance.query("SHOW CREATE USER u2") == "CREATE USER u2 HOST LOCAL DEFAULT ROLE ry\n" + assert instance.query("SHOW GRANTS FOR u1") == "GRANT SELECT ON mydb.mytable TO u1\n" + assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx, ry TO u2\n" + assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s2\n" + assert instance.query("SHOW CREATE ROLE ry") == "CREATE ROLE ry\n" + assert instance.query("SHOW GRANTS FOR rx") == "GRANT SELECT ON mydb.* TO rx WITH GRANT OPTION\n" + assert instance.query("SHOW GRANTS FOR ry") == "GRANT rx TO ry WITH ADMIN OPTION\n" + assert instance.query("SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY\n" + assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + + check() + instance.restart_clickhouse() # Check persistency + check() + + +def test_drop(): + create_entities() + instance.restart_clickhouse() + + instance.query("DROP USER u2") + instance.query("DROP ROLE rx") + instance.query("DROP ROW POLICY p ON mydb.mytable") + instance.query("DROP QUOTA q") + instance.query("DROP SETTINGS PROFILE s1") + + def check(): + assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1\n" + assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2\n" + assert "User `u2` not found" in instance.query_and_get_error("SHOW CREATE USER u2") + assert "Row policy `p ON mydb.mytable` not found" in instance.query_and_get_error("SHOW CREATE ROW POLICY p ON mydb.mytable") + assert "Quota `q` not found" in instance.query_and_get_error("SHOW CREATE QUOTA q") + + check() + instance.restart_clickhouse() # Check persistency + check()
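test_create, test_alter and test_drop above all follow the same persistence pattern: run the assertions, restart the server, and run them again, so every access entity must round-trip through the on-disk access storage unchanged. Distilled as a sketch (the helper name is hypothetical):

```python
# Hypothetical distillation of the persistence pattern used by the three
# tests above: any state that passes `check` before a restart must also
# pass it afterwards, i.e. it must be restored from disk.
def check_persists(instance, check):
    check()
    instance.restart_clickhouse()  # requires stay_alive=True
    check()
```

diff --git a/dbms/tests/integration/test_distributed_format/__init__.py b/tests/integration/test_distributed_ddl/__init__.py similarity index 100% rename from dbms/tests/integration/test_distributed_format/__init__.py rename to tests/integration/test_distributed_ddl/__init__.py
diff --git a/dbms/tests/integration/test_distributed_ddl/cluster.py b/tests/integration/test_distributed_ddl/cluster.py similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/cluster.py rename to tests/integration/test_distributed_ddl/cluster.py
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/clusters.xml b/tests/integration/test_distributed_ddl/configs/config.d/clusters.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/clusters.xml rename to tests/integration/test_distributed_ddl/configs/config.d/clusters.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/ddl.xml b/tests/integration/test_distributed_ddl/configs/config.d/ddl.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/ddl.xml rename to tests/integration/test_distributed_ddl/configs/config.d/ddl.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/macro.xml b/tests/integration/test_distributed_ddl/configs/config.d/macro.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/macro.xml rename to tests/integration/test_distributed_ddl/configs/config.d/macro.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/query_log.xml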
b/tests/integration/test_distributed_ddl/configs/config.d/query_log.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/query_log.xml rename to tests/integration/test_distributed_ddl/configs/config.d/query_log.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml b/tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml rename to tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs/users.d/query_log.xml b/tests/integration/test_distributed_ddl/configs/users.d/query_log.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs/users.d/query_log.xml rename to tests/integration/test_distributed_ddl/configs/users.d/query_log.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml b/tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml rename to tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml rename to tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml rename to tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml rename to tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml rename to tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml rename to tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml similarity index 100% rename from 
dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml rename to tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/dhparam.pem b/tests/integration/test_distributed_ddl/configs_secure/dhparam.pem similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/dhparam.pem rename to tests/integration/test_distributed_ddl/configs_secure/dhparam.pem diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/server.crt b/tests/integration/test_distributed_ddl/configs_secure/server.crt similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/server.crt rename to tests/integration/test_distributed_ddl/configs_secure/server.crt diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/server.key b/tests/integration/test_distributed_ddl/configs_secure/server.key similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/server.key rename to tests/integration/test_distributed_ddl/configs_secure/server.key diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml b/tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml rename to tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml b/tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml rename to tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml diff --git a/dbms/tests/integration/test_distributed_ddl/test.py b/tests/integration/test_distributed_ddl/test.py similarity index 100% rename from dbms/tests/integration/test_distributed_ddl/test.py rename to tests/integration/test_distributed_ddl/test.py diff --git a/dbms/tests/integration/test_distributed_ddl/test_replicated_alter.py b/tests/integration/test_distributed_ddl/test_replicated_alter.py similarity index 98% rename from dbms/tests/integration/test_distributed_ddl/test_replicated_alter.py rename to tests/integration/test_distributed_ddl/test_replicated_alter.py index 490587240eb..e66e731cbb1 100644 --- a/dbms/tests/integration/test_distributed_ddl/test_replicated_alter.py +++ b/tests/integration/test_distributed_ddl/test_replicated_alter.py @@ -68,7 +68,7 @@ ENGINE = Distributed(cluster, default, merge_for_alter, i) test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster MODIFY COLUMN i Int64") - test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster ADD COLUMN String s DEFAULT toString(i)") + test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster ADD COLUMN s String DEFAULT toString(i)") assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(''.join(['{}\t{}\n'.format(x,x) for x in xrange(4)])) diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/__init__.py b/tests/integration/test_distributed_ddl_password/__init__.py similarity index 100% rename from 
dbms/tests/integration/test_distributed_respect_user_timeouts/__init__.py rename to tests/integration/test_distributed_ddl_password/__init__.py diff --git a/dbms/tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml b/tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml rename to tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml diff --git a/dbms/tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml b/tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml similarity index 100% rename from dbms/tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml rename to tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml diff --git a/dbms/tests/integration/test_distributed_ddl_password/test.py b/tests/integration/test_distributed_ddl_password/test.py similarity index 100% rename from dbms/tests/integration/test_distributed_ddl_password/test.py rename to tests/integration/test_distributed_ddl_password/test.py diff --git a/dbms/tests/integration/test_distributed_storage_configuration/__init__.py b/tests/integration/test_distributed_format/__init__.py similarity index 100% rename from dbms/tests/integration/test_distributed_storage_configuration/__init__.py rename to tests/integration/test_distributed_format/__init__.py diff --git a/dbms/tests/integration/test_distributed_format/configs/remote_servers.xml b/tests/integration/test_distributed_format/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_distributed_format/configs/remote_servers.xml rename to tests/integration/test_distributed_format/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_distributed_format/test.py b/tests/integration/test_distributed_format/test.py similarity index 100% rename from dbms/tests/integration/test_distributed_format/test.py rename to tests/integration/test_distributed_format/test.py diff --git a/dbms/tests/integration/test_distributed_system_query/__init__.py b/tests/integration/test_distributed_over_distributed/__init__.py similarity index 100% rename from dbms/tests/integration/test_distributed_system_query/__init__.py rename to tests/integration/test_distributed_over_distributed/__init__.py diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml b/tests/integration/test_distributed_over_distributed/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml rename to tests/integration/test_distributed_over_distributed/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml b/tests/integration/test_distributed_over_distributed/configs/set_distributed_defaults.xml similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml rename to tests/integration/test_distributed_over_distributed/configs/set_distributed_defaults.xml diff --git a/tests/integration/test_distributed_over_distributed/test.py b/tests/integration/test_distributed_over_distributed/test.py new file mode 100644 index 00000000000..31d6de55bea --- 
/dev/null +++ b/tests/integration/test_distributed_over_distributed/test.py @@ -0,0 +1,98 @@ +# This test is a subset of the 01223_dist_on_dist test +# (run with real separate instances, just in case). + +from __future__ import print_function + +import itertools +import timeit +import logging + +import pytest + +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import TSV + + +cluster = ClickHouseCluster(__file__) + +NODES = {'node' + str(i): cluster.add_instance( + 'node' + str(i), + main_configs=['configs/remote_servers.xml'], + user_configs=['configs/set_distributed_defaults.xml'], +) for i in (1, 2)} + +CREATE_TABLES_SQL = ''' +CREATE TABLE + base_table( + node String, + key Int32, + value Int32 + ) +ENGINE = Memory; + +CREATE TABLE + distributed_table +AS base_table +ENGINE = Distributed(test_cluster, default, base_table); + +CREATE TABLE + distributed_over_distributed_table +AS distributed_table +ENGINE = Distributed('test_cluster', default, distributed_table); +''' + +INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}', {key}, {value})" + +@pytest.fixture(scope="session") +def started_cluster(): + try: + cluster.start() + for node_index, (node_name, node) in enumerate(NODES.items()): + node.query(CREATE_TABLES_SQL) + for i in range(0, 2): + node.query(INSERT_SQL_TEMPLATE.format(node_id=node_name, key=i, value=i + (node_index * 10))) + yield cluster + + finally: + cluster.shutdown() + + +@pytest.mark.parametrize("node", NODES.values()) +@pytest.mark.parametrize("source", ["distributed_over_distributed_table", "cluster('test_cluster', default, distributed_table)"]) +class TestDistributedOverDistributedSuite: + def test_select_with_order_by_node(self, started_cluster, node, source): + assert node.query("SELECT * FROM {source} ORDER BY node, key".format(source=source)) \ + == """node1 0 0 +node1 0 0 +node1 1 1 +node1 1 1 +node2 0 10 +node2 0 10 +node2 1 11 +node2 1 11 +""" + + def test_select_with_order_by_key(self, started_cluster, node, source): + assert node.query("SELECT * FROM {source} ORDER BY key, node".format(source=source)) \ + == """node1 0 0 +node1 0 0 +node2 0 10 +node2 0 10 +node1 1 1 +node1 1 1 +node2 1 11 +node2 1 11 +""" + + def test_select_with_group_by_node(self, started_cluster, node, source): + assert node.query("SELECT node, SUM(value) FROM {source} GROUP BY node ORDER BY node".format(source=source)) \ + == "node1 2\nnode2 42\n" + + def test_select_with_group_by_key(self, started_cluster, node, source): + assert node.query("SELECT key, SUM(value) FROM {source} GROUP BY key ORDER BY key".format(source=source)) \ + == "0 20\n1 24\n" + + def test_select_sum(self, started_cluster, node, source): + assert node.query("SELECT SUM(value) FROM {source}".format(source=source)) \ + == "44\n" diff --git a/dbms/tests/integration/test_extreme_deduplication/__init__.py b/tests/integration/test_distributed_over_live_view/__init__.py similarity index 100% rename from dbms/tests/integration/test_extreme_deduplication/__init__.py rename to tests/integration/test_distributed_over_live_view/__init__.py diff --git a/dbms/tests/integration/test_distributed_system_query/configs/remote_servers.xml b/tests/integration/test_distributed_over_live_view/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_distributed_system_query/configs/remote_servers.xml rename to tests/integration/test_distributed_over_live_view/configs/remote_servers.xml diff --git
a/tests/integration/test_distributed_over_live_view/configs/set_distributed_defaults.xml b/tests/integration/test_distributed_over_live_view/configs/set_distributed_defaults.xml new file mode 100644 index 00000000000..194eb1ebb87 --- /dev/null +++ b/tests/integration/test_distributed_over_live_view/configs/set_distributed_defaults.xml @@ -0,0 +1,35 @@ + + + + 3 + 1000 + 1 + + + 5 + 3000 + 1 + + + + + + + + ::/0 + + default + default + + + + + ::/0 + + delays + default + + + + + diff --git a/tests/integration/test_distributed_over_live_view/test.py b/tests/integration/test_distributed_over_live_view/test.py new file mode 100644 index 00000000000..f932379e5c6 --- /dev/null +++ b/tests/integration/test_distributed_over_live_view/test.py @@ -0,0 +1,230 @@ +from __future__ import print_function + +import sys +import time +import itertools +import timeit +import logging + +import pytest + +from helpers.uclient import client, prompt, end_of_block +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) + +NODES = {'node' + str(i): cluster.add_instance( + 'node' + str(i), + main_configs=['configs/remote_servers.xml'], + user_configs=['configs/set_distributed_defaults.xml'], +) for i in (1, 2)} + +CREATE_TABLES_SQL = ''' +DROP TABLE IF EXISTS lv_over_base_table; +DROP TABLE IF EXISTS distributed_table; +DROP TABLE IF EXISTS base_table; + +SET allow_experimental_live_view = 1; + +CREATE TABLE + base_table( + node String, + key Int32, + value Int32 + ) +ENGINE = Memory; + +CREATE LIVE VIEW lv_over_base_table AS SELECT * FROM base_table; + +CREATE TABLE + distributed_table +AS base_table +ENGINE = Distributed(test_cluster, default, base_table, rand()); +''' + +INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}', {key}, {value})" + +@pytest.fixture(scope="function") +def started_cluster(): + try: + cluster.start() + for node_index, (node_name, node) in enumerate(NODES.items()): + node.query(CREATE_TABLES_SQL) + for i in range(0, 2): + sql = INSERT_SQL_TEMPLATE.format(node_id=node_name, key=i, value=i + (node_index * 10)) + node.query(sql) + yield cluster + + finally: + cluster.shutdown() + + +@pytest.mark.parametrize("node", NODES.values()[:1]) +@pytest.mark.parametrize("source", ["lv_over_distributed_table"]) +class TestLiveViewOverDistributedSuite: + def test_distributed_over_live_view_order_by_node(self, started_cluster, node, source): + log = sys.stdout + node0, node1 = NODES.values() + + select_query = "SELECT * FROM distributed_over_lv ORDER BY node, key FORMAT CSV" + + with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ + client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS distributed_over_lv") + client1.expect(prompt) + client1.send("CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") + client1.expect(prompt) + + client1.send(select_query) + client1.expect('"node1",0,0\r\n.*"node1",1,1\r\n.*"node2",0,10\r\n.*"node2",1,11\r\n') + client1.expect(prompt) + + client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 2, 3)") + client1.expect(prompt) + client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") + client2.expect(prompt) + time.sleep(2) + client1.send(select_query) + 
client1.expect('"node1",0,0\r\n.*"node1",1,1\r\n.*"node1",1,3\r\n.*"node1",2,3\r\n.*"node1",3,3\r\n.*"node2",0,10\r\n.*"node2",1,11\r\n') + client1.expect(prompt) + + def test_distributed_over_live_view_order_by_key(self, started_cluster, node, source): + log = sys.stdout + node0, node1 = NODES.values() + + select_query = "SELECT * FROM distributed_over_lv ORDER BY key, node FORMAT CSV" + + with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ + client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS distributed_over_lv") + client1.expect(prompt) + client1.send("CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") + client1.expect(prompt) + + client1.send(select_query) + client1.expect('"node1",0,0\r\n"node2",0,10\r\n"node1",1,1\r\n.*"node2",1,11\r\n') + client1.expect(prompt) + + client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 2, 3)") + client1.expect(prompt) + client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") + client2.expect(prompt) + time.sleep(2) + client1.send(select_query) + client1.expect('"node1",0,0\r\n.*"node2",0,10.*\r\n"node1",1,1\r\n.*"node1",1,3\r\n.*"node2",1,11\r\n.*"node1",2,3\r\n.*"node1",3,3\r\n') + client1.expect(prompt) + + def test_distributed_over_live_view_group_by_node(self, started_cluster, node, source): + log = sys.stdout + node0, node1 = NODES.values() + + select_query = "SELECT node, SUM(value) FROM distributed_over_lv GROUP BY node ORDER BY node FORMAT CSV" + + with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ + client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS distributed_over_lv") + client1.expect(prompt) + client1.send("CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") + client1.expect(prompt) + + client1.send(select_query) + client1.expect('"node1",1\r\n"node2",21\r\n') + client1.expect(prompt) + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + time.sleep(2) + client1.send(select_query) + client1.expect('"node1",3\r\n.*"node2",21\r\n') + client1.expect(prompt) + + client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 3, 3)") + client1.expect(prompt) + client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") + client2.expect(prompt) + time.sleep(2) + client1.send(select_query) + client1.expect('"node1",12\r\n.*"node2",21\r\n') + client1.expect(prompt) + + def test_distributed_over_live_view_group_by_key(self, started_cluster, node, source): + log = sys.stdout + node0, node1 = NODES.values() + + select_query = "SELECT key, SUM(value) FROM distributed_over_lv GROUP BY key ORDER BY key FORMAT CSV" + + with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ + client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS distributed_over_lv") + client1.expect(prompt) + client1.send("CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") + client1.expect(prompt) 
+ + client1.send(select_query) + client1.expect("0,10\r\n1,12\r\n") + client1.expect(prompt) + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + time.sleep(2) + client1.send(select_query) + client1.expect("0,10\r\n1,12\r\n2,2\r\n") + client1.expect(prompt) + + client2.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 3, 3)") + client2.expect(prompt) + time.sleep(2) + client1.send(select_query) + client1.expect("0,10\r\n.*1,15\r\n.*2,2\r\n.*3,3\r\n") + client1.expect(prompt) + + def test_distributed_over_live_view_sum(self, started_cluster, node, source): + log = sys.stdout + node0, node1 = NODES.values() + + with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ + client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS distributed_over_lv") + client1.expect(prompt) + client1.send("CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") + client1.expect(prompt) + + client1.send("SELECT sum(value) FROM distributed_over_lv") + client1.expect(r"22" + end_of_block) + client1.expect(prompt) + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + + time.sleep(2) + + client1.send("SELECT sum(value) FROM distributed_over_lv") + client1.expect(r"24" + end_of_block) + client1.expect(prompt) + + client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3), ('node1', 4, 4)") + client2.expect(prompt) + + time.sleep(2) + + client1.send("SELECT sum(value) FROM distributed_over_lv") + client1.expect(r"31" + end_of_block) + client1.expect(prompt) + diff --git a/dbms/tests/integration/test_filesystem_layout/__init__.py b/tests/integration/test_distributed_respect_user_timeouts/__init__.py similarity index 100% rename from dbms/tests/integration/test_filesystem_layout/__init__.py rename to tests/integration/test_distributed_respect_user_timeouts/__init__.py diff --git a/dbms/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml b/tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml rename to tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml diff --git a/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml b/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml new file mode 100644 index 00000000000..194eb1ebb87 --- /dev/null +++ b/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml @@ -0,0 +1,35 @@ + + + + 3 + 1000 + 1 + + + 5 + 3000 + 1 + + + + + + + + ::/0 + + default + default + + + + + ::/0 + + delays + default + + + + + diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml diff --git 
a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml similarity index 100% rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml diff --git a/tests/integration/test_distributed_respect_user_timeouts/test.py b/tests/integration/test_distributed_respect_user_timeouts/test.py new file mode 100644 index 00000000000..ba760e90412 --- /dev/null +++ b/tests/integration/test_distributed_respect_user_timeouts/test.py @@ -0,0 +1,166 @@ +import itertools +import timeit + +import pytest + +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import TSV + + +cluster = ClickHouseCluster(__file__) + +NODES = {'node' + str(i): None for i in (1, 2)} + +CREATE_TABLES_SQL = ''' +CREATE DATABASE test; + +CREATE TABLE base_table( + node String +) +ENGINE = MergeTree +PARTITION BY node +ORDER BY node; + +CREATE TABLE distributed_table +ENGINE = Distributed(test_cluster, default, base_table) AS base_table; +''' + +INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}')" + +SELECTS_SQL = { + 'distributed': 'SELECT node FROM distributed_table ORDER BY node', + 'remote': ("SELECT node FROM remote('node1,node2', default.base_table) " + "ORDER BY node"), +} + +EXCEPTION_NETWORK = 'e.displayText() = DB::NetException: ' +EXCEPTION_TIMEOUT = 'Timeout exceeded while reading from socket (' +EXCEPTION_CONNECT = 'Timeout: connect timed out: ' + +TIMEOUT_MEASUREMENT_EPS = 0.01 + +EXPECTED_BEHAVIOR = { + 'default': { + 'times': 3, + 'timeout': 1, + }, + 'ready_to_wait': { + 
'times': 5, + 'timeout': 3, + }, +} + +TIMEOUT_DIFF_UPPER_BOUND = { + 'default': { + 'distributed': 5.5, + 'remote': 2.5, + }, + 'ready_to_wait': { + 'distributed': 3, + 'remote': 1.5, + }, +} + +def _check_exception(exception, expected_tries=3): + lines = exception.split('\n') + + assert len(lines) > 4, "Unexpected exception (expected: timeout info)" + + assert lines[0].startswith('Received exception from server (version') + + assert lines[1].startswith('Code: 279') + assert lines[1].endswith('All connection tries failed. Log: ') + + assert lines[2] == '', "Unexpected exception text (expected: empty line)" + + for i, line in enumerate(lines[3:3 + expected_tries]): + expected_lines = ( + 'Code: 209, ' + EXCEPTION_NETWORK + EXCEPTION_TIMEOUT, + 'Code: 209, ' + EXCEPTION_NETWORK + EXCEPTION_CONNECT, + ) + + assert any(line.startswith(expected) for expected in expected_lines), \ + 'Unexpected exception at one of the connection attempts' + + assert lines[3 + expected_tries] == '', 'Wrong number of connect attempts' + + +@pytest.fixture(scope="module", params=["configs", "configs_secure"]) +def started_cluster(request): + + cluster = ClickHouseCluster(__file__) + cluster.__with_ssl_config = request.param == "configs_secure" + for name in NODES: + NODES[name] = cluster.add_instance(name, config_dir=request.param) + try: + cluster.start() + + for node_id, node in NODES.items(): + node.query(CREATE_TABLES_SQL) + node.query(INSERT_SQL_TEMPLATE.format(node_id=node_id)) + + yield cluster + + finally: + cluster.shutdown() + + +def _check_timeout_and_exception(node, user, query_base, query): + repeats = EXPECTED_BEHAVIOR[user]['times'] + + extra_repeats = 1 + # The remote() table function is executed two times: + # it first tries to fetch the table structure from the remote shards. + # On 'node2' it will first try to get the structure from 'node1' (which is not available), + # so there are two extra connection attempts for 'node2' and 'remote'. + if node.name == 'node2' and query_base == 'remote': + extra_repeats = 3 + + expected_timeout = EXPECTED_BEHAVIOR[user]['timeout'] * repeats * extra_repeats + + start = timeit.default_timer() + exception = node.query_and_get_error(query, user=user) + + # And it should time out no faster than expected: + measured_timeout = timeit.default_timer() - start + + assert expected_timeout - measured_timeout <= TIMEOUT_MEASUREMENT_EPS + assert measured_timeout - expected_timeout <= TIMEOUT_DIFF_UPPER_BOUND[user][query_base] + + # And the exception should reflect the connection attempts: + _check_exception(exception, repeats) + + +@pytest.mark.parametrize( + ('first_user', 'node_name', 'query_base'), + tuple(itertools.product(EXPECTED_BEHAVIOR, NODES, SELECTS_SQL)), +) +def test_reconnect(started_cluster, node_name, first_user, query_base): + node = NODES[node_name] + query = SELECTS_SQL[query_base] + if started_cluster.__with_ssl_config: + query = query.replace('remote(', 'remoteSecure(') + + # Everything is up, select should work: + assert TSV(node.query(query, + user=first_user)) == TSV('node1\nnode2') + + with PartitionManager() as pm: + # Break the connection.
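+ # PartitionManager cuts the network between the two instances, so each + # connection attempt below runs into the per-user connect timeout instead + # of succeeding, which is what the timing assertions above measure.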
+ pm.partition_instances(*NODES.values()) + + # Now it shouldn't: + _check_timeout_and_exception(node, first_user, query_base, query) + + # Other user should have different timeout and exception + _check_timeout_and_exception( + node, + 'default' if first_user != 'default' else 'ready_to_wait', + query_base, + query, + ) + + # select should work again: + assert TSV(node.query(query, + user=first_user)) == TSV('node1\nnode2') diff --git a/dbms/tests/integration/test_force_deduplication/__init__.py b/tests/integration/test_distributed_storage_configuration/__init__.py similarity index 100% rename from dbms/tests/integration/test_force_deduplication/__init__.py rename to tests/integration/test_distributed_storage_configuration/__init__.py diff --git a/dbms/tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml b/tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml similarity index 100% rename from dbms/tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml rename to tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml diff --git a/dbms/tests/integration/test_distributed_storage_configuration/test.py b/tests/integration/test_distributed_storage_configuration/test.py similarity index 100% rename from dbms/tests/integration/test_distributed_storage_configuration/test.py rename to tests/integration/test_distributed_storage_configuration/test.py diff --git a/dbms/tests/integration/test_format_avro_confluent/__init__.py b/tests/integration/test_distributed_system_query/__init__.py similarity index 100% rename from dbms/tests/integration/test_format_avro_confluent/__init__.py rename to tests/integration/test_distributed_system_query/__init__.py diff --git a/tests/integration/test_distributed_system_query/configs/remote_servers.xml b/tests/integration/test_distributed_system_query/configs/remote_servers.xml new file mode 100644 index 00000000000..ebce4697529 --- /dev/null +++ b/tests/integration/test_distributed_system_query/configs/remote_servers.xml @@ -0,0 +1,18 @@ + + + + + + node1 + 9000 + + + + + node2 + 9000 + + + + + diff --git a/dbms/tests/integration/test_distributed_system_query/test.py b/tests/integration/test_distributed_system_query/test.py similarity index 100% rename from dbms/tests/integration/test_distributed_system_query/test.py rename to tests/integration/test_distributed_system_query/test.py diff --git a/dbms/tests/integration/test_format_schema_on_server/__init__.py b/tests/integration/test_enabling_access_management/__init__.py similarity index 100% rename from dbms/tests/integration/test_format_schema_on_server/__init__.py rename to tests/integration/test_enabling_access_management/__init__.py diff --git a/tests/integration/test_enabling_access_management/configs/users.d/extra_users.xml b/tests/integration/test_enabling_access_management/configs/users.d/extra_users.xml new file mode 100644 index 00000000000..7d87a29a915 --- /dev/null +++ b/tests/integration/test_enabling_access_management/configs/users.d/extra_users.xml @@ -0,0 +1,13 @@ + + + + + readonly + 1 + + + + default + + + diff --git a/tests/integration/test_enabling_access_management/test.py b/tests/integration/test_enabling_access_management/test.py new file mode 100644 index 00000000000..abb8cd6c07a --- /dev/null +++ b/tests/integration/test_enabling_access_management/test.py @@ -0,0 +1,24 @@ +import pytest +from helpers.cluster import 
ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', config_dir="configs") + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_enabling_access_management(): + instance.query("CREATE USER Alex", user='default') + assert instance.query("SHOW CREATE USER Alex", user='default') == "CREATE USER Alex\n" + assert instance.query("SHOW CREATE USER Alex", user='readonly') == "CREATE USER Alex\n" + assert "Not enough privileges" in instance.query_and_get_error("SHOW CREATE USER Alex", user='xyz') + + assert "Cannot execute query in readonly mode" in instance.query_and_get_error("CREATE USER Robin", user='readonly') + assert "Not enough privileges" in instance.query_and_get_error("CREATE USER Robin", user='xyz') diff --git a/dbms/tests/integration/test_globs_in_filepath/__init__.py b/tests/integration/test_extreme_deduplication/__init__.py similarity index 100% rename from dbms/tests/integration/test_globs_in_filepath/__init__.py rename to tests/integration/test_extreme_deduplication/__init__.py diff --git a/dbms/tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml b/tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml similarity index 100% rename from dbms/tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml rename to tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml diff --git a/dbms/tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml b/tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml rename to tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml diff --git a/dbms/tests/integration/test_extreme_deduplication/test.py b/tests/integration/test_extreme_deduplication/test.py similarity index 100% rename from dbms/tests/integration/test_extreme_deduplication/test.py rename to tests/integration/test_extreme_deduplication/test.py diff --git a/dbms/tests/integration/test_grant_and_revoke/__init__.py b/tests/integration/test_filesystem_layout/__init__.py similarity index 100% rename from dbms/tests/integration/test_grant_and_revoke/__init__.py rename to tests/integration/test_filesystem_layout/__init__.py diff --git a/dbms/tests/integration/test_filesystem_layout/test.py b/tests/integration/test_filesystem_layout/test.py similarity index 100% rename from dbms/tests/integration/test_filesystem_layout/test.py rename to tests/integration/test_filesystem_layout/test.py diff --git a/dbms/tests/integration/test_graphite_merge_tree/__init__.py b/tests/integration/test_force_deduplication/__init__.py similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/__init__.py rename to tests/integration/test_force_deduplication/__init__.py diff --git a/dbms/tests/integration/test_force_deduplication/test.py b/tests/integration/test_force_deduplication/test.py similarity index 100% rename from dbms/tests/integration/test_force_deduplication/test.py rename to tests/integration/test_force_deduplication/test.py diff --git a/dbms/tests/integration/test_host_ip_change/__init__.py b/tests/integration/test_format_avro_confluent/__init__.py similarity index 100% rename from dbms/tests/integration/test_host_ip_change/__init__.py rename to 
tests/integration/test_format_avro_confluent/__init__.py diff --git a/dbms/tests/integration/test_format_avro_confluent/test.py b/tests/integration/test_format_avro_confluent/test.py similarity index 100% rename from dbms/tests/integration/test_format_avro_confluent/test.py rename to tests/integration/test_format_avro_confluent/test.py diff --git a/dbms/tests/integration/test_http_and_readonly/__init__.py b/tests/integration/test_format_schema_on_server/__init__.py similarity index 100% rename from dbms/tests/integration/test_http_and_readonly/__init__.py rename to tests/integration/test_format_schema_on_server/__init__.py diff --git a/dbms/tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto b/tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto similarity index 100% rename from dbms/tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto rename to tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto diff --git a/dbms/tests/integration/test_format_schema_on_server/test.py b/tests/integration/test_format_schema_on_server/test.py similarity index 100% rename from dbms/tests/integration/test_format_schema_on_server/test.py rename to tests/integration/test_format_schema_on_server/test.py diff --git a/dbms/tests/integration/test_https_replication/__init__.py b/tests/integration/test_globs_in_filepath/__init__.py similarity index 100% rename from dbms/tests/integration/test_https_replication/__init__.py rename to tests/integration/test_globs_in_filepath/__init__.py diff --git a/tests/integration/test_globs_in_filepath/test.py b/tests/integration/test_globs_in_filepath/test.py new file mode 100644 index 00000000000..c85c39a8838 --- /dev/null +++ b/tests/integration/test_globs_in_filepath/test.py @@ -0,0 +1,134 @@ +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance('node') +path_to_userfiles_from_defaut_config = "/var/lib/clickhouse/user_files/" # must be the same as in the config file + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + + except Exception as ex: + print(ex) + raise ex + finally: + cluster.shutdown() + +def test_strange_filenames(start_cluster): + # two rows of data + some_data = "\t111.222\nData\t333.444" + + node.exec_in_container(['bash', '-c', 'mkdir {}strange_names/'.format(path_to_userfiles_from_defaut_config)], privileged=True, user='root') + + files = ["p.o.i.n.t.s", + "b}{ra{ces", + "b}.o{t.h"] + + # the filename is embedded in the test data to simplify debugging + for filename in files: + node.exec_in_container(['bash', '-c', 'echo "{}{}" > {}strange_names/{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, filename)], privileged=True, user='root') + + test_requests = [("p.o.??n.t.s", "2"), + ("p.o.*t.s", "2"), + ("b}{r?{ces", "2"), + ("b}*ces", "2"), + ("b}.?{t.h", "2")] + + for pattern, value in test_requests: + assert node.query(''' + select count(*) from file('strange_names/{}', 'TSV', 'text String, number Float64') + '''.format(pattern)) == '{}\n'.format(value) + assert node.query(''' + select count(*) from file('{}strange_names/{}', 'TSV', 'text String, number Float64') + '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) + +def test_linear_structure(start_cluster): + # two rows of data + some_data = "\t123.456\nData\t789.012" + + files = ["file1", "file2", "file3",
"file4", "file5", + "file000", "file111", "file222", "file333", "file444", + "a_file", "b_file", "c_file", "d_file", "e_file", + "a_data", "b_data", "c_data", "d_data", "e_data"] + + # filename inside testing data for debug simplicity + for filename in files: + node.exec_in_container(['bash', '-c', 'echo "{}{}" > {}{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, filename)], privileged=True, user='root') + + test_requests = [("file{0..9}", "10"), + ("file?", "10"), + ("nothing*", "0"), + ("file{0..9}{0..9}{0..9}", "10"), + ("file{000..999}", "10"), + ("file???", "10"), + ("file*", "20"), + ("a_{file,data}", "4"), + ("?_{file,data}", "20"), + ("{a,b,c,d,e}_{file,data}", "20"), + ("{a,b,c,d,e}?{file,data}", "20"), + ("*", "40")] + + for pattern, value in test_requests: + assert node.query(''' + select count(*) from file('{}', 'TSV', 'text String, number Float64') + '''.format(pattern)) == '{}\n'.format(value) + assert node.query(''' + select count(*) from file('{}{}', 'TSV', 'text String, number Float64') + '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) + +def test_deep_structure(start_cluster): + # 2 rows data + some_data = "\t135.791\nData\t246.802" + dirs = ["directory1/", "directory2/", "some_more_dir/", "we/", + "directory1/big_dir/", + "directory1/dir1/", "directory1/dir2/", "directory1/dir3/", + "directory2/dir1/", "directory2/dir2/", "directory2/one_more_dir/", + "some_more_dir/yet_another_dir/", + "we/need/", "we/need/to/", "we/need/to/go/", "we/need/to/go/deeper/"] + + for dir in dirs: + node.exec_in_container(['bash', '-c', 'mkdir {}{}'.format(path_to_userfiles_from_defaut_config, dir)], privileged=True, user='root') + + # all directories appeared in files must be listed in dirs + files = [] + for i in range(10): + for j in range(10): + for k in range(10): + files.append("directory1/big_dir/file" + str(i) + str(j) + str(k)) + + for dir in dirs: + files.append(dir+"file") + + # filename inside testing data for debug simplicity + for filename in files: + node.exec_in_container(['bash', '-c', 'echo "{}{}" > {}{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, filename)], privileged=True, user='root') + + test_requests = [ ("directory{1..5}/big_dir/*", "2002"), ("directory{0..6}/big_dir/*{0..9}{0..9}{0..9}", "2000"), + ("?", "0"), + ("directory{0..5}/dir{1..3}/file", "10"), ("directory{0..5}/dir?/file", "10"), + ("we/need/to/go/deeper/file", "2"), ("*/*/*/*/*/*", "2"), ("we/need/??/go/deeper/*?*?*?*?*", "2")] + + for pattern, value in test_requests: + assert node.query(''' + select count(*) from file('{}', 'TSV', 'text String, number Float64') + '''.format(pattern)) == '{}\n'.format(value) + assert node.query(''' + select count(*) from file('{}{}', 'TSV', 'text String, number Float64') + '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) + +def test_table_function_and_virtual_columns(start_cluster): + node.exec_in_container(['bash', '-c', 'mkdir -p {}some/path/to/'.format(path_to_userfiles_from_defaut_config)]) + node.exec_in_container(['bash', '-c', 'touch {}some/path/to/data.CSV'.format(path_to_userfiles_from_defaut_config)]) + node.query("insert into table function file('some/path/to/data.CSV', CSV, 'n UInt8, s String') select number, concat('str_', toString(number)) from numbers(100000)") + assert node.query("select count() from file('some/path/to/data.CSV', CSV, 'n UInt8, s String')").rstrip() == '100000' + node.query("insert into table function file('nonexist.csv', 'CSV', 
'val1 UInt32') values (1)") + assert node.query("select * from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip()== '1' + assert "nonexist.csv" in node.query("select _path from file('nonexis?.csv', 'CSV', 'val1 UInt32')").rstrip() + assert "nonexist.csv" in node.query("select _path from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() + assert "nonexist.csv" == node.query("select _file from file('nonexis?.csv', 'CSV', 'val1 UInt32')").rstrip() + assert "nonexist.csv" == node.query("select _file from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() \ No newline at end of file diff --git a/dbms/tests/integration/test_inherit_multiple_profiles/__init__.py b/tests/integration/test_grant_and_revoke/__init__.py similarity index 100% rename from dbms/tests/integration/test_inherit_multiple_profiles/__init__.py rename to tests/integration/test_grant_and_revoke/__init__.py diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py new file mode 100644 index 00000000000..6f4b0be5325 --- /dev/null +++ b/tests/integration/test_grant_and_revoke/test.py @@ -0,0 +1,129 @@ +import pytest +from helpers.cluster import ClickHouseCluster +import re + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance') + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + + instance.query("CREATE TABLE test_table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query("INSERT INTO test_table VALUES (1,5), (2,10)") + + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def reset_users_and_roles(): + try: + yield + finally: + instance.query("DROP USER IF EXISTS A, B") + instance.query("DROP ROLE IF EXISTS R1, R2") + + +def test_login(): + instance.query("CREATE USER A") + instance.query("CREATE USER B") + assert instance.query("SELECT 1", user='A') == "1\n" + assert instance.query("SELECT 1", user='B') == "1\n" + + +def test_grant_and_revoke(): + instance.query("CREATE USER A") + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + instance.query('GRANT SELECT ON test_table TO A') + assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" + + instance.query('REVOKE SELECT ON test_table FROM A') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + +def test_grant_option(): + instance.query("CREATE USER A") + instance.query("CREATE USER B") + + instance.query('GRANT SELECT ON test_table TO A') + assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" + assert "Not enough privileges" in instance.query_and_get_error("GRANT SELECT ON test_table TO B", user='A') + + instance.query('GRANT SELECT ON test_table TO A WITH GRANT OPTION') + instance.query("GRANT SELECT ON test_table TO B", user='A') + assert instance.query("SELECT * FROM test_table", user='B') == "1\t5\n2\t10\n" + + instance.query('REVOKE SELECT ON test_table FROM A, B') + + +def test_create_role(): + instance.query("CREATE USER A") + instance.query('CREATE ROLE R1') + + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + instance.query('GRANT SELECT ON test_table TO R1') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + instance.query('GRANT R1 TO A') + assert instance.query("SELECT * FROM test_table", user='A') == 
"1\t5\n2\t10\n" + + instance.query('REVOKE R1 FROM A') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + +def test_grant_role_to_role(): + instance.query("CREATE USER A") + instance.query('CREATE ROLE R1') + instance.query('CREATE ROLE R2') + + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + instance.query('GRANT R1 TO A') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + instance.query('GRANT R2 TO R1') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + instance.query('GRANT SELECT ON test_table TO R2') + assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" + + +def test_combine_privileges(): + instance.query("CREATE USER A ") + instance.query('CREATE ROLE R1') + instance.query('CREATE ROLE R2') + + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + + instance.query('GRANT R1 TO A') + instance.query('GRANT SELECT(x) ON test_table TO R1') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + assert instance.query("SELECT x FROM test_table", user='A') == "1\n2\n" + + instance.query('GRANT SELECT(y) ON test_table TO R2') + instance.query('GRANT R2 TO A') + assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" + + +def test_admin_option(): + instance.query("CREATE USER A") + instance.query("CREATE USER B") + instance.query('CREATE ROLE R1') + + instance.query('GRANT SELECT ON test_table TO R1') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B') + + instance.query('GRANT R1 TO A') + assert "Not enough privileges" in instance.query_and_get_error("GRANT R1 TO B", user='A') + assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B') + + instance.query('GRANT R1 TO A WITH ADMIN OPTION') + instance.query("GRANT R1 TO B", user='A') + assert instance.query("SELECT * FROM test_table", user='B') == "1\t5\n2\t10\n" diff --git a/dbms/tests/integration/test_insert_into_distributed/__init__.py b/tests/integration/test_graphite_merge_tree/__init__.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/__init__.py rename to tests/integration/test_graphite_merge_tree/__init__.py diff --git a/dbms/tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml b/tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml rename to tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml diff --git a/dbms/tests/integration/test_graphite_merge_tree/test.py b/tests/integration/test_graphite_merge_tree/test.py similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/test.py rename to tests/integration/test_graphite_merge_tree/test.py diff --git a/dbms/tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference b/tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference rename to tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference diff --git 
a/dbms/tests/integration/test_insert_into_distributed_sync_async/__init__.py b/tests/integration/test_host_ip_change/__init__.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_sync_async/__init__.py rename to tests/integration/test_host_ip_change/__init__.py diff --git a/dbms/tests/integration/test_host_ip_change/configs/dns_update_long.xml b/tests/integration/test_host_ip_change/configs/dns_update_long.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/dns_update_long.xml rename to tests/integration/test_host_ip_change/configs/dns_update_long.xml diff --git a/dbms/tests/integration/test_host_ip_change/configs/dns_update_short.xml b/tests/integration/test_host_ip_change/configs/dns_update_short.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/dns_update_short.xml rename to tests/integration/test_host_ip_change/configs/dns_update_short.xml diff --git a/dbms/tests/integration/test_host_ip_change/configs/listen_host.xml b/tests/integration/test_host_ip_change/configs/listen_host.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/listen_host.xml rename to tests/integration/test_host_ip_change/configs/listen_host.xml diff --git a/dbms/tests/integration/test_host_ip_change/configs/remote_servers.xml b/tests/integration/test_host_ip_change/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/remote_servers.xml rename to tests/integration/test_host_ip_change/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_host_ip_change/test.py b/tests/integration/test_host_ip_change/test.py similarity index 100% rename from dbms/tests/integration/test_host_ip_change/test.py rename to tests/integration/test_host_ip_change/test.py diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py b/tests/integration/test_http_and_readonly/__init__.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py rename to tests/integration/test_http_and_readonly/__init__.py diff --git a/dbms/tests/integration/test_http_and_readonly/test.py b/tests/integration/test_http_and_readonly/test.py similarity index 100% rename from dbms/tests/integration/test_http_and_readonly/test.py rename to tests/integration/test_http_and_readonly/test.py diff --git a/dbms/tests/integration/test_log_family_s3/__init__.py b/tests/integration/test_https_replication/__init__.py similarity index 100% rename from dbms/tests/integration/test_log_family_s3/__init__.py rename to tests/integration/test_https_replication/__init__.py diff --git a/dbms/tests/integration/test_https_replication/configs/config.xml b/tests/integration/test_https_replication/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/config.xml rename to tests/integration/test_https_replication/configs/config.xml diff --git a/dbms/tests/integration/test_https_replication/configs/no_ssl_conf.xml b/tests/integration/test_https_replication/configs/no_ssl_conf.xml similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/no_ssl_conf.xml rename to tests/integration/test_https_replication/configs/no_ssl_conf.xml diff --git a/dbms/tests/integration/test_https_replication/configs/remote_servers.xml b/tests/integration/test_https_replication/configs/remote_servers.xml similarity 
index 100% rename from dbms/tests/integration/test_https_replication/configs/remote_servers.xml rename to tests/integration/test_https_replication/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_https_replication/configs/server.crt b/tests/integration/test_https_replication/configs/server.crt similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/server.crt rename to tests/integration/test_https_replication/configs/server.crt diff --git a/dbms/tests/integration/test_https_replication/configs/server.key b/tests/integration/test_https_replication/configs/server.key similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/server.key rename to tests/integration/test_https_replication/configs/server.key diff --git a/dbms/tests/integration/test_https_replication/configs/ssl_conf.xml b/tests/integration/test_https_replication/configs/ssl_conf.xml similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/ssl_conf.xml rename to tests/integration/test_https_replication/configs/ssl_conf.xml diff --git a/dbms/tests/integration/test_https_replication/test.py b/tests/integration/test_https_replication/test.py similarity index 100% rename from dbms/tests/integration/test_https_replication/test.py rename to tests/integration/test_https_replication/test.py diff --git a/dbms/tests/integration/test_logs_level/__init__.py b/tests/integration/test_inherit_multiple_profiles/__init__.py similarity index 100% rename from dbms/tests/integration/test_logs_level/__init__.py rename to tests/integration/test_inherit_multiple_profiles/__init__.py diff --git a/dbms/tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml b/tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml similarity index 100% rename from dbms/tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml rename to tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml diff --git a/dbms/tests/integration/test_inherit_multiple_profiles/test.py b/tests/integration/test_inherit_multiple_profiles/test.py similarity index 100% rename from dbms/tests/integration/test_inherit_multiple_profiles/test.py rename to tests/integration/test_inherit_multiple_profiles/test.py diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/__init__.py b/tests/integration/test_insert_into_distributed/__init__.py similarity index 100% rename from dbms/tests/integration/test_match_process_uid_against_data_owner/__init__.py rename to tests/integration/test_insert_into_distributed/__init__.py diff --git a/dbms/tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml b/tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml rename to tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml diff --git a/dbms/tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml b/tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml rename to tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml diff --git 
a/dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml b/tests/integration/test_insert_into_distributed/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml rename to tests/integration/test_insert_into_distributed/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_insert_into_distributed/test.py b/tests/integration/test_insert_into_distributed/test.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/test.py rename to tests/integration/test_insert_into_distributed/test.py diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/__init__.py b/tests/integration/test_insert_into_distributed_sync_async/__init__.py similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/__init__.py rename to tests/integration/test_insert_into_distributed_sync_async/__init__.py diff --git a/dbms/tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml b/tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml rename to tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_sync_async/test.py b/tests/integration/test_insert_into_distributed_sync_async/test.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_sync_async/test.py rename to tests/integration/test_insert_into_distributed_sync_async/test.py diff --git a/dbms/tests/integration/test_merge_table_over_distributed/__init__.py b/tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py similarity index 100% rename from dbms/tests/integration/test_merge_table_over_distributed/__init__.py rename to tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml b/tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml rename to tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml b/tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml rename to tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml b/tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml rename to 
tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/test.py b/tests/integration/test_insert_into_distributed_through_materialized_view/test.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/test.py rename to tests/integration/test_insert_into_distributed_through_materialized_view/test.py diff --git a/dbms/tests/integration/test_merge_tree_s3/__init__.py b/tests/integration/test_live_view_over_distributed/__init__.py similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/__init__.py rename to tests/integration/test_live_view_over_distributed/__init__.py diff --git a/tests/integration/test_live_view_over_distributed/configs/remote_servers.xml b/tests/integration/test_live_view_over_distributed/configs/remote_servers.xml new file mode 100644 index 00000000000..ebce4697529 --- /dev/null +++ b/tests/integration/test_live_view_over_distributed/configs/remote_servers.xml @@ -0,0 +1,18 @@ +<yandex> + <remote_servers> + <test_cluster> + <shard> + <replica> + <host>node1</host> + <port>9000</port> + </replica> + </shard> + <shard> + <replica> + <host>node2</host> + <port>9000</port> + </replica> + </shard> + </test_cluster> + </remote_servers> + </yandex> diff --git a/tests/integration/test_live_view_over_distributed/configs/set_distributed_defaults.xml b/tests/integration/test_live_view_over_distributed/configs/set_distributed_defaults.xml new file mode 100644 index 00000000000..194eb1ebb87 --- /dev/null +++ b/tests/integration/test_live_view_over_distributed/configs/set_distributed_defaults.xml @@ -0,0 +1,35 @@ + + + + 3 + 1000 + 1 + + + 5 + 3000 + 1 + + + + + + + + ::/0 + + default + default + + + + + ::/0 + + delays + default + + + + + diff --git a/tests/integration/test_live_view_over_distributed/test.py b/tests/integration/test_live_view_over_distributed/test.py new file mode 100644 index 00000000000..c7b9c452725 --- /dev/null +++ b/tests/integration/test_live_view_over_distributed/test.py @@ -0,0 +1,243 @@ +from __future__ import print_function + +import sys +import itertools +import timeit +import logging + +import pytest + +from helpers.uclient import client, prompt, end_of_block +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) + +NODES = {'node' + str(i): cluster.add_instance( + 'node' + str(i), + main_configs=['configs/remote_servers.xml'], + user_configs=['configs/set_distributed_defaults.xml'], +) for i in (1, 2)} + +CREATE_TABLES_SQL = ''' +DROP TABLE IF EXISTS lv_over_distributed_table; +DROP TABLE IF EXISTS distributed_table; +DROP TABLE IF EXISTS base_table; + +SET allow_experimental_live_view = 1; + +CREATE TABLE + base_table( + node String, + key Int32, + value Int32 + ) +ENGINE = Memory; + +CREATE TABLE + distributed_table +AS base_table +ENGINE = Distributed(test_cluster, default, base_table, rand()); + +CREATE LIVE VIEW lv_over_distributed_table AS SELECT * FROM distributed_table; +''' + +INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}', {key}, {value})" + +@pytest.fixture(scope="function") +def started_cluster(): + try: + cluster.start() + for node_index, (node_name, node) in enumerate(NODES.items()): + node.query(CREATE_TABLES_SQL) + for i in range(0, 2): + sql = INSERT_SQL_TEMPLATE.format(node_id=node_name, key=i, value=i + (node_index * 10)) + node.query(sql) + yield cluster + + finally: + cluster.shutdown() + + +@pytest.mark.parametrize("node", NODES.values()[:1]) +@pytest.mark.parametrize("source",
["lv_over_distributed_table"]) +class TestLiveViewOverDistributedSuite: + def test_select_with_order_by_node(self, started_cluster, node, source): + r = node.query("SELECT * FROM {source} ORDER BY node, key".format(source=source)) + assert r == """node1\t0\t0 +node1\t1\t1 +node2\t0\t10 +node2\t1\t11 +""" + + def test_select_with_order_by_key(self, started_cluster, node, source): + assert node.query("SELECT * FROM {source} ORDER BY key, node".format(source=source)) \ + == """node1\t0\t0 +node2\t0\t10 +node1\t1\t1 +node2\t1\t11 +""" + + def test_select_with_group_by_node(self, started_cluster, node, source): + assert node.query("SELECT node, SUM(value) FROM {source} GROUP BY node ORDER BY node".format(source=source)) \ + == "node1\t1\nnode2\t21\n" + + def test_select_with_group_by_key(self, started_cluster, node, source): + assert node.query("SELECT key, SUM(value) FROM {source} GROUP BY key ORDER BY key".format(source=source)) \ + == "0\t10\n1\t12\n" + + def test_select_sum(self, started_cluster, node, source): + assert node.query("SELECT SUM(value) FROM {source}".format(source=source)) \ + == "22\n" + + def test_watch_live_view_order_by_node(self, started_cluster, node, source): + log = sys.stdout + command = " ".join(node.client.command) + args = dict(log=log, command=command) + + with client(name="client1> ", **args) as client1, client(name="client2> ", **args) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("SET allow_experimental_live_view = 1") + client1.expect(prompt) + client2.send("SET allow_experimental_live_view = 1") + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS lv") + client1.expect(prompt) + client1.send("CREATE LIVE VIEW lv AS SELECT * FROM distributed_table ORDER BY node, key") + client1.expect(prompt) + + client1.send("WATCH lv FORMAT CSV") + client1.expect('"node1",0,0,1\r\n.*"node1",1,1,1\r\n.*"node2",0,10,1\r\n.*"node2",1,11,1\r\n') + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + client1.expect('"node1",0,0,2\r\n.*"node1",1,1,2\r\n.*"node1",2,2,2\r\n.*"node2",0,10,2\r\n.*"node2",1,11,2\r\n') + + client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node3', 3, 3)") + client2.expect(prompt) + client1.expect('"node1",0,0,3\r\n.*"node1",0,3,3\r\n.*"node1",1,1,3\r\n.*"node1",2,2,3\r\n.*"node2",0,10,3\r\n.*"node2",1,11,3\r\n.*"node3",3,3,3\r\n') + + def test_watch_live_view_order_by_key(self, started_cluster, node, source): + log = sys.stdout + command = " ".join(node.client.command) + args = dict(log=log, command=command) + + with client(name="client1> ", **args) as client1, client(name="client2> ", **args) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("SET allow_experimental_live_view = 1") + client1.expect(prompt) + client2.send("SET allow_experimental_live_view = 1") + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS lv") + client1.expect(prompt) + client1.send("CREATE LIVE VIEW lv AS SELECT * FROM distributed_table ORDER BY key, node") + client1.expect(prompt) + + client1.send("WATCH lv FORMAT CSV") + client1.expect('"node1",0,0,1\r\n.*"node2",0,10,1\r\n.*"node1",1,1,1\r\n.*"node2",1,11,1\r\n') + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + client1.expect('"node1",0,0,2\r\n.*"node2",0,10,2\r\n.*"node1",1,1,2\r\n.*"node2",1,11,2\r\n.*"node1",2,2,2\r\n') + + client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node3', 3, 3)") + 
client2.expect(prompt) + client1.expect('"node1",0,0,3\r\n.*"node1",0,3,3\r\n.*"node2",0,10,3\r\n.*"node1",1,1,3\r\n.*"node2",1,11,3\r\n.*"node1",2,2,3\r\n.*"node3",3,3,3\r\n') + + def test_watch_live_view_group_by_node(self, started_cluster, node, source): + log = sys.stdout + command = " ".join(node.client.command) + args = dict(log=log, command=command) + + with client(name="client1> ", **args) as client1, client(name="client2> ", **args) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("SET allow_experimental_live_view = 1") + client1.expect(prompt) + client2.send("SET allow_experimental_live_view = 1") + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS lv") + client1.expect(prompt) + client1.send("CREATE LIVE VIEW lv AS SELECT node, SUM(value) FROM distributed_table GROUP BY node ORDER BY node") + client1.expect(prompt) + + client1.send("WATCH lv FORMAT CSV") + client1.expect('"node1",1,1\r\n.*"node2",21,1\r\n') + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + client1.expect('"node1",3,2\r\n.*"node2",21,2\r\n') + + client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node3', 3, 3)") + client2.expect(prompt) + client1.expect('"node1",6,3\r\n.*"node2",21,3\r\n.*"node3",3,3\r\n') + + def test_watch_live_view_group_by_key(self, started_cluster, node, source): + log = sys.stdout + command = " ".join(node.client.command) + args = dict(log=log, command=command) + sep = ' \xe2\x94\x82' + with client(name="client1> ", **args) as client1, client(name="client2> ", **args) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("SET allow_experimental_live_view = 1") + client1.expect(prompt) + client2.send("SET allow_experimental_live_view = 1") + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS lv") + client1.expect(prompt) + client1.send("CREATE LIVE VIEW lv AS SELECT key, SUM(value) FROM distributed_table GROUP BY key ORDER BY key") + client1.expect(prompt) + + client1.send("WATCH lv FORMAT CSV") + client1.expect("0,10,1\r\n.*1,12,1\r\n") + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + client1.expect("0,10,2\r\n.*1,12,2\r\n.*2,2,2\r\n") + + client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node1', 3, 3)") + client2.expect(prompt) + client1.expect("0,13,3\r\n.*1,12,3\r\n.*2,2,3\r\n.*3,3,3\r\n") + + + def test_watch_live_view_sum(self, started_cluster, node, source): + log = sys.stdout + command = " ".join(node.client.command) + args = dict(log=log, command=command) + + with client(name="client1> ", **args) as client1, client(name="client2> ", **args) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send("SET allow_experimental_live_view = 1") + client1.expect(prompt) + client2.send("SET allow_experimental_live_view = 1") + client2.expect(prompt) + + client1.send("DROP TABLE IF EXISTS lv") + client1.expect(prompt) + client1.send("CREATE LIVE VIEW lv AS SELECT sum(value) FROM distributed_table") + client1.expect(prompt) + + client1.send("WATCH lv") + client1.expect(r"22.*1" + end_of_block) + + client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") + client2.expect(prompt) + client1.expect(r"24.*2" + end_of_block) + + client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3), ('node1', 4, 4)") + client2.expect(prompt) + client1.expect(r"31.*3" + end_of_block) diff --git a/dbms/tests/integration/test_multiple_disks/__init__.py 
b/tests/integration/test_log_family_s3/__init__.py similarity index 100% rename from dbms/tests/integration/test_multiple_disks/__init__.py rename to tests/integration/test_log_family_s3/__init__.py diff --git a/dbms/tests/integration/test_log_family_s3/configs/config.d/log_conf.xml b/tests/integration/test_log_family_s3/configs/config.d/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_log_family_s3/configs/config.d/log_conf.xml rename to tests/integration/test_log_family_s3/configs/config.d/log_conf.xml diff --git a/dbms/tests/integration/test_log_family_s3/configs/config.xml b/tests/integration/test_log_family_s3/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_log_family_s3/configs/config.xml rename to tests/integration/test_log_family_s3/configs/config.xml diff --git a/dbms/tests/integration/test_log_family_s3/configs/users.xml b/tests/integration/test_log_family_s3/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_log_family_s3/configs/users.xml rename to tests/integration/test_log_family_s3/configs/users.xml diff --git a/dbms/tests/integration/test_log_family_s3/test.py b/tests/integration/test_log_family_s3/test.py similarity index 100% rename from dbms/tests/integration/test_log_family_s3/test.py rename to tests/integration/test_log_family_s3/test.py diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/__init__.py b/tests/integration/test_logs_level/__init__.py similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/__init__.py rename to tests/integration/test_logs_level/__init__.py diff --git a/dbms/tests/integration/test_logs_level/configs/config_information.xml b/tests/integration/test_logs_level/configs/config_information.xml similarity index 100% rename from dbms/tests/integration/test_logs_level/configs/config_information.xml rename to tests/integration/test_logs_level/configs/config_information.xml diff --git a/dbms/tests/integration/test_logs_level/test.py b/tests/integration/test_logs_level/test.py similarity index 100% rename from dbms/tests/integration/test_logs_level/test.py rename to tests/integration/test_logs_level/test.py diff --git a/dbms/tests/integration/test_mysql_database_engine/__init__.py b/tests/integration/test_match_process_uid_against_data_owner/__init__.py similarity index 100% rename from dbms/tests/integration/test_mysql_database_engine/__init__.py rename to tests/integration/test_match_process_uid_against_data_owner/__init__.py diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/configs/config.xml b/tests/integration/test_match_process_uid_against_data_owner/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_match_process_uid_against_data_owner/configs/config.xml rename to tests/integration/test_match_process_uid_against_data_owner/configs/config.xml diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/configs/users.xml b/tests/integration/test_match_process_uid_against_data_owner/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_match_process_uid_against_data_owner/configs/users.xml rename to tests/integration/test_match_process_uid_against_data_owner/configs/users.xml diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/test.py b/tests/integration/test_match_process_uid_against_data_owner/test.py similarity index 100% rename from 
dbms/tests/integration/test_match_process_uid_against_data_owner/test.py rename to tests/integration/test_match_process_uid_against_data_owner/test.py diff --git a/dbms/tests/integration/test_mysql_protocol/__init__.py b/tests/integration/test_max_http_connections_for_replication/__init__.py similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/__init__.py rename to tests/integration/test_max_http_connections_for_replication/__init__.py diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml b/tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml rename to tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml b/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml rename to tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/test.py b/tests/integration/test_max_http_connections_for_replication/test.py similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/test.py rename to tests/integration/test_max_http_connections_for_replication/test.py diff --git a/dbms/tests/integration/test_non_default_compression/__init__.py b/tests/integration/test_merge_table_over_distributed/__init__.py similarity index 100% rename from dbms/tests/integration/test_non_default_compression/__init__.py rename to tests/integration/test_merge_table_over_distributed/__init__.py diff --git a/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml b/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml new file mode 100644 index 00000000000..ebce4697529 --- /dev/null +++ b/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml @@ -0,0 +1,18 @@ +<yandex> + <remote_servers> + <test_cluster> + <shard> + <replica> + <host>node1</host> + <port>9000</port> + </replica> + </shard> + <shard> + <replica> + <host>node2</host> + <port>9000</port> + </replica> + </shard> + </test_cluster> + </remote_servers> + </yandex> diff --git a/dbms/tests/integration/test_merge_table_over_distributed/test.py b/tests/integration/test_merge_table_over_distributed/test.py similarity index 100% rename from dbms/tests/integration/test_merge_table_over_distributed/test.py rename to tests/integration/test_merge_table_over_distributed/test.py diff --git a/dbms/tests/integration/test_odbc_interaction/__init__.py b/tests/integration/test_merge_tree_s3/__init__.py similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/__init__.py rename to tests/integration/test_merge_tree_s3/__init__.py diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml similarity index 100% rename from
dbms/tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/users.xml b/tests/integration/test_merge_tree_s3/configs/config.d/users.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.d/users.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/users.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.xml b/tests/integration/test_merge_tree_s3/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.xml rename to tests/integration/test_merge_tree_s3/configs/config.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/test.py rename to tests/integration/test_merge_tree_s3/test.py diff --git a/dbms/tests/integration/test_old_versions/__init__.py b/tests/integration/test_multiple_disks/__init__.py similarity index 100% rename from dbms/tests/integration/test_old_versions/__init__.py rename to tests/integration/test_multiple_disks/__init__.py diff --git a/dbms/tests/integration/test_multiple_disks/configs/config.d/cluster.xml b/tests/integration/test_multiple_disks/configs/config.d/cluster.xml similarity index 100% rename from dbms/tests/integration/test_multiple_disks/configs/config.d/cluster.xml rename to tests/integration/test_multiple_disks/configs/config.d/cluster.xml diff --git a/dbms/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml similarity index 100% rename from dbms/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml rename to tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml diff --git a/dbms/tests/integration/test_multiple_disks/configs/logs_config.xml b/tests/integration/test_multiple_disks/configs/logs_config.xml similarity index 100% rename from dbms/tests/integration/test_multiple_disks/configs/logs_config.xml rename to tests/integration/test_multiple_disks/configs/logs_config.xml diff --git a/dbms/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py similarity index 100% rename from dbms/tests/integration/test_multiple_disks/test.py rename to tests/integration/test_multiple_disks/test.py diff --git a/dbms/tests/integration/test_part_log_table/__init__.py b/tests/integration/test_mutations_with_merge_tree/__init__.py similarity index 100% rename from dbms/tests/integration/test_part_log_table/__init__.py rename to tests/integration/test_mutations_with_merge_tree/__init__.py diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/configs/config.xml b/tests/integration/test_mutations_with_merge_tree/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/configs/config.xml rename to 
tests/integration/test_mutations_with_merge_tree/configs/config.xml diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/configs/users.xml b/tests/integration/test_mutations_with_merge_tree/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/configs/users.xml rename to tests/integration/test_mutations_with_merge_tree/configs/users.xml diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/test.py b/tests/integration/test_mutations_with_merge_tree/test.py similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/test.py rename to tests/integration/test_mutations_with_merge_tree/test.py diff --git a/dbms/tests/integration/test_partition/__init__.py b/tests/integration/test_mysql_database_engine/__init__.py similarity index 100% rename from dbms/tests/integration/test_partition/__init__.py rename to tests/integration/test_mysql_database_engine/__init__.py diff --git a/dbms/tests/integration/test_mysql_database_engine/configs/remote_servers.xml b/tests/integration/test_mysql_database_engine/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_mysql_database_engine/configs/remote_servers.xml rename to tests/integration/test_mysql_database_engine/configs/remote_servers.xml diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py new file mode 100644 index 00000000000..2791cc7b382 --- /dev/null +++ b/tests/integration/test_mysql_database_engine/test.py @@ -0,0 +1,133 @@ +import time +import contextlib + +import pymysql.cursors +import pytest + +from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException + +cluster = ClickHouseCluster(__file__) +clickhouse_node = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql=True) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +class MySQLNodeInstance: + def __init__(self, user='root', password='clickhouse', hostname='127.0.0.1', port=3308): + self.user = user + self.port = port + self.hostname = hostname + self.password = password + self.mysql_connection = None # lazy init + + def query(self, execution_query): + if self.mysql_connection is None: + self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.hostname, port=self.port) + with self.mysql_connection.cursor() as cursor: + cursor.execute(execution_query) + + def close(self): + if self.mysql_connection is not None: + self.mysql_connection.close() + + +def test_mysql_ddl_for_mysql_database(started_cluster): + with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: + mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") + + clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', 'test_database', 'root', 'clickhouse')") + assert 'test_database' in clickhouse_node.query('SHOW DATABASES') + + mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;') + assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + + time.sleep(3) # MySQL table modification time has one-second resolution, so changes made within the same second cannot be detected + mysql_node.query('ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` 
int(11)') + assert 'add_column' in clickhouse_node.query("SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + + time.sleep(3) # MySQL table modification time has one-second resolution, so changes made within the same second cannot be detected + mysql_node.query('ALTER TABLE `test_database`.`test_table` DROP COLUMN `add_column`') + assert 'add_column' not in clickhouse_node.query("SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + + mysql_node.query('DROP TABLE `test_database`.`test_table`;') + assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') + + clickhouse_node.query("DROP DATABASE test_database") + assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') + + mysql_node.query("DROP DATABASE test_database") + + +def test_clickhouse_ddl_for_mysql_database(started_cluster): + with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: + mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") + mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;') + + clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', 'test_database', 'root', 'clickhouse')") + + assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + clickhouse_node.query("DROP TABLE test_database.test_table") + assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') + clickhouse_node.query("ATTACH TABLE test_database.test_table") + assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + clickhouse_node.query("DETACH TABLE test_database.test_table") + assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') + clickhouse_node.query("ATTACH TABLE test_database.test_table") + assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + + clickhouse_node.query("DROP DATABASE test_database") + assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') + + mysql_node.query("DROP DATABASE test_database") + + +def test_clickhouse_dml_for_mysql_database(started_cluster): + with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: + mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") + mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `i``d` int(11) NOT NULL, PRIMARY KEY (`i``d`)) ENGINE=InnoDB;') + clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', test_database, 'root', 'clickhouse')") + + assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '0' + clickhouse_node.query("INSERT INTO `test_database`.`test_table`(`i\`d`) select number from numbers(10000)") + assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '10000' + + mysql_node.query("DROP DATABASE test_database") + + +def test_clickhouse_join_for_mysql_database(started_cluster): + with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: + mysql_node.query("CREATE DATABASE IF NOT EXISTS test DEFAULT CHARACTER SET 'utf8'") + mysql_node.query("CREATE TABLE test.t1_mysql_local (" + "pays VARCHAR(55) DEFAULT 'FRA' NOT NULL," + "service VARCHAR(5) DEFAULT '' NOT NULL," + "opco CHAR(3) DEFAULT '' NOT NULL" + ")") + mysql_node.query("CREATE 
TABLE test.t2_mysql_local (" + "service VARCHAR(5) DEFAULT '' NOT NULL," + "opco VARCHAR(5) DEFAULT ''" + ")") + clickhouse_node.query("CREATE TABLE default.t1_remote_mysql AS mysql('mysql1:3306','test','t1_mysql_local','root','clickhouse')") + clickhouse_node.query("CREATE TABLE default.t2_remote_mysql AS mysql('mysql1:3306','test','t2_mysql_local','root','clickhouse')") + assert clickhouse_node.query("SELECT s.pays " + "FROM default.t1_remote_mysql AS s " + "LEFT JOIN default.t1_remote_mysql AS s_ref " + "ON (s_ref.opco = s.opco AND s_ref.service = s.service)") == '' + mysql_node.query("DROP DATABASE test") + + +def test_bad_arguments_for_mysql_database_engine(started_cluster): + with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: + with pytest.raises(QueryRuntimeException) as exception: + mysql_node.query("CREATE DATABASE IF NOT EXISTS test_bad_arguments DEFAULT CHARACTER SET 'utf8'") + clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', test_bad_arguments, root, 'clickhouse')") + + assert 'Database engine MySQL requested literal argument.' in str(exception.value) + mysql_node.query("DROP DATABASE test_bad_arguments") diff --git a/dbms/tests/integration/test_parts_delete_zookeeper/__init__.py b/tests/integration/test_mysql_protocol/__init__.py similarity index 100% rename from dbms/tests/integration/test_parts_delete_zookeeper/__init__.py rename to tests/integration/test_mysql_protocol/__init__.py diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/0.reference b/tests/integration/test_mysql_protocol/clients/golang/0.reference similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/0.reference rename to tests/integration/test_mysql_protocol/clients/golang/0.reference diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/Dockerfile b/tests/integration/test_mysql_protocol/clients/golang/Dockerfile similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/Dockerfile rename to tests/integration/test_mysql_protocol/clients/golang/Dockerfile diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml rename to tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/main.go b/tests/integration/test_mysql_protocol/clients/golang/main.go similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/main.go rename to tests/integration/test_mysql_protocol/clients/golang/main.go diff --git a/tests/integration/test_mysql_protocol/clients/java/0.reference b/tests/integration/test_mysql_protocol/clients/java/0.reference new file mode 100644 index 00000000000..3e3e20d1ebb --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/0.reference @@ -0,0 +1,15 @@ +33jdbcnull +44cknull +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 diff --git a/tests/integration/test_mysql_protocol/clients/java/Dockerfile b/tests/integration/test_mysql_protocol/clients/java/Dockerfile new file mode 100644 index 00000000000..96713a68e66 --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:18.04 + +RUN apt-get update && \ + apt-get install -y 
software-properties-common build-essential openjdk-8-jdk libmysql-java curl + +RUN rm -rf \ + /var/lib/apt/lists/* \ + /var/cache/debconf \ + /tmp/* +RUN apt-get clean + +ARG ver=5.1.46 +RUN curl -L -o /mysql-connector-java-${ver}.jar https://repo1.maven.org/maven2/mysql/mysql-connector-java/${ver}/mysql-connector-java-${ver}.jar +ENV CLASSPATH=$CLASSPATH:/mysql-connector-java-${ver}.jar + +WORKDIR /jdbc +COPY Test.java Test.java +RUN javac Test.java diff --git a/tests/integration/test_mysql_protocol/clients/java/Test.java b/tests/integration/test_mysql_protocol/clients/java/Test.java new file mode 100644 index 00000000000..89659529679 --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/Test.java @@ -0,0 +1,77 @@ +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +class JavaConnectorTest { + private static final String CREATE_TABLE_SQL = "CREATE TABLE IF NOT EXISTS default.test1 (`age` Int32, `name` String, `int_nullable` Nullable(Int32)) Engine = Memory"; + private static final String INSERT_SQL = "INSERT INTO default.test1(`age`, `name`) VALUES(33, 'jdbc'),(44, 'ck')"; + private static final String SELECT_SQL = "SELECT * FROM default.test1"; + private static final String SELECT_NUMBER_SQL = "SELECT * FROM system.numbers LIMIT 13"; + private static final String DROP_TABLE_SQL = "DROP TABLE default.test1"; + + public static void main(String[] args) { + int i = 0; + String host = "127.0.0.1"; + String port = "9004"; + String user = "default"; + String password = ""; + String database = "default"; + while (i < args.length) { + switch (args[i]) { + case "--host": + host = args[++i]; + break; + case "--port": + port = args[++i]; + break; + case "--user": + user = args[++i]; + break; + case "--password": + password = args[++i]; + break; + case "--database": + database = args[++i]; + break; + default: + i++; + break; + } + } + + String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s", host, port, database); + + Connection conn = null; + Statement stmt = null; + try { + conn = DriverManager.getConnection(jdbcUrl, user, password); + stmt = conn.createStatement(); + stmt.executeUpdate(CREATE_TABLE_SQL); + stmt.executeUpdate(INSERT_SQL); + + ResultSet rs = stmt.executeQuery(SELECT_SQL); + while (rs.next()) { + System.out.print(rs.getString("age")); + System.out.print(rs.getString("name")); + System.out.print(rs.getString("int_nullable")); + System.out.println(); + } + + stmt.executeUpdate(DROP_TABLE_SQL); + + rs = stmt.executeQuery(SELECT_NUMBER_SQL); + while (rs.next()) { + System.out.print(rs.getString(1)); + System.out.println(); + } + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(1); + } + } +} diff --git a/tests/integration/test_mysql_protocol/clients/java/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/java/docker_compose.yml new file mode 100644 index 00000000000..dbe404232a0 --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/docker_compose.yml @@ -0,0 +1,8 @@ +version: '2.2' +services: + java1: + build: + context: ./ + network: host + # to keep container running + command: sleep infinity diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml rename to
tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile b/tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile rename to tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml rename to tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysqljs/test.js b/tests/integration/test_mysql_protocol/clients/mysqljs/test.js similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysqljs/test.js rename to tests/integration/test_mysql_protocol/clients/mysqljs/test.js diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php diff --git a/dbms/tests/integration/test_mysql_protocol/configs/config.xml b/tests/integration/test_mysql_protocol/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/config.xml rename to tests/integration/test_mysql_protocol/configs/config.xml diff --git a/dbms/tests/integration/test_mysql_protocol/configs/dhparam.pem 
b/tests/integration/test_mysql_protocol/configs/dhparam.pem similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/dhparam.pem rename to tests/integration/test_mysql_protocol/configs/dhparam.pem diff --git a/dbms/tests/integration/test_mysql_protocol/configs/server.crt b/tests/integration/test_mysql_protocol/configs/server.crt similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/server.crt rename to tests/integration/test_mysql_protocol/configs/server.crt diff --git a/dbms/tests/integration/test_mysql_protocol/configs/server.key b/tests/integration/test_mysql_protocol/configs/server.key similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/server.key rename to tests/integration/test_mysql_protocol/configs/server.key diff --git a/dbms/tests/integration/test_mysql_protocol/configs/users.xml b/tests/integration/test_mysql_protocol/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/users.xml rename to tests/integration/test_mysql_protocol/configs/users.xml diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py new file mode 100644 index 00000000000..f75a168d5db --- /dev/null +++ b/tests/integration/test_mysql_protocol/test.py @@ -0,0 +1,357 @@ +# coding: utf-8 + +import docker +import datetime +import math +import os +import pytest +import subprocess +import time +import pymysql.connections + +from docker.models.containers import Container + +from helpers.cluster import ClickHouseCluster + + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + +config_dir = os.path.join(SCRIPT_DIR, './configs') +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance('node', config_dir=config_dir, env_variables={'UBSAN_OPTIONS': 'print_stacktrace=1'}) + +server_port = 9001 + + +@pytest.fixture(scope="module") +def server_address(): + cluster.start() + try: + yield cluster.get_instance_ip('node') + finally: + cluster.shutdown() + + +@pytest.fixture(scope='module') +def mysql_client(): + docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'mysql', 'docker_compose.yml') + subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) + yield docker.from_env().containers.get(cluster.project_name + '_mysql1_1') + + +@pytest.fixture(scope='module') +def mysql_server(mysql_client): + """Return MySQL container when it is healthy. + + :type mysql_client: Container + :rtype: Container + """ + retries = 30 + for i in range(retries): + info = mysql_client.client.api.inspect_container(mysql_client.name) + if info['State']['Health']['Status'] == 'healthy': + break + time.sleep(1) + else: + raise Exception('Mysql server has not started in %d seconds.' 
% retries) + + return mysql_client + + +@pytest.fixture(scope='module') +def golang_container(): + docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'golang', 'docker_compose.yml') + subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) + yield docker.from_env().containers.get(cluster.project_name + '_golang1_1') + + +@pytest.fixture(scope='module') +def php_container(): + docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'php-mysqlnd', 'docker_compose.yml') + subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) + yield docker.from_env().containers.get(cluster.project_name + '_php1_1') + + +@pytest.fixture(scope='module') +def nodejs_container(): + docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'mysqljs', 'docker_compose.yml') + subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) + yield docker.from_env().containers.get(cluster.project_name + '_mysqljs1_1') + + +@pytest.fixture(scope='module') +def java_container(): + docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'java', 'docker_compose.yml') + subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) + yield docker.from_env().containers.get(cluster.project_name + '_java1_1') + + +def test_mysql_client(mysql_client, server_address): + # type: (Container, str) -> None + code, (stdout, stderr) = mysql_client.exec_run(''' + mysql --protocol tcp -h {host} -P {port} default -u user_with_double_sha1 --password=abacaba + -e "SELECT 1;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == '\n'.join(['1', '1', '']) + + code, (stdout, stderr) = mysql_client.exec_run(''' + mysql --protocol tcp -h {host} -P {port} default -u default --password=123 + -e "SELECT 1 as a;" + -e "SELECT 'тест' as b;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == '\n'.join(['a', '1', 'b', 'тест', '']) + + code, (stdout, stderr) = mysql_client.exec_run(''' + mysql --protocol tcp -h {host} -P {port} default -u default --password=abc -e "select 1 as a;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stderr == 'mysql: [Warning] Using a password on the command line interface can be insecure.\n' \ + 'ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n' + + code, (stdout, stderr) = mysql_client.exec_run(''' + mysql --protocol tcp -h {host} -P {port} default -u default --password=123 + -e "use system;" + -e "select count(*) from (select name from tables limit 1);" + -e "use system2;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == 'count()\n1\n' + assert stderr == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \ + "ERROR 81 (00000) at line 1: Database system2 doesn't exist\n" + + code, (stdout, stderr) = mysql_client.exec_run(''' + mysql --protocol tcp -h {host} -P {port} default -u default --password=123 + -e "CREATE DATABASE x;" + -e "USE x;" + -e "CREATE TABLE table1 (column UInt32) ENGINE = Memory;" + -e "INSERT INTO table1 VALUES (0), (1), (5);" + -e "INSERT INTO table1 VALUES (0), (1), (5);" + -e "SELECT * FROM table1 ORDER BY column;" + -e "DROP DATABASE x;" + -e "CREATE TEMPORARY TABLE tmp (tmp_column UInt32);" + -e 
"INSERT INTO tmp VALUES (0), (1);" + -e "SELECT * FROM tmp ORDER BY tmp_column;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', '']) + + +def test_mysql_federated(mysql_server, server_address): + node.query('''DROP DATABASE IF EXISTS mysql_federated''', settings={"password": "123"}) + node.query('''CREATE DATABASE mysql_federated''', settings={"password": "123"}) + node.query('''CREATE TABLE mysql_federated.test (col UInt32) ENGINE = Log''', settings={"password": "123"}) + node.query('''INSERT INTO mysql_federated.test VALUES (0), (1), (5)''', settings={"password": "123"}) + + code, (_, stderr) = mysql_server.exec_run(''' + mysql + -e "DROP SERVER IF EXISTS clickhouse;" + -e "CREATE SERVER clickhouse FOREIGN DATA WRAPPER mysql OPTIONS (USER 'default', PASSWORD '123', HOST '{host}', PORT {port}, DATABASE 'mysql_federated');" + -e "DROP DATABASE IF EXISTS mysql_federated;" + -e "CREATE DATABASE mysql_federated;" + '''.format(host=server_address, port=server_port), demux=True) + + assert code == 0 + + code, (stdout, stderr) = mysql_server.exec_run(''' + mysql + -e "CREATE TABLE mysql_federated.test(`col` int UNSIGNED) ENGINE=FEDERATED CONNECTION='clickhouse';" + -e "SELECT * FROM mysql_federated.test ORDER BY col;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == '\n'.join(['col', '0', '1', '5', '']) + + code, (stdout, stderr) = mysql_server.exec_run(''' + mysql + -e "INSERT INTO mysql_federated.test VALUES (0), (1), (5);" + -e "SELECT * FROM mysql_federated.test ORDER BY col;" + '''.format(host=server_address, port=server_port), demux=True) + + assert stdout == '\n'.join(['col', '0', '0', '1', '1', '5', '5', '']) + + +def test_python_client(server_address): + client = pymysql.connections.Connection(host=server_address, user='user_with_double_sha1', password='abacaba', database='default', port=server_port) + + with pytest.raises(pymysql.InternalError) as exc_info: + client.query('select name from tables') + + assert exc_info.value.args == (60, "Table default.tables doesn't exist.") + + cursor = client.cursor(pymysql.cursors.DictCursor) + cursor.execute("select 1 as a, 'тест' as b") + assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] + + with pytest.raises(pymysql.InternalError) as exc_info: + pymysql.connections.Connection(host=server_address, user='default', password='abacab', database='default', port=server_port) + + assert exc_info.value.args == (516, 'default: Authentication failed: password is incorrect or there is no user with such name') + + client = pymysql.connections.Connection(host=server_address, user='default', password='123', database='default', port=server_port) + + with pytest.raises(pymysql.InternalError) as exc_info: + client.query('select name from tables') + + assert exc_info.value.args == (60, "Table default.tables doesn't exist.") + + cursor = client.cursor(pymysql.cursors.DictCursor) + cursor.execute("select 1 as a, 'тест' as b") + assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] + + client.select_db('system') + + with pytest.raises(pymysql.InternalError) as exc_info: + client.select_db('system2') + + assert exc_info.value.args == (81, "Database system2 doesn't exist") + + cursor = client.cursor(pymysql.cursors.DictCursor) + cursor.execute('CREATE DATABASE x') + client.select_db('x') + cursor.execute("CREATE TABLE table1 (a UInt32) ENGINE = Memory") + cursor.execute("INSERT INTO table1 VALUES (1), (3)") + 
cursor.execute("INSERT INTO table1 VALUES (1), (4)") + cursor.execute("SELECT * FROM table1 ORDER BY a") + assert cursor.fetchall() == [{'a': 1}, {'a': 1}, {'a': 3}, {'a': 4}] + + +def test_golang_client(server_address, golang_container): + # type: (str, Container) -> None + with open(os.path.join(SCRIPT_DIR, 'clients', 'golang', '0.reference')) as fp: + reference = fp.read() + + code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user default --password 123 --database ' + 'abc'.format(host=server_address, port=server_port), demux=True) + + assert code == 1 + assert stderr == "Error 81: Database abc doesn't exist\n" + + code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user default --password 123 --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + + assert code == 0 + assert stdout == reference + + code, (stdout, stderr) = golang_container.exec_run('./main --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference + + +def test_php_client(server_address, php_container): + # type: (str, Container) -> None + code, (stdout, stderr) = php_container.exec_run('php -f test.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == 'tables\n' + + code, (stdout, stderr) = php_container.exec_run('php -f test_ssl.php {host} {port} default 123'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == 'tables\n' + + code, (stdout, stderr) = php_container.exec_run('php -f test.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == 'tables\n' + + code, (stdout, stderr) = php_container.exec_run('php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == 'tables\n' + + +def test_mysqljs_client(server_address, nodejs_container): + code, (_, stderr) = nodejs_container.exec_run('node test.js {host} {port} user_with_sha256 abacaba'.format(host=server_address, port=server_port), demux=True) + assert code == 1 + assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' in stderr + + code, (_, stderr) = nodejs_container.exec_run('node test.js {host} {port} user_with_empty_password ""'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + + code, (_, _) = nodejs_container.exec_run('node test.js {host} {port} user_with_double_sha1 abacaba'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + + code, (_, _) = nodejs_container.exec_run('node test.js {host} {port} user_with_empty_password 123'.format(host=server_address, port=server_port), demux=True) + assert code == 1 + + +def test_java_client(server_address, java_container): + # type: (str, Container) -> None + with open(os.path.join(SCRIPT_DIR, 'clients', 'java', '0.reference')) as fp: + reference = fp.read() + + # database not exists exception. + code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' + 'abc'.format(host=server_address, port=server_port), demux=True) + assert code == 1 + + # empty password passed. 
+ code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference + + # non-empty password passed. + code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference + + # double-sha1 password passed. + code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference + + +def test_types(server_address): + client = pymysql.connections.Connection(host=server_address, user='default', password='123', database='default', port=server_port) + + cursor = client.cursor(pymysql.cursors.DictCursor) + cursor.execute( + "select " + "toInt8(-pow(2, 7)) as Int8_column, " + "toUInt8(pow(2, 8) - 1) as UInt8_column, " + "toInt16(-pow(2, 15)) as Int16_column, " + "toUInt16(pow(2, 16) - 1) as UInt16_column, " + "toInt32(-pow(2, 31)) as Int32_column, " + "toUInt32(pow(2, 32) - 1) as UInt32_column, " + "toInt64('-9223372036854775808') as Int64_column, " # -2^63 + "toUInt64('18446744073709551615') as UInt64_column, " # 2^64 - 1 + "'тест' as String_column, " + "toFixedString('тест', 8) as FixedString_column, " + "toFloat32(1.5) as Float32_column, " + "toFloat64(1.5) as Float64_column, " + "toFloat32(NaN) as Float32_NaN_column, " + "-Inf as Float64_Inf_column, " + "toDate('2019-12-08') as Date_column, " + "toDate('1970-01-01') as Date_min_column, " + "toDate('1970-01-02') as Date_after_min_column, " + "toDateTime('2019-12-08 08:24:03') as DateTime_column" + ) + + result = cursor.fetchall()[0] + expected = [ + ('Int8_column', -2 ** 7), + ('UInt8_column', 2 ** 8 - 1), + ('Int16_column', -2 ** 15), + ('UInt16_column', 2 ** 16 - 1), + ('Int32_column', -2 ** 31), + ('UInt32_column', 2 ** 32 - 1), + ('Int64_column', -2 ** 63), + ('UInt64_column', 2 ** 64 - 1), + ('String_column', 'тест'), + ('FixedString_column', 'тест'), + ('Float32_column', 1.5), + ('Float64_column', 1.5), + ('Float32_NaN_column', float('nan')), + ('Float64_Inf_column', float('-inf')), + ('Date_column', datetime.date(2019, 12, 8)), + ('Date_min_column', '0000-00-00'), + ('Date_after_min_column', datetime.date(1970, 1, 2)), + ('DateTime_column', datetime.datetime(2019, 12, 8, 8, 24, 3)), + ] + + for key, value in expected: + if isinstance(value, float) and math.isnan(value): + assert math.isnan(result[key]) + else: + assert result[key] == value diff --git a/dbms/tests/integration/test_polymorphic_parts/__init__.py b/tests/integration/test_no_local_metadata_node/__init__.py similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/__init__.py rename to tests/integration/test_no_local_metadata_node/__init__.py diff --git a/tests/integration/test_no_local_metadata_node/test.py b/tests/integration/test_no_local_metadata_node/test.py new file mode 100644 index 00000000000..ef240cd710c --- /dev/null +++ b/tests/integration/test_no_local_metadata_node/test.py @@ -0,0 +1,54 @@ +import time +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', 
with_zookeeper=True) + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + finally: + cluster.shutdown() + + +def test_table_start_without_metadata(start_cluster): + node1.query(""" + CREATE TABLE test (date Date) + ENGINE = ReplicatedMergeTree('/clickhouse/table/test_table', '1') + ORDER BY tuple() + """) + + node1.query("INSERT INTO test VALUES(toDate('2019-12-01'))") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" + + # a metadata-only ALTER: it changes the column default but leaves the data untouched + node1.query("ALTER TABLE test MODIFY COLUMN date Date DEFAULT toDate('2019-10-01')") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" + + node1.query("DETACH TABLE test") + zk_cli = cluster.get_kazoo_client('zoo1') + + # simulate an upgrade from an old version by removing the per-replica metadata nodes + zk_cli.delete("/clickhouse/table/test_table/replicas/1/metadata") + zk_cli.delete("/clickhouse/table/test_table/replicas/1/metadata_version") + + node1.query("ATTACH TABLE test") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" + + node1.query("ALTER TABLE test MODIFY COLUMN date Date DEFAULT toDate('2019-09-01')") + + node1.query("DETACH TABLE test") + + zk_cli.set("/clickhouse/table/test_table/replicas/1/metadata", "") + + node1.query("ATTACH TABLE test") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" diff --git a/dbms/tests/integration/test_prometheus_endpoint/__init__.py b/tests/integration/test_non_default_compression/__init__.py similarity index 100% rename from dbms/tests/integration/test_prometheus_endpoint/__init__.py rename to tests/integration/test_non_default_compression/__init__.py diff --git a/dbms/tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml rename to tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml diff --git a/dbms/tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml b/tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml rename to tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml diff --git a/dbms/tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml rename to tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml diff --git a/dbms/tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml rename to tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml diff --git a/dbms/tests/integration/test_non_default_compression/test.py b/tests/integration/test_non_default_compression/test.py similarity index 100% rename from dbms/tests/integration/test_non_default_compression/test.py rename to
tests/integration/test_non_default_compression/test.py diff --git a/dbms/tests/integration/test_quota/__init__.py b/tests/integration/test_odbc_interaction/__init__.py similarity index 100% rename from dbms/tests/integration/test_quota/__init__.py rename to tests/integration/test_odbc_interaction/__init__.py diff --git a/dbms/tests/integration/test_odbc_interaction/configs/config.xml b/tests/integration/test_odbc_interaction/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/config.xml rename to tests/integration/test_odbc_interaction/configs/config.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml b/tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml rename to tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml b/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml rename to tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml b/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml rename to tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/users.xml b/tests/integration/test_odbc_interaction/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/users.xml rename to tests/integration/test_odbc_interaction/configs/users.xml diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py new file mode 100644 index 00000000000..41f54ddd0e6 --- /dev/null +++ b/tests/integration/test_odbc_interaction/test.py @@ -0,0 +1,228 @@ +import time +import pytest + +import os +import pymysql.cursors +import psycopg2 +from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT +from helpers.cluster import ClickHouseCluster + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + +cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs')) +node1 = cluster.add_instance('node1', with_odbc_drivers=True, with_mysql=True, image='yandex/clickhouse-integration-test', main_configs=['configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml', 'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml', 'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'], stay_alive=True) + +create_table_sql_template = """ + CREATE TABLE `clickhouse`.`{}` ( + `id` int(11) NOT NULL, + `name` varchar(50) NOT NULL, + `age` int NOT NULL default 0, + `money` int NOT NULL default 0, + `column_x` int default NULL, + PRIMARY KEY (`id`)) ENGINE=InnoDB; + """ +def get_mysql_conn(): + conn = pymysql.connect(user='root', 
password='clickhouse', host='127.0.0.1', port=3308) + return conn + +def create_mysql_db(conn, name): + with conn.cursor() as cursor: + cursor.execute( + "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name)) + +def create_mysql_table(conn, table_name): + with conn.cursor() as cursor: + cursor.execute(create_table_sql_template.format(table_name)) + +def get_postgres_conn(): + conn_string = "host='localhost' user='postgres' password='mysecretpassword'" + conn = psycopg2.connect(conn_string) + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + conn.autocommit = True + return conn + +def create_postgres_db(conn, name): + cursor = conn.cursor() + cursor.execute("CREATE SCHEMA {}".format(name)) + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] + + print "sqlite data received" + node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t1(x INTEGER PRIMARY KEY ASC, y, z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t2(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t3(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t4(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + print "sqlite tables created" + mysql_conn = get_mysql_conn() + print "mysql connection received" + ## create mysql db and table + create_mysql_db(mysql_conn, 'clickhouse') + print "mysql database created" + + postgres_conn = get_postgres_conn() + print "postgres connection received" + + create_postgres_db(postgres_conn, 'clickhouse') + print "postgres db created" + + cursor = postgres_conn.cursor() + cursor.execute("create table if not exists clickhouse.test_table (column1 int primary key, column2 varchar(40) not null)") + + yield cluster + + except Exception as ex: + print(ex) + raise # bare raise preserves the original traceback + finally: + cluster.shutdown() + +def test_mysql_simple_select_works(started_cluster): + mysql_setup = node1.odbc_drivers["MySQL"] + + table_name = 'test_insert_select' + conn = get_mysql_conn() + create_mysql_table(conn, table_name) + + # Check that NULL-values are handled correctly by the ODBC-bridge + with conn.cursor() as cursor: + cursor.execute("INSERT INTO clickhouse.{} VALUES(50, 'null-guy', 127, 255, NULL), (100, 'non-null-guy', 127, 255, 511);".format(table_name)) + conn.commit() + assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), settings={"external_table_functions_use_nulls": "1"}) == '\\N\n511\n' + assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), settings={"external_table_functions_use_nulls": "0"}) == '0\n511\n' + + node1.query(''' +CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nullable(UInt32)) ENGINE = MySQL('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse'); +'''.format(table_name, table_name)) + + node1.query("INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, NULL from numbers(49) ".format(table_name)) + node1.query("INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, 42 from numbers(51, 49) ".format(table_name)) + + assert node1.query("SELECT COUNT
() FROM {} WHERE column_x IS NOT NULL".format(table_name)) == '50\n' + assert node1.query("SELECT COUNT () FROM {} WHERE column_x IS NULL".format(table_name)) == '50\n' + assert node1.query("SELECT count(*) FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name)) == '100\n' + + # previously this test failed with a segfault, + # so run a trivial query just to be sure :) + assert node1.query("select 1") == "1\n" + + conn.close() + + +def test_sqlite_simple_select_function_works(started_cluster): + sqlite_setup = node1.odbc_drivers["SQLite3"] + sqlite_db = sqlite_setup["Database"] + + node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t1 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + assert node1.query("select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\t3\n" + + assert node1.query("select y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "2\n" + assert node1.query("select z from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\n" + assert node1.query("select x from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\n" + assert node1.query("select x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\n" + assert node1.query("select z, x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\t1\t2\n" + assert node1.query("select count(), sum(x) from odbc('DSN={}', '{}') group by x".format(sqlite_setup["DSN"], 't1')) == "1\t1\n" + +def test_sqlite_simple_select_storage_works(started_cluster): + sqlite_setup = node1.odbc_drivers["SQLite3"] + sqlite_db = sqlite_setup["Database"] + + node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t4 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + node1.query("create table SqliteODBC (x Int32, y String, z String) engine = ODBC('DSN={}', '', 't4')".format(sqlite_setup["DSN"])) + + assert node1.query("select * from SqliteODBC") == "1\t2\t3\n" + assert node1.query("select y from SqliteODBC") == "2\n" + assert node1.query("select z from SqliteODBC") == "3\n" + assert node1.query("select x from SqliteODBC") == "1\n" + assert node1.query("select x, y from SqliteODBC") == "1\t2\n" + assert node1.query("select z, x, y from SqliteODBC") == "3\t1\t2\n" + assert node1.query("select count(), sum(x) from SqliteODBC group by x") == "1\t1\n" + +def test_sqlite_odbc_hashed_dictionary(started_cluster): + sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] + node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t2 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + + assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "3\n" + assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "1\n" # default + + time.sleep(5) # first reload + node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t2 values(200, 2, 7);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + + # No reload because of invalidate query + time.sleep(5) + assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "3\n" + assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "1\n" # still default + + node1.exec_in_container(["bash", "-c", "echo 'REPLACE INTO t2 values(1, 2, 5);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + + # waiting for reload + time.sleep(5) + + assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z',
toUInt64(1))") == "5\n" + assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "7\n" # new value + +def test_sqlite_odbc_cached_dictionary(started_cluster): + sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] + node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t3 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + + assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "3\n" + + node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t3 values(200, 2, 7);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + + assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(200))") == "7\n" # new value + + node1.exec_in_container(["bash", "-c", "echo 'REPLACE INTO t3 values(1, 2, 12);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root') + + time.sleep(5) + + assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "12\n" + +def test_postgres_odbc_hached_dictionary_with_schema(started_cluster): + conn = get_postgres_conn() + cursor = conn.cursor() + cursor.execute("insert into clickhouse.test_table values(1, 'hello'),(2, 'world')") + time.sleep(5) + assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))") == "hello\n" + assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))") == "world\n" + +def test_postgres_odbc_hached_dictionary_no_tty_pipe_overflow(started_cluster): + conn = get_postgres_conn() + cursor = conn.cursor() + cursor.execute("insert into clickhouse.test_table values(3, 'xxx')") + for i in xrange(100): + try: + node1.query("system reload dictionary postgres_odbc_hashed", timeout=5) + except Exception as ex: + assert False, "Exception occured -- odbc-bridge hangs: " + str(ex) + + assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))") == "xxx\n" + +def test_bridge_dies_with_parent(started_cluster): + node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))") + + clickhouse_pid = node1.get_process_pid("clickhouse server") + bridge_pid = node1.get_process_pid("odbc-bridge") + assert clickhouse_pid is not None + assert bridge_pid is not None + + while clickhouse_pid is not None: + try: + node1.exec_in_container(["bash", "-c", "kill {}".format(clickhouse_pid)], privileged=True, user='root') + except: + pass + clickhouse_pid = node1.get_process_pid("clickhouse server") + time.sleep(1) + + time.sleep(1) # just for sure, that odbc-bridge caught signal + bridge_pid = node1.get_process_pid("odbc-bridge") + + if bridge_pid: + out = node1.exec_in_container(["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"], privileged=True, user='root') + print("Bridge is running, gdb output:") + print(out) + + assert clickhouse_pid is None + assert bridge_pid is None diff --git a/dbms/tests/integration/test_read_temporary_tables_on_failure/__init__.py b/tests/integration/test_old_versions/__init__.py similarity index 100% rename from dbms/tests/integration/test_read_temporary_tables_on_failure/__init__.py rename to tests/integration/test_old_versions/__init__.py diff --git a/dbms/tests/integration/test_old_versions/configs/config.d/test_cluster.xml b/tests/integration/test_old_versions/configs/config.d/test_cluster.xml similarity index 100% rename from dbms/tests/integration/test_old_versions/configs/config.d/test_cluster.xml rename to 
tests/integration/test_old_versions/configs/config.d/test_cluster.xml diff --git a/dbms/tests/integration/test_old_versions/test.py b/tests/integration/test_old_versions/test.py similarity index 100% rename from dbms/tests/integration/test_old_versions/test.py rename to tests/integration/test_old_versions/test.py diff --git a/dbms/tests/integration/test_recovery_replica/__init__.py b/tests/integration/test_part_log_table/__init__.py similarity index 100% rename from dbms/tests/integration/test_recovery_replica/__init__.py rename to tests/integration/test_part_log_table/__init__.py diff --git a/dbms/tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml b/tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml similarity index 100% rename from dbms/tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml rename to tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml diff --git a/dbms/tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml b/tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml similarity index 100% rename from dbms/tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml rename to tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml diff --git a/dbms/tests/integration/test_part_log_table/test.py b/tests/integration/test_part_log_table/test.py similarity index 100% rename from dbms/tests/integration/test_part_log_table/test.py rename to tests/integration/test_part_log_table/test.py diff --git a/dbms/tests/integration/test_redirect_url_storage/__init__.py b/tests/integration/test_partition/__init__.py similarity index 100% rename from dbms/tests/integration/test_redirect_url_storage/__init__.py rename to tests/integration/test_partition/__init__.py diff --git a/dbms/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py similarity index 100% rename from dbms/tests/integration/test_partition/test.py rename to tests/integration/test_partition/test.py diff --git a/dbms/tests/integration/test_relative_filepath/__init__.py b/tests/integration/test_parts_delete_zookeeper/__init__.py similarity index 100% rename from dbms/tests/integration/test_relative_filepath/__init__.py rename to tests/integration/test_parts_delete_zookeeper/__init__.py diff --git a/dbms/tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml b/tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml rename to tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_parts_delete_zookeeper/test.py b/tests/integration/test_parts_delete_zookeeper/test.py similarity index 100% rename from dbms/tests/integration/test_parts_delete_zookeeper/test.py rename to tests/integration/test_parts_delete_zookeeper/test.py diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/__init__.py b/tests/integration/test_polymorphic_parts/__init__.py similarity index 100% rename from dbms/tests/integration/test_reload_max_table_size_to_drop/__init__.py rename to tests/integration/test_polymorphic_parts/__init__.py diff --git a/dbms/tests/integration/test_polymorphic_parts/configs/compact_parts.xml b/tests/integration/test_polymorphic_parts/configs/compact_parts.xml similarity index 
100% rename from dbms/tests/integration/test_polymorphic_parts/configs/compact_parts.xml rename to tests/integration/test_polymorphic_parts/configs/compact_parts.xml diff --git a/dbms/tests/integration/test_polymorphic_parts/configs/no_leader.xml b/tests/integration/test_polymorphic_parts/configs/no_leader.xml similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/configs/no_leader.xml rename to tests/integration/test_polymorphic_parts/configs/no_leader.xml diff --git a/dbms/tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml b/tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml rename to tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml diff --git a/dbms/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/test.py rename to tests/integration/test_polymorphic_parts/test.py diff --git a/dbms/tests/integration/test_reloading_storage_configuration/__init__.py b/tests/integration/test_prometheus_endpoint/__init__.py similarity index 100% rename from dbms/tests/integration/test_reloading_storage_configuration/__init__.py rename to tests/integration/test_prometheus_endpoint/__init__.py diff --git a/dbms/tests/integration/test_prometheus_endpoint/configs/prom_conf.xml b/tests/integration/test_prometheus_endpoint/configs/prom_conf.xml similarity index 100% rename from dbms/tests/integration/test_prometheus_endpoint/configs/prom_conf.xml rename to tests/integration/test_prometheus_endpoint/configs/prom_conf.xml diff --git a/dbms/tests/integration/test_prometheus_endpoint/test.py b/tests/integration/test_prometheus_endpoint/test.py similarity index 100% rename from dbms/tests/integration/test_prometheus_endpoint/test.py rename to tests/integration/test_prometheus_endpoint/test.py diff --git a/dbms/tests/integration/test_remote_prewhere/__init__.py b/tests/integration/test_quota/__init__.py similarity index 100% rename from dbms/tests/integration/test_remote_prewhere/__init__.py rename to tests/integration/test_quota/__init__.py diff --git a/tests/integration/test_quota/configs/users.d/assign_myquota.xml b/tests/integration/test_quota/configs/users.d/assign_myquota.xml new file mode 100644 index 00000000000..8b98ade8aeb --- /dev/null +++ b/tests/integration/test_quota/configs/users.d/assign_myquota.xml @@ -0,0 +1,7 @@ + + + + myQuota + + + diff --git a/tests/integration/test_quota/configs/users.d/drop_default_quota.xml b/tests/integration/test_quota/configs/users.d/drop_default_quota.xml new file mode 100644 index 00000000000..5f53ecf5f49 --- /dev/null +++ b/tests/integration/test_quota/configs/users.d/drop_default_quota.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/dbms/tests/integration/test_quota/configs/users.d/quota.xml b/tests/integration/test_quota/configs/users.d/quota.xml similarity index 100% rename from dbms/tests/integration/test_quota/configs/users.d/quota.xml rename to tests/integration/test_quota/configs/users.d/quota.xml diff --git a/dbms/tests/integration/test_quota/no_quotas.xml b/tests/integration/test_quota/no_quotas.xml similarity index 100% rename from dbms/tests/integration/test_quota/no_quotas.xml rename to tests/integration/test_quota/no_quotas.xml diff --git a/dbms/tests/integration/test_quota/normal_limits.xml 
b/tests/integration/test_quota/normal_limits.xml similarity index 100% rename from dbms/tests/integration/test_quota/normal_limits.xml rename to tests/integration/test_quota/normal_limits.xml diff --git a/dbms/tests/integration/test_quota/simpliest.xml b/tests/integration/test_quota/simpliest.xml similarity index 100% rename from dbms/tests/integration/test_quota/simpliest.xml rename to tests/integration/test_quota/simpliest.xml diff --git a/tests/integration/test_quota/test.py b/tests/integration/test_quota/test.py new file mode 100644 index 00000000000..ae68a34a03e --- /dev/null +++ b/tests/integration/test_quota/test.py @@ -0,0 +1,251 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry +import os +import re +import time + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', + config_dir="configs") + +query_from_system_quotas = "SELECT * FROM system.quotas ORDER BY name" + +query_from_system_quota_usage = "SELECT id, key, duration, "\ + "queries, errors, result_rows, result_bytes, read_rows, read_bytes "\ + "FROM system.quota_usage ORDER BY id, key, duration" + +def system_quotas(): + return instance.query(query_from_system_quotas).rstrip('\n') + +def system_quota_usage(): + return instance.query(query_from_system_quota_usage).rstrip('\n') + + +def copy_quota_xml(local_file_name, reload_immediately=True): + script_dir = os.path.dirname(os.path.realpath(__file__)) + instance.copy_file_to_container(os.path.join(script_dir, local_file_name), '/etc/clickhouse-server/users.d/quota.xml') + if reload_immediately: + instance.query("SYSTEM RELOAD CONFIG") + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + + instance.query("CREATE TABLE test_table(x UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query("INSERT INTO test_table SELECT number FROM numbers(50)") + + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def reset_quotas_and_usage_info(): + try: + yield + finally: + instance.query("DROP QUOTA IF EXISTS qA, qB") + copy_quota_xml('simpliest.xml') # To reset usage info. + copy_quota_xml('normal_limits.xml') + + +def test_quota_from_users_xml(): + assert instance.query("SELECT currentQuota()") == "myQuota\n" + assert instance.query("SELECT currentQuotaID()") == "e651da9c-a748-8703-061a-7e5e5096dae7\n" + assert instance.query("SELECT currentQuotaKey()") == "default\n" + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" + + instance.query("SELECT COUNT() from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t51\t208\t50\t200" + + +def test_simpliest_quota(): + # The quota in 'simpliest.xml' doesn't even track usage.
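+ # (It defines no intervals, so all usage columns in system.quota_usage read NULL and queries are never counted.)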
+ copy_quota_xml('simpliest.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + +def test_tracking_quota(): + # Now we're tracking usage. + copy_quota_xml('tracking.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" + + instance.query("SELECT COUNT() from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t51\t208\t50\t200" + + +def test_exceed_quota(): + # Change quota, now the limits are tiny so we will exceed the quota. + copy_quota_xml('tiny_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1]\t[1]\t[1]\t[0]\t[1]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + assert re.search("Quota.*has\ been\ exceeded", instance.query_and_get_error("SELECT * from test_table")) + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t1\t0\t0\t50\t0" + + # Change quota, now the limits are enough to execute queries. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t1\t0\t0\t50\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t1\t50\t200\t100\t200" + + +def test_add_remove_interval(): + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + # Add interval. + copy_quota_xml('two_intervals.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952,63113904]\t[0,1]\t[1000,0]\t[0,0]\t[0,0]\t[0,30000]\t[1000,0]\t[0,20000]\t[0,120]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0\n"\ + "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t63113904\t0\t0\t0\t0\t0\t0" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200\n"\ + "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t63113904\t1\t0\t50\t200\t50\t200" + + # Remove interval. 
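+ # (Usage already accumulated for the surviving interval is kept across the reload: queries stays at 1 below.)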
+ copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t1\t0\t50\t200\t50\t200" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t2\t0\t100\t400\t100\t400" + + # Remove all intervals. + copy_quota_xml('simpliest.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]\t[]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + instance.query("SELECT * from test_table") + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N" + + # Add one interval back. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + +def test_add_remove_quota(): + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + # Add quota. + copy_quota_xml('two_quotas.xml') + assert system_quotas() ==\ + "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]\n"\ + "myQuota2\t4590510c-4d13-bf21-ec8a-c2187b092e73\tusers.xml\tclient key or user name\t[]\t[3600,2629746]\t[1,0]\t[0,0]\t[0,0]\t[4000,0]\t[400000,0]\t[4000,0]\t[400000,0]\t[60,1800]" + + # Drop quota. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + + # Drop all quotas. + copy_quota_xml('no_quotas.xml') + assert system_quotas() == "" + assert system_quota_usage() == "" + + # Add one quota back. + copy_quota_xml('normal_limits.xml') + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + assert system_quota_usage() == "e651da9c-a748-8703-061a-7e5e5096dae7\tdefault\t31556952\t0\t0\t0\t0\t0\t0" + + +def test_reload_users_xml_by_timer(): + assert system_quotas() == "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1000]\t[0]\t[0]\t[0]\t[1000]\t[0]\t[0]" + + time.sleep(1) # The modification time of the 'quota.xml' file should be different, + # because config files are reloaded by timer only when the modification time is changed.
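+ # (reload_immediately=False skips SYSTEM RELOAD CONFIG, so only the periodic reload can pick up the new file.)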
+ copy_quota_xml('tiny_limits.xml', reload_immediately=False) + assert_eq_with_retry(instance, query_from_system_quotas, "myQuota\te651da9c-a748-8703-061a-7e5e5096dae7\tusers.xml\tuser name\t['default']\t[31556952]\t[0]\t[1]\t[1]\t[1]\t[0]\t[1]\t[0]\t[0]") + + +def test_dcl_introspection(): + assert instance.query("SHOW QUOTAS") == "myQuota\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES 1000, READ ROWS 1000 TO default\n" + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0/1000 errors=0 result_rows=0 result_bytes=0 read_rows=0/1000 read_bytes=0 execution_time=0" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE CURRENT")) + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE ALL")) + + instance.query("SELECT * from test_table") + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + # Add interval. + copy_quota_xml('two_intervals.xml') + assert instance.query("SHOW QUOTAS") == "myQuota\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES 1000, READ ROWS 1000, FOR RANDOMIZED INTERVAL 2 YEAR MAX RESULT BYTES 30000, READ BYTES 20000, EXECUTION TIME 120 TO default\n" + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*\n"\ + "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0/30000 read_rows=0 read_bytes=0/20000 execution_time=0/120" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + # Drop interval, add quota. 
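+ # ('two_quotas.xml' keeps myQuota with its single yearly interval and adds a second quota, myQuota2.)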
+ copy_quota_xml('two_quotas.xml') + assert instance.query("SHOW QUOTAS") == "myQuota\nmyQuota2\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES 1000, READ ROWS 1000 TO default\n" + assert instance.query("SHOW CREATE QUOTA myQuota2") == "CREATE QUOTA myQuota2 KEYED BY \\'client key or user name\\' FOR RANDOMIZED INTERVAL 1 HOUR MAX RESULT ROWS 4000, RESULT BYTES 400000, READ ROWS 4000, READ BYTES 400000, EXECUTION TIME 60, FOR INTERVAL 1 MONTH MAX EXECUTION TIME 1800\n" + expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + +def test_dcl_management(): + copy_quota_xml('no_quotas.xml') + assert instance.query("SHOW QUOTAS") == "" + assert instance.query("SHOW QUOTA USAGE") == "" + + instance.query("CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER") + assert instance.query("SHOW QUOTAS") == "qA\n" + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 5 QUARTER MAX QUERIES 123 TO default\n" + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0/123 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("SELECT * from test_table") + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=1/123 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 321, MAX ERRORS 10, FOR INTERVAL 0.5 HOUR MAX EXECUTION TIME 0.5") + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 30 MINUTE MAX EXECUTION TIME 0.5, FOR INTERVAL 5 QUARTER MAX QUERIES 321, ERRORS 10 TO default\n" + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*/0.5\n"\ + "qA key=\\\\'\\\\' interval=\[.*\] queries=1/321 errors=0/10 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH NO LIMITS, FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY, FOR INTERVAL 1800 SECOND NO LIMITS") + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY TO default\n" + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("SELECT * from test_table") + expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=1 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("ALTER QUOTA qA RENAME TO qB") + assert instance.query("SHOW CREATE QUOTA qB") == "CREATE QUOTA qB KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY TO default\n" + expected_usage = "qB key=\\\\'\\\\' interval=\[.*\] queries=1 errors=0 result_rows=50 result_bytes=200 read_rows=50 
read_bytes=200 execution_time=.*" + assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) + + instance.query("DROP QUOTA qB") + assert instance.query("SHOW QUOTAS") == "" + assert instance.query("SHOW QUOTA USAGE") == "" + + +def test_users_xml_is_readonly(): + assert re.search("storage is readonly", instance.query_and_get_error("DROP QUOTA myQuota")) diff --git a/dbms/tests/integration/test_quota/tiny_limits.xml b/tests/integration/test_quota/tiny_limits.xml similarity index 100% rename from dbms/tests/integration/test_quota/tiny_limits.xml rename to tests/integration/test_quota/tiny_limits.xml diff --git a/dbms/tests/integration/test_quota/tracking.xml b/tests/integration/test_quota/tracking.xml similarity index 100% rename from dbms/tests/integration/test_quota/tracking.xml rename to tests/integration/test_quota/tracking.xml diff --git a/dbms/tests/integration/test_quota/two_intervals.xml b/tests/integration/test_quota/two_intervals.xml similarity index 100% rename from dbms/tests/integration/test_quota/two_intervals.xml rename to tests/integration/test_quota/two_intervals.xml diff --git a/dbms/tests/integration/test_quota/two_quotas.xml b/tests/integration/test_quota/two_quotas.xml similarity index 100% rename from dbms/tests/integration/test_quota/two_quotas.xml rename to tests/integration/test_quota/two_quotas.xml diff --git a/dbms/tests/integration/test_random_inserts/__init__.py b/tests/integration/test_random_inserts/__init__.py similarity index 100% rename from dbms/tests/integration/test_random_inserts/__init__.py rename to tests/integration/test_random_inserts/__init__.py diff --git a/dbms/tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml b/tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml similarity index 100% rename from dbms/tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml rename to tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml diff --git a/dbms/tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml b/tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml rename to tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml diff --git a/dbms/tests/integration/test_random_inserts/test.py b/tests/integration/test_random_inserts/test.py similarity index 100% rename from dbms/tests/integration/test_random_inserts/test.py rename to tests/integration/test_random_inserts/test.py diff --git a/dbms/tests/integration/test_random_inserts/test.sh b/tests/integration/test_random_inserts/test.sh similarity index 100% rename from dbms/tests/integration/test_random_inserts/test.sh rename to tests/integration/test_random_inserts/test.sh diff --git a/dbms/tests/integration/test_replace_partition/__init__.py b/tests/integration/test_read_temporary_tables_on_failure/__init__.py similarity index 100% rename from dbms/tests/integration/test_replace_partition/__init__.py rename to tests/integration/test_read_temporary_tables_on_failure/__init__.py diff --git a/dbms/tests/integration/test_read_temporary_tables_on_failure/test.py b/tests/integration/test_read_temporary_tables_on_failure/test.py similarity index 100% rename from dbms/tests/integration/test_read_temporary_tables_on_failure/test.py rename to tests/integration/test_read_temporary_tables_on_failure/test.py diff --git a/dbms/tests/integration/test_replica_can_become_leader/__init__.py 
b/tests/integration/test_recovery_replica/__init__.py similarity index 100% rename from dbms/tests/integration/test_replica_can_become_leader/__init__.py rename to tests/integration/test_recovery_replica/__init__.py diff --git a/dbms/tests/integration/test_recovery_replica/configs/remote_servers.xml b/tests/integration/test_recovery_replica/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_recovery_replica/configs/remote_servers.xml rename to tests/integration/test_recovery_replica/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_recovery_replica/test.py b/tests/integration/test_recovery_replica/test.py similarity index 100% rename from dbms/tests/integration/test_recovery_replica/test.py rename to tests/integration/test_recovery_replica/test.py diff --git a/dbms/tests/integration/test_replicated_mutations/__init__.py b/tests/integration/test_redirect_url_storage/__init__.py similarity index 100% rename from dbms/tests/integration/test_replicated_mutations/__init__.py rename to tests/integration/test_redirect_url_storage/__init__.py diff --git a/dbms/tests/integration/test_redirect_url_storage/test.py b/tests/integration/test_redirect_url_storage/test.py similarity index 100% rename from dbms/tests/integration/test_redirect_url_storage/test.py rename to tests/integration/test_redirect_url_storage/test.py diff --git a/dbms/tests/integration/test_replicating_constants/__init__.py b/tests/integration/test_relative_filepath/__init__.py similarity index 100% rename from dbms/tests/integration/test_replicating_constants/__init__.py rename to tests/integration/test_relative_filepath/__init__.py diff --git a/dbms/tests/integration/test_relative_filepath/configs/config.xml b/tests/integration/test_relative_filepath/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_relative_filepath/configs/config.xml rename to tests/integration/test_relative_filepath/configs/config.xml diff --git a/dbms/tests/integration/test_relative_filepath/test.py b/tests/integration/test_relative_filepath/test.py similarity index 100% rename from dbms/tests/integration/test_relative_filepath/test.py rename to tests/integration/test_relative_filepath/test.py diff --git a/dbms/tests/integration/test_replication_credentials/__init__.py b/tests/integration/test_reload_max_table_size_to_drop/__init__.py similarity index 100% rename from dbms/tests/integration/test_replication_credentials/__init__.py rename to tests/integration/test_reload_max_table_size_to_drop/__init__.py diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/configs/config.xml b/tests/integration/test_reload_max_table_size_to_drop/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_reload_max_table_size_to_drop/configs/config.xml rename to tests/integration/test_reload_max_table_size_to_drop/configs/config.xml diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/configs/users.xml b/tests/integration/test_reload_max_table_size_to_drop/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_reload_max_table_size_to_drop/configs/users.xml rename to tests/integration/test_reload_max_table_size_to_drop/configs/users.xml diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/test.py b/tests/integration/test_reload_max_table_size_to_drop/test.py similarity index 100% rename from dbms/tests/integration/test_reload_max_table_size_to_drop/test.py rename to 
tests/integration/test_reload_max_table_size_to_drop/test.py diff --git a/dbms/tests/integration/test_replication_without_zookeeper/__init__.py b/tests/integration/test_reloading_storage_configuration/__init__.py similarity index 100% rename from dbms/tests/integration/test_replication_without_zookeeper/__init__.py rename to tests/integration/test_reloading_storage_configuration/__init__.py diff --git a/dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml b/tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml similarity index 100% rename from dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml rename to tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml diff --git a/dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml b/tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml similarity index 100% rename from dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml rename to tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml diff --git a/dbms/tests/integration/test_reloading_storage_configuration/configs/logs_config.xml b/tests/integration/test_reloading_storage_configuration/configs/logs_config.xml similarity index 100% rename from dbms/tests/integration/test_reloading_storage_configuration/configs/logs_config.xml rename to tests/integration/test_reloading_storage_configuration/configs/logs_config.xml diff --git a/dbms/tests/integration/test_reloading_storage_configuration/test.py b/tests/integration/test_reloading_storage_configuration/test.py similarity index 100% rename from dbms/tests/integration/test_reloading_storage_configuration/test.py rename to tests/integration/test_reloading_storage_configuration/test.py diff --git a/dbms/tests/integration/test_row_policy/__init__.py b/tests/integration/test_remote_prewhere/__init__.py similarity index 100% rename from dbms/tests/integration/test_row_policy/__init__.py rename to tests/integration/test_remote_prewhere/__init__.py diff --git a/dbms/tests/integration/test_remote_prewhere/configs/log_conf.xml b/tests/integration/test_remote_prewhere/configs/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_remote_prewhere/configs/log_conf.xml rename to tests/integration/test_remote_prewhere/configs/log_conf.xml diff --git a/dbms/tests/integration/test_remote_prewhere/test.py b/tests/integration/test_remote_prewhere/test.py similarity index 100% rename from dbms/tests/integration/test_remote_prewhere/test.py rename to tests/integration/test_remote_prewhere/test.py diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/__init__.py b/tests/integration/test_replace_partition/__init__.py similarity index 100% rename from dbms/tests/integration/test_send_request_to_leader_replica/__init__.py rename to tests/integration/test_replace_partition/__init__.py diff --git a/dbms/tests/integration/test_replace_partition/configs/remote_servers.xml b/tests/integration/test_replace_partition/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_replace_partition/configs/remote_servers.xml rename to tests/integration/test_replace_partition/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_replace_partition/test.py 
b/tests/integration/test_replace_partition/test.py similarity index 100% rename from dbms/tests/integration/test_replace_partition/test.py rename to tests/integration/test_replace_partition/test.py diff --git a/dbms/tests/integration/test_server_initialization/__init__.py b/tests/integration/test_replica_can_become_leader/__init__.py similarity index 100% rename from dbms/tests/integration/test_server_initialization/__init__.py rename to tests/integration/test_replica_can_become_leader/__init__.py diff --git a/dbms/tests/integration/test_replica_can_become_leader/configs/notleader.xml b/tests/integration/test_replica_can_become_leader/configs/notleader.xml similarity index 100% rename from dbms/tests/integration/test_replica_can_become_leader/configs/notleader.xml rename to tests/integration/test_replica_can_become_leader/configs/notleader.xml diff --git a/dbms/tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml b/tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml similarity index 100% rename from dbms/tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml rename to tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml diff --git a/dbms/tests/integration/test_replica_can_become_leader/test.py b/tests/integration/test_replica_can_become_leader/test.py similarity index 100% rename from dbms/tests/integration/test_replica_can_become_leader/test.py rename to tests/integration/test_replica_can_become_leader/test.py diff --git a/dbms/tests/integration/test_settings_constraints/__init__.py b/tests/integration/test_replicated_mutations/__init__.py similarity index 100% rename from dbms/tests/integration/test_settings_constraints/__init__.py rename to tests/integration/test_replicated_mutations/__init__.py diff --git a/dbms/tests/integration/test_replicated_mutations/configs/merge_tree.xml b/tests/integration/test_replicated_mutations/configs/merge_tree.xml similarity index 100% rename from dbms/tests/integration/test_replicated_mutations/configs/merge_tree.xml rename to tests/integration/test_replicated_mutations/configs/merge_tree.xml diff --git a/dbms/tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml b/tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml similarity index 100% rename from dbms/tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml rename to tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml diff --git a/dbms/tests/integration/test_replicated_mutations/test.py b/tests/integration/test_replicated_mutations/test.py similarity index 100% rename from dbms/tests/integration/test_replicated_mutations/test.py rename to tests/integration/test_replicated_mutations/test.py diff --git a/dbms/tests/integration/test_settings_constraints_distributed/__init__.py b/tests/integration/test_replicating_constants/__init__.py similarity index 100% rename from dbms/tests/integration/test_settings_constraints_distributed/__init__.py rename to tests/integration/test_replicating_constants/__init__.py diff --git a/dbms/tests/integration/test_replicating_constants/test.py b/tests/integration/test_replicating_constants/test.py similarity index 100% rename from dbms/tests/integration/test_replicating_constants/test.py rename to tests/integration/test_replicating_constants/test.py diff --git a/dbms/tests/integration/test_settings_profile/__init__.py 
b/tests/integration/test_replication_credentials/__init__.py similarity index 100% rename from dbms/tests/integration/test_settings_profile/__init__.py rename to tests/integration/test_replication_credentials/__init__.py diff --git a/dbms/tests/integration/test_replication_credentials/configs/credentials1.xml b/tests/integration/test_replication_credentials/configs/credentials1.xml similarity index 100% rename from dbms/tests/integration/test_replication_credentials/configs/credentials1.xml rename to tests/integration/test_replication_credentials/configs/credentials1.xml diff --git a/dbms/tests/integration/test_replication_credentials/configs/credentials2.xml b/tests/integration/test_replication_credentials/configs/credentials2.xml similarity index 100% rename from dbms/tests/integration/test_replication_credentials/configs/credentials2.xml rename to tests/integration/test_replication_credentials/configs/credentials2.xml diff --git a/dbms/tests/integration/test_replication_credentials/configs/no_credentials.xml b/tests/integration/test_replication_credentials/configs/no_credentials.xml similarity index 100% rename from dbms/tests/integration/test_replication_credentials/configs/no_credentials.xml rename to tests/integration/test_replication_credentials/configs/no_credentials.xml diff --git a/dbms/tests/integration/test_replication_credentials/configs/remote_servers.xml b/tests/integration/test_replication_credentials/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_replication_credentials/configs/remote_servers.xml rename to tests/integration/test_replication_credentials/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_replication_credentials/test.py b/tests/integration/test_replication_credentials/test.py similarity index 100% rename from dbms/tests/integration/test_replication_credentials/test.py rename to tests/integration/test_replication_credentials/test.py diff --git a/dbms/tests/integration/test_storage_hdfs/__init__.py b/tests/integration/test_replication_without_zookeeper/__init__.py similarity index 100% rename from dbms/tests/integration/test_storage_hdfs/__init__.py rename to tests/integration/test_replication_without_zookeeper/__init__.py diff --git a/dbms/tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml b/tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml rename to tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_replication_without_zookeeper/test.py b/tests/integration/test_replication_without_zookeeper/test.py similarity index 100% rename from dbms/tests/integration/test_replication_without_zookeeper/test.py rename to tests/integration/test_replication_without_zookeeper/test.py diff --git a/dbms/tests/integration/test_storage_kafka/__init__.py b/tests/integration/test_row_policy/__init__.py similarity index 100% rename from dbms/tests/integration/test_storage_kafka/__init__.py rename to tests/integration/test_row_policy/__init__.py diff --git a/dbms/tests/integration/test_row_policy/all_rows.xml b/tests/integration/test_row_policy/all_rows.xml similarity index 100% rename from dbms/tests/integration/test_row_policy/all_rows.xml rename to tests/integration/test_row_policy/all_rows.xml diff --git a/dbms/tests/integration/test_row_policy/configs/config.d/remote_servers.xml 
similarity index 100%
rename from dbms/tests/integration/test_row_policy/configs/config.d/remote_servers.xml
rename to tests/integration/test_row_policy/configs/config.d/remote_servers.xml
diff --git a/dbms/tests/integration/test_row_policy/configs/users.d/row_policy.xml b/tests/integration/test_row_policy/configs/users.d/row_policy.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/configs/users.d/row_policy.xml
rename to tests/integration/test_row_policy/configs/users.d/row_policy.xml
diff --git a/tests/integration/test_row_policy/configs/users.xml b/tests/integration/test_row_policy/configs/users.xml
new file mode 100644
index 00000000000..ce29b7f7308
--- /dev/null
+++ b/tests/integration/test_row_policy/configs/users.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<yandex>
+    <profiles>
+        <default>
+            <max_threads>1</max_threads>
+        </default>
+    </profiles>
+
+    <users>
+        <default>
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+            <profile>default</profile>
+            <quota>default</quota>
+        </default>
+        <another>
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+            <profile>default</profile>
+            <quota>default</quota>
+        </another>
+    </users>
+
+    <quotas>
+        <default/>
+    </quotas>
+</yandex>
diff --git a/dbms/tests/integration/test_row_policy/multiple_tags_with_table_names.xml b/tests/integration/test_row_policy/multiple_tags_with_table_names.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/multiple_tags_with_table_names.xml
rename to tests/integration/test_row_policy/multiple_tags_with_table_names.xml
diff --git a/dbms/tests/integration/test_row_policy/no_filters.xml b/tests/integration/test_row_policy/no_filters.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/no_filters.xml
rename to tests/integration/test_row_policy/no_filters.xml
diff --git a/dbms/tests/integration/test_row_policy/no_rows.xml b/tests/integration/test_row_policy/no_rows.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/no_rows.xml
rename to tests/integration/test_row_policy/no_rows.xml
diff --git a/dbms/tests/integration/test_row_policy/normal_filters.xml b/tests/integration/test_row_policy/normal_filters.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/normal_filters.xml
rename to tests/integration/test_row_policy/normal_filters.xml
diff --git a/dbms/tests/integration/test_row_policy/tag_with_table_name.xml b/tests/integration/test_row_policy/tag_with_table_name.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/tag_with_table_name.xml
rename to tests/integration/test_row_policy/tag_with_table_name.xml
diff --git a/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py
new file mode 100644
index 00000000000..3a5b7340528
--- /dev/null
+++ b/tests/integration/test_row_policy/test.py
@@ -0,0 +1,314 @@
+import pytest
+from helpers.cluster import ClickHouseCluster
+from helpers.test_tools import assert_eq_with_retry
+import os
+import re
+import time
+
+cluster = ClickHouseCluster(__file__)
+instance = cluster.add_instance('instance1', config_dir="configs", with_zookeeper=True)
+instance2 = cluster.add_instance('instance2', config_dir="configs", with_zookeeper=True)
+
+
+def copy_policy_xml(local_file_name, reload_immediately = True):
+    script_dir = os.path.dirname(os.path.realpath(__file__))
+    instance.copy_file_to_container(os.path.join(script_dir, local_file_name), '/etc/clickhouse-server/users.d/row_policy.xml')
+    instance2.copy_file_to_container(os.path.join(script_dir, local_file_name), '/etc/clickhouse-server/users.d/row_policy.xml')
+    if reload_immediately:
+        instance.query("SYSTEM RELOAD CONFIG")
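+    # Note: only `instance` is reloaded immediately here; the assertions that
+    # follow a copy_policy_xml() call always query `instance`, while `instance2`
+    # (the second shard for the Distributed checks) can pick the copied file up
+    # via the periodic config reload.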
+
+
+@pytest.fixture(scope="module", autouse=True)
+def started_cluster():
+    try:
+        cluster.start()
+
+        instance.query('''
+            CREATE DATABASE mydb;
+
+            CREATE TABLE mydb.filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1);
+
+            CREATE TABLE mydb.table (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.table values (0, 0), (0, 1), (1, 0), (1, 1);
+
+            CREATE TABLE mydb.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0);
+
+            CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1);
+
+            CREATE TABLE mydb.`.filtered_table4` (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.`.filtered_table4` values (0, 0), (0, 1), (1, 0), (1, 1);
+        ''')
+        instance2.query('''
+            CREATE DATABASE mydb;
+
+            CREATE TABLE mydb.filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1);
+
+            CREATE TABLE mydb.table (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.table values (0, 0), (0, 1), (1, 0), (1, 1);
+
+            CREATE TABLE mydb.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0);
+
+            CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1);
+
+            CREATE TABLE mydb.`.filtered_table4` (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
+            INSERT INTO mydb.`.filtered_table4` values (0, 0), (0, 1), (1, 0), (1, 1);
+        ''')
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+@pytest.fixture(autouse=True)
+def reset_policies():
+    try:
+        yield
+    finally:
+        copy_policy_xml('normal_filters.xml')
+        instance.query("DROP POLICY IF EXISTS pA, pB ON mydb.filtered_table1")
+
+
+def test_smoke():
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n"
+
+    assert instance.query("SELECT a FROM mydb.filtered_table1") == "1\n1\n"
+    assert instance.query("SELECT b FROM mydb.filtered_table1") == "0\n1\n"
+    assert instance.query("SELECT a FROM mydb.filtered_table1 WHERE a = 1") == "1\n1\n"
+    assert instance.query("SELECT a FROM mydb.filtered_table1 WHERE a IN (1)") == "1\n1\n"
+    assert instance.query("SELECT a = 1 FROM mydb.filtered_table1") == "1\n1\n"
+
+    assert instance.query("SELECT a FROM mydb.filtered_table3") == "0\n1\n"
+    assert instance.query("SELECT b FROM mydb.filtered_table3") == "1\n0\n"
+    assert instance.query("SELECT c FROM mydb.filtered_table3") == "1\n1\n"
+    assert instance.query("SELECT a + b FROM mydb.filtered_table3") == "1\n1\n"
+    assert instance.query("SELECT a FROM mydb.filtered_table3 WHERE c = 1") == "0\n1\n"
+    assert instance.query("SELECT c = 1 FROM mydb.filtered_table3") == "1\n1\n"
+    assert instance.query("SELECT a + b = 1 FROM mydb.filtered_table3") == "1\n1\n"
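+
+
+# With normal_filters.xml in place the policy on filtered_table1 is `a = 1`,
+# so both sides of the joins below are already reduced to the rows (1, 0)
+# and (1, 1).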
+def test_join():
+    assert instance.query("SELECT * FROM mydb.filtered_table1 as t1 ANY LEFT JOIN mydb.filtered_table1 as t2 ON t1.a = t2.b") == "1\t0\t1\t1\n1\t1\t1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table1 as t2 ANY RIGHT JOIN mydb.filtered_table1 as t1 ON t2.b = t1.a") == "1\t1\t1\t0\n"
+
+
+def test_cannot_trick_row_policy_with_keyword_with():
+    assert instance.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+    assert instance.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+    assert instance.query("WITH 0 AS a SELECT a FROM mydb.filtered_table1") == "1\n1\n"
+    assert instance.query("WITH 0 AS a SELECT b FROM mydb.filtered_table1") == "0\n1\n"
+
+
+def test_prewhere_not_supported():
+    expected_error = "PREWHERE is not supported if the table is filtered by row-level security"
+    assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table1 PREWHERE 1")
+    assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table2 PREWHERE 1")
+    assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table3 PREWHERE 1")
+
+    # However PREWHERE should still work for a user without filtering.
+    assert instance.query("SELECT * FROM mydb.filtered_table1 PREWHERE 1", user="another") == "0\t0\n0\t1\n1\t0\n1\t1\n"
+
+
+def test_single_table_name():
+    copy_policy_xml('tag_with_table_name.xml')
+    assert instance.query("SELECT * FROM mydb.table") == "1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n"
+
+    assert instance.query("SELECT a FROM mydb.table") == "1\n1\n"
+    assert instance.query("SELECT b FROM mydb.table") == "0\n1\n"
+    assert instance.query("SELECT a FROM mydb.table WHERE a = 1") == "1\n1\n"
+    assert instance.query("SELECT a = 1 FROM mydb.table") == "1\n1\n"
+
+    assert instance.query("SELECT a FROM mydb.filtered_table3") == "0\n1\n"
+    assert instance.query("SELECT b FROM mydb.filtered_table3") == "1\n0\n"
+    assert instance.query("SELECT c FROM mydb.filtered_table3") == "1\n1\n"
+    assert instance.query("SELECT a + b FROM mydb.filtered_table3") == "1\n1\n"
+    assert instance.query("SELECT a FROM mydb.filtered_table3 WHERE c = 1") == "0\n1\n"
+    assert instance.query("SELECT c = 1 FROM mydb.filtered_table3") == "1\n1\n"
+    assert instance.query("SELECT a + b = 1 FROM mydb.filtered_table3") == "1\n1\n"
+
+
+def test_custom_table_name():
+    copy_policy_xml('multiple_tags_with_table_names.xml')
+    assert instance.query("SELECT * FROM mydb.table") == "1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n"
+    assert instance.query("SELECT * FROM mydb.`.filtered_table4`") == "0\t1\n1\t0\n"
+
+    assert instance.query("SELECT a FROM mydb.table") == "1\n1\n"
+    assert instance.query("SELECT b FROM mydb.table") == "0\n1\n"
+    assert instance.query("SELECT a FROM mydb.table WHERE a = 1") == "1\n1\n"
+    assert instance.query("SELECT a = 1 FROM mydb.table") == "1\n1\n"
+
+    assert instance.query("SELECT a FROM mydb.`.filtered_table4`") == "0\n1\n"
+    assert instance.query("SELECT b FROM mydb.`.filtered_table4`") == "1\n0\n"
+    assert instance.query("SELECT c FROM mydb.`.filtered_table4`") == "1\n1\n"
+    assert instance.query("SELECT a + b FROM mydb.`.filtered_table4`") == "1\n1\n"
+    assert instance.query("SELECT a FROM mydb.`.filtered_table4` WHERE c = 1") == "0\n1\n"
+    assert instance.query("SELECT c = 1 FROM mydb.`.filtered_table4`") == "1\n1\n"
+    assert instance.query("SELECT a + b = 1 FROM mydb.`.filtered_table4`") == "1\n1\n"
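+
+
+# Each copy_policy_xml() call below swaps users.d/row_policy.xml and reloads
+# the config immediately, so the new filters are expected to be in effect by
+# the time the assertions run; the timer-based reload path is covered
+# separately in test_reload_users_xml_by_timer.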
+def test_change_of_users_xml_changes_row_policies():
+    copy_policy_xml('normal_filters.xml')
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n"
+
+    copy_policy_xml('all_rows.xml')
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "0\t0\n0\t1\n1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n1\t2\t3\t4\n4\t3\t2\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t0\n0\t1\n1\t0\n1\t1\n"
+
+    copy_policy_xml('no_rows.xml')
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == ""
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == ""
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == ""
+
+    copy_policy_xml('normal_filters.xml')
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n"
+
+    copy_policy_xml('no_filters.xml')
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "0\t0\n0\t1\n1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n1\t2\t3\t4\n4\t3\t2\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t0\n0\t1\n1\t0\n1\t1\n"
+
+    copy_policy_xml('normal_filters.xml')
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n"
+
+
+def test_reload_users_xml_by_timer():
+    copy_policy_xml('normal_filters.xml')
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table2") == "0\t0\t0\t0\n0\t0\t6\t0\n"
+    assert instance.query("SELECT * FROM mydb.filtered_table3") == "0\t1\n1\t0\n"
+
+    time.sleep(1)  # The modification time of the 'row_policy.xml' file should be different.
+    copy_policy_xml('all_rows.xml', False)
+    assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table1", "0\t0\n0\t1\n1\t0\n1\t1")
+    assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table2", "0\t0\t0\t0\n0\t0\t6\t0\n1\t2\t3\t4\n4\t3\t2\t1")
+    assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table3", "0\t0\n0\t1\n1\t0\n1\t1")
+
+    time.sleep(1)  # The modification time of the 'row_policy.xml' file should be different.
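+    # reload_immediately=False again: rely on the server noticing the changed
+    # modification time on its own, hence the retrying assertions.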
+    copy_policy_xml('normal_filters.xml', False)
+    assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table1", "1\t0\n1\t1")
+    assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table2", "0\t0\t0\t0\n0\t0\t6\t0")
+    assert_eq_with_retry(instance, "SELECT * FROM mydb.filtered_table3", "0\t1\n1\t0")
+
+
+def test_introspection():
+    assert instance.query("SELECT currentRowPolicies('mydb', 'filtered_table1')") == "['default']\n"
+    assert instance.query("SELECT currentRowPolicies('mydb', 'filtered_table2')") == "['default']\n"
+    assert instance.query("SELECT currentRowPolicies('mydb', 'filtered_table3')") == "['default']\n"
+    assert instance.query("SELECT arraySort(currentRowPolicies())") == "[('mydb','filtered_table1','default'),('mydb','filtered_table2','default'),('mydb','filtered_table3','default'),('mydb','local','default')]\n"
+
+    policy1 = "mydb\tfiltered_table1\tdefault\tdefault ON mydb.filtered_table1\t9e8a8f62-4965-2b5e-8599-57c7b99b3549\tusers.xml\t0\ta = 1\t\t\t\t\n"
+    policy2 = "mydb\tfiltered_table2\tdefault\tdefault ON mydb.filtered_table2\tcffae79d-b9bf-a2ef-b798-019c18470b25\tusers.xml\t0\ta + b < 1 or c - d > 5\t\t\t\t\n"
+    policy3 = "mydb\tfiltered_table3\tdefault\tdefault ON mydb.filtered_table3\t12fc5cef-e3da-3940-ec79-d8be3911f42b\tusers.xml\t0\tc = 1\t\t\t\t\n"
+    policy4 = "mydb\tlocal\tdefault\tdefault ON mydb.local\tcdacaeb5-1d97-f99d-2bb0-4574f290629c\tusers.xml\t0\t1\t\t\t\t\n"
+    assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'filtered_table1'), id) ORDER BY table, name") == policy1
+    assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'filtered_table2'), id) ORDER BY table, name") == policy2
+    assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'filtered_table3'), id) ORDER BY table, name") == policy3
+    assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs('mydb', 'local'), id) ORDER BY table, name") == policy4
+    assert instance.query("SELECT * from system.row_policies WHERE has(currentRowPolicyIDs(), id) ORDER BY table, name") == policy1 + policy2 + policy3 + policy4
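+
+
+# The DCL counterparts of the checks above: SHOW POLICIES and SHOW CREATE
+# POLICY should reflect whatever is currently loaded from users.xml.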
+def test_dcl_introspection():
+    assert instance.query("SHOW POLICIES ON mydb.filtered_table1") == "another\ndefault\n"
+    assert instance.query("SHOW POLICIES CURRENT ON mydb.filtered_table2") == "default\n"
+    assert instance.query("SHOW POLICIES") == "another ON mydb.filtered_table1\nanother ON mydb.filtered_table2\nanother ON mydb.filtered_table3\nanother ON mydb.local\ndefault ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\ndefault ON mydb.local\n"
+    assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\ndefault ON mydb.local\n"
+
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.local") == "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n"
+
+    copy_policy_xml('all_rows.xml')
+    assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING 1 TO default\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING 1 TO default\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING 1 TO default\n"
+
+    copy_policy_xml('no_rows.xml')
+    assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING NULL TO default\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING NULL TO default\n"
+    assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING NULL TO default\n"
+
+    copy_policy_xml('no_filters.xml')
+    assert instance.query("SHOW POLICIES") == ""
+
+
+def test_dcl_management():
+    copy_policy_xml('no_filters.xml')
+    assert instance.query("SHOW POLICIES") == ""
+
+    instance.query("CREATE POLICY pA ON mydb.filtered_table1 FOR SELECT USING a > b")
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n"
+
+    instance.query("ALTER POLICY pA ON mydb.filtered_table1 RENAME TO pB")
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n"
+    assert instance.query("SHOW POLICIES CURRENT ON mydb.filtered_table1") == "pB\n"
+    assert instance.query("SHOW CREATE POLICY pB ON mydb.filtered_table1") == "CREATE ROW POLICY pB ON mydb.filtered_table1 FOR SELECT USING a > b TO default\n"
+
+    instance.query("DROP POLICY pB ON mydb.filtered_table1")
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "0\t0\n0\t1\n1\t0\n1\t1\n"
+    assert instance.query("SHOW POLICIES") == ""
+
+
+def test_users_xml_is_readonly():
+    assert re.search("storage is readonly", instance.query_and_get_error("DROP POLICY default ON mydb.filtered_table1"))
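+
+
+# Row policies are applied when reading, so the same `a = 1` filter is expected
+# to hold across engine variants; the Distributed case additionally checks that
+# the policy on the underlying mydb.local table is applied on each shard.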
+def test_miscellaneous_engines():
+    copy_policy_xml('normal_filters.xml')
+
+    # ReplicatedMergeTree
+    instance.query("DROP TABLE mydb.filtered_table1")
+    instance.query("CREATE TABLE mydb.filtered_table1 (a UInt8, b UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/00-00/filtered_table1', 'replica1') ORDER BY a")
+    instance.query("INSERT INTO mydb.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1)")
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n1\t1\n"
+
+    # CollapsingMergeTree
+    instance.query("DROP TABLE mydb.filtered_table1")
+    instance.query("CREATE TABLE mydb.filtered_table1 (a UInt8, b Int8) ENGINE CollapsingMergeTree(b) ORDER BY a")
+    instance.query("INSERT INTO mydb.filtered_table1 values (0, 1), (0, 1), (1, 1), (1, 1)")
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t1\n1\t1\n"
+
+    # ReplicatedCollapsingMergeTree
+    instance.query("DROP TABLE mydb.filtered_table1")
+    instance.query("CREATE TABLE mydb.filtered_table1 (a UInt8, b Int8) ENGINE ReplicatedCollapsingMergeTree('/clickhouse/tables/00-00/filtered_table1', 'replica1', b) ORDER BY a")
+    instance.query("INSERT INTO mydb.filtered_table1 values (0, 1), (0, 1), (1, 1), (1, 1)")
+    assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t1\n1\t1\n"
+
+    # DistributedMergeTree
+    instance.query("DROP TABLE IF EXISTS mydb.not_filtered_table")
+    instance.query("CREATE TABLE mydb.not_filtered_table (a UInt8, b UInt8) ENGINE Distributed('test_local_cluster', mydb, local)")
+    instance.query("CREATE TABLE mydb.local (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a")
+    instance2.query("CREATE TABLE mydb.local (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a")
+    instance.query("INSERT INTO mydb.local values (2, 0), (2, 1), (1, 0), (1, 1)")
+    instance2.query("INSERT INTO mydb.local values (3, 0), (3, 1), (1, 0), (1, 1)")
+    assert instance.query("SELECT * FROM mydb.not_filtered_table", user="another") == "1\t0\n1\t1\n1\t0\n1\t1\n"
+    assert instance.query("SELECT sum(a), b FROM mydb.not_filtered_table GROUP BY b ORDER BY b", user="another") == "2\t0\n2\t1\n"
diff --git a/dbms/tests/integration/test_storage_mysql/__init__.py b/tests/integration/test_send_request_to_leader_replica/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_storage_mysql/__init__.py
rename to tests/integration/test_send_request_to_leader_replica/__init__.py
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml b/tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml
rename to tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml b/tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml
rename to tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml b/tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml
rename to tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/test.py b/tests/integration/test_send_request_to_leader_replica/test.py
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/test.py
rename to tests/integration/test_send_request_to_leader_replica/test.py
diff --git a/dbms/tests/integration/test_storage_s3/__init__.py b/tests/integration/test_server_initialization/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_storage_s3/__init__.py
rename to tests/integration/test_server_initialization/__init__.py
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV b/tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV
rename to tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop b/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop
rename to tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop b/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop
rename to tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql b/tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql
rename to tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql b/tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql
rename to tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql
diff --git a/dbms/tests/integration/test_server_initialization/test.py b/tests/integration/test_server_initialization/test.py
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/test.py
rename to tests/integration/test_server_initialization/test.py
diff --git a/dbms/tests/integration/test_system_merges/__init__.py b/tests/integration/test_settings_constraints/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_system_merges/__init__.py
rename to tests/integration/test_settings_constraints/__init__.py
diff --git a/dbms/tests/integration/test_settings_constraints/configs/users.xml b/tests/integration/test_settings_constraints/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints/configs/users.xml
rename to tests/integration/test_settings_constraints/configs/users.xml
diff --git a/dbms/tests/integration/test_settings_constraints/test.py b/tests/integration/test_settings_constraints/test.py
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints/test.py
rename to tests/integration/test_settings_constraints/test.py
diff --git a/dbms/tests/integration/test_system_queries/__init__.py b/tests/integration/test_settings_constraints_distributed/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_system_queries/__init__.py
rename to tests/integration/test_settings_constraints_distributed/__init__.py
diff --git a/dbms/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml b/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml
rename to tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml
diff --git a/tests/integration/test_settings_constraints_distributed/test.py b/tests/integration/test_settings_constraints_distributed/test.py
new file mode 100644
index 00000000000..854f101fb18
--- /dev/null
+++ b/tests/integration/test_settings_constraints_distributed/test.py
@@ -0,0 +1,106 @@
+import time
+
+import pytest
+
+from helpers.client import QueryRuntimeException
+from helpers.cluster import ClickHouseCluster
+from helpers.test_tools import assert_eq_with_retry
+
+cluster = ClickHouseCluster(__file__)
+
+node1 = cluster.add_instance('node1')
+node2 = cluster.add_instance('node2')
+distributed = cluster.add_instance('distributed', main_configs=["configs/remote_servers.xml"], stay_alive=True)
+
+
+@pytest.fixture(scope="module", autouse=True)
+def started_cluster():
+    try:
+        cluster.start()
+
+        for node in [node1, node2]:
+            node.query("CREATE TABLE sometable (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;")
+            node.query("INSERT INTO sometable VALUES (toDate('2010-01-10'), 1, 1)")
+            node.query("CREATE USER shard")
+            node.query("GRANT ALL ON *.* TO shard")
+
+        distributed.query("CREATE TABLE proxy (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable, toUInt64(date));")
+        distributed.query("CREATE TABLE shard_settings (name String, value String) ENGINE = Distributed(test_cluster, system, settings);")
+        distributed.query("CREATE ROLE admin")
+        distributed.query("GRANT ALL ON *.* TO admin")
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+@pytest.fixture(autouse=True)
+def restart_distributed():
+    # Magic: the Distributed table tries to keep connections to the shards open, and after changing the
+    # shards' default settings we have to reset those connections to force the shards to start new sessions
+    # and therefore pick up the new defaults.
+    distributed.restart_clickhouse()
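+
+
+# Each shard clamps settings received from the initiator into its own
+# MIN/MAX constraints: values below MIN are raised, values above MAX are
+# lowered, and values inside the range pass through unchanged. This is
+# observable through the shard_settings Distributed table over system.settings.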
+def test_select_clamps_settings():
+    distributed.query("CREATE USER normal DEFAULT ROLE admin SETTINGS max_memory_usage = 80000000")
+    distributed.query("CREATE USER wasteful DEFAULT ROLE admin SETTINGS max_memory_usage = 2000000000")
+    distributed.query("CREATE USER readonly DEFAULT ROLE admin SETTINGS readonly = 1")
+    node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999")
+    node2.query("ALTER USER shard SETTINGS readonly = 1")
+
+    # Check that the shards don't throw exceptions on constraint violations.
+    query = "SELECT COUNT() FROM proxy"
+    assert distributed.query(query) == '2\n'
+    assert distributed.query(query, user = 'normal') == '2\n'
+    assert distributed.query(query, user = 'wasteful') == '2\n'
+    assert distributed.query(query, user = 'readonly') == '2\n'
+
+    assert distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) == '2\n'
+    assert distributed.query(query, settings={"max_memory_usage": 3000000000, "readonly": 2}) == '2\n'
+
+    query = "SELECT COUNT() FROM remote('node{1,2}', 'default', 'sometable')"
+    assert distributed.query(query) == '2\n'
+    assert distributed.query(query, user = 'normal') == '2\n'
+    assert distributed.query(query, user = 'wasteful') == '2\n'
+
+    # Check that the shards clamp passed settings.
+    query = "SELECT hostName() as host, name, value FROM shard_settings WHERE name = 'max_memory_usage' OR name = 'readonly' ORDER BY host, name, value"
+    assert distributed.query(query) == 'node1\tmax_memory_usage\t99999999\n'\
+                                       'node1\treadonly\t0\n'\
+                                       'node2\tmax_memory_usage\t10000000000\n'\
+                                       'node2\treadonly\t1\n'
+    assert distributed.query(query, user = 'normal') == 'node1\tmax_memory_usage\t80000000\n'\
+                                                        'node1\treadonly\t0\n'\
+                                                        'node2\tmax_memory_usage\t10000000000\n'\
+                                                        'node2\treadonly\t1\n'
+    assert distributed.query(query, user = 'wasteful') == 'node1\tmax_memory_usage\t99999999\n'\
+                                                          'node1\treadonly\t0\n'\
+                                                          'node2\tmax_memory_usage\t10000000000\n'\
+                                                          'node2\treadonly\t1\n'
+    assert distributed.query(query, user = 'readonly') == 'node1\tmax_memory_usage\t99999999\n'\
+                                                          'node1\treadonly\t1\n'\
+                                                          'node2\tmax_memory_usage\t10000000000\n'\
+                                                          'node2\treadonly\t1\n'
+
+    assert distributed.query(query, settings={"max_memory_usage": 1}) == 'node1\tmax_memory_usage\t11111111\n'\
+                                                                         'node1\treadonly\t0\n'\
+                                                                         'node2\tmax_memory_usage\t10000000000\n'\
+                                                                         'node2\treadonly\t1\n'
+    assert distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) == 'node1\tmax_memory_usage\t40000000\n'\
+                                                                                               'node1\treadonly\t2\n'\
+                                                                                               'node2\tmax_memory_usage\t10000000000\n'\
+                                                                                               'node2\treadonly\t1\n'
+    assert distributed.query(query, settings={"max_memory_usage": 3000000000, "readonly": 2}) == 'node1\tmax_memory_usage\t99999999\n'\
+                                                                                                 'node1\treadonly\t2\n'\
+                                                                                                 'node2\tmax_memory_usage\t10000000000\n'\
+                                                                                                 'node2\treadonly\t1\n'
+
+def test_insert_clamps_settings():
+    node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999")
+    node2.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999")
+
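+    # max_memory_usage = 5000000 is below the shards' MIN of 11111111, so it
+    # should be clamped up rather than rejected, and both INSERTs succeed.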
+    distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-20'), 2, 2)")
+    distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-21'), 2, 2)", settings={"max_memory_usage": 5000000})
+    assert distributed.query("SELECT COUNT() FROM proxy") == "4\n"
diff --git a/dbms/tests/integration/test_text_log_level/__init__.py b/tests/integration/test_settings_profile/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_text_log_level/__init__.py
rename to tests/integration/test_settings_profile/__init__.py
diff --git a/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py
new file mode 100644
index 00000000000..8b9d023d56f
--- /dev/null
+++ b/tests/integration/test_settings_profile/test.py
@@ -0,0 +1,139 @@
+import pytest
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+instance = cluster.add_instance('instance')
+
+
+@pytest.fixture(scope="module", autouse=True)
+def setup_nodes():
+    try:
+        cluster.start()
+
+        instance.query("CREATE USER robin")
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+@pytest.fixture(autouse=True)
+def reset_after_test():
+    try:
+        yield
+    finally:
+        instance.query("CREATE USER OR REPLACE robin")
+        instance.query("DROP ROLE IF EXISTS worker")
+        instance.query("DROP SETTINGS PROFILE IF EXISTS xyz, alpha")
+
+
+def test_settings_profile():
+    # Set settings and constraints via CREATE SETTINGS PROFILE ... TO user
+    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin")
+    assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
+    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
+    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
+
+    instance.query("ALTER SETTINGS PROFILE xyz TO NONE")
+    assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
+    instance.query("SET max_memory_usage = 80000000", user="robin")
+    instance.query("SET max_memory_usage = 120000000", user="robin")
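+    # (10000000000 is presumably the default-profile value of max_memory_usage;
+    # with xyz detached, the constraints are gone and both SETs succeed again.)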
+
+    # Set settings and constraints via CREATE USER ... SETTINGS PROFILE
+    instance.query("ALTER USER robin SETTINGS PROFILE xyz")
+    assert instance.query("SHOW CREATE USER robin") == "CREATE USER robin SETTINGS PROFILE xyz\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
+    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
+    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
+
+    instance.query("ALTER USER robin SETTINGS NONE")
+    assert instance.query("SHOW CREATE USER robin") == "CREATE USER robin\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
+    instance.query("SET max_memory_usage = 80000000", user="robin")
+    instance.query("SET max_memory_usage = 120000000", user="robin")
+
+
+def test_settings_profile_from_granted_role():
+    # Set settings and constraints via granted role
+    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000")
+    instance.query("CREATE ROLE worker SETTINGS PROFILE xyz")
+    instance.query("GRANT worker TO robin")
+    assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n"
+    assert instance.query("SHOW CREATE ROLE worker") == "CREATE ROLE worker SETTINGS PROFILE xyz\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
+    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
+    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
+
+    instance.query("REVOKE worker FROM robin")
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
+    instance.query("SET max_memory_usage = 80000000", user="robin")
+    instance.query("SET max_memory_usage = 120000000", user="robin")
+
+    instance.query("ALTER ROLE worker SETTINGS NONE")
+    instance.query("GRANT worker TO robin")
+    assert instance.query("SHOW CREATE ROLE worker") == "CREATE ROLE worker\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
+    instance.query("SET max_memory_usage = 80000000", user="robin")
+    instance.query("SET max_memory_usage = 120000000", user="robin")
+
+    # Set settings and constraints via CREATE SETTINGS PROFILE ... TO granted role
+    instance.query("ALTER SETTINGS PROFILE xyz TO worker")
+    assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO worker\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
+    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
+    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
+
+    instance.query("ALTER SETTINGS PROFILE xyz TO NONE")
+    assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
+    instance.query("SET max_memory_usage = 80000000", user="robin")
+    instance.query("SET max_memory_usage = 120000000", user="robin")
+
+
+def test_inheritance_of_settings_profile():
+    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY")
+    instance.query("CREATE SETTINGS PROFILE alpha SETTINGS PROFILE xyz TO robin")
+    assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY\n"
+    assert instance.query("SHOW CREATE SETTINGS PROFILE alpha") == "CREATE SETTINGS PROFILE alpha SETTINGS INHERIT xyz TO robin\n"
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000002\n"
+    assert "Setting max_memory_usage should not be changed" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
+
+
+def test_alter_and_drop():
+    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000003 MIN 90000000 MAX 110000000 TO robin")
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000003\n"
+    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
+    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
+
+    instance.query("ALTER SETTINGS PROFILE xyz SETTINGS readonly=1")
+    assert "Cannot modify 'max_memory_usage' setting in readonly mode" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
+
+    instance.query("DROP SETTINGS PROFILE xyz")
+    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
+    instance.query("SET max_memory_usage = 80000000", user="robin")
+    instance.query("SET max_memory_usage = 120000000", user="robin")
+
+
+def test_allow_introspection():
+    assert "Not enough privileges" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
+
+    instance.query("GRANT ALL ON *.* TO robin")
+    assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
+
+    instance.query("ALTER USER robin SETTINGS allow_introspection_functions=1")
+    assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n"
+
+    instance.query("ALTER USER robin SETTINGS NONE")
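+    # Dropping the per-user setting disables introspection again: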
+    assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
+
+    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS allow_introspection_functions=1 TO robin")
+    assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n"
+
+    instance.query("DROP SETTINGS PROFILE xyz")
+    assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
+
+    instance.query("REVOKE ALL ON *.* FROM robin")
+    assert "Not enough privileges" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
diff --git a/dbms/tests/integration/test_timezone_config/__init__.py b/tests/integration/test_storage_hdfs/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_timezone_config/__init__.py
rename to tests/integration/test_storage_hdfs/__init__.py
diff --git a/dbms/tests/integration/test_storage_hdfs/configs/log_conf.xml b/tests/integration/test_storage_hdfs/configs/log_conf.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_hdfs/configs/log_conf.xml
rename to tests/integration/test_storage_hdfs/configs/log_conf.xml
diff --git a/dbms/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_hdfs/test.py
rename to tests/integration/test_storage_hdfs/test.py
diff --git a/dbms/tests/integration/test_tmp_policy/__init__.py b/tests/integration/test_storage_kafka/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_tmp_policy/__init__.py
rename to tests/integration/test_storage_kafka/__init__.py
diff --git a/dbms/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto
rename to tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto
diff --git a/dbms/tests/integration/test_storage_kafka/configs/kafka.xml b/tests/integration/test_storage_kafka/configs/kafka.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/configs/kafka.xml
rename to tests/integration/test_storage_kafka/configs/kafka.xml
diff --git a/dbms/tests/integration/test_storage_kafka/configs/log_conf.xml b/tests/integration/test_storage_kafka/configs/log_conf.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/configs/log_conf.xml
rename to tests/integration/test_storage_kafka/configs/log_conf.xml
diff --git a/dbms/tests/integration/test_storage_kafka/configs/users.xml b/tests/integration/test_storage_kafka/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/configs/users.xml
rename to tests/integration/test_storage_kafka/configs/users.xml
diff --git a/dbms/tests/integration/test_storage_kafka/kafka_pb2.py b/tests/integration/test_storage_kafka/kafka_pb2.py
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/kafka_pb2.py
rename to tests/integration/test_storage_kafka/kafka_pb2.py
diff --git a/dbms/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test.py
rename to tests/integration/test_storage_kafka/test.py
diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_json.reference b/tests/integration/test_storage_kafka/test_kafka_json.reference
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test_kafka_json.reference
rename to tests/integration/test_storage_kafka/test_kafka_json.reference
diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference b/tests/integration/test_storage_kafka/test_kafka_virtual1.reference
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference
rename to tests/integration/test_storage_kafka/test_kafka_virtual1.reference
diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference b/tests/integration/test_storage_kafka/test_kafka_virtual2.reference
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference
rename to tests/integration/test_storage_kafka/test_kafka_virtual2.reference
diff --git a/dbms/tests/integration/test_ttl_move/__init__.py b/tests/integration/test_storage_mysql/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/__init__.py
rename to tests/integration/test_storage_mysql/__init__.py
diff --git a/dbms/tests/integration/test_storage_mysql/configs/remote_servers.xml b/tests/integration/test_storage_mysql/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_mysql/configs/remote_servers.xml
rename to tests/integration/test_storage_mysql/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_mysql/test.py
rename to tests/integration/test_storage_mysql/test.py
diff --git a/dbms/tests/integration/test_ttl_replicated/__init__.py b/tests/integration/test_storage_s3/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_replicated/__init__.py
rename to tests/integration/test_storage_s3/__init__.py
diff --git a/dbms/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml b/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml
rename to tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml
diff --git a/dbms/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_s3/test.py
rename to tests/integration/test_storage_s3/test.py
diff --git a/dbms/tests/integration/test_union_header/__init__.py b/tests/integration/test_system_merges/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_union_header/__init__.py
rename to tests/integration/test_system_merges/__init__.py
diff --git a/dbms/tests/integration/test_system_merges/configs/config.d/cluster.xml b/tests/integration/test_system_merges/configs/config.d/cluster.xml
similarity index 100%
rename from dbms/tests/integration/test_system_merges/configs/config.d/cluster.xml
rename to tests/integration/test_system_merges/configs/config.d/cluster.xml
diff --git a/dbms/tests/integration/test_system_merges/configs/logs_config.xml b/tests/integration/test_system_merges/configs/logs_config.xml
similarity index 100%
rename from dbms/tests/integration/test_system_merges/configs/logs_config.xml
rename to tests/integration/test_system_merges/configs/logs_config.xml
diff --git a/dbms/tests/integration/test_system_merges/test.py b/tests/integration/test_system_merges/test.py
similarity index 100%
rename from dbms/tests/integration/test_system_merges/test.py
rename to tests/integration/test_system_merges/test.py
diff --git a/dbms/tests/integration/test_user_ip_restrictions/__init__.py b/tests/integration/test_system_queries/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/__init__.py
rename to tests/integration/test_system_queries/__init__.py
diff --git a/dbms/tests/integration/test_system_queries/configs/config.d/clusters_config.xml b/tests/integration/test_system_queries/configs/config.d/clusters_config.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/config.d/clusters_config.xml
rename to tests/integration/test_system_queries/configs/config.d/clusters_config.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml b/tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml
rename to tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/config.d/query_log.xml b/tests/integration/test_system_queries/configs/config.d/query_log.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/config.d/query_log.xml
rename to tests/integration/test_system_queries/configs/config.d/query_log.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml b/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml
rename to tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml b/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml
rename to tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/users.xml b/tests/integration/test_system_queries/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/users.xml
rename to tests/integration/test_system_queries/configs/users.xml
diff --git a/dbms/tests/integration/test_system_queries/test.py b/tests/integration/test_system_queries/test.py
similarity index 100%
rename from dbms/tests/integration/test_system_queries/test.py
rename to tests/integration/test_system_queries/test.py
diff --git a/dbms/tests/integration/test_user_zero_database_access/__init__.py b/tests/integration/test_text_log_level/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_user_zero_database_access/__init__.py
rename to tests/integration/test_text_log_level/__init__.py
diff --git a/dbms/tests/integration/test_text_log_level/configs/config.d/text_log.xml b/tests/integration/test_text_log_level/configs/config.d/text_log.xml
similarity index 100%
rename from dbms/tests/integration/test_text_log_level/configs/config.d/text_log.xml
rename to tests/integration/test_text_log_level/configs/config.d/text_log.xml
diff --git a/dbms/tests/integration/test_text_log_level/test.py b/tests/integration/test_text_log_level/test.py
similarity index 100%
rename from dbms/tests/integration/test_text_log_level/test.py
rename to tests/integration/test_text_log_level/test.py
diff --git a/dbms/tests/integration/test_version_update_after_mutation/__init__.py b/tests/integration/test_timezone_config/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_version_update_after_mutation/__init__.py
rename to tests/integration/test_timezone_config/__init__.py
diff --git a/dbms/tests/integration/test_timezone_config/configs/config.xml b/tests/integration/test_timezone_config/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_timezone_config/configs/config.xml
rename to tests/integration/test_timezone_config/configs/config.xml
diff --git a/dbms/tests/integration/test_timezone_config/test.py b/tests/integration/test_timezone_config/test.py
similarity index 100%
rename from dbms/tests/integration/test_timezone_config/test.py
rename to tests/integration/test_timezone_config/test.py
diff --git a/dbms/tests/integration/test_zookeeper_config/__init__.py b/tests/integration/test_tmp_policy/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/__init__.py
rename to tests/integration/test_tmp_policy/__init__.py
diff --git a/dbms/tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml b/tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml
similarity index 100%
rename from dbms/tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml
rename to tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml
diff --git a/dbms/tests/integration/test_tmp_policy/test.py b/tests/integration/test_tmp_policy/test.py
similarity index 100%
rename from dbms/tests/integration/test_tmp_policy/test.py
rename to tests/integration/test_tmp_policy/test.py
diff --git a/dbms/tests/queries/0_stateless/00068_empty_tiny_log.reference b/tests/integration/test_ttl_move/__init__.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00068_empty_tiny_log.reference
rename to tests/integration/test_ttl_move/__init__.py
diff --git a/dbms/tests/integration/test_ttl_move/configs/config.d/cluster.xml b/tests/integration/test_ttl_move/configs/config.d/cluster.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/config.d/cluster.xml
rename to tests/integration/test_ttl_move/configs/config.d/cluster.xml
diff --git a/dbms/tests/integration/test_ttl_move/configs/config.d/instant_moves.xml b/tests/integration/test_ttl_move/configs/config.d/instant_moves.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/config.d/instant_moves.xml
rename to tests/integration/test_ttl_move/configs/config.d/instant_moves.xml
diff --git a/dbms/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml b/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml
rename to tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml
diff --git a/dbms/tests/integration/test_ttl_move/configs/logs_config.xml b/tests/integration/test_ttl_move/configs/logs_config.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/logs_config.xml
rename to tests/integration/test_ttl_move/configs/logs_config.xml
diff --git a/dbms/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/test.py
rename to tests/integration/test_ttl_move/test.py
diff --git a/dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference b/tests/integration/test_ttl_replicated/__init__.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference
rename to tests/integration/test_ttl_replicated/__init__.py
diff --git a/dbms/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_replicated/test.py
rename to tests/integration/test_ttl_replicated/test.py
diff --git a/dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference b/tests/integration/test_union_header/__init__.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference
rename to tests/integration/test_union_header/__init__.py
diff --git a/dbms/tests/integration/test_union_header/configs/remote_servers.xml b/tests/integration/test_union_header/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_union_header/configs/remote_servers.xml
rename to tests/integration/test_union_header/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_union_header/test.py b/tests/integration/test_union_header/test.py
similarity index 100%
rename from dbms/tests/integration/test_union_header/test.py
rename to tests/integration/test_union_header/test.py
diff --git a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference b/tests/integration/test_user_ip_restrictions/__init__.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference
rename to tests/integration/test_user_ip_restrictions/__init__.py
diff --git a/dbms/tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml b/tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml
rename to tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml
diff --git a/dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml b/tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml
rename to tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml
diff --git a/dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml b/tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml
rename to tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml
diff --git a/dbms/tests/integration/test_user_ip_restrictions/test.py b/tests/integration/test_user_ip_restrictions/test.py
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/test.py
rename to tests/integration/test_user_ip_restrictions/test.py
diff --git a/dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference b/tests/integration/test_user_zero_database_access/__init__.py
a/dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference b/tests/integration/test_user_zero_database_access/__init__.py similarity index 100% rename from dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference rename to tests/integration/test_user_zero_database_access/__init__.py diff --git a/dbms/tests/integration/test_user_zero_database_access/configs/config.xml b/tests/integration/test_user_zero_database_access/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_user_zero_database_access/configs/config.xml rename to tests/integration/test_user_zero_database_access/configs/config.xml diff --git a/dbms/tests/integration/test_user_zero_database_access/configs/users.xml b/tests/integration/test_user_zero_database_access/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_user_zero_database_access/configs/users.xml rename to tests/integration/test_user_zero_database_access/configs/users.xml diff --git a/dbms/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py b/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py similarity index 100% rename from dbms/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py rename to tests/integration/test_user_zero_database_access/test_user_zero_database_access.py diff --git a/dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference b/tests/integration/test_version_update_after_mutation/__init__.py similarity index 100% rename from dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference rename to tests/integration/test_version_update_after_mutation/__init__.py diff --git a/dbms/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py similarity index 100% rename from dbms/tests/integration/test_version_update_after_mutation/test.py rename to tests/integration/test_version_update_after_mutation/test.py diff --git a/dbms/tests/queries/0_stateless/00180_attach_materialized_view.reference b/tests/integration/test_zookeeper_config/__init__.py similarity index 100% rename from dbms/tests/queries/0_stateless/00180_attach_materialized_view.reference rename to tests/integration/test_zookeeper_config/__init__.py diff --git a/dbms/tests/integration/test_zookeeper_config/configs/remote_servers.xml b/tests/integration/test_zookeeper_config/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_zookeeper_config/configs/remote_servers.xml rename to tests/integration/test_zookeeper_config/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml b/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml similarity index 100% rename from dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml rename to tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml diff --git a/dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml b/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml similarity index 100% rename from dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml rename to tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml diff --git a/dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml 
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml
rename to tests/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml
diff --git a/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_ssl.xml b/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_ssl.xml
new file mode 100644
index 00000000000..fc03b609146
--- /dev/null
+++ b/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_ssl.xml
@@ -0,0 +1,20 @@
+<yandex>
+    <zookeeper>
+        <node index="1">
+            <host>zoo1</host>
+            <port>2281</port>
+            <secure>1</secure>
+        </node>
+        <node index="2">
+            <host>zoo2</host>
+            <port>2281</port>
+            <secure>1</secure>
+        </node>
+        <node index="3">
+            <host>zoo3</host>
+            <port>2281</port>
+            <secure>1</secure>
+        </node>
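+        <!-- 2281 is the ZooKeeper secure client port (ZOO_SECURE_CLIENT_PORT in the
+             compose template below); <secure>1</secure> makes ClickHouse open these
+             connections over TLS. -->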
+        <session_timeout_ms>3000</session_timeout_ms>
+    </zookeeper>
+</yandex>
diff --git a/tests/integration/test_zookeeper_config/configs_secure/client.crt b/tests/integration/test_zookeeper_config/configs_secure/client.crt
new file mode 100644
index 00000000000..7ade2d96273
--- /dev/null
+++ b/tests/integration/test_zookeeper_config/configs_secure/client.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIC/TCCAeWgAwIBAgIJANjx1QSR77HBMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV
+BAMMCWxvY2FsaG9zdDAgFw0xODA3MzAxODE2MDhaGA8yMjkyMDUxNDE4MTYwOFow
+FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAs9uSo6lJG8o8pw0fbVGVu0tPOljSWcVSXH9uiJBwlZLQnhN4SFSFohfI
+4K8U1tBDTnxPLUo/V1K9yzoLiRDGMkwVj6+4+hE2udS2ePTQv5oaMeJ9wrs+5c9T
+4pOtlq3pLAdm04ZMB1nbrEysceVudHRkQbGHzHp6VG29Fw7Ga6YpqyHQihRmEkTU
+7UCYNA+Vk7aDPdMS/khweyTpXYZimaK9f0ECU3/VOeG3fH6Sp2X6FN4tUj/aFXEj
+sRmU5G2TlYiSIUMF2JPdhSihfk1hJVALrHPTU38SOL+GyyBRWdNcrIwVwbpvsvPg
+pryMSNxnpr0AK0dFhjwnupIv5hJIOQIDAQABo1AwTjAdBgNVHQ4EFgQUjPLb3uYC
+kcamyZHK4/EV8jAP0wQwHwYDVR0jBBgwFoAUjPLb3uYCkcamyZHK4/EV8jAP0wQw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAM/ocuDvfPus/KpMVD51j
+4IdlU8R0vmnYLQ+ygzOAo7+hUWP5j0yvq4ILWNmQX6HNvUggCgFv9bjwDFhb/5Vr
+85ieWfTd9+LTjrOzTw4avdGwpX9G+6jJJSSq15tw5ElOIFb/qNA9O4dBiu8vn03C
+L/zRSXrARhSqTW5w/tZkUcSTT+M5h28+Lgn9ysx4Ff5vi44LJ1NnrbJbEAIYsAAD
++UA+4MBFKx1r6hHINULev8+lCfkpwIaeS8RL+op4fr6kQPxnULw8wT8gkuc8I4+L
+P9gg/xDHB44T3ADGZ5Ib6O0DJaNiToO6rnoaaxs0KkotbvDWvRoxEytSbXKoYjYp
+0g==
+-----END CERTIFICATE-----
diff --git a/tests/integration/test_zookeeper_config/configs_secure/client.key b/tests/integration/test_zookeeper_config/configs_secure/client.key
new file mode 100644
index 00000000000..f0fb61ac443
--- /dev/null
+++ b/tests/integration/test_zookeeper_config/configs_secure/client.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz25KjqUkbyjyn
+DR9tUZW7S086WNJZxVJcf26IkHCVktCeE3hIVIWiF8jgrxTW0ENOfE8tSj9XUr3L
+OguJEMYyTBWPr7j6ETa51LZ49NC/mhox4n3Cuz7lz1Pik62WreksB2bThkwHWdus
+TKxx5W50dGRBsYfMenpUbb0XDsZrpimrIdCKFGYSRNTtQJg0D5WTtoM90xL+SHB7
+JOldhmKZor1/QQJTf9U54bd8fpKnZfoU3i1SP9oVcSOxGZTkbZOViJIhQwXYk92F
+KKF+TWElUAusc9NTfxI4v4bLIFFZ01ysjBXBum+y8+CmvIxI3GemvQArR0WGPCe6
+ki/mEkg5AgMBAAECggEATrbIBIxwDJOD2/BoUqWkDCY3dGevF8697vFuZKIiQ7PP
+TX9j4vPq0DfsmDjHvAPFkTHiTQXzlroFik3LAp+uvhCCVzImmHq0IrwvZ9xtB43f
+7Pkc5P6h1l3Ybo8HJ6zRIY3TuLtLxuPSuiOMTQSGRL0zq3SQ5DKuGwkz+kVjHXUN
+MR2TECFwMHKQ5VLrC+7PMpsJYyOMlDAWhRfUalxC55xOXTpaN8TxNnwQ8K2ISVY5
+212Jz/a4hn4LdwxSz3Tiu95PN072K87HLWx3EdT6vW4Ge5P/A3y+smIuNAlanMnu
+plHBRtpATLiTxZt/n6npyrfQVbYjSH7KWhB8hBHtaQKBgQDh9Cq1c/KtqDtE0Ccr
+/r9tZNTUwBE6VP+3OJeKdEdtsfuxjOCkS1oAjgBJiSDOiWPh1DdoDeVZjPKq6pIu
+Mq12OE3Doa8znfCXGbkSzEKOb2unKZMJxzrz99kXt40W5DtrqKPNb24CNqTiY8Aa
+CjtcX+3weat82VRXvph6U8ltMwKBgQDLxjiQQzNoY7qvg7CwJCjf9qq8jmLK766g
+1FHXopqS+dTxDLM8eJSRrpmxGWJvNeNc1uPhsKsKgotqAMdBUQTf7rSTbt4MyoH5
+bUcRLtr+0QTK9hDWMOOvleqNXha68vATkohWYfCueNsC60qD44o8RZAS6UNy3ENq
+cM1cxqe84wKBgQDKkHutWnooJtajlTxY27O/nZKT/HA1bDgniMuKaz4R4Gr1PIez
+on3YW3V0d0P7BP6PWRIm7bY79vkiMtLEKdiKUGWeyZdo3eHvhDb/3DCawtau8L2K
+GZsHVp2//mS1Lfz7Qh8/L/NedqCQ+L4iWiPnZ3THjjwn3CoZ05ucpvrAMwKBgB54
+nay039MUVq44Owub3KDg+dcIU62U+cAC/9oG7qZbxYPmKkc4oL7IJSNecGHA5SbU
+2268RFdl/gLz6tfRjbEOuOHzCjFPdvAdbysanpTMHLNc6FefJ+zxtgk9sJh0C4Jh
+vxFrw9nTKKzfEl12gQ1SOaEaUIO0fEBGbe8ZpauRAoGAMAlGV+2/K4ebvAJKOVTa
+dKAzQ+TD2SJmeR1HZmKDYddNqwtZlzg3v4ZhCk4eaUmGeC1Bdh8MDuB3QQvXz4Dr
+vOIP4UVaOr+uM+7TgAgVnP4/K6IeJGzUDhX93pmpWhODfdu/oojEKVcpCojmEmS1
+KCBtmIrQLqzMpnBpLNuSY+Q=
+-----END PRIVATE KEY-----
diff --git a/tests/integration/test_zookeeper_config/configs_secure/conf.d/remote_servers.xml b/tests/integration/test_zookeeper_config/configs_secure/conf.d/remote_servers.xml
new file mode 100644
index 00000000000..01865e33a85
--- /dev/null
+++ b/tests/integration/test_zookeeper_config/configs_secure/conf.d/remote_servers.xml
@@ -0,0 +1,17 @@
+<yandex>
+    <remote_servers>
+        <cluster>
+            <shard>
+                <replica>
+                    <host>node1</host>
+                    <port>9000</port>
+                </replica>
+
+                <replica>
+                    <host>node2</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster>
+    </remote_servers>
+</yandex>
diff --git a/tests/integration/test_zookeeper_config/configs_secure/conf.d/ssl_conf.xml b/tests/integration/test_zookeeper_config/configs_secure/conf.d/ssl_conf.xml
new file mode 100644
index 00000000000..5e6f5f37624
--- /dev/null
+++ b/tests/integration/test_zookeeper_config/configs_secure/conf.d/ssl_conf.xml
@@ -0,0 +1,16 @@
+<yandex>
+    <openSSL>
+        <client>
+            <certificateFile>/etc/clickhouse-server/client.crt</certificateFile>
+            <privateKeyFile>/etc/clickhouse-server/client.key</privateKeyFile>
+            <loadDefaultCAFile>true</loadDefaultCAFile>
+            <cacheSessions>true</cacheSessions>
+            <disableProtocols>sslv2,sslv3</disableProtocols>
+            <preferServerCiphers>true</preferServerCiphers>
+            <verificationMode>none</verificationMode>
+            <invalidCertificateHandler>
+                <name>RejectCertificateHandler</name>
+            </invalidCertificateHandler>
+        </client>
+    </openSSL>
+</yandex>
diff --git a/tests/integration/test_zookeeper_config/test.py b/tests/integration/test_zookeeper_config/test.py
new file mode 100644
index 00000000000..4be99c8711d
--- /dev/null
+++ b/tests/integration/test_zookeeper_config/test.py
@@ -0,0 +1,161 @@
+from __future__ import print_function
+from helpers.cluster import ClickHouseCluster
+import helpers
+import pytest
+import time
+from tempfile import NamedTemporaryFile
+from os import path as p, unlink
+
+
+def test_chroot_with_same_root():
+
+    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
+    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
+
+    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
+    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
+    nodes = [node1, node2]
+
+    def create_zk_root(zk):
+        zk.ensure_path('/root_a')
+        print(zk.get_children('/'))
+    cluster_1.add_zookeeper_startup_command(create_zk_root)
+
+    try:
+        cluster_1.start()
+
+        try:
+            cluster_2.start(destroy_dirs=False)
+            for i, node in enumerate(nodes):
+                node.query('''
+                CREATE TABLE simple (date Date, id UInt32)
+                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
+                '''.format(replica=node.name))
+                for j in range(2): # Second insert to test deduplication
+                    node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))
+
+            time.sleep(1)
+
+            assert node1.query('select count() from simple').strip() == '2'
+            assert node2.query('select count() from simple').strip() == '2'
+
+        finally:
+            cluster_2.shutdown()
+
+    finally:
+        cluster_1.shutdown()
+
+
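+# Same layout as above, but the two clusters use different chroots
+# (/root_a vs /root_b), so their replication queues are independent and
+# each node ends up with exactly one row after deduplication.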
+def test_chroot_with_different_root():
+
+    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
+    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_b.xml')
+
+    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
+    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
+    nodes = [node1, node2]
+
+    def create_zk_roots(zk):
+        zk.ensure_path('/root_a')
+        zk.ensure_path('/root_b')
+        print(zk.get_children('/'))
+    cluster_1.add_zookeeper_startup_command(create_zk_roots)
+
+    try:
+        cluster_1.start()
+
+        try:
+            cluster_2.start(destroy_dirs=False)
+
+            for i, node in enumerate(nodes):
+                node.query('''
+                CREATE TABLE simple (date Date, id UInt32)
+                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
+                '''.format(replica=node.name))
+                for j in range(2): # Second insert to test deduplication
+                    node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))
+
+            assert node1.query('select count() from simple').strip() == '1'
+            assert node2.query('select count() from simple').strip() == '1'
+
+        finally:
+            cluster_2.shutdown()
+
+    finally:
+        cluster_1.shutdown()
+
+
+def test_identity():
+
+    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_password.xml')
+    cluster_2 = ClickHouseCluster(__file__)
+
+    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
+    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
+
+    try:
+        cluster_1.start()
+
+        node1.query('''
+        CREATE TABLE simple (date Date, id UInt32)
+        ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
+        '''.format(replica=node1.name))
+
+        with pytest.raises(Exception):
+            cluster_2.start(destroy_dirs=False)
+            node2.query('''
+            CREATE TABLE simple (date Date, id UInt32)
+            ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192);
+            ''')
+
+    finally:
+        cluster_1.shutdown()
+        cluster_2.shutdown()
+
+
+def test_secure_connection():
+    # We need absolute path in zookeeper volumes. Generate it dynamically.
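+    # The stock ZooKeeper compose file used by the test helpers has no TLS, so the
+    # test writes a temporary compose file of its own: three ZooKeeper containers,
+    # each started through zookeeper-ssl-entrypoint.sh with the secure client
+    # port 2281 exposed instead of the plain 2181.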
+    TEMPLATE = '''
+    zoo{zoo_id}:
+        image: zookeeper:3.5.6
+        restart: always
+        environment:
+            ZOO_TICK_TIME: 500
+            ZOO_MY_ID: {zoo_id}
+            ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
+            ZOO_SECURE_CLIENT_PORT: 2281
+        volumes:
+            - {helpers_dir}/zookeeper-ssl-entrypoint.sh:/zookeeper-ssl-entrypoint.sh
+            - {configs_dir}:/clickhouse-config
+        command: ["zkServer.sh", "start-foreground"]
+        entrypoint: /zookeeper-ssl-entrypoint.sh
+    '''
+    configs_dir = p.abspath(p.join(p.dirname(__file__), 'configs_secure'))
+    helpers_dir = p.abspath(p.dirname(helpers.__file__))
+
+    cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_ssl.xml')
+
+    docker_compose = NamedTemporaryFile(delete=False)
+
+    docker_compose.write(
+        "version: '2.2'\nservices:\n" +
+        TEMPLATE.format(zoo_id=1, configs_dir=configs_dir, helpers_dir=helpers_dir) +
+        TEMPLATE.format(zoo_id=2, configs_dir=configs_dir, helpers_dir=helpers_dir) +
+        TEMPLATE.format(zoo_id=3, configs_dir=configs_dir, helpers_dir=helpers_dir)
+    )
+    docker_compose.close()
+
+    node1 = cluster.add_instance('node1', config_dir='configs_secure', with_zookeeper=True,
+                                 zookeeper_docker_compose_path=docker_compose.name)
+    node2 = cluster.add_instance('node2', config_dir='configs_secure', with_zookeeper=True,
+                                 zookeeper_docker_compose_path=docker_compose.name)
+
+    try:
+        cluster.start()
+
+        assert node1.query("SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n'
+        assert node2.query("SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n'
+
+    finally:
+        cluster.shutdown()
+        unlink(docker_compose.name)
diff --git a/tests/ints_dictionary.xml b/tests/ints_dictionary.xml
new file mode 120000
index 00000000000..1daa76b8267
--- /dev/null
+++ b/tests/ints_dictionary.xml
@@ -0,0 +1 @@
+config/ints_dictionary.xml
\ No newline at end of file
diff --git a/dbms/tests/msan_suppressions.txt b/tests/msan_suppressions.txt
similarity index 100%
rename from dbms/tests/msan_suppressions.txt
rename to tests/msan_suppressions.txt
diff --git a/dbms/tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh b/tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh
similarity index 100%
rename from dbms/tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh
rename to tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh
diff --git a/dbms/tests/perf_drafts/vert_merge/add_id_to_csv b/tests/perf_drafts/vert_merge/add_id_to_csv
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/add_id_to_csv
rename to tests/perf_drafts/vert_merge/add_id_to_csv
diff --git a/dbms/tests/perf_drafts/vert_merge/ontime.struct b/tests/perf_drafts/vert_merge/ontime.struct
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/ontime.struct
rename to tests/perf_drafts/vert_merge/ontime.struct
diff --git a/dbms/tests/perf_drafts/vert_merge/test_merges b/tests/perf_drafts/vert_merge/test_merges
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/test_merges
rename to tests/perf_drafts/vert_merge/test_merges
diff --git a/dbms/tests/perf_drafts/vert_merge/wait_clickhouse_server b/tests/perf_drafts/vert_merge/wait_clickhouse_server
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/wait_clickhouse_server
rename to tests/perf_drafts/vert_merge/wait_clickhouse_server
diff --git a/dbms/tests/performance/IPv4.xml b/tests/performance/IPv4.xml
similarity index 100%
rename from dbms/tests/performance/IPv4.xml
rename to tests/performance/IPv4.xml
diff --git a/dbms/tests/performance/IPv6.xml b/tests/performance/IPv6.xml
similarity index 100%
rename from dbms/tests/performance/IPv6.xml
rename to tests/performance/IPv6.xml
diff --git a/dbms/tests/performance/README.md b/tests/performance/README.md
similarity index 100%
rename from dbms/tests/performance/README.md
rename to tests/performance/README.md
diff --git a/dbms/tests/performance/agg_functions_min_max_any.xml b/tests/performance/agg_functions_min_max_any.xml
similarity index 100%
rename from dbms/tests/performance/agg_functions_min_max_any.xml
rename to tests/performance/agg_functions_min_max_any.xml
diff --git a/dbms/tests/performance/analyze_array_tuples.xml b/tests/performance/analyze_array_tuples.xml
similarity index 97%
rename from dbms/tests/performance/analyze_array_tuples.xml
rename to tests/performance/analyze_array_tuples.xml
index 50e6573b8b9..bd57f4f5904 100644
--- a/dbms/tests/performance/analyze_array_tuples.xml
+++ b/tests/performance/analyze_array_tuples.xml
@@ -1,16 +1,6 @@
 <test>
-    <stop_conditions>
-        <all_of>
-            <iterations>3</iterations>
-            <min_time_not_changing_for_ms>10000</min_time_not_changing_for_ms>
-        </all_of>
-        <any_of>
-            <iterations>5</iterations>
-            <total_time_ms>60000</total_time_ms>
-        </any_of>
-    </stop_conditions>
-SELECT [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60), (61, 61), (62, 62), (63, 63), (64, 64), (65, 65), (66, 66), (67, 67), (68, 68), (69, 69), (70, 70), (71, 71), (72, 72), (73, 73), (74, 74), (75, 75), (76, 76), (77, 77), (78, 78), (79, 79), (80, 80), (81, 81), (82, 82), (83, 83), (84, 84), (85, 85), (86, 86), (87, 87), (88, 88), (89, 89), (90, 90), (91, 91), (92, 92), (93, 93), (94, 94), (95, 95), (96, 96), (97, 97), (98, 98), (99, 99), (100, 100), (101, 101), (102, 102), (103, 103), (104, 104), (105, 105), (106, 106), (107, 107), (108, 108), (109, 109), (110, 110), (111, 111), (112, 112), (113, 113), (114, 114), (115, 115), (116, 116), (117, 117), (118, 118), (119, 119), (120, 120), (121, 121), (122, 122), (123, 123), (124, 124), (125, 125), (126, 126), (127, 127), (128, 128), (129, 129), (130, 130), (131, 131), (132, 132), (133, 133), (134, 134), (135, 135), (136, 136), (137, 137), (138, 138), (139, 139), (140, 140), (141, 141), (142, 142), (143, 143), (144, 144), (145, 145), (146, 146), (147, 147), (148, 148), (149, 149), (150, 150), (151, 151), (152, 152), (153, 153), (154, 154), (155, 155), (156, 156), (157, 157), (158, 158), (159, 159), (160, 160), (161, 161), (162, 162), (163, 163), (164, 164), (165, 165), (166, 166), (167, 167), (168, 168), (169, 169), (170, 170), (171, 171), (172, 172), (173, 173), (174, 174), (175, 175), (176, 176), (177, 177), (178, 178), (179, 179), (180, 180), (181, 181), (182, 182), (183, 183), (184, 184), (185, 185), (186, 186), (187, 187), (188, 188), (189, 189), (190, 190), (191, 191), (192, 192), (193, 193), (194, 194), (195, 195), (196, 196), (197, 197), (198, 198), (199, 199), (200, 200), (201, 201), (202, 202), (203, 203), (204, 204), (205, 205), (206, 206), (207, 207), (208, 208), (209, 209), (210, 210), (211, 211), (212, 212), (213, 213), (214, 214), (215, 215), (216, 216), (217, 217), (218, 218), (219, 219), (220, 220), (221, 221), (222, 222), (223, 223), (224,
224), (225, 225), (226, 226), (227, 227), (228, 228), (229, 229), (230, 230), (231, 231), (232, 232), (233, 233), (234, 234), (235, 235), (236, 236), (237, 237), (238, 238), (239, 239), (240, 240), (241, 241), (242, 242), (243, 243), (244, 244), (245, 245), (246, 246), (247, 247), (248, 248), (249, 249), (250, 250), (251, 251), (252, 252), (253, 253), (254, 254), (255, 255), (256, 256), (257, 257), (258, 258), (259, 259), (260, 260), (261, 261), (262, 262), (263, 263), (264, 264), (265, 265), (266, 266), (267, 267), (268, 268), (269, 269), (270, 270), (271, 271), (272, 272), (273, 273), (274, 274), (275, 275), (276, 276), (277, 277), (278, 278), (279, 279), (280, 280), (281, 281), (282, 282), (283, 283), (284, 284), (285, 285), (286, 286), (287, 287), (288, 288), (289, 289), (290, 290), (291, 291), (292, 292), (293, 293), (294, 294), (295, 295), (296, 296), (297, 297), (298, 298), (299, 299), (300, 300), (301, 301), (302, 302), (303, 303), (304, 304), (305, 305), (306, 306), (307, 307), (308, 308), (309, 309), (310, 310), (311, 311), (312, 312), (313, 313), (314, 314), (315, 315), (316, 316), (317, 317), (318, 318), (319, 319), (320, 320), (321, 321), (322, 322), (323, 323), (324, 324), (325, 325), (326, 326), (327, 327), (328, 328), (329, 329), (330, 330), (331, 331), (332, 332), (333, 333), (334, 334), (335, 335), (336, 336), (337, 337), (338, 338), (339, 339), (340, 340), (341, 341), (342, 342), (343, 343), (344, 344), (345, 345), (346, 346), (347, 347), (348, 348), (349, 349), (350, 350), (351, 351), (352, 352), (353, 353), (354, 354), (355, 355), (356, 356), (357, 357), (358, 358), (359, 359), (360, 360), (361, 361), (362, 362), (363, 363), (364, 364), (365, 365), (366, 366), (367, 367), (368, 368), (369, 369), (370, 370), (371, 371), (372, 372), (373, 373), (374, 374), (375, 375), (376, 376), (377, 377), (378, 378), (379, 379), (380, 380), (381, 381), (382, 382), (383, 383), (384, 384), (385, 385), (386, 386), (387, 387), (388, 388), (389, 389), (390, 390), (391, 391), (392, 392), (393, 393), (394, 394), (395, 395), (396, 396), (397, 397), (398, 398), (399, 399), (400, 400), (401, 401), (402, 402), (403, 403), (404, 404), (405, 405), (406, 406), (407, 407), (408, 408), (409, 409), (410, 410), (411, 411), (412, 412), (413, 413), (414, 414), (415, 415), (416, 416), (417, 417), (418, 418), (419, 419), (420, 420), (421, 421), (422, 422), (423, 423), (424, 424), (425, 425), (426, 426), (427, 427), (428, 428), (429, 429), (430, 430), (431, 431), (432, 432), (433, 433), (434, 434), (435, 435), (436, 436), (437, 437), (438, 438), (439, 439), (440, 440), (441, 441), (442, 442), (443, 443), (444, 444), (445, 445), (446, 446), (447, 447), (448, 448), (449, 449), (450, 450), (451, 451), (452, 452), (453, 453), (454, 454), (455, 455), (456, 456), (457, 457), (458, 458), (459, 459), (460, 460), (461, 461), (462, 462), (463, 463), (464, 464), (465, 465), (466, 466), (467, 467), (468, 468), (469, 469), (470, 470), (471, 471), (472, 472), (473, 473), (474, 474), (475, 475), (476, 476), (477, 477), (478, 478), (479, 479), (480, 480), (481, 481), (482, 482), (483, 483), (484, 484), (485, 485), (486, 486), (487, 487), (488, 488), (489, 489), (490, 490), (491, 491), (492, 492), (493, 493), (494, 494), (495, 495), (496, 496), (497, 497), (498, 498), (499, 499), (500, 500), (501, 501), (502, 502), (503, 503), (504, 504), (505, 505), (506, 506), (507, 507), (508, 508), (509, 509), (510, 510), (511, 511), (512, 512), (513, 513), (514, 514), (515, 515), (516, 516), (517, 517), (518, 518), (519, 519), (520, 
520), (521, 521), (522, 522), (523, 523), (524, 524), (525, 525), (526, 526), (527, 527), (528, 528), (529, 529), (530, 530), (531, 531), (532, 532), (533, 533), (534, 534), (535, 535), (536, 536), (537, 537), (538, 538), (539, 539), (540, 540), (541, 541), (542, 542), (543, 543), (544, 544), (545, 545), (546, 546), (547, 547), (548, 548), (549, 549), (550, 550), (551, 551), (552, 552), (553, 553), (554, 554), (555, 555), (556, 556), (557, 557), (558, 558), (559, 559), (560, 560), (561, 561), (562, 562), (563, 563), (564, 564), (565, 565), (566, 566), (567, 567), (568, 568), (569, 569), (570, 570), (571, 571), (572, 572), (573, 573), (574, 574), (575, 575), (576, 576), (577, 577), (578, 578), (579, 579), (580, 580), (581, 581), (582, 582), (583, 583), (584, 584), (585, 585), (586, 586), (587, 587), (588, 588), (589, 589), (590, 590), (591, 591), (592, 592), (593, 593), (594, 594), (595, 595), (596, 596), (597, 597), (598, 598), (599, 599), (600, 600), (601, 601), (602, 602), (603, 603), (604, 604), (605, 605), (606, 606), (607, 607), (608, 608), (609, 609), (610, 610), (611, 611), (612, 612), (613, 613), (614, 614), (615, 615), (616, 616), (617, 617), (618, 618), (619, 619), (620, 620), (621, 621), (622, 622), (623, 623), (624, 624), (625, 625), (626, 626), (627, 627), (628, 628), (629, 629), (630, 630), (631, 631), (632, 632), (633, 633), (634, 634), (635, 635), (636, 636), (637, 637), (638, 638), (639, 639), (640, 640), (641, 641), (642, 642), (643, 643), (644, 644), (645, 645), (646, 646), (647, 647), (648, 648), (649, 649), (650, 650), (651, 651), (652, 652), (653, 653), (654, 654), (655, 655), (656, 656), (657, 657), (658, 658), (659, 659), (660, 660), (661, 661), (662, 662), (663, 663), (664, 664), (665, 665), (666, 666), (667, 667), (668, 668), (669, 669), (670, 670), (671, 671), (672, 672), (673, 673), (674, 674), (675, 675), (676, 676), (677, 677), (678, 678), (679, 679), (680, 680), (681, 681), (682, 682), (683, 683), (684, 684), (685, 685), (686, 686), (687, 687), (688, 688), (689, 689), (690, 690), (691, 691), (692, 692), (693, 693), (694, 694), (695, 695), (696, 696), (697, 697), (698, 698), (699, 699), (700, 700), (701, 701), (702, 702), (703, 703), (704, 704), (705, 705), (706, 706), (707, 707), (708, 708), (709, 709), (710, 710), (711, 711), (712, 712), (713, 713), (714, 714), (715, 715), (716, 716), (717, 717), (718, 718), (719, 719), (720, 720), (721, 721), (722, 722), (723, 723), (724, 724), (725, 725), (726, 726), (727, 727), (728, 728), (729, 729), (730, 730), (731, 731), (732, 732), (733, 733), (734, 734), (735, 735), (736, 736), (737, 737), (738, 738), (739, 739), (740, 740), (741, 741), (742, 742), (743, 743), (744, 744), (745, 745), (746, 746), (747, 747), (748, 748), (749, 749), (750, 750), (751, 751), (752, 752), (753, 753), (754, 754), (755, 755), (756, 756), (757, 757), (758, 758), (759, 759), (760, 760), (761, 761), (762, 762), (763, 763), (764, 764), (765, 765), (766, 766), (767, 767), (768, 768), (769, 769), (770, 770), (771, 771), (772, 772), (773, 773), (774, 774), (775, 775), (776, 776), (777, 777), (778, 778), (779, 779), (780, 780), (781, 781), (782, 782), (783, 783), (784, 784), (785, 785), (786, 786), (787, 787), (788, 788), (789, 789), (790, 790), (791, 791), (792, 792), (793, 793), (794, 794), (795, 795), (796, 796), (797, 797), (798, 798), (799, 799), (800, 800), (801, 801), (802, 802), (803, 803), (804, 804), (805, 805), (806, 806), (807, 807), (808, 808), (809, 809), (810, 810), (811, 811), (812, 812), (813, 813), (814, 814), (815, 815), (816, 
816), (817, 817), (818, 818), (819, 819), (820, 820), (821, 821), (822, 822), (823, 823), (824, 824), (825, 825), (826, 826), (827, 827), (828, 828), (829, 829), (830, 830), (831, 831), (832, 832), (833, 833), (834, 834), (835, 835), (836, 836), (837, 837), (838, 838), (839, 839), (840, 840), (841, 841), (842, 842), (843, 843), (844, 844), (845, 845), (846, 846), (847, 847), (848, 848), (849, 849), (850, 850), (851, 851), (852, 852), (853, 853), (854, 854), (855, 855), (856, 856), (857, 857), (858, 858), (859, 859), (860, 860), (861, 861), (862, 862), (863, 863), (864, 864), (865, 865), (866, 866), (867, 867), (868, 868), (869, 869), (870, 870), (871, 871), (872, 872), (873, 873), (874, 874), (875, 875), (876, 876), (877, 877), (878, 878), (879, 879), (880, 880), (881, 881), (882, 882), (883, 883), (884, 884), (885, 885), (886, 886), (887, 887), (888, 888), (889, 889), (890, 890), (891, 891), (892, 892), (893, 893), (894, 894), (895, 895), (896, 896), (897, 897), (898, 898), (899, 899), (900, 900), (901, 901), (902, 902), (903, 903), (904, 904), (905, 905), (906, 906), (907, 907), (908, 908), (909, 909), (910, 910), (911, 911), (912, 912), (913, 913), (914, 914), (915, 915), (916, 916), (917, 917), (918, 918), (919, 919), (920, 920), (921, 921), (922, 922), (923, 923), (924, 924), (925, 925), (926, 926), (927, 927), (928, 928), (929, 929), (930, 930), (931, 931), (932, 932), (933, 933), (934, 934), (935, 935), (936, 936), (937, 937), (938, 938), (939, 939), (940, 940), (941, 941), (942, 942), (943, 943), (944, 944), (945, 945), (946, 946), (947, 947), (948, 948), (949, 949), (950, 950), (951, 951), (952, 952), (953, 953), (954, 954), (955, 955), (956, 956), (957, 957), (958, 958), (959, 959), (960, 960), (961, 961), (962, 962), (963, 963), (964, 964), (965, 965), (966, 966), (967, 967), (968, 968), (969, 969), (970, 970), (971, 971), (972, 972), (973, 973), (974, 974), (975, 975), (976, 976), (977, 977), (978, 978), (979, 979), (980, 980), (981, 981), (982, 982), (983, 983), (984, 984), (985, 985), (986, 986), (987, 987), (988, 988), (989, 989), (990, 990), (991, 991), (992, 992), (993, 993), (994, 994), (995, 995), (996, 996), (997, 997), (998, 998), (999, 999), (1000, 1000)] AS x +SELECT [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60), (61, 61), (62, 62), (63, 63), (64, 64), (65, 65), (66, 66), (67, 67), (68, 68), (69, 69), (70, 70), (71, 71), (72, 72), (73, 73), (74, 74), (75, 75), (76, 76), (77, 77), (78, 78), (79, 79), (80, 80), (81, 81), (82, 82), (83, 83), (84, 84), (85, 85), (86, 86), (87, 87), (88, 88), (89, 89), (90, 90), (91, 91), (92, 92), (93, 93), (94, 94), (95, 95), (96, 96), (97, 97), (98, 98), (99, 99), (100, 100), (101, 101), (102, 102), (103, 103), (104, 104), (105, 105), (106, 106), (107, 107), (108, 108), (109, 109), (110, 110), (111, 111), (112, 112), (113, 113), (114, 114), (115, 115), (116, 116), (117, 117), (118, 118), (119, 119), (120, 120), (121, 121), (122, 122), (123, 123), (124, 124), (125, 125), (126, 126), (127, 127), (128, 128), 
(129, 129), (130, 130), (131, 131), (132, 132), (133, 133), (134, 134), (135, 135), (136, 136), (137, 137), (138, 138), (139, 139), (140, 140), (141, 141), (142, 142), (143, 143), (144, 144), (145, 145), (146, 146), (147, 147), (148, 148), (149, 149), (150, 150), (151, 151), (152, 152), (153, 153), (154, 154), (155, 155), (156, 156), (157, 157), (158, 158), (159, 159), (160, 160), (161, 161), (162, 162), (163, 163), (164, 164), (165, 165), (166, 166), (167, 167), (168, 168), (169, 169), (170, 170), (171, 171), (172, 172), (173, 173), (174, 174), (175, 175), (176, 176), (177, 177), (178, 178), (179, 179), (180, 180), (181, 181), (182, 182), (183, 183), (184, 184), (185, 185), (186, 186), (187, 187), (188, 188), (189, 189), (190, 190), (191, 191), (192, 192), (193, 193), (194, 194), (195, 195), (196, 196), (197, 197), (198, 198), (199, 199), (200, 200), (201, 201), (202, 202), (203, 203), (204, 204), (205, 205), (206, 206), (207, 207), (208, 208), (209, 209), (210, 210), (211, 211), (212, 212), (213, 213), (214, 214), (215, 215), (216, 216), (217, 217), (218, 218), (219, 219), (220, 220), (221, 221), (222, 222), (223, 223), (224, 224), (225, 225), (226, 226), (227, 227), (228, 228), (229, 229), (230, 230), (231, 231), (232, 232), (233, 233), (234, 234), (235, 235), (236, 236), (237, 237), (238, 238), (239, 239), (240, 240), (241, 241), (242, 242), (243, 243), (244, 244), (245, 245), (246, 246), (247, 247), (248, 248), (249, 249), (250, 250), (251, 251), (252, 252), (253, 253), (254, 254), (255, 255), (256, 256), (257, 257), (258, 258), (259, 259), (260, 260), (261, 261), (262, 262), (263, 263), (264, 264), (265, 265), (266, 266), (267, 267), (268, 268), (269, 269), (270, 270), (271, 271), (272, 272), (273, 273), (274, 274), (275, 275), (276, 276), (277, 277), (278, 278), (279, 279), (280, 280), (281, 281), (282, 282), (283, 283), (284, 284), (285, 285), (286, 286), (287, 287), (288, 288), (289, 289), (290, 290), (291, 291), (292, 292), (293, 293), (294, 294), (295, 295), (296, 296), (297, 297), (298, 298), (299, 299), (300, 300), (301, 301), (302, 302), (303, 303), (304, 304), (305, 305), (306, 306), (307, 307), (308, 308), (309, 309), (310, 310), (311, 311), (312, 312), (313, 313), (314, 314), (315, 315), (316, 316), (317, 317), (318, 318), (319, 319), (320, 320), (321, 321), (322, 322), (323, 323), (324, 324), (325, 325), (326, 326), (327, 327), (328, 328), (329, 329), (330, 330), (331, 331), (332, 332), (333, 333), (334, 334), (335, 335), (336, 336), (337, 337), (338, 338), (339, 339), (340, 340), (341, 341), (342, 342), (343, 343), (344, 344), (345, 345), (346, 346), (347, 347), (348, 348), (349, 349), (350, 350), (351, 351), (352, 352), (353, 353), (354, 354), (355, 355), (356, 356), (357, 357), (358, 358), (359, 359), (360, 360), (361, 361), (362, 362), (363, 363), (364, 364), (365, 365), (366, 366), (367, 367), (368, 368), (369, 369), (370, 370), (371, 371), (372, 372), (373, 373), (374, 374), (375, 375), (376, 376), (377, 377), (378, 378), (379, 379), (380, 380), (381, 381), (382, 382), (383, 383), (384, 384), (385, 385), (386, 386), (387, 387), (388, 388), (389, 389), (390, 390), (391, 391), (392, 392), (393, 393), (394, 394), (395, 395), (396, 396), (397, 397), (398, 398), (399, 399), (400, 400), (401, 401), (402, 402), (403, 403), (404, 404), (405, 405), (406, 406), (407, 407), (408, 408), (409, 409), (410, 410), (411, 411), (412, 412), (413, 413), (414, 414), (415, 415), (416, 416), (417, 417), (418, 418), (419, 419), (420, 420), (421, 421), (422, 422), (423, 423), (424, 424), 
(425, 425), (426, 426), (427, 427), (428, 428), (429, 429), (430, 430), (431, 431), (432, 432), (433, 433), (434, 434), (435, 435), (436, 436), (437, 437), (438, 438), (439, 439), (440, 440), (441, 441), (442, 442), (443, 443), (444, 444), (445, 445), (446, 446), (447, 447), (448, 448), (449, 449), (450, 450), (451, 451), (452, 452), (453, 453), (454, 454), (455, 455), (456, 456), (457, 457), (458, 458), (459, 459), (460, 460), (461, 461), (462, 462), (463, 463), (464, 464), (465, 465), (466, 466), (467, 467), (468, 468), (469, 469), (470, 470), (471, 471), (472, 472), (473, 473), (474, 474), (475, 475), (476, 476), (477, 477), (478, 478), (479, 479), (480, 480), (481, 481), (482, 482), (483, 483), (484, 484), (485, 485), (486, 486), (487, 487), (488, 488), (489, 489), (490, 490), (491, 491), (492, 492), (493, 493), (494, 494), (495, 495), (496, 496), (497, 497), (498, 498), (499, 499), (500, 500), (501, 501), (502, 502), (503, 503), (504, 504), (505, 505), (506, 506), (507, 507), (508, 508), (509, 509), (510, 510), (511, 511), (512, 512), (513, 513), (514, 514), (515, 515), (516, 516), (517, 517), (518, 518), (519, 519), (520, 520), (521, 521), (522, 522), (523, 523), (524, 524), (525, 525), (526, 526), (527, 527), (528, 528), (529, 529), (530, 530), (531, 531), (532, 532), (533, 533), (534, 534), (535, 535), (536, 536), (537, 537), (538, 538), (539, 539), (540, 540), (541, 541), (542, 542), (543, 543), (544, 544), (545, 545), (546, 546), (547, 547), (548, 548), (549, 549), (550, 550), (551, 551), (552, 552), (553, 553), (554, 554), (555, 555), (556, 556), (557, 557), (558, 558), (559, 559), (560, 560), (561, 561), (562, 562), (563, 563), (564, 564), (565, 565), (566, 566), (567, 567), (568, 568), (569, 569), (570, 570), (571, 571), (572, 572), (573, 573), (574, 574), (575, 575), (576, 576), (577, 577), (578, 578), (579, 579), (580, 580), (581, 581), (582, 582), (583, 583), (584, 584), (585, 585), (586, 586), (587, 587), (588, 588), (589, 589), (590, 590), (591, 591), (592, 592), (593, 593), (594, 594), (595, 595), (596, 596), (597, 597), (598, 598), (599, 599), (600, 600), (601, 601), (602, 602), (603, 603), (604, 604), (605, 605), (606, 606), (607, 607), (608, 608), (609, 609), (610, 610), (611, 611), (612, 612), (613, 613), (614, 614), (615, 615), (616, 616), (617, 617), (618, 618), (619, 619), (620, 620), (621, 621), (622, 622), (623, 623), (624, 624), (625, 625), (626, 626), (627, 627), (628, 628), (629, 629), (630, 630), (631, 631), (632, 632), (633, 633), (634, 634), (635, 635), (636, 636), (637, 637), (638, 638), (639, 639), (640, 640), (641, 641), (642, 642), (643, 643), (644, 644), (645, 645), (646, 646), (647, 647), (648, 648), (649, 649), (650, 650), (651, 651), (652, 652), (653, 653), (654, 654), (655, 655), (656, 656), (657, 657), (658, 658), (659, 659), (660, 660), (661, 661), (662, 662), (663, 663), (664, 664), (665, 665), (666, 666), (667, 667), (668, 668), (669, 669), (670, 670), (671, 671), (672, 672), (673, 673), (674, 674), (675, 675), (676, 676), (677, 677), (678, 678), (679, 679), (680, 680), (681, 681), (682, 682), (683, 683), (684, 684), (685, 685), (686, 686), (687, 687), (688, 688), (689, 689), (690, 690), (691, 691), (692, 692), (693, 693), (694, 694), (695, 695), (696, 696), (697, 697), (698, 698), (699, 699), (700, 700), (701, 701), (702, 702), (703, 703), (704, 704), (705, 705), (706, 706), (707, 707), (708, 708), (709, 709), (710, 710), (711, 711), (712, 712), (713, 713), (714, 714), (715, 715), (716, 716), (717, 717), (718, 718), (719, 719), (720, 720), 
(721, 721), (722, 722), (723, 723), (724, 724), (725, 725), (726, 726), (727, 727), (728, 728), (729, 729), (730, 730), (731, 731), (732, 732), (733, 733), (734, 734), (735, 735), (736, 736), (737, 737), (738, 738), (739, 739), (740, 740), (741, 741), (742, 742), (743, 743), (744, 744), (745, 745), (746, 746), (747, 747), (748, 748), (749, 749), (750, 750), (751, 751), (752, 752), (753, 753), (754, 754), (755, 755), (756, 756), (757, 757), (758, 758), (759, 759), (760, 760), (761, 761), (762, 762), (763, 763), (764, 764), (765, 765), (766, 766), (767, 767), (768, 768), (769, 769), (770, 770), (771, 771), (772, 772), (773, 773), (774, 774), (775, 775), (776, 776), (777, 777), (778, 778), (779, 779), (780, 780), (781, 781), (782, 782), (783, 783), (784, 784), (785, 785), (786, 786), (787, 787), (788, 788), (789, 789), (790, 790), (791, 791), (792, 792), (793, 793), (794, 794), (795, 795), (796, 796), (797, 797), (798, 798), (799, 799), (800, 800), (801, 801), (802, 802), (803, 803), (804, 804), (805, 805), (806, 806), (807, 807), (808, 808), (809, 809), (810, 810), (811, 811), (812, 812), (813, 813), (814, 814), (815, 815), (816, 816), (817, 817), (818, 818), (819, 819), (820, 820), (821, 821), (822, 822), (823, 823), (824, 824), (825, 825), (826, 826), (827, 827), (828, 828), (829, 829), (830, 830), (831, 831), (832, 832), (833, 833), (834, 834), (835, 835), (836, 836), (837, 837), (838, 838), (839, 839), (840, 840), (841, 841), (842, 842), (843, 843), (844, 844), (845, 845), (846, 846), (847, 847), (848, 848), (849, 849), (850, 850), (851, 851), (852, 852), (853, 853), (854, 854), (855, 855), (856, 856), (857, 857), (858, 858), (859, 859), (860, 860), (861, 861), (862, 862), (863, 863), (864, 864), (865, 865), (866, 866), (867, 867), (868, 868), (869, 869), (870, 870), (871, 871), (872, 872), (873, 873), (874, 874), (875, 875), (876, 876), (877, 877), (878, 878), (879, 879), (880, 880), (881, 881), (882, 882), (883, 883), (884, 884), (885, 885), (886, 886), (887, 887), (888, 888), (889, 889), (890, 890), (891, 891), (892, 892), (893, 893), (894, 894), (895, 895), (896, 896), (897, 897), (898, 898), (899, 899), (900, 900), (901, 901), (902, 902), (903, 903), (904, 904), (905, 905), (906, 906), (907, 907), (908, 908), (909, 909), (910, 910), (911, 911), (912, 912), (913, 913), (914, 914), (915, 915), (916, 916), (917, 917), (918, 918), (919, 919), (920, 920), (921, 921), (922, 922), (923, 923), (924, 924), (925, 925), (926, 926), (927, 927), (928, 928), (929, 929), (930, 930), (931, 931), (932, 932), (933, 933), (934, 934), (935, 935), (936, 936), (937, 937), (938, 938), (939, 939), (940, 940), (941, 941), (942, 942), (943, 943), (944, 944), (945, 945), (946, 946), (947, 947), (948, 948), (949, 949), (950, 950), (951, 951), (952, 952), (953, 953), (954, 954), (955, 955), (956, 956), (957, 957), (958, 958), (959, 959), (960, 960), (961, 961), (962, 962), (963, 963), (964, 964), (965, 965), (966, 966), (967, 967), (968, 968), (969, 969), (970, 970), (971, 971), (972, 972), (973, 973), (974, 974), (975, 975), (976, 976), (977, 977), (978, 978), (979, 979), (980, 980), (981, 981), (982, 982), (983, 983), (984, 984), (985, 985), (986, 986), (987, 987), (988, 988), (989, 989), (990, 990), (991, 991), (992, 992), (993, 993), (994, 994), (995, 995), (996, 996), (997, 997), (998, 998), (999, 999), (1000, 1000)] AS x FORMAT Null
diff --git a/dbms/tests/performance/and_function.xml b/tests/performance/and_function.xml
similarity index 100%
rename from dbms/tests/performance/and_function.xml
rename to tests/performance/and_function.xml
diff --git a/dbms/tests/performance/arithmetic.xml b/tests/performance/arithmetic.xml
similarity index 100%
rename from dbms/tests/performance/arithmetic.xml
rename to tests/performance/arithmetic.xml
diff --git a/dbms/tests/performance/array_auc.xml b/tests/performance/array_auc.xml
similarity index 100%
rename from dbms/tests/performance/array_auc.xml
rename to tests/performance/array_auc.xml
diff --git a/dbms/tests/performance/array_element.xml b/tests/performance/array_element.xml
similarity index 100%
rename from dbms/tests/performance/array_element.xml
rename to tests/performance/array_element.xml
diff --git a/tests/performance/array_fill.xml b/tests/performance/array_fill.xml
new file mode 100644
index 00000000000..d29e02730a3
--- /dev/null
+++ b/tests/performance/array_fill.xml
@@ -0,0 +1,8 @@
+<test>
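+    <!-- arrayFill over 100M elements: plain UInt64 vs (UInt64, String) tuples,
+         with a predicate that is always true (>= 0), always false (>= 2),
+         or true for every other element (= 0). -->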
+    <query>SELECT arraySlice(arrayFill(x -> ((x % 2) >= 0), range(100000000)), 1, 10) FORMAT Null</query>
+    <query>SELECT arraySlice(arrayFill(x -> (((x.1) % 2) >= 0), arrayMap(x -> (x, toString(x)), range(100000000))), 1, 10) FORMAT Null</query>
+    <query>SELECT arraySlice(arrayFill(x -> ((x % 2) >= 2), range(100000000)), 1, 10) FORMAT Null</query>
+    <query>SELECT arraySlice(arrayFill(x -> (((x.1) % 2) >= 2), arrayMap(x -> (x, toString(x)), range(100000000))), 1, 10) FORMAT Null</query>
+    <query>SELECT arraySlice(arrayFill(x -> ((x % 2) = 0), range(100000000)), 1, 10) FORMAT Null</query>
+    <query>SELECT arraySlice(arrayFill(x -> (((x.1) % 2) = 0), arrayMap(x -> (x, toString(x)), range(100000000))), 1, 10) FORMAT Null</query>
+</test>
diff --git a/dbms/tests/performance/array_join.xml b/tests/performance/array_join.xml
similarity index 100%
rename from dbms/tests/performance/array_join.xml
rename to tests/performance/array_join.xml
diff --git a/dbms/tests/performance/array_reduce.xml b/tests/performance/array_reduce.xml
similarity index 100%
rename from dbms/tests/performance/array_reduce.xml
rename to tests/performance/array_reduce.xml
diff --git a/dbms/tests/performance/base64.xml b/tests/performance/base64.xml
similarity index 100%
rename from dbms/tests/performance/base64.xml
rename to tests/performance/base64.xml
diff --git a/dbms/tests/performance/base64_hits.xml b/tests/performance/base64_hits.xml
similarity index 100%
rename from dbms/tests/performance/base64_hits.xml
rename to tests/performance/base64_hits.xml
diff --git a/dbms/tests/performance/basename.xml b/tests/performance/basename.xml
similarity index 100%
rename from dbms/tests/performance/basename.xml
rename to tests/performance/basename.xml
diff --git a/dbms/tests/performance/bitCount.xml b/tests/performance/bitCount.xml
similarity index 100%
rename from dbms/tests/performance/bitCount.xml
rename to tests/performance/bitCount.xml
diff --git a/dbms/tests/performance/bit_operations_fixed_string.xml b/tests/performance/bit_operations_fixed_string.xml
similarity index 100%
rename from dbms/tests/performance/bit_operations_fixed_string.xml
rename to tests/performance/bit_operations_fixed_string.xml
diff --git a/dbms/tests/performance/bit_operations_fixed_string_numbers.xml b/tests/performance/bit_operations_fixed_string_numbers.xml
similarity index 100%
rename from dbms/tests/performance/bit_operations_fixed_string_numbers.xml
rename to tests/performance/bit_operations_fixed_string_numbers.xml
diff --git a/dbms/tests/performance/bloom_filter.xml b/tests/performance/bloom_filter.xml
similarity index 100%
rename from dbms/tests/performance/bloom_filter.xml
rename to tests/performance/bloom_filter.xml
diff --git a/dbms/tests/performance/bounding_ratio.xml b/tests/performance/bounding_ratio.xml
similarity index 100%
rename from dbms/tests/performance/bounding_ratio.xml
rename to tests/performance/bounding_ratio.xml
diff --git a/dbms/tests/performance/cidr.xml b/tests/performance/cidr.xml
similarity index 100%
rename from dbms/tests/performance/cidr.xml
rename to tests/performance/cidr.xml
diff --git a/dbms/tests/performance/codecs_float_insert.xml b/tests/performance/codecs_float_insert.xml
similarity index 100%
rename from dbms/tests/performance/codecs_float_insert.xml
rename to tests/performance/codecs_float_insert.xml
diff --git a/dbms/tests/performance/codecs_float_select.xml b/tests/performance/codecs_float_select.xml
similarity index 100%
rename from dbms/tests/performance/codecs_float_select.xml
rename to tests/performance/codecs_float_select.xml
diff --git a/dbms/tests/performance/codecs_int_insert.xml b/tests/performance/codecs_int_insert.xml
similarity index 100%
rename from dbms/tests/performance/codecs_int_insert.xml
rename to tests/performance/codecs_int_insert.xml
diff --git a/dbms/tests/performance/codecs_int_select.xml b/tests/performance/codecs_int_select.xml
similarity index 100%
rename from dbms/tests/performance/codecs_int_select.xml
rename to tests/performance/codecs_int_select.xml
diff --git a/dbms/tests/performance/collations.xml b/tests/performance/collations.xml
similarity index 100%
rename from dbms/tests/performance/collations.xml
rename to tests/performance/collations.xml
diff --git a/dbms/tests/performance/column_column_comparison.xml b/tests/performance/column_column_comparison.xml
similarity index 100%
rename from dbms/tests/performance/column_column_comparison.xml
rename to tests/performance/column_column_comparison.xml
diff --git a/dbms/tests/performance/columns_hashing.xml b/tests/performance/columns_hashing.xml
similarity index 100%
rename from dbms/tests/performance/columns_hashing.xml
rename to tests/performance/columns_hashing.xml
diff --git a/dbms/tests/performance/complex_array_creation.xml b/tests/performance/complex_array_creation.xml
similarity index 100%
rename from dbms/tests/performance/complex_array_creation.xml
rename to tests/performance/complex_array_creation.xml
diff --git a/tests/performance/concat_hits.xml b/tests/performance/concat_hits.xml
new file mode 100644
index 00000000000..c4d8461ce04
--- /dev/null
+++ b/tests/performance/concat_hits.xml
@@ -0,0 +1,35 @@
+<test>
+    <preconditions>
+        <table_exists>test.hits</table_exists>
+    </preconditions>
+
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, URL))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, SearchPhrase))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(MobilePhoneModel, SearchPhrase))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, 'Hello'))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat('World', SearchPhrase))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(MobilePhoneModel, 'Hello'))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(PageCharset, 'a'))</query>
+
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}{{}}', URL, URL))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}{{}}', URL, SearchPhrase))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}{{}}', MobilePhoneModel, SearchPhrase))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}Hello', URL))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('World{{}}', SearchPhrase))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}Hello', MobilePhoneModel))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}a', PageCharset))</query>
+
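+    <!-- Same comparison with three-argument concat() and the corresponding
+         format() patterns. -->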
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, URL, URL))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, SearchPhrase, MobilePhoneModel))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(URL, 'Hello', URL))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat('Hello', SearchPhrase, 'World'))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat(MobilePhoneModel, 'Hello', PageCharset))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(concat('a', PageCharset, 'b'))</query>
+
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}{{}}{{}}', URL, URL, URL))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}{{}}{{}}', URL, SearchPhrase, MobilePhoneModel))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}Hello{{}}', URL, URL))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('Hello{{}}World', SearchPhrase))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('{{}}Hello{{}}', MobilePhoneModel, PageCharset))</query>
+    <query>SELECT count() FROM test.hits WHERE NOT ignore(format('a{{}}b', PageCharset))</query>
+</test>
diff --git a/dbms/tests/performance/conditional.xml b/tests/performance/conditional.xml
similarity index 100%
rename from dbms/tests/performance/conditional.xml
rename to tests/performance/conditional.xml
diff --git a/dbms/tests/performance/consistent_hashes.xml b/tests/performance/consistent_hashes.xml
similarity index 100%
rename from dbms/tests/performance/consistent_hashes.xml
rename to tests/performance/consistent_hashes.xml
diff --git a/dbms/tests/performance/constant_column_comparison.xml b/tests/performance/constant_column_comparison.xml
similarity index 100%
rename from dbms/tests/performance/constant_column_comparison.xml
rename to tests/performance/constant_column_comparison.xml
diff --git a/dbms/tests/performance/constant_column_search.xml b/tests/performance/constant_column_search.xml
similarity index 100%
rename from dbms/tests/performance/constant_column_search.xml
rename to tests/performance/constant_column_search.xml
diff --git a/dbms/tests/performance/count.xml b/tests/performance/count.xml
similarity index 100%
rename from dbms/tests/performance/count.xml
rename to tests/performance/count.xml
diff --git a/dbms/tests/performance/cpu_synthetic.xml b/tests/performance/cpu_synthetic.xml
similarity index 100%
rename from dbms/tests/performance/cpu_synthetic.xml
rename to tests/performance/cpu_synthetic.xml
diff --git a/dbms/tests/performance/create_benchmark_page.py b/tests/performance/create_benchmark_page.py
similarity index 100%
rename from dbms/tests/performance/create_benchmark_page.py
rename to tests/performance/create_benchmark_page.py
diff --git a/dbms/tests/performance/cryptographic_hashes.xml b/tests/performance/cryptographic_hashes.xml
similarity index 100%
rename from dbms/tests/performance/cryptographic_hashes.xml
rename to tests/performance/cryptographic_hashes.xml
diff --git a/dbms/tests/performance/date_parsing.xml b/tests/performance/date_parsing.xml
similarity index 100%
rename from dbms/tests/performance/date_parsing.xml
rename to tests/performance/date_parsing.xml
diff --git a/dbms/tests/performance/date_time.xml b/tests/performance/date_time.xml
similarity index 100%
rename from dbms/tests/performance/date_time.xml
rename to tests/performance/date_time.xml
diff --git a/dbms/tests/performance/date_time_64.xml b/tests/performance/date_time_64.xml
similarity index 100%
rename from dbms/tests/performance/date_time_64.xml
rename to tests/performance/date_time_64.xml
diff --git a/dbms/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml
similarity index 100%
rename from dbms/tests/performance/decimal_aggregates.xml
rename to tests/performance/decimal_aggregates.xml
diff --git a/dbms/tests/performance/distributed_aggregation.xml b/tests/performance/distributed_aggregation.xml
similarity index 100%
rename from dbms/tests/performance/distributed_aggregation.xml
rename to tests/performance/distributed_aggregation.xml
diff --git a/dbms/tests/performance/early_constant_folding.xml b/tests/performance/early_constant_folding.xml
similarity index 100%
rename from dbms/tests/performance/early_constant_folding.xml
rename to tests/performance/early_constant_folding.xml
diff --git a/dbms/tests/performance/empty_string_deserialization.xml b/tests/performance/empty_string_deserialization.xml
similarity index 100%
rename from dbms/tests/performance/empty_string_deserialization.xml
rename to tests/performance/empty_string_deserialization.xml
diff --git a/dbms/tests/performance/empty_string_serialization.xml b/tests/performance/empty_string_serialization.xml
similarity index 100%
rename from dbms/tests/performance/empty_string_serialization.xml
rename to tests/performance/empty_string_serialization.xml
diff --git a/dbms/tests/performance/entropy.xml b/tests/performance/entropy.xml
similarity index 100%
rename from dbms/tests/performance/entropy.xml
rename to tests/performance/entropy.xml
diff --git a/dbms/tests/performance/first_significant_subdomain.xml b/tests/performance/first_significant_subdomain.xml
similarity index 100%
rename from dbms/tests/performance/first_significant_subdomain.xml
rename to tests/performance/first_significant_subdomain.xml
diff --git a/dbms/tests/performance/fixed_string16.xml b/tests/performance/fixed_string16.xml
similarity index 100%
rename from dbms/tests/performance/fixed_string16.xml
rename to tests/performance/fixed_string16.xml
diff --git a/dbms/tests/performance/float_formatting.xml b/tests/performance/float_formatting.xml
similarity index 100%
rename from dbms/tests/performance/float_formatting.xml
rename to tests/performance/float_formatting.xml
diff --git a/dbms/tests/performance/float_parsing.xml b/tests/performance/float_parsing.xml
similarity index 100%
rename from dbms/tests/performance/float_parsing.xml
rename to tests/performance/float_parsing.xml
diff --git a/dbms/tests/performance/format_date_time.xml b/tests/performance/format_date_time.xml
similarity index 100%
rename from dbms/tests/performance/format_date_time.xml
rename to tests/performance/format_date_time.xml
diff --git a/dbms/tests/performance/functions_coding.xml b/tests/performance/functions_coding.xml
similarity index 100%
rename from dbms/tests/performance/functions_coding.xml
rename to tests/performance/functions_coding.xml
diff --git a/dbms/tests/performance/functions_geo.xml b/tests/performance/functions_geo.xml
similarity index 100%
rename from dbms/tests/performance/functions_geo.xml
rename to tests/performance/functions_geo.xml
diff --git a/dbms/tests/performance/general_purpose_hashes.xml b/tests/performance/general_purpose_hashes.xml
similarity index 100%
rename from dbms/tests/performance/general_purpose_hashes.xml
rename to tests/performance/general_purpose_hashes.xml
diff --git a/dbms/tests/performance/general_purpose_hashes_on_UUID.xml b/tests/performance/general_purpose_hashes_on_UUID.xml
similarity index 100%
rename from dbms/tests/performance/general_purpose_hashes_on_UUID.xml
rename to tests/performance/general_purpose_hashes_on_UUID.xml
diff --git a/dbms/tests/performance/generate_table_function.xml b/tests/performance/generate_table_function.xml
similarity index 100%
rename from dbms/tests/performance/generate_table_function.xml
rename to tests/performance/generate_table_function.xml
diff --git a/dbms/tests/performance/great_circle_dist.xml b/tests/performance/great_circle_dist.xml
similarity index 100%
rename from dbms/tests/performance/great_circle_dist.xml
rename to tests/performance/great_circle_dist.xml
diff --git a/dbms/tests/performance/group_array_moving_sum.xml b/tests/performance/group_array_moving_sum.xml
similarity index 100%
rename from dbms/tests/performance/group_array_moving_sum.xml
rename to tests/performance/group_array_moving_sum.xml
diff --git a/dbms/tests/performance/h3.xml b/tests/performance/h3.xml
similarity index 100%
rename from dbms/tests/performance/h3.xml
rename to tests/performance/h3.xml
diff --git a/dbms/tests/performance/if_array_num.xml b/tests/performance/if_array_num.xml
similarity index 100%
rename from dbms/tests/performance/if_array_num.xml
rename to tests/performance/if_array_num.xml
diff --git a/dbms/tests/performance/if_array_string.xml b/tests/performance/if_array_string.xml
similarity index 100%
rename from dbms/tests/performance/if_array_string.xml
rename to tests/performance/if_array_string.xml
diff --git a/dbms/tests/performance/if_string_const.xml b/tests/performance/if_string_const.xml
similarity index 100%
rename from dbms/tests/performance/if_string_const.xml
rename to tests/performance/if_string_const.xml
diff --git a/dbms/tests/performance/if_string_hits.xml b/tests/performance/if_string_hits.xml
similarity index 100%
rename from dbms/tests/performance/if_string_hits.xml
rename to tests/performance/if_string_hits.xml
diff --git a/dbms/tests/performance/if_to_multiif.xml b/tests/performance/if_to_multiif.xml
similarity index 100%
rename from dbms/tests/performance/if_to_multiif.xml
rename to tests/performance/if_to_multiif.xml
diff --git a/dbms/tests/performance/information_value.xml b/tests/performance/information_value.xml
similarity index 100%
rename from dbms/tests/performance/information_value.xml
rename to tests/performance/information_value.xml
diff --git a/dbms/tests/performance/insert_values_with_expressions.xml b/tests/performance/insert_values_with_expressions.xml
similarity index 100%
rename from dbms/tests/performance/insert_values_with_expressions.xml
rename to tests/performance/insert_values_with_expressions.xml
diff --git a/dbms/tests/performance/inserts_arrays_lowcardinality.xml b/tests/performance/inserts_arrays_lowcardinality.xml
similarity index 100%
rename from dbms/tests/performance/inserts_arrays_lowcardinality.xml
rename to tests/performance/inserts_arrays_lowcardinality.xml
diff --git a/dbms/tests/performance/int_parsing.xml b/tests/performance/int_parsing.xml
similarity index 100%
rename from dbms/tests/performance/int_parsing.xml
rename to tests/performance/int_parsing.xml
diff --git a/dbms/tests/performance/jit_large_requests.xml b/tests/performance/jit_large_requests.xml
similarity index 100%
rename from dbms/tests/performance/jit_large_requests.xml
rename to tests/performance/jit_large_requests.xml
diff --git a/dbms/tests/performance/jit_small_requests.xml b/tests/performance/jit_small_requests.xml
similarity index 100%
rename from dbms/tests/performance/jit_small_requests.xml
rename to tests/performance/jit_small_requests.xml
diff --git a/dbms/tests/performance/joins_in_memory.xml b/tests/performance/joins_in_memory.xml
similarity index 100%
from dbms/tests/performance/joins_in_memory.xml rename to tests/performance/joins_in_memory.xml diff --git a/dbms/tests/performance/joins_in_memory_pmj.xml b/tests/performance/joins_in_memory_pmj.xml similarity index 100% rename from dbms/tests/performance/joins_in_memory_pmj.xml rename to tests/performance/joins_in_memory_pmj.xml diff --git a/dbms/tests/performance/json_extract_rapidjson.xml b/tests/performance/json_extract_rapidjson.xml similarity index 100% rename from dbms/tests/performance/json_extract_rapidjson.xml rename to tests/performance/json_extract_rapidjson.xml diff --git a/dbms/tests/performance/json_extract_simdjson.xml b/tests/performance/json_extract_simdjson.xml similarity index 100% rename from dbms/tests/performance/json_extract_simdjson.xml rename to tests/performance/json_extract_simdjson.xml diff --git a/dbms/tests/performance/leftpad.xml b/tests/performance/leftpad.xml similarity index 100% rename from dbms/tests/performance/leftpad.xml rename to tests/performance/leftpad.xml diff --git a/dbms/tests/performance/linear_regression.xml b/tests/performance/linear_regression.xml similarity index 100% rename from dbms/tests/performance/linear_regression.xml rename to tests/performance/linear_regression.xml diff --git a/dbms/tests/performance/logical_functions_large.xml b/tests/performance/logical_functions_large.xml similarity index 100% rename from dbms/tests/performance/logical_functions_large.xml rename to tests/performance/logical_functions_large.xml diff --git a/dbms/tests/performance/logical_functions_medium.xml b/tests/performance/logical_functions_medium.xml similarity index 100% rename from dbms/tests/performance/logical_functions_medium.xml rename to tests/performance/logical_functions_medium.xml diff --git a/dbms/tests/performance/logical_functions_small.xml b/tests/performance/logical_functions_small.xml similarity index 100% rename from dbms/tests/performance/logical_functions_small.xml rename to tests/performance/logical_functions_small.xml diff --git a/dbms/tests/performance/math.xml b/tests/performance/math.xml similarity index 100% rename from dbms/tests/performance/math.xml rename to tests/performance/math.xml diff --git a/dbms/tests/performance/merge_table_streams.xml b/tests/performance/merge_table_streams.xml similarity index 100% rename from dbms/tests/performance/merge_table_streams.xml rename to tests/performance/merge_table_streams.xml diff --git a/dbms/tests/performance/merge_tree_huge_pk.xml b/tests/performance/merge_tree_huge_pk.xml similarity index 100% rename from dbms/tests/performance/merge_tree_huge_pk.xml rename to tests/performance/merge_tree_huge_pk.xml diff --git a/dbms/tests/performance/merge_tree_many_partitions.xml b/tests/performance/merge_tree_many_partitions.xml similarity index 100% rename from dbms/tests/performance/merge_tree_many_partitions.xml rename to tests/performance/merge_tree_many_partitions.xml diff --git a/dbms/tests/performance/merge_tree_many_partitions_2.xml b/tests/performance/merge_tree_many_partitions_2.xml similarity index 100% rename from dbms/tests/performance/merge_tree_many_partitions_2.xml rename to tests/performance/merge_tree_many_partitions_2.xml diff --git a/dbms/tests/performance/merge_tree_simple_select.xml b/tests/performance/merge_tree_simple_select.xml similarity index 100% rename from dbms/tests/performance/merge_tree_simple_select.xml rename to tests/performance/merge_tree_simple_select.xml diff --git a/dbms/tests/performance/mingroupby-orderbylimit1.xml 
b/tests/performance/mingroupby-orderbylimit1.xml similarity index 100% rename from dbms/tests/performance/mingroupby-orderbylimit1.xml rename to tests/performance/mingroupby-orderbylimit1.xml diff --git a/dbms/tests/performance/modulo.xml b/tests/performance/modulo.xml similarity index 100% rename from dbms/tests/performance/modulo.xml rename to tests/performance/modulo.xml diff --git a/dbms/tests/performance/ngram_distance.xml b/tests/performance/ngram_distance.xml similarity index 100% rename from dbms/tests/performance/ngram_distance.xml rename to tests/performance/ngram_distance.xml diff --git a/dbms/tests/performance/number_formatting_formats.xml b/tests/performance/number_formatting_formats.xml similarity index 100% rename from dbms/tests/performance/number_formatting_formats.xml rename to tests/performance/number_formatting_formats.xml diff --git a/dbms/tests/performance/nyc_taxi.xml b/tests/performance/nyc_taxi.xml similarity index 100% rename from dbms/tests/performance/nyc_taxi.xml rename to tests/performance/nyc_taxi.xml diff --git a/dbms/tests/performance/order_by_decimals.xml b/tests/performance/order_by_decimals.xml similarity index 100% rename from dbms/tests/performance/order_by_decimals.xml rename to tests/performance/order_by_decimals.xml diff --git a/dbms/tests/performance/order_by_read_in_order.xml b/tests/performance/order_by_read_in_order.xml similarity index 100% rename from dbms/tests/performance/order_by_read_in_order.xml rename to tests/performance/order_by_read_in_order.xml diff --git a/dbms/tests/performance/order_by_single_column.xml b/tests/performance/order_by_single_column.xml similarity index 100% rename from dbms/tests/performance/order_by_single_column.xml rename to tests/performance/order_by_single_column.xml diff --git a/dbms/tests/performance/parallel_insert.xml b/tests/performance/parallel_insert.xml similarity index 100% rename from dbms/tests/performance/parallel_insert.xml rename to tests/performance/parallel_insert.xml diff --git a/dbms/tests/performance/parse_engine_file.xml b/tests/performance/parse_engine_file.xml similarity index 93% rename from dbms/tests/performance/parse_engine_file.xml rename to tests/performance/parse_engine_file.xml index fb10fa97915..a88945125b3 100644 --- a/dbms/tests/performance/parse_engine_file.xml +++ b/tests/performance/parse_engine_file.xml @@ -34,6 +34,7 @@ RowBinary Native Avro + MsgPack @@ -42,7 +43,7 @@ test.hits -SELECT * FROM table_{format} +SELECT * FROM table_{format} FORMAT Null DROP TABLE IF EXISTS table_{format} diff --git a/dbms/tests/performance/pre_limit_no_sorting.xml b/tests/performance/pre_limit_no_sorting.xml similarity index 100% rename from dbms/tests/performance/pre_limit_no_sorting.xml rename to tests/performance/pre_limit_no_sorting.xml diff --git a/dbms/tests/performance/prewhere.xml b/tests/performance/prewhere.xml similarity index 100% rename from dbms/tests/performance/prewhere.xml rename to tests/performance/prewhere.xml diff --git a/dbms/tests/performance/random_printable_ascii.xml b/tests/performance/random_printable_ascii.xml similarity index 100% rename from dbms/tests/performance/random_printable_ascii.xml rename to tests/performance/random_printable_ascii.xml diff --git a/dbms/tests/performance/range.xml b/tests/performance/range.xml similarity index 100% rename from dbms/tests/performance/range.xml rename to tests/performance/range.xml diff --git a/dbms/tests/performance/read_hits_with_aio.xml b/tests/performance/read_hits_with_aio.xml similarity index 100% rename from 
dbms/tests/performance/read_hits_with_aio.xml rename to tests/performance/read_hits_with_aio.xml diff --git a/dbms/tests/performance/right.xml b/tests/performance/right.xml similarity index 100% rename from dbms/tests/performance/right.xml rename to tests/performance/right.xml diff --git a/dbms/tests/performance/round_down.xml b/tests/performance/round_down.xml similarity index 100% rename from dbms/tests/performance/round_down.xml rename to tests/performance/round_down.xml diff --git a/dbms/tests/performance/round_methods.xml b/tests/performance/round_methods.xml similarity index 100% rename from dbms/tests/performance/round_methods.xml rename to tests/performance/round_methods.xml diff --git a/dbms/tests/performance/scalar.xml b/tests/performance/scalar.xml similarity index 100% rename from dbms/tests/performance/scalar.xml rename to tests/performance/scalar.xml diff --git a/dbms/tests/performance/select_format.xml b/tests/performance/select_format.xml similarity index 94% rename from dbms/tests/performance/select_format.xml rename to tests/performance/select_format.xml index b8df874304f..2bdbde83c2d 100644 --- a/dbms/tests/performance/select_format.xml +++ b/tests/performance/select_format.xml @@ -44,6 +44,7 @@ ODBCDriver2 MySQLWire Avro + diff --git a/dbms/tests/performance/set.xml b/tests/performance/set.xml similarity index 100% rename from dbms/tests/performance/set.xml rename to tests/performance/set.xml diff --git a/dbms/tests/performance/set_hits.xml b/tests/performance/set_hits.xml similarity index 100% rename from dbms/tests/performance/set_hits.xml rename to tests/performance/set_hits.xml diff --git a/dbms/tests/performance/set_index.xml b/tests/performance/set_index.xml similarity index 99% rename from dbms/tests/performance/set_index.xml rename to tests/performance/set_index.xml index 090d8ac8c08..f158c481d93 100644 --- a/dbms/tests/performance/set_index.xml +++ b/tests/performance/set_index.xml @@ -14,7 +14,14 @@ - SELECT count() FROM test_in WHERE a IN (SELECT rand(1) FROM zeros(100000)) SETTINGS max_rows_to_read = 1, read_overflow_mode = 'break' + SELECT count() FROM test_in WHERE a IN (SELECT rand(1) FROM numbers(100000)) SETTINGS max_rows_to_read = 1, read_overflow_mode = 'break' + + SELECT count() FROM test_in WHERE toInt64(a) IN (SELECT toInt64(rand(1)) FROM numbers(100000)) settings max_rows_to_read=1, read_overflow_mode='break' + + + SELECT count() FROM test_in WHERE -toInt64(a) IN (SELECT toInt64(rand(1)) FROM numbers(100000)) settings max_rows_to_read=1, read_overflow_mode='break' + + SELECT count() FROM test_in WHERE -toInt64(a) NOT IN (SELECT toInt64(rand(1)) FROM numbers(100000)) settings max_rows_to_read=1, read_overflow_mode='break' SELECT count() FROM numbers(1000) WHERE toString(number) IN ('41577', '83972', '51697', '50014', '37553', '93459', '87438', '95971', '83186', '74326', '67871', '50406', '83678', '29655', '18580', '83905', '61518', '29059', '56700', '82787', '98672', '30884', '81822', '39850', '80852', '57627', '91346', '64522', '17781', '49467', '41099', '41929', '85618', '91389', '68564', '91769', '81219', '52218', '37220', '97097', '2129', '9886', '52049', '34847', '25364', '36429', '76897', '71868', '58121', '71199', '84819', '69991', '34046', '64507', '34892', '24228', '36986', '28588', '51159', '53444', '80531', '9941', '20256', '48103', '32565', '62890', '5379', '60302', '46434', '3205', '18821', '31030', '19794', '71557', '71703', '15024', '14004', '82164', '95659', '40227', '83358', '24395', '9610', '19814', '48491', '66412', '16012', 
'71586', '42143', '51103', '24463', '89949', '35694', '39193', '63904', '40489', '77144', '94014', '84836', '9980', '46554', '43905', '25588', '25205', '72624', '10249', '35888', '98478', '99030', '26834', '31', '81499', '14847', '82997', '92357', '92893', '17426', '56630', '22252', '68119', '62710', '8740', '82144', '79916', '23391', '30192', '99271', '96435', '44237', '98327', '69481', '16691', '13643', '84554', '38571', '70926', '99283', '79000', '20926', '86495', '4834', '1222', '39486', '57697', '58002', '40790', '15623', '3999', '31515', '12694', '26143', '35951', '54085', '97534', '35329', '73535', '88715', '29572', '75799', '45166', '32066', '48023', '69523', '93150', '8740', '96790', '15534', '63252', '5142', '67045', '93992', '16663', '292', '63924', '6588', '12190', '31506', '69590', '35394', '55168', '65223', '79183', '32600', '69676', '28316', '72111', '53531', '15073', '41127', '73451', '24725', '61647', '65315', '41143', '26493', '95608', '34407', '76098', '53105', '83691', '48755', '35696', '62587', '81826', '3963', '45766', '82751', '12430', '97685', '29919', '78155', '71636', '50215', '89734', '9892', '47151', '54855', '3428', '9712', '52592', '2403', '79602', '81243', '79859', '57361', '82000', '42107', '28860', '99591', '28296', '57337', '64969', '32332', '25535', '30924', '21313', '32554', '17342', '87311', '19825', '24898', '61323', '83209', '79322', '79009', '50746', '33396', '62033', '16548', '17427', '24073', '34640', '52368', '4724', '80408', '40', '33787', '16666', '19665', '86751', '27264', '2241', '88134', '53566', '10589', '79711', '92823', '58972', '91767', '60885', '51659', '7867', '96849', '30360', '20914', '9584', '1250', '22871', '23282', '99312', '4683', '33429', '68361', '82614', '81440', '47863', '69790', '11968', '75210', '66854', '37002', '61142', '71514', '1588', '42336', '11069', '26291', '2261', '71056', '13492', '9133', '91216', '72207', '71586', '86535', '83898', '24392', '45384', '48545', '61972', '503', '80180', '35834', '97025', '70411', '55039', '35430', '27631', '82533', '96831', '74077', '42533', '14451', '26943', '53783', '69489', '71969', '8432', '37230', '61348', '19472', '59115', '9886', '50951', '57109', '7141', '1902', '84130', '4323', '55889', '47784', '2220', '75988', '66988', '63721', '8131', '95601', '95207', '2311', '26541', '50991', '6717', '2969', '71857', '51034', '65958', '94716', '90275', '21012', '46859', '7984', '31131', '46457', '69578', '44540', '7294', '80117', '9925', '60155', '90608', '82684', '32193', '87071', '28006', '87604', '24501', '79087', '2848', '29237', '11221', '81319', '40966', '87641', '35325', '78705', '88636', '78717', '62831', '56390', '99271', '43821', '14453', '17923', '62695', '77322', '21038', '67677', '41271', '4376', '65426', '46091', '19887', '97251', '55583', '58763', '3826', '35037', '73533', '64267', '82319', '9836', '42622', '96829', '16363', '10455', '49290', '99992', '98229', '66356', '59087', '73998', '25986', '4279', '56790', '69540', '588', '36620', '60358', '45056', '89297', '42740', '8323', '19245', '82417', '41431', '699', '11554', '73910', '44491', '56019', '68901', '45816', '68126', '89379', '23885', '13263', '56395', '73130', '19089', '23771', '10335', '48547', '16903', '6453', '33560', '89668', '38159', '43177', '90655', '49712', '62', '66920', '34180', '12150', '48564', '39538', '85026', '87195', '14928', '8956', '71157', '53287', '39161', '67583', '83309', '92054', '86977', '56188', '15229', '88170', '60894', '58497', '89254', '40082', '86890', '60161', '97291', '45878', 
'23368', '14577', '92870', '37017', '97356', '99426', '76061', '89186', '99751', '85153', '61580', '39360', '90107', '25603', '26798', '76224', '6469', '7912', '69838', '16404', '67497', '28965', '80836', '80365', '91249', '48713', '17113', '33090', '40793', '70450', '66689', '83698', '17802', '43869', '13355', '18959', '79411', '87930', '9265', '37504', '44876', '97234', '94149', '35040', '22049', '49248', '6535', '36080', '28346', '94437', '78319', '17961', '89056', '56161', '35810', '41632', '45494', '53351', '89729', '99510', '51584', '59688', '6193', '70809', '51093', '92589', '90247', '34910', '78235', '17362', '49423', '63324', '525', '37638', '72325', '89356', '15298', '59116', '17848', '65429', '27029', '84781', '70247', '8825', '35082', '70451', '22522', '58125', '91879', '90531', '2478', '463', '37902', '54405', '87267', '72688', '22803', '33134', '35177', '84551', '44974', '88375', '76407', '27774', '33849', '19915', '82014', '80434', '26380', '48777', '53811', '14838', '26829', '56441', '99869', '49574', '85476', '19723', '16907', '4018', '37338', '78510', '47912', '13030', '65277', '95716', '67363', '21393', '89887', '78842', '81650', '903', '17436', '30704', '49223', '27198', '25500', '52214', '54258', '70082', '53950', '49312', '43615', '99473', '94348', '53661', '96213', '96346', '62010', '38268', '32861', '75660', '10392', '89491', '68335', '29817', '88706', '24184', '36298', '43440', '21626', '26535', '44560', '46363', '12534', '99070', '95606', '33714', '73070', '8303', '29853', '23014', '99982', '4530', '14955', '45803', '50', '90750', '30394', '81276', '95563', '47314', '58520', '91299', '88944', '54402', '67405', '29253', '47079', '71734', '99728', '17652', '13307', '35556', '18962', '26780', '17771', '53712', '60055', '37628', '35830', '90739', '61151', '41309', '27652', '3051', '53167', '98417', '19382', '36833', '75085', '65374', '87732', '30352', '31776', '32765', '97565', '92199', '49050', '29503', '51024', '18834', '8515', '24069', '96216', '10777', '90680', '18974', '68884', '85305', '36007', '56707', '4212', '47352', '34426', '13185', '92939', '95782', '70577', '58080', '98279', '3906', '5065', '56896', '16382', '31273', '17117', '98602', '12786', '24086', '63970', '72756', '35798', '82367', '7356', '53398', '68503', '2962', '16425', '67334', '68461', '65439', '15620', '70906', '29649', '46461', '74602', '38012', '71714', '16825', '89480', '53386', '88532', '35104', '28556', '82120', '23155', '23347', '24797', '60061', '54962', '99427', '82248', '82447', '39968', '63727', '27431', '81511', '91168', '71425', '80740', '84127', '40717', '15503', '15419', '46594', '61263', '19212', '53175', '70724', '74445', '23034', '71818', '40246', '18886', '53066', '4880', '83701', '86107', '87862', '44751', '392', '73440', '90291', '93395', '20894', '38463', '32664', '55158', '20090', '50004', '79070', '98471', '85478', '96615', '68149', '78334', '97752', '73207', '71678', '91238', '96757', '82598', '194', '35797', '45120', '60782', '28721', '17676', '78066', '60957', '11826', '51563', '50516', '16485', '47053', '31738', '48923', '23554', '96850', '42033', '73701', '78607', '45979', '54571', '12415', '31693', '15356', '36902', '9126', '3767', '3295', '90402', '24005', '95350', '67033', '49137', '72606', '51899', '17522', '31957', '44641', '53982', '23767', '68257', '15766', '19995', '2107', '48788', '11765', '91055', '46576', '54651', '50381', '62827', '73636', '46606', '98753', '37631', '70441', '87916', '66983', '33870', '31125', '12904', '57040', '4874', '58632', '42037', 
'18782', '5998', '18974', '57949', '81010', '90407', '99874', '20462', '89949', '10952', '71454', '95130', '46115', '3518', '13384', '69039', '79482', '22076', '59782', '32042', '40930', '60243', '29298', '6790', '46985', '44398', '85631', '14380', '66179', '2629', '32126', '49833', '14118', '58492', '31493', '81172', '96638', '8745', '89663', '76842', '78633', '41373', '83721', '42886', '11123', '32739', '11051', '1303', '92314', '83324', '85600', '44276', '69064', '56125', '84650', '31028', '12628', '14502', '64764', '39405', '44855', '79046', '51716', '46824', '83389', '1941', '1257', '9280', '73176', '84729', '2579', '63366', '22606', '35541', '51096', '13447', '18355', '68037', '28436', '94116', '81070', '78355', '67897', '5296', '32742', '77645', '91853', '18767', '67949', '40963', '5792', '17278', '25597', '41884', '80829', '7099', '18645', '60295', '12082', '81800', '78415', '18082', '38789', '16295', '72377', '74949', '55583', '66853', '15402', '72977', '15123', '99434', '34999', '21687', '76049', '42987', '83748', '88256', '66688', '21766', '20304', '29271', '10069', '19822', '11792', '42526', '74143', '17289', '30253', '6367', '20888', '12975', '94073', '98639', '30134', '26320', '65507', '69002', '53120', '4550', '38893', '18954', '38283', '54863', '17698', '99670', '10521', '92467', '60994', '18052', '48673', '35811', '87282', '62706', '16061', '53112', '22652', '37780', '55662', '26331', '49410', '79074', '10623', '69577', '79613', '9491', '31229', '43922', '84231', '58409', '36386', '46875', '74431', '76735', '38776', '23350', '7314', '9079', '51519', '98544', '70216', '63380', '90381', '1295', '46901', '58225', '55339', '89918', '75522', '35431', '89460', '49552', '89302', '23068', '28493', '3042', '25194', '59520', '9810', '95706', '81297', '89638', '54794', '94527', '45262', '97932', '78685', '6947', '22818', '48700', '9153', '12289', '22011', '58825', '93854', '65438', '4509', '33741', '28208', '69061', '48578', '40247', '77725', '31837', '39003', '69363', '78113', '76398', '97262', '67795',
diff --git a/dbms/tests/performance/simple_join_query.xml b/tests/performance/simple_join_query.xml
similarity index 100%
rename from dbms/tests/performance/simple_join_query.xml
rename to tests/performance/simple_join_query.xml
diff --git a/dbms/tests/performance/slices_hits.xml b/tests/performance/slices_hits.xml
similarity index 100%
rename from dbms/tests/performance/slices_hits.xml
rename to tests/performance/slices_hits.xml
diff --git a/dbms/tests/performance/sort.xml b/tests/performance/sort.xml
similarity index 100%
rename from dbms/tests/performance/sort.xml
rename to tests/performance/sort.xml
diff --git a/dbms/tests/performance/string_join.xml b/tests/performance/string_join.xml
similarity index 100%
rename from dbms/tests/performance/string_join.xml
rename to tests/performance/string_join.xml
diff --git a/dbms/tests/performance/string_set.xml b/tests/performance/string_set.xml
similarity index 100%
rename from dbms/tests/performance/string_set.xml
rename to tests/performance/string_set.xml
diff --git a/dbms/tests/performance/string_sort.xml b/tests/performance/string_sort.xml
similarity index 100%
rename from dbms/tests/performance/string_sort.xml
rename to tests/performance/string_sort.xml
diff --git a/dbms/tests/performance/sum_map.xml b/tests/performance/sum_map.xml
similarity index 100%
rename from dbms/tests/performance/sum_map.xml
rename to tests/performance/sum_map.xml
diff --git a/dbms/tests/performance/synthetic_hardware_benchmark.xml b/tests/performance/synthetic_hardware_benchmark.xml
similarity index 100%
rename from dbms/tests/performance/synthetic_hardware_benchmark.xml
rename to tests/performance/synthetic_hardware_benchmark.xml
diff --git a/dbms/tests/performance/trim_numbers.xml b/tests/performance/trim_numbers.xml
similarity index 100%
rename from dbms/tests/performance/trim_numbers.xml
rename to tests/performance/trim_numbers.xml
diff --git a/dbms/tests/performance/trim_urls.xml b/tests/performance/trim_urls.xml
similarity index 100%
rename from dbms/tests/performance/trim_urls.xml
rename to tests/performance/trim_urls.xml
diff --git a/dbms/tests/performance/trim_whitespace.xml b/tests/performance/trim_whitespace.xml
similarity index 100%
rename from dbms/tests/performance/trim_whitespace.xml
rename to tests/performance/trim_whitespace.xml
diff --git a/dbms/tests/performance/uniq.xml b/tests/performance/uniq.xml
similarity index 100%
rename from dbms/tests/performance/uniq.xml
rename to tests/performance/uniq.xml
diff --git a/dbms/tests/performance/url_hits.xml b/tests/performance/url_hits.xml
similarity index 100%
rename from dbms/tests/performance/url_hits.xml
rename to tests/performance/url_hits.xml
diff --git a/dbms/tests/performance/vectorize_aggregation_combinators.xml b/tests/performance/vectorize_aggregation_combinators.xml
similarity index 100%
rename from dbms/tests/performance/vectorize_aggregation_combinators.xml
rename to tests/performance/vectorize_aggregation_combinators.xml
diff --git a/dbms/tests/performance/visit_param_extract_raw.xml b/tests/performance/visit_param_extract_raw.xml
similarity index 100%
rename from dbms/tests/performance/visit_param_extract_raw.xml
rename to tests/performance/visit_param_extract_raw.xml
diff --git a/dbms/tests/performance/website.xml b/tests/performance/website.xml
similarity index 100%
rename from dbms/tests/performance/website.xml
rename to tests/performance/website.xml
diff --git a/dbms/tests/queries/.gitignore b/tests/queries/.gitignore
similarity index 100%
rename from dbms/tests/queries/.gitignore
rename to tests/queries/.gitignore
diff --git a/dbms/tests/queries/0_stateless/00001_select_1.reference b/tests/queries/0_stateless/00001_select_1.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00001_select_1.reference
rename to tests/queries/0_stateless/00001_select_1.reference
diff --git a/dbms/tests/queries/0_stateless/00001_select_1.sql b/tests/queries/0_stateless/00001_select_1.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00001_select_1.sql
rename to tests/queries/0_stateless/00001_select_1.sql
diff --git a/dbms/tests/queries/0_stateless/00002_system_numbers.reference b/tests/queries/0_stateless/00002_system_numbers.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00002_system_numbers.reference
rename to tests/queries/0_stateless/00002_system_numbers.reference
diff --git a/dbms/tests/queries/0_stateless/00002_system_numbers.sql b/tests/queries/0_stateless/00002_system_numbers.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00002_system_numbers.sql
rename to tests/queries/0_stateless/00002_system_numbers.sql
diff --git a/dbms/tests/queries/0_stateless/00003_reinterpret_as_string.reference b/tests/queries/0_stateless/00003_reinterpret_as_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00003_reinterpret_as_string.reference
rename to tests/queries/0_stateless/00003_reinterpret_as_string.reference
diff --git a/dbms/tests/queries/0_stateless/00003_reinterpret_as_string.sql b/tests/queries/0_stateless/00003_reinterpret_as_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00003_reinterpret_as_string.sql
rename to tests/queries/0_stateless/00003_reinterpret_as_string.sql
diff --git a/dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference b/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference
rename to tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference
diff --git a/dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql b/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql
rename to tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql
diff --git a/dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference b/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference
rename to tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference
diff --git a/dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql b/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql
rename to tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql
diff --git a/dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.reference b/tests/queries/0_stateless/00006_extremes_and_subquery_from.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.reference
rename to tests/queries/0_stateless/00006_extremes_and_subquery_from.reference
diff --git a/dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.sql b/tests/queries/0_stateless/00006_extremes_and_subquery_from.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.sql
rename to tests/queries/0_stateless/00006_extremes_and_subquery_from.sql
diff --git a/dbms/tests/queries/0_stateless/00007_array.reference b/tests/queries/0_stateless/00007_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00007_array.reference
rename to tests/queries/0_stateless/00007_array.reference
diff --git a/dbms/tests/queries/0_stateless/00007_array.sql b/tests/queries/0_stateless/00007_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00007_array.sql
rename to tests/queries/0_stateless/00007_array.sql
diff --git a/dbms/tests/queries/0_stateless/00008_array_join.reference b/tests/queries/0_stateless/00008_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00008_array_join.reference
rename to tests/queries/0_stateless/00008_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00008_array_join.sql b/tests/queries/0_stateless/00008_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00008_array_join.sql
rename to tests/queries/0_stateless/00008_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00009_array_join_subquery.reference b/tests/queries/0_stateless/00009_array_join_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00009_array_join_subquery.reference
rename to tests/queries/0_stateless/00009_array_join_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00009_array_join_subquery.sql b/tests/queries/0_stateless/00009_array_join_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00009_array_join_subquery.sql
rename to tests/queries/0_stateless/00009_array_join_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00010_big_array_join.reference b/tests/queries/0_stateless/00010_big_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00010_big_array_join.reference
rename to tests/queries/0_stateless/00010_big_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00010_big_array_join.sql b/tests/queries/0_stateless/00010_big_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00010_big_array_join.sql
rename to tests/queries/0_stateless/00010_big_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00011_array_join_alias.reference b/tests/queries/0_stateless/00011_array_join_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00011_array_join_alias.reference
rename to tests/queries/0_stateless/00011_array_join_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00011_array_join_alias.sql b/tests/queries/0_stateless/00011_array_join_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00011_array_join_alias.sql
rename to tests/queries/0_stateless/00011_array_join_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00012_array_join_alias_2.reference b/tests/queries/0_stateless/00012_array_join_alias_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00012_array_join_alias_2.reference
rename to tests/queries/0_stateless/00012_array_join_alias_2.reference
diff --git a/dbms/tests/queries/0_stateless/00012_array_join_alias_2.sql b/tests/queries/0_stateless/00012_array_join_alias_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00012_array_join_alias_2.sql
rename to tests/queries/0_stateless/00012_array_join_alias_2.sql
diff --git a/dbms/tests/queries/0_stateless/00013_create_table_with_arrays.reference b/tests/queries/0_stateless/00013_create_table_with_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00013_create_table_with_arrays.reference
rename to tests/queries/0_stateless/00013_create_table_with_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00013_create_table_with_arrays.sql b/tests/queries/0_stateless/00013_create_table_with_arrays.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00013_create_table_with_arrays.sql
rename to tests/queries/0_stateless/00013_create_table_with_arrays.sql
diff --git a/dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.reference b/tests/queries/0_stateless/00014_select_from_table_with_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.reference
rename to tests/queries/0_stateless/00014_select_from_table_with_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.sql b/tests/queries/0_stateless/00014_select_from_table_with_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.sql
rename to tests/queries/0_stateless/00014_select_from_table_with_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00015_totals_having_constants.reference b/tests/queries/0_stateless/00015_totals_having_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00015_totals_having_constants.reference
rename to tests/queries/0_stateless/00015_totals_having_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00015_totals_having_constants.sql b/tests/queries/0_stateless/00015_totals_having_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00015_totals_having_constants.sql
rename to tests/queries/0_stateless/00015_totals_having_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00016_totals_having_constants.reference b/tests/queries/0_stateless/00016_totals_having_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00016_totals_having_constants.reference
rename to tests/queries/0_stateless/00016_totals_having_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00016_totals_having_constants.sql b/tests/queries/0_stateless/00016_totals_having_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00016_totals_having_constants.sql
rename to tests/queries/0_stateless/00016_totals_having_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference b/tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference
rename to tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference
diff --git a/dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql b/tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql
rename to tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql
diff --git a/dbms/tests/queries/0_stateless/00018_distinct_in_subquery.reference b/tests/queries/0_stateless/00018_distinct_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00018_distinct_in_subquery.reference
rename to tests/queries/0_stateless/00018_distinct_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00018_distinct_in_subquery.sql b/tests/queries/0_stateless/00018_distinct_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00018_distinct_in_subquery.sql
rename to tests/queries/0_stateless/00018_distinct_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference b/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference
rename to tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql b/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql
rename to tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00020_sorting_arrays.reference b/tests/queries/0_stateless/00020_sorting_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00020_sorting_arrays.reference
rename to tests/queries/0_stateless/00020_sorting_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00020_sorting_arrays.sql b/tests/queries/0_stateless/00020_sorting_arrays.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00020_sorting_arrays.sql
rename to tests/queries/0_stateless/00020_sorting_arrays.sql
diff --git a/dbms/tests/queries/0_stateless/00021_sorting_arrays.reference b/tests/queries/0_stateless/00021_sorting_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00021_sorting_arrays.reference
rename to tests/queries/0_stateless/00021_sorting_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00021_sorting_arrays.sql b/tests/queries/0_stateless/00021_sorting_arrays.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00021_sorting_arrays.sql
rename to tests/queries/0_stateless/00021_sorting_arrays.sql
diff --git a/dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.reference b/tests/queries/0_stateless/00022_func_higher_order_and_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.reference
rename to tests/queries/0_stateless/00022_func_higher_order_and_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.sql b/tests/queries/0_stateless/00022_func_higher_order_and_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.sql
rename to tests/queries/0_stateless/00022_func_higher_order_and_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.reference b/tests/queries/0_stateless/00023_agg_select_agg_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.reference
rename to tests/queries/0_stateless/00023_agg_select_agg_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.sql b/tests/queries/0_stateless/00023_agg_select_agg_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.sql
rename to tests/queries/0_stateless/00023_agg_select_agg_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference b/tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference
rename to tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql b/tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql
rename to tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference b/tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference
rename to tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference
diff --git a/dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql b/tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql
rename to tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql
diff --git a/dbms/tests/queries/0_stateless/00026_shard_something_distributed.reference b/tests/queries/0_stateless/00026_shard_something_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00026_shard_something_distributed.reference
rename to tests/queries/0_stateless/00026_shard_something_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00026_shard_something_distributed.sql b/tests/queries/0_stateless/00026_shard_something_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00026_shard_something_distributed.sql
rename to tests/queries/0_stateless/00026_shard_something_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00027_distinct_and_order_by.reference b/tests/queries/0_stateless/00027_distinct_and_order_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00027_distinct_and_order_by.reference
rename to tests/queries/0_stateless/00027_distinct_and_order_by.reference
diff --git a/dbms/tests/queries/0_stateless/00027_distinct_and_order_by.sql b/tests/queries/0_stateless/00027_distinct_and_order_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00027_distinct_and_order_by.sql
rename to tests/queries/0_stateless/00027_distinct_and_order_by.sql
diff --git a/dbms/tests/queries/0_stateless/00027_simple_argMinArray.reference b/tests/queries/0_stateless/00027_simple_argMinArray.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00027_simple_argMinArray.reference
rename to tests/queries/0_stateless/00027_simple_argMinArray.reference
diff --git a/dbms/tests/queries/0_stateless/00027_simple_argMinArray.sql b/tests/queries/0_stateless/00027_simple_argMinArray.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00027_simple_argMinArray.sql
rename to tests/queries/0_stateless/00027_simple_argMinArray.sql
diff --git a/dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference b/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference
rename to tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql b/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql
rename to tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference
rename to tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference
diff --git a/dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh
rename to tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh
diff --git a/dbms/tests/queries/0_stateless/00030_alter_table.reference b/tests/queries/0_stateless/00030_alter_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00030_alter_table.reference
rename to tests/queries/0_stateless/00030_alter_table.reference
diff --git a/dbms/tests/queries/0_stateless/00030_alter_table.sql b/tests/queries/0_stateless/00030_alter_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00030_alter_table.sql
rename to tests/queries/0_stateless/00030_alter_table.sql
diff --git a/dbms/tests/queries/0_stateless/00031_parser_number.reference b/tests/queries/0_stateless/00031_parser_number.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00031_parser_number.reference
rename to tests/queries/0_stateless/00031_parser_number.reference
diff --git a/dbms/tests/queries/0_stateless/00031_parser_number.sql b/tests/queries/0_stateless/00031_parser_number.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00031_parser_number.sql
rename to tests/queries/0_stateless/00031_parser_number.sql
diff --git a/dbms/tests/queries/0_stateless/00032_fixed_string_to_string.reference b/tests/queries/0_stateless/00032_fixed_string_to_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00032_fixed_string_to_string.reference
rename to tests/queries/0_stateless/00032_fixed_string_to_string.reference
diff --git a/dbms/tests/queries/0_stateless/00032_fixed_string_to_string.sql b/tests/queries/0_stateless/00032_fixed_string_to_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00032_fixed_string_to_string.sql
rename to tests/queries/0_stateless/00032_fixed_string_to_string.sql
diff --git a/dbms/tests/queries/0_stateless/00033_fixed_string_to_string.reference b/tests/queries/0_stateless/00033_fixed_string_to_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00033_fixed_string_to_string.reference
rename to tests/queries/0_stateless/00033_fixed_string_to_string.reference
diff --git a/dbms/tests/queries/0_stateless/00033_fixed_string_to_string.sql b/tests/queries/0_stateless/00033_fixed_string_to_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00033_fixed_string_to_string.sql
rename to tests/queries/0_stateless/00033_fixed_string_to_string.sql
diff --git a/dbms/tests/queries/0_stateless/00034_fixed_string_to_number.reference b/tests/queries/0_stateless/00034_fixed_string_to_number.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00034_fixed_string_to_number.reference
rename to tests/queries/0_stateless/00034_fixed_string_to_number.reference
diff --git a/dbms/tests/queries/0_stateless/00034_fixed_string_to_number.sql b/tests/queries/0_stateless/00034_fixed_string_to_number.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00034_fixed_string_to_number.sql
rename to tests/queries/0_stateless/00034_fixed_string_to_number.sql
diff --git a/dbms/tests/queries/0_stateless/00035_function_array_return_type.reference b/tests/queries/0_stateless/00035_function_array_return_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00035_function_array_return_type.reference
rename to tests/queries/0_stateless/00035_function_array_return_type.reference
diff --git a/dbms/tests/queries/0_stateless/00035_function_array_return_type.sql b/tests/queries/0_stateless/00035_function_array_return_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00035_function_array_return_type.sql
rename to tests/queries/0_stateless/00035_function_array_return_type.sql
diff --git a/dbms/tests/queries/0_stateless/00036_array_element.reference b/tests/queries/0_stateless/00036_array_element.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00036_array_element.reference
rename to tests/queries/0_stateless/00036_array_element.reference
diff --git a/dbms/tests/queries/0_stateless/00036_array_element.sql b/tests/queries/0_stateless/00036_array_element.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00036_array_element.sql
rename to tests/queries/0_stateless/00036_array_element.sql
diff --git a/dbms/tests/queries/0_stateless/00037_totals_limit.reference b/tests/queries/0_stateless/00037_totals_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00037_totals_limit.reference
rename to tests/queries/0_stateless/00037_totals_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00037_totals_limit.sql b/tests/queries/0_stateless/00037_totals_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00037_totals_limit.sql
rename to tests/queries/0_stateless/00037_totals_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00038_totals_limit.reference b/tests/queries/0_stateless/00038_totals_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00038_totals_limit.reference
rename to tests/queries/0_stateless/00038_totals_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00038_totals_limit.sql b/tests/queries/0_stateless/00038_totals_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00038_totals_limit.sql
rename to tests/queries/0_stateless/00038_totals_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00039_inserts_through_http.reference b/tests/queries/0_stateless/00039_inserts_through_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00039_inserts_through_http.reference
rename to tests/queries/0_stateless/00039_inserts_through_http.reference
diff --git a/dbms/tests/queries/0_stateless/00039_inserts_through_http.sh b/tests/queries/0_stateless/00039_inserts_through_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00039_inserts_through_http.sh
rename to tests/queries/0_stateless/00039_inserts_through_http.sh
diff --git a/dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.reference b/tests/queries/0_stateless/00040_array_enumerate_uniq.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.reference
rename to tests/queries/0_stateless/00040_array_enumerate_uniq.reference
diff --git a/dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.sql b/tests/queries/0_stateless/00040_array_enumerate_uniq.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.sql
rename to tests/queries/0_stateless/00040_array_enumerate_uniq.sql
diff --git a/dbms/tests/queries/0_stateless/00041_aggregation_remap.reference b/tests/queries/0_stateless/00041_aggregation_remap.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00041_aggregation_remap.reference
rename to tests/queries/0_stateless/00041_aggregation_remap.reference
diff --git a/dbms/tests/queries/0_stateless/00041_aggregation_remap.sql b/tests/queries/0_stateless/00041_aggregation_remap.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00041_aggregation_remap.sql
rename to tests/queries/0_stateless/00041_aggregation_remap.sql
diff --git a/dbms/tests/queries/0_stateless/00041_big_array_join.reference b/tests/queries/0_stateless/00041_big_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00041_big_array_join.reference
rename to tests/queries/0_stateless/00041_big_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00041_big_array_join.sql b/tests/queries/0_stateless/00041_big_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00041_big_array_join.sql
rename to tests/queries/0_stateless/00041_big_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00042_set.reference b/tests/queries/0_stateless/00042_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00042_set.reference
rename to tests/queries/0_stateless/00042_set.reference
diff --git a/dbms/tests/queries/0_stateless/00042_set.sql b/tests/queries/0_stateless/00042_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00042_set.sql
rename to tests/queries/0_stateless/00042_set.sql
diff --git a/dbms/tests/queries/0_stateless/00043_summing_empty_part.reference b/tests/queries/0_stateless/00043_summing_empty_part.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00043_summing_empty_part.reference
rename to tests/queries/0_stateless/00043_summing_empty_part.reference
diff --git a/dbms/tests/queries/0_stateless/00043_summing_empty_part.sql b/tests/queries/0_stateless/00043_summing_empty_part.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00043_summing_empty_part.sql
rename to tests/queries/0_stateless/00043_summing_empty_part.sql
diff --git a/dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.reference b/tests/queries/0_stateless/00044_sorting_by_string_descending.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.reference
rename to tests/queries/0_stateless/00044_sorting_by_string_descending.reference
diff --git a/dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.sql b/tests/queries/0_stateless/00044_sorting_by_string_descending.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.sql
rename to tests/queries/0_stateless/00044_sorting_by_string_descending.sql
diff --git a/dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference b/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference
rename to tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference
diff --git a/dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql b/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql
rename to tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql
diff --git a/dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.reference b/tests/queries/0_stateless/00046_stored_aggregates_simple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.reference
rename to tests/queries/0_stateless/00046_stored_aggregates_simple.reference
diff --git a/dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.sql b/tests/queries/0_stateless/00046_stored_aggregates_simple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.sql
rename to tests/queries/0_stateless/00046_stored_aggregates_simple.sql
diff --git a/dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.reference b/tests/queries/0_stateless/00047_stored_aggregates_complex.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.reference
rename to tests/queries/0_stateless/00047_stored_aggregates_complex.reference
diff --git a/dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.sql b/tests/queries/0_stateless/00047_stored_aggregates_complex.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.sql
rename to tests/queries/0_stateless/00047_stored_aggregates_complex.sql
diff --git a/dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference b/tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference
rename to tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql b/tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql
rename to tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference b/tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference
rename to tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql b/tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql
rename to tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00049_any_left_join.reference b/tests/queries/0_stateless/00049_any_left_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00049_any_left_join.reference
rename to tests/queries/0_stateless/00049_any_left_join.reference
diff --git a/dbms/tests/queries/0_stateless/00049_any_left_join.sql b/tests/queries/0_stateless/00049_any_left_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00049_any_left_join.sql
rename to tests/queries/0_stateless/00049_any_left_join.sql
diff --git a/dbms/tests/queries/0_stateless/00050_any_left_join.reference b/tests/queries/0_stateless/00050_any_left_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00050_any_left_join.reference
rename to tests/queries/0_stateless/00050_any_left_join.reference
diff --git a/dbms/tests/queries/0_stateless/00050_any_left_join.sql b/tests/queries/0_stateless/00050_any_left_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00050_any_left_join.sql
rename to tests/queries/0_stateless/00050_any_left_join.sql
diff --git a/dbms/tests/queries/0_stateless/00051_any_inner_join.reference b/tests/queries/0_stateless/00051_any_inner_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00051_any_inner_join.reference
rename to tests/queries/0_stateless/00051_any_inner_join.reference
diff --git a/dbms/tests/queries/0_stateless/00051_any_inner_join.sql b/tests/queries/0_stateless/00051_any_inner_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00051_any_inner_join.sql
rename to tests/queries/0_stateless/00051_any_inner_join.sql
diff --git a/dbms/tests/queries/0_stateless/00052_all_left_join.reference b/tests/queries/0_stateless/00052_all_left_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00052_all_left_join.reference
rename to tests/queries/0_stateless/00052_all_left_join.reference
diff --git a/dbms/tests/queries/0_stateless/00052_all_left_join.sql b/tests/queries/0_stateless/00052_all_left_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00052_all_left_join.sql
rename to tests/queries/0_stateless/00052_all_left_join.sql
diff --git a/dbms/tests/queries/0_stateless/00053_all_inner_join.reference b/tests/queries/0_stateless/00053_all_inner_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00053_all_inner_join.reference
rename to tests/queries/0_stateless/00053_all_inner_join.reference
diff --git a/dbms/tests/queries/0_stateless/00053_all_inner_join.sql b/tests/queries/0_stateless/00053_all_inner_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00053_all_inner_join.sql
rename to tests/queries/0_stateless/00053_all_inner_join.sql
diff --git a/dbms/tests/queries/0_stateless/00054_join_string.reference b/tests/queries/0_stateless/00054_join_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00054_join_string.reference
rename to tests/queries/0_stateless/00054_join_string.reference
diff --git a/dbms/tests/queries/0_stateless/00054_join_string.sql b/tests/queries/0_stateless/00054_join_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00054_join_string.sql
rename to tests/queries/0_stateless/00054_join_string.sql
diff --git a/dbms/tests/queries/0_stateless/00055_join_two_numbers.reference b/tests/queries/0_stateless/00055_join_two_numbers.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00055_join_two_numbers.reference
rename to tests/queries/0_stateless/00055_join_two_numbers.reference
diff --git a/dbms/tests/queries/0_stateless/00055_join_two_numbers.sql b/tests/queries/0_stateless/00055_join_two_numbers.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00055_join_two_numbers.sql
rename to tests/queries/0_stateless/00055_join_two_numbers.sql
diff --git a/dbms/tests/queries/0_stateless/00056_join_number_string.reference b/tests/queries/0_stateless/00056_join_number_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00056_join_number_string.reference
rename to tests/queries/0_stateless/00056_join_number_string.reference
diff --git a/dbms/tests/queries/0_stateless/00056_join_number_string.sql b/tests/queries/0_stateless/00056_join_number_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00056_join_number_string.sql
rename to tests/queries/0_stateless/00056_join_number_string.sql
diff --git a/dbms/tests/queries/0_stateless/00057_join_aliases.reference b/tests/queries/0_stateless/00057_join_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00057_join_aliases.reference
rename to tests/queries/0_stateless/00057_join_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00057_join_aliases.sql b/tests/queries/0_stateless/00057_join_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00057_join_aliases.sql
rename to tests/queries/0_stateless/00057_join_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00059_shard_global_in.reference b/tests/queries/0_stateless/00059_shard_global_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00059_shard_global_in.reference
rename to tests/queries/0_stateless/00059_shard_global_in.reference
diff --git a/dbms/tests/queries/0_stateless/00059_shard_global_in.sql b/tests/queries/0_stateless/00059_shard_global_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00059_shard_global_in.sql
rename to tests/queries/0_stateless/00059_shard_global_in.sql
diff --git a/dbms/tests/queries/0_stateless/00060_date_lut.reference b/tests/queries/0_stateless/00060_date_lut.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00060_date_lut.reference
rename to tests/queries/0_stateless/00060_date_lut.reference
diff --git a/dbms/tests/queries/0_stateless/00060_date_lut.sql b/tests/queries/0_stateless/00060_date_lut.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00060_date_lut.sql
rename to tests/queries/0_stateless/00060_date_lut.sql
diff --git a/tests/queries/0_stateless/00061_merge_tree_alter.reference b/tests/queries/0_stateless/00061_merge_tree_alter.reference
new file mode 100644
index 00000000000..b609bc257f1
--- /dev/null
+++ b/tests/queries/0_stateless/00061_merge_tree_alter.reference
@@ -0,0 +1,101 @@
+d Date
+k UInt64
+i32 Int32
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 10 42
+d Date
+k UInt64
+i32 Int32
+n.ui8 Array(UInt8)
+n.s Array(String)
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 8 40 [1,2,3] ['12','13','14']
+2015-01-01 10 42 [] []
+d Date
+k UInt64
+i32 Int32
+n.ui8 Array(UInt8)
+n.s Array(String)
+n.d Array(Date)
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
+2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
+2015-01-01 10 42 [] [] []
+d Date
+k UInt64
+i32 Int32
+n.ui8 Array(UInt8)
+n.s Array(String)
+n.d Array(Date)
+s String DEFAULT \'0\'
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500
+2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0
+2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0
+2015-01-01 10 42 [] [] [] 0
+d Date
+k UInt64
+i32 Int32
+n.ui8 Array(UInt8)
+n.s Array(String)
+s Int64 DEFAULT \'0\'
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500
+2015-01-01 7 39 [10,20,30] ['120','130','140'] 0
+2015-01-01 8 40 [1,2,3] ['12','13','14'] 0
+2015-01-01 10 42 [] [] 0
+d Date
+k UInt64
+i32 Int32
+n.ui8 Array(UInt8)
+n.s Array(String)
+s UInt32 DEFAULT \'0\'
+n.d Array(Date)
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
+2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
+2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
+2015-01-01 10 42 [] [] 0 []
+2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
+2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
+2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
+2015-01-01 10 42 [] [] 0 []
+d Date
+k UInt64
+i32 Int32
+n.s Array(String)
+s UInt32 DEFAULT \'0\'
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 6 38 ['asd','qwe','qwe'] 100500
+2015-01-01 7 39 ['120','130','140'] 0
+2015-01-01 8 40 ['12','13','14'] 0
+2015-01-01 10 42 [] 0
+d Date
+k UInt64
+i32 Int32
+s UInt32 DEFAULT \'0\'
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 6 38 100500
+2015-01-01 7 39 0
+2015-01-01 8 40 0
+2015-01-01 10 42 0
+d Date
+k UInt64
+i32 Int32
+s UInt32 DEFAULT \'0\'
+n.s Array(String)
+n.d Array(Date)
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 6 38 100500 [] []
+2015-01-01 7 39 0 [] []
+2015-01-01 8 40 0 [] []
+2015-01-01 10 42 0 [] []
+d Date
+k UInt64
+i32 Int32
+s UInt32 DEFAULT \'0\'
+CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192)
+2015-01-01 6 38 100500
+2015-01-01 7 39 0
+2015-01-01 8 40 0
+2015-01-01 10 42 0
diff --git a/dbms/tests/queries/0_stateless/00061_merge_tree_alter.sql b/tests/queries/0_stateless/00061_merge_tree_alter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00061_merge_tree_alter.sql
rename to tests/queries/0_stateless/00061_merge_tree_alter.sql
diff --git a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference
new file mode 100644
index 00000000000..fa5e65d2d60
--- /dev/null
+++ b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference
@@ -0,0 +1,216 @@
+d Date
+k UInt64
+i32 Int32
+CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+d Date
+k UInt64
+i32 Int32
+CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+2015-01-01 10 42
+d Date
+k UInt64
+i32 Int32
+dt
DateTime +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 9 41 1992-01-01 08:00:00 +2015-01-01 10 42 0000-00-00 00:00:00 +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] +2015-01-01 9 41 1992-01-01 08:00:00 [] [] +2015-01-01 10 42 0000-00-00 00:00:00 [] [] +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +n.d Array(Date) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +n.d Array(Date) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] +2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] +2015-01-01 9 41 1992-01-01 08:00:00 [] [] [] +2015-01-01 10 42 0000-00-00 00:00:00 [] [] [] +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +n.d Array(Date) +s String DEFAULT \'0\' +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +n.d Array(Date) +s String DEFAULT \'0\' +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 +2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 +2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0 +2015-01-01 9 41 1992-01-01 08:00:00 [] [] [] 0 +2015-01-01 10 42 0000-00-00 00:00:00 [] [] [] 0 
+d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +s Int64 DEFAULT \'0\' +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +s Int64 DEFAULT \'0\' +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 +2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 +2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 +2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0 +2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0 +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +s UInt32 DEFAULT \'0\' +n.d Array(Date) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +n.ui8 Array(UInt8) +n.s Array(String) +s UInt32 DEFAULT \'0\' +n.d Array(Date) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] +2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] +2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00'] +2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0 [] +2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0 [] +d Date +k UInt64 +i32 Int32 +dt DateTime +n.s Array(String) +s UInt32 DEFAULT \'0\' +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +n.s Array(String) +s UInt32 DEFAULT \'0\' +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500 +2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0 +2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0 +2015-01-01 9 41 1992-01-01 08:00:00 [] 0 +2015-01-01 10 42 0000-00-00 00:00:00 [] 0 +d Date +k UInt64 +i32 Int32 +dt DateTime +s UInt32 DEFAULT \'0\' +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +s UInt32 DEFAULT \'0\' 
+CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 13:26:50 100500 +2015-01-01 7 39 2014-07-14 13:26:50 0 +2015-01-01 8 40 2012-12-12 12:12:12 0 +2015-01-01 9 41 1992-01-01 08:00:00 0 +2015-01-01 10 42 0000-00-00 00:00:00 0 +d Date +k UInt64 +i32 Int32 +dt DateTime +s UInt32 DEFAULT \'0\' +n.s Array(String) +n.d Array(Date) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +s UInt32 DEFAULT \'0\' +n.s Array(String) +n.d Array(Date) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 13:26:50 100500 [] [] +2015-01-01 7 39 2014-07-14 13:26:50 0 [] [] +2015-01-01 8 40 2012-12-12 12:12:12 0 [] [] +2015-01-01 9 41 1992-01-01 08:00:00 0 [] [] +2015-01-01 10 42 0000-00-00 00:00:00 0 [] [] +d Date +k UInt64 +i32 Int32 +dt DateTime +s UInt32 DEFAULT \'0\' +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt DateTime +s UInt32 DEFAULT \'0\' +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 13:26:50 100500 +2015-01-01 7 39 2014-07-14 13:26:50 0 +2015-01-01 8 40 2012-12-12 12:12:12 0 +2015-01-01 9 41 1992-01-01 08:00:00 0 +2015-01-01 10 42 0000-00-00 00:00:00 0 +d Date +k UInt64 +i32 Int32 +dt Date +s DateTime DEFAULT \'0000-00-00 00:00:00\' +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` Date, \n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +d Date +k UInt64 +i32 Int32 +dt Date +s DateTime DEFAULT \'0000-00-00 00:00:00\' +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` Date, \n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00 +2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00 +2015-01-01 8 40 2012-12-12 0000-00-00 00:00:00 +2015-01-01 9 41 1992-01-01 0000-00-00 00:00:00 +2015-01-01 10 42 0000-00-00 0000-00-00 00:00:00 diff --git a/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql rename to tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00063_check_query.reference b/tests/queries/0_stateless/00063_check_query.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00063_check_query.reference rename to tests/queries/0_stateless/00063_check_query.reference diff --git a/dbms/tests/queries/0_stateless/00063_check_query.sql b/tests/queries/0_stateless/00063_check_query.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00063_check_query.sql rename to tests/queries/0_stateless/00063_check_query.sql diff --git a/dbms/tests/queries/0_stateless/00064_negate_bug.reference b/tests/queries/0_stateless/00064_negate_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00064_negate_bug.reference rename to tests/queries/0_stateless/00064_negate_bug.reference diff --git a/dbms/tests/queries/0_stateless/00064_negate_bug.sql b/tests/queries/0_stateless/00064_negate_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00064_negate_bug.sql rename to tests/queries/0_stateless/00064_negate_bug.sql diff --git a/dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.reference b/tests/queries/0_stateless/00065_shard_float_literals_formatting.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.reference rename to tests/queries/0_stateless/00065_shard_float_literals_formatting.reference diff --git a/dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.sql b/tests/queries/0_stateless/00065_shard_float_literals_formatting.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.sql rename to tests/queries/0_stateless/00065_shard_float_literals_formatting.sql diff --git a/dbms/tests/queries/0_stateless/00066_group_by_in.reference b/tests/queries/0_stateless/00066_group_by_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00066_group_by_in.reference rename to tests/queries/0_stateless/00066_group_by_in.reference diff --git a/dbms/tests/queries/0_stateless/00066_group_by_in.sql b/tests/queries/0_stateless/00066_group_by_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00066_group_by_in.sql rename to tests/queries/0_stateless/00066_group_by_in.sql diff --git a/dbms/tests/queries/0_stateless/00067_replicate_segfault.reference b/tests/queries/0_stateless/00067_replicate_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00067_replicate_segfault.reference rename to tests/queries/0_stateless/00067_replicate_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00067_replicate_segfault.sql b/tests/queries/0_stateless/00067_replicate_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00067_replicate_segfault.sql rename to tests/queries/0_stateless/00067_replicate_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference b/tests/queries/0_stateless/00068_empty_tiny_log.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference rename to tests/queries/0_stateless/00068_empty_tiny_log.reference diff --git a/dbms/tests/queries/0_stateless/00068_empty_tiny_log.sql b/tests/queries/0_stateless/00068_empty_tiny_log.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00068_empty_tiny_log.sql rename to tests/queries/0_stateless/00068_empty_tiny_log.sql diff --git a/dbms/tests/queries/0_stateless/00069_date_arithmetic.reference b/tests/queries/0_stateless/00069_date_arithmetic.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00069_date_arithmetic.reference rename to tests/queries/0_stateless/00069_date_arithmetic.reference diff --git a/dbms/tests/queries/0_stateless/00069_date_arithmetic.sql b/tests/queries/0_stateless/00069_date_arithmetic.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00069_date_arithmetic.sql rename to tests/queries/0_stateless/00069_date_arithmetic.sql diff --git a/dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.reference b/tests/queries/0_stateless/00070_insert_fewer_columns_http.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.reference rename to tests/queries/0_stateless/00070_insert_fewer_columns_http.reference diff --git a/dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh b/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh rename to tests/queries/0_stateless/00070_insert_fewer_columns_http.sh diff --git a/dbms/tests/queries/0_stateless/00071_insert_fewer_columns.reference b/tests/queries/0_stateless/00071_insert_fewer_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00071_insert_fewer_columns.reference rename to tests/queries/0_stateless/00071_insert_fewer_columns.reference diff --git a/dbms/tests/queries/0_stateless/00071_insert_fewer_columns.sql b/tests/queries/0_stateless/00071_insert_fewer_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00071_insert_fewer_columns.sql rename to tests/queries/0_stateless/00071_insert_fewer_columns.sql diff --git a/dbms/tests/queries/0_stateless/00072_in_types.reference b/tests/queries/0_stateless/00072_in_types.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00072_in_types.reference rename to tests/queries/0_stateless/00072_in_types.reference diff --git a/dbms/tests/queries/0_stateless/00072_in_types.sql b/tests/queries/0_stateless/00072_in_types.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00072_in_types.sql rename to tests/queries/0_stateless/00072_in_types.sql diff --git a/dbms/tests/queries/0_stateless/00281_compile_sizeof_packed.re b/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00281_compile_sizeof_packed.re rename to tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference diff --git a/dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql b/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql rename to tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql diff --git a/dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference b/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference rename to tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference diff --git a/dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql b/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql rename to tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql diff --git a/dbms/tests/queries/0_stateless/00076_ip_coding_functions.reference b/tests/queries/0_stateless/00076_ip_coding_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00076_ip_coding_functions.reference rename to tests/queries/0_stateless/00076_ip_coding_functions.reference diff --git a/dbms/tests/queries/0_stateless/00076_ip_coding_functions.sql b/tests/queries/0_stateless/00076_ip_coding_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00076_ip_coding_functions.sql rename to tests/queries/0_stateless/00076_ip_coding_functions.sql diff --git a/dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference b/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference rename to tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference diff --git a/dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql b/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql rename to tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql diff --git a/dbms/tests/queries/0_stateless/00078_string_concat.reference b/tests/queries/0_stateless/00078_string_concat.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00078_string_concat.reference rename to tests/queries/0_stateless/00078_string_concat.reference diff --git a/dbms/tests/queries/0_stateless/00078_string_concat.sql b/tests/queries/0_stateless/00078_string_concat.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00078_string_concat.sql rename to tests/queries/0_stateless/00078_string_concat.sql diff --git a/dbms/tests/queries/0_stateless/00079_defaulted_columns.reference b/tests/queries/0_stateless/00079_defaulted_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00079_defaulted_columns.reference rename to tests/queries/0_stateless/00079_defaulted_columns.reference diff --git a/dbms/tests/queries/0_stateless/00079_defaulted_columns.sql b/tests/queries/0_stateless/00079_defaulted_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00079_defaulted_columns.sql rename to tests/queries/0_stateless/00079_defaulted_columns.sql diff --git a/dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.reference b/tests/queries/0_stateless/00080_show_tables_and_system_tables.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.reference rename to tests/queries/0_stateless/00080_show_tables_and_system_tables.reference diff --git a/dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.sql b/tests/queries/0_stateless/00080_show_tables_and_system_tables.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.sql rename to tests/queries/0_stateless/00080_show_tables_and_system_tables.sql diff --git a/dbms/tests/queries/0_stateless/00081_int_div_or_zero.reference b/tests/queries/0_stateless/00081_int_div_or_zero.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00081_int_div_or_zero.reference rename to tests/queries/0_stateless/00081_int_div_or_zero.reference diff --git a/dbms/tests/queries/0_stateless/00081_int_div_or_zero.sql b/tests/queries/0_stateless/00081_int_div_or_zero.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00081_int_div_or_zero.sql rename to tests/queries/0_stateless/00081_int_div_or_zero.sql diff --git a/dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference b/tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference rename to tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference diff --git a/dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql b/tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql rename to tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql diff --git a/dbms/tests/queries/0_stateless/00374_any_last_if_merge.reference b/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00374_any_last_if_merge.reference rename to tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql b/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql rename to tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00084_summing_merge_tree.reference b/tests/queries/0_stateless/00084_summing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00084_summing_merge_tree.reference rename to tests/queries/0_stateless/00084_summing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00084_summing_merge_tree.sql b/tests/queries/0_stateless/00084_summing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00084_summing_merge_tree.sql rename to tests/queries/0_stateless/00084_summing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference b/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference rename to tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference diff --git a/dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql b/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql rename to tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql diff --git a/dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference b/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference rename to tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference diff --git 
a/dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql b/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql rename to tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference b/tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference rename to tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql b/tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql rename to tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00087_math_functions.reference b/tests/queries/0_stateless/00087_math_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00087_math_functions.reference rename to tests/queries/0_stateless/00087_math_functions.reference diff --git a/dbms/tests/queries/0_stateless/00087_math_functions.sql b/tests/queries/0_stateless/00087_math_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00087_math_functions.sql rename to tests/queries/0_stateless/00087_math_functions.sql diff --git a/dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference b/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference rename to tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference diff --git a/dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql b/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql rename to tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql diff --git a/dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference b/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference rename to tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference diff --git a/dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql b/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql rename to tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql diff --git a/dbms/tests/queries/0_stateless/00090_union_race_conditions_1.reference b/tests/queries/0_stateless/00090_union_race_conditions_1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00090_union_race_conditions_1.reference rename to tests/queries/0_stateless/00090_union_race_conditions_1.reference diff --git a/dbms/tests/queries/0_stateless/00090_union_race_conditions_1.sh b/tests/queries/0_stateless/00090_union_race_conditions_1.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00090_union_race_conditions_1.sh rename to 
tests/queries/0_stateless/00090_union_race_conditions_1.sh diff --git a/dbms/tests/queries/0_stateless/00091_union_race_conditions_2.reference b/tests/queries/0_stateless/00091_union_race_conditions_2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00091_union_race_conditions_2.reference rename to tests/queries/0_stateless/00091_union_race_conditions_2.reference diff --git a/dbms/tests/queries/0_stateless/00091_union_race_conditions_2.sh b/tests/queries/0_stateless/00091_union_race_conditions_2.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00091_union_race_conditions_2.sh rename to tests/queries/0_stateless/00091_union_race_conditions_2.sh diff --git a/dbms/tests/queries/0_stateless/00092_union_race_conditions_3.reference b/tests/queries/0_stateless/00092_union_race_conditions_3.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00092_union_race_conditions_3.reference rename to tests/queries/0_stateless/00092_union_race_conditions_3.reference diff --git a/dbms/tests/queries/0_stateless/00092_union_race_conditions_3.sh b/tests/queries/0_stateless/00092_union_race_conditions_3.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00092_union_race_conditions_3.sh rename to tests/queries/0_stateless/00092_union_race_conditions_3.sh diff --git a/dbms/tests/queries/0_stateless/00093_union_race_conditions_4.reference b/tests/queries/0_stateless/00093_union_race_conditions_4.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00093_union_race_conditions_4.reference rename to tests/queries/0_stateless/00093_union_race_conditions_4.reference diff --git a/dbms/tests/queries/0_stateless/00093_union_race_conditions_4.sh b/tests/queries/0_stateless/00093_union_race_conditions_4.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00093_union_race_conditions_4.sh rename to tests/queries/0_stateless/00093_union_race_conditions_4.sh diff --git a/dbms/tests/queries/0_stateless/00094_union_race_conditions_5.reference b/tests/queries/0_stateless/00094_union_race_conditions_5.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00094_union_race_conditions_5.reference rename to tests/queries/0_stateless/00094_union_race_conditions_5.reference diff --git a/dbms/tests/queries/0_stateless/00094_union_race_conditions_5.sh b/tests/queries/0_stateless/00094_union_race_conditions_5.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00094_union_race_conditions_5.sh rename to tests/queries/0_stateless/00094_union_race_conditions_5.sh diff --git a/dbms/tests/queries/0_stateless/00096_aggregation_min_if.reference b/tests/queries/0_stateless/00096_aggregation_min_if.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00096_aggregation_min_if.reference rename to tests/queries/0_stateless/00096_aggregation_min_if.reference diff --git a/dbms/tests/queries/0_stateless/00096_aggregation_min_if.sql b/tests/queries/0_stateless/00096_aggregation_min_if.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00096_aggregation_min_if.sql rename to tests/queries/0_stateless/00096_aggregation_min_if.sql diff --git a/dbms/tests/queries/0_stateless/00386_long_in_pk.reference b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00386_long_in_pk.reference rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference diff --git 
a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh diff --git a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference diff --git a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh diff --git a/dbms/tests/queries/0_stateless/00098_1_union_all.reference b/tests/queries/0_stateless/00098_1_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_1_union_all.reference rename to tests/queries/0_stateless/00098_1_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_1_union_all.sql b/tests/queries/0_stateless/00098_1_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_1_union_all.sql rename to tests/queries/0_stateless/00098_1_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_2_union_all.reference b/tests/queries/0_stateless/00098_2_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_2_union_all.reference rename to tests/queries/0_stateless/00098_2_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_2_union_all.sql b/tests/queries/0_stateless/00098_2_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_2_union_all.sql rename to tests/queries/0_stateless/00098_2_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_3_union_all.reference b/tests/queries/0_stateless/00098_3_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_3_union_all.reference rename to tests/queries/0_stateless/00098_3_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_3_union_all.sql b/tests/queries/0_stateless/00098_3_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_3_union_all.sql rename to tests/queries/0_stateless/00098_3_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_4_union_all.reference b/tests/queries/0_stateless/00098_4_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_4_union_all.reference rename to tests/queries/0_stateless/00098_4_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_4_union_all.sql b/tests/queries/0_stateless/00098_4_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_4_union_all.sql rename to tests/queries/0_stateless/00098_4_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_5_union_all.reference b/tests/queries/0_stateless/00098_5_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_5_union_all.reference rename to 
tests/queries/0_stateless/00098_5_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_5_union_all.sql b/tests/queries/0_stateless/00098_5_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_5_union_all.sql rename to tests/queries/0_stateless/00098_5_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_6_union_all.reference b/tests/queries/0_stateless/00098_6_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_6_union_all.reference rename to tests/queries/0_stateless/00098_6_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_6_union_all.sql b/tests/queries/0_stateless/00098_6_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_6_union_all.sql rename to tests/queries/0_stateless/00098_6_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_7_union_all.reference b/tests/queries/0_stateless/00098_7_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_7_union_all.reference rename to tests/queries/0_stateless/00098_7_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_7_union_all.sql b/tests/queries/0_stateless/00098_7_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_7_union_all.sql rename to tests/queries/0_stateless/00098_7_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_8_union_all.reference b/tests/queries/0_stateless/00098_8_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_8_union_all.reference rename to tests/queries/0_stateless/00098_8_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_8_union_all.sql b/tests/queries/0_stateless/00098_8_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_8_union_all.sql rename to tests/queries/0_stateless/00098_8_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_9_union_all.reference b/tests/queries/0_stateless/00098_9_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_9_union_all.reference rename to tests/queries/0_stateless/00098_9_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_9_union_all.sql b/tests/queries/0_stateless/00098_9_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_9_union_all.sql rename to tests/queries/0_stateless/00098_9_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_a_union_all.reference b/tests/queries/0_stateless/00098_a_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_a_union_all.reference rename to tests/queries/0_stateless/00098_a_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_a_union_all.sql b/tests/queries/0_stateless/00098_a_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_a_union_all.sql rename to tests/queries/0_stateless/00098_a_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_b_union_all.reference b/tests/queries/0_stateless/00098_b_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_b_union_all.reference rename to tests/queries/0_stateless/00098_b_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_b_union_all.sql b/tests/queries/0_stateless/00098_b_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_b_union_all.sql rename to 
tests/queries/0_stateless/00098_b_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_c_union_all.reference b/tests/queries/0_stateless/00098_c_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_c_union_all.reference rename to tests/queries/0_stateless/00098_c_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_c_union_all.sql b/tests/queries/0_stateless/00098_c_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_c_union_all.sql rename to tests/queries/0_stateless/00098_c_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_d_union_all.reference b/tests/queries/0_stateless/00098_d_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_d_union_all.reference rename to tests/queries/0_stateless/00098_d_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_d_union_all.sql b/tests/queries/0_stateless/00098_d_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_d_union_all.sql rename to tests/queries/0_stateless/00098_d_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_e_union_all.reference b/tests/queries/0_stateless/00098_e_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_e_union_all.reference rename to tests/queries/0_stateless/00098_e_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_e_union_all.sql b/tests/queries/0_stateless/00098_e_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_e_union_all.sql rename to tests/queries/0_stateless/00098_e_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_f_union_all.reference b/tests/queries/0_stateless/00098_f_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_f_union_all.reference rename to tests/queries/0_stateless/00098_f_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_f_union_all.sql b/tests/queries/0_stateless/00098_f_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_f_union_all.sql rename to tests/queries/0_stateless/00098_f_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_g_union_all.reference b/tests/queries/0_stateless/00098_g_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_g_union_all.reference rename to tests/queries/0_stateless/00098_g_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_g_union_all.sql b/tests/queries/0_stateless/00098_g_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_g_union_all.sql rename to tests/queries/0_stateless/00098_g_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_h_union_all.reference b/tests/queries/0_stateless/00098_h_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_h_union_all.reference rename to tests/queries/0_stateless/00098_h_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_h_union_all.sql b/tests/queries/0_stateless/00098_h_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_h_union_all.sql rename to tests/queries/0_stateless/00098_h_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_j_union_all.reference b/tests/queries/0_stateless/00098_j_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_j_union_all.reference rename to 
tests/queries/0_stateless/00098_j_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_j_union_all.sql b/tests/queries/0_stateless/00098_j_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_j_union_all.sql rename to tests/queries/0_stateless/00098_j_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_k_union_all.reference b/tests/queries/0_stateless/00098_k_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_k_union_all.reference rename to tests/queries/0_stateless/00098_k_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_k_union_all.sql b/tests/queries/0_stateless/00098_k_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_k_union_all.sql rename to tests/queries/0_stateless/00098_k_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_l_union_all.reference b/tests/queries/0_stateless/00098_l_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_l_union_all.reference rename to tests/queries/0_stateless/00098_l_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_l_union_all.sql b/tests/queries/0_stateless/00098_l_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_l_union_all.sql rename to tests/queries/0_stateless/00098_l_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00098_shard_i_union_all.reference b/tests/queries/0_stateless/00098_shard_i_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00098_shard_i_union_all.reference rename to tests/queries/0_stateless/00098_shard_i_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00098_shard_i_union_all.sql b/tests/queries/0_stateless/00098_shard_i_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00098_shard_i_union_all.sql rename to tests/queries/0_stateless/00098_shard_i_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.reference b/tests/queries/0_stateless/00099_join_many_blocks_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.reference rename to tests/queries/0_stateless/00099_join_many_blocks_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.sql b/tests/queries/0_stateless/00099_join_many_blocks_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.sql rename to tests/queries/0_stateless/00099_join_many_blocks_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00100_subquery_table_identifier.reference b/tests/queries/0_stateless/00100_subquery_table_identifier.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00100_subquery_table_identifier.reference rename to tests/queries/0_stateless/00100_subquery_table_identifier.reference diff --git a/dbms/tests/queries/0_stateless/00100_subquery_table_identifier.sh b/tests/queries/0_stateless/00100_subquery_table_identifier.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00100_subquery_table_identifier.sh rename to tests/queries/0_stateless/00100_subquery_table_identifier.sh diff --git a/dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference b/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference similarity index 100% 
rename from dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference rename to tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference diff --git a/dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql b/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql rename to tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql diff --git a/dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.reference b/tests/queries/0_stateless/00102_insert_into_temporary_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.reference rename to tests/queries/0_stateless/00102_insert_into_temporary_table.reference diff --git a/dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.sql b/tests/queries/0_stateless/00102_insert_into_temporary_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.sql rename to tests/queries/0_stateless/00102_insert_into_temporary_table.sql diff --git a/dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference b/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference rename to tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference diff --git a/dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql b/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql rename to tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql diff --git a/dbms/tests/queries/0_stateless/00104_totals_having_mode.reference b/tests/queries/0_stateless/00104_totals_having_mode.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00104_totals_having_mode.reference rename to tests/queries/0_stateless/00104_totals_having_mode.reference diff --git a/dbms/tests/queries/0_stateless/00104_totals_having_mode.sql b/tests/queries/0_stateless/00104_totals_having_mode.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00104_totals_having_mode.sql rename to tests/queries/0_stateless/00104_totals_having_mode.sql diff --git a/dbms/tests/queries/0_stateless/00105_shard_collations.reference b/tests/queries/0_stateless/00105_shard_collations.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00105_shard_collations.reference rename to tests/queries/0_stateless/00105_shard_collations.reference diff --git a/dbms/tests/queries/0_stateless/00105_shard_collations.sql b/tests/queries/0_stateless/00105_shard_collations.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00105_shard_collations.sql rename to tests/queries/0_stateless/00105_shard_collations.sql diff --git a/dbms/tests/queries/0_stateless/00106_totals_after_having.reference b/tests/queries/0_stateless/00106_totals_after_having.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00106_totals_after_having.reference rename to tests/queries/0_stateless/00106_totals_after_having.reference diff --git 
a/dbms/tests/queries/0_stateless/00106_totals_after_having.sql b/tests/queries/0_stateless/00106_totals_after_having.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00106_totals_after_having.sql rename to tests/queries/0_stateless/00106_totals_after_having.sql diff --git a/dbms/tests/queries/0_stateless/00107_totals_after_having.reference b/tests/queries/0_stateless/00107_totals_after_having.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00107_totals_after_having.reference rename to tests/queries/0_stateless/00107_totals_after_having.reference diff --git a/dbms/tests/queries/0_stateless/00107_totals_after_having.sql b/tests/queries/0_stateless/00107_totals_after_having.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00107_totals_after_having.sql rename to tests/queries/0_stateless/00107_totals_after_having.sql diff --git a/dbms/tests/queries/0_stateless/00108_shard_totals_after_having.reference b/tests/queries/0_stateless/00108_shard_totals_after_having.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00108_shard_totals_after_having.reference rename to tests/queries/0_stateless/00108_shard_totals_after_having.reference diff --git a/dbms/tests/queries/0_stateless/00108_shard_totals_after_having.sql b/tests/queries/0_stateless/00108_shard_totals_after_having.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00108_shard_totals_after_having.sql rename to tests/queries/0_stateless/00108_shard_totals_after_having.sql diff --git a/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.reference b/tests/queries/0_stateless/00109_shard_totals_after_having.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00109_shard_totals_after_having.reference rename to tests/queries/0_stateless/00109_shard_totals_after_having.reference diff --git a/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql b/tests/queries/0_stateless/00109_shard_totals_after_having.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql rename to tests/queries/0_stateless/00109_shard_totals_after_having.sql diff --git a/dbms/tests/queries/0_stateless/00110_external_sort.reference b/tests/queries/0_stateless/00110_external_sort.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00110_external_sort.reference rename to tests/queries/0_stateless/00110_external_sort.reference diff --git a/dbms/tests/queries/0_stateless/00110_external_sort.sql b/tests/queries/0_stateless/00110_external_sort.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00110_external_sort.sql rename to tests/queries/0_stateless/00110_external_sort.sql diff --git a/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference b/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference rename to tests/queries/0_stateless/00111_shard_external_sort_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql rename to tests/queries/0_stateless/00111_shard_external_sort_distributed.sql diff --git 
a/dbms/tests/queries/0_stateless/00112_shard_totals_after_having.reference b/tests/queries/0_stateless/00112_shard_totals_after_having.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00112_shard_totals_after_having.reference rename to tests/queries/0_stateless/00112_shard_totals_after_having.reference diff --git a/dbms/tests/queries/0_stateless/00112_shard_totals_after_having.sql b/tests/queries/0_stateless/00112_shard_totals_after_having.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00112_shard_totals_after_having.sql rename to tests/queries/0_stateless/00112_shard_totals_after_having.sql diff --git a/dbms/tests/queries/0_stateless/00113_shard_group_array.reference b/tests/queries/0_stateless/00113_shard_group_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00113_shard_group_array.reference rename to tests/queries/0_stateless/00113_shard_group_array.reference diff --git a/dbms/tests/queries/0_stateless/00113_shard_group_array.sql b/tests/queries/0_stateless/00113_shard_group_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00113_shard_group_array.sql rename to tests/queries/0_stateless/00113_shard_group_array.sql diff --git a/dbms/tests/queries/0_stateless/00114_float_type_result_of_division.reference b/tests/queries/0_stateless/00114_float_type_result_of_division.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00114_float_type_result_of_division.reference rename to tests/queries/0_stateless/00114_float_type_result_of_division.reference diff --git a/dbms/tests/queries/0_stateless/00114_float_type_result_of_division.sql b/tests/queries/0_stateless/00114_float_type_result_of_division.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00114_float_type_result_of_division.sql rename to tests/queries/0_stateless/00114_float_type_result_of_division.sql diff --git a/dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.reference b/tests/queries/0_stateless/00115_shard_in_incomplete_result.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.reference rename to tests/queries/0_stateless/00115_shard_in_incomplete_result.reference diff --git a/dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh rename to tests/queries/0_stateless/00115_shard_in_incomplete_result.sh diff --git a/dbms/tests/queries/0_stateless/00116_storage_set.reference b/tests/queries/0_stateless/00116_storage_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00116_storage_set.reference rename to tests/queries/0_stateless/00116_storage_set.reference diff --git a/dbms/tests/queries/0_stateless/00116_storage_set.sql b/tests/queries/0_stateless/00116_storage_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00116_storage_set.sql rename to tests/queries/0_stateless/00116_storage_set.sql diff --git a/dbms/tests/queries/0_stateless/00117_parsing_arrays.reference b/tests/queries/0_stateless/00117_parsing_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00117_parsing_arrays.reference rename to tests/queries/0_stateless/00117_parsing_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00117_parsing_arrays.sql 
b/tests/queries/0_stateless/00117_parsing_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00117_parsing_arrays.sql rename to tests/queries/0_stateless/00117_parsing_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00118_storage_join.reference b/tests/queries/0_stateless/00118_storage_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00118_storage_join.reference rename to tests/queries/0_stateless/00118_storage_join.reference diff --git a/dbms/tests/queries/0_stateless/00118_storage_join.sql b/tests/queries/0_stateless/00118_storage_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00118_storage_join.sql rename to tests/queries/0_stateless/00118_storage_join.sql diff --git a/dbms/tests/queries/0_stateless/00119_storage_join.reference b/tests/queries/0_stateless/00119_storage_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00119_storage_join.reference rename to tests/queries/0_stateless/00119_storage_join.reference diff --git a/dbms/tests/queries/0_stateless/00119_storage_join.sql b/tests/queries/0_stateless/00119_storage_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00119_storage_join.sql rename to tests/queries/0_stateless/00119_storage_join.sql diff --git a/dbms/tests/queries/0_stateless/00120_join_and_group_by.reference b/tests/queries/0_stateless/00120_join_and_group_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00120_join_and_group_by.reference rename to tests/queries/0_stateless/00120_join_and_group_by.reference diff --git a/dbms/tests/queries/0_stateless/00120_join_and_group_by.sql b/tests/queries/0_stateless/00120_join_and_group_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00120_join_and_group_by.sql rename to tests/queries/0_stateless/00120_join_and_group_by.sql diff --git a/dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.reference b/tests/queries/0_stateless/00121_drop_column_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.reference rename to tests/queries/0_stateless/00121_drop_column_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.sql b/tests/queries/0_stateless/00121_drop_column_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.sql rename to tests/queries/0_stateless/00121_drop_column_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference b/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference rename to tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql b/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql rename to tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference b/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference rename to tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference diff --git a/dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql b/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql rename to tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql diff --git a/dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference b/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference rename to tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference diff --git a/dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql b/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql rename to tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql diff --git a/dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference b/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference rename to tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference diff --git a/dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql b/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql rename to tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql diff --git a/dbms/tests/queries/0_stateless/00126_buffer.reference b/tests/queries/0_stateless/00126_buffer.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00126_buffer.reference rename to tests/queries/0_stateless/00126_buffer.reference diff --git a/dbms/tests/queries/0_stateless/00126_buffer.sql b/tests/queries/0_stateless/00126_buffer.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00126_buffer.sql rename to tests/queries/0_stateless/00126_buffer.sql diff --git a/dbms/tests/queries/0_stateless/00127_group_by_concat.reference b/tests/queries/0_stateless/00127_group_by_concat.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00127_group_by_concat.reference rename to tests/queries/0_stateless/00127_group_by_concat.reference diff --git a/dbms/tests/queries/0_stateless/00127_group_by_concat.sql b/tests/queries/0_stateless/00127_group_by_concat.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00127_group_by_concat.sql rename to tests/queries/0_stateless/00127_group_by_concat.sql diff --git a/dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference b/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference rename to 
tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference diff --git a/dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql b/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql rename to tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql diff --git a/dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.reference b/tests/queries/0_stateless/00129_quantile_timing_weighted.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.reference rename to tests/queries/0_stateless/00129_quantile_timing_weighted.reference diff --git a/dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.sql b/tests/queries/0_stateless/00129_quantile_timing_weighted.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.sql rename to tests/queries/0_stateless/00129_quantile_timing_weighted.sql diff --git a/dbms/tests/queries/0_stateless/00131_set_hashed.reference b/tests/queries/0_stateless/00131_set_hashed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00131_set_hashed.reference rename to tests/queries/0_stateless/00131_set_hashed.reference diff --git a/dbms/tests/queries/0_stateless/00131_set_hashed.sql b/tests/queries/0_stateless/00131_set_hashed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00131_set_hashed.sql rename to tests/queries/0_stateless/00131_set_hashed.sql diff --git a/dbms/tests/queries/0_stateless/00132_sets.reference b/tests/queries/0_stateless/00132_sets.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00132_sets.reference rename to tests/queries/0_stateless/00132_sets.reference diff --git a/dbms/tests/queries/0_stateless/00132_sets.sql b/tests/queries/0_stateless/00132_sets.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00132_sets.sql rename to tests/queries/0_stateless/00132_sets.sql diff --git a/dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference rename to tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference diff --git a/dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh rename to tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh diff --git a/dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference b/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference rename to tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference diff --git a/dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql b/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql rename to tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql diff --git a/dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference b/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference rename to tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql b/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql rename to tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference b/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference rename to tests/queries/0_stateless/00136_duplicate_order_by_elems.reference diff --git a/dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.sql b/tests/queries/0_stateless/00136_duplicate_order_by_elems.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.sql rename to tests/queries/0_stateless/00136_duplicate_order_by_elems.sql diff --git a/dbms/tests/queries/0_stateless/00137_in_constants.reference b/tests/queries/0_stateless/00137_in_constants.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00137_in_constants.reference rename to tests/queries/0_stateless/00137_in_constants.reference diff --git a/dbms/tests/queries/0_stateless/00137_in_constants.sql b/tests/queries/0_stateless/00137_in_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00137_in_constants.sql rename to tests/queries/0_stateless/00137_in_constants.sql diff --git a/dbms/tests/queries/0_stateless/00138_table_aliases.reference b/tests/queries/0_stateless/00138_table_aliases.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00138_table_aliases.reference rename to tests/queries/0_stateless/00138_table_aliases.reference diff --git a/dbms/tests/queries/0_stateless/00138_table_aliases.sql b/tests/queries/0_stateless/00138_table_aliases.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00138_table_aliases.sql rename to tests/queries/0_stateless/00138_table_aliases.sql diff --git a/dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference b/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference rename to tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference diff --git a/dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql b/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql rename to tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql diff --git a/dbms/tests/queries/0_stateless/00140_prewhere_column_order.reference b/tests/queries/0_stateless/00140_prewhere_column_order.reference similarity index 100% rename from
dbms/tests/queries/0_stateless/00140_prewhere_column_order.reference rename to tests/queries/0_stateless/00140_prewhere_column_order.reference diff --git a/dbms/tests/queries/0_stateless/00140_prewhere_column_order.sql b/tests/queries/0_stateless/00140_prewhere_column_order.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00140_prewhere_column_order.sql rename to tests/queries/0_stateless/00140_prewhere_column_order.sql diff --git a/dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference b/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference rename to tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference diff --git a/dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql b/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql rename to tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql diff --git a/dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference b/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference rename to tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference diff --git a/dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql b/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql rename to tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql diff --git a/dbms/tests/queries/0_stateless/00143_number_classification_functions.reference b/tests/queries/0_stateless/00143_number_classification_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00143_number_classification_functions.reference rename to tests/queries/0_stateless/00143_number_classification_functions.reference diff --git a/dbms/tests/queries/0_stateless/00143_number_classification_functions.sql b/tests/queries/0_stateless/00143_number_classification_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00143_number_classification_functions.sql rename to tests/queries/0_stateless/00143_number_classification_functions.sql diff --git a/dbms/tests/queries/0_stateless/00144_empty_regexp.reference b/tests/queries/0_stateless/00144_empty_regexp.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00144_empty_regexp.reference rename to tests/queries/0_stateless/00144_empty_regexp.reference diff --git a/dbms/tests/queries/0_stateless/00144_empty_regexp.sql b/tests/queries/0_stateless/00144_empty_regexp.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00144_empty_regexp.sql rename to tests/queries/0_stateless/00144_empty_regexp.sql diff --git a/dbms/tests/queries/0_stateless/00145_empty_likes.reference b/tests/queries/0_stateless/00145_empty_likes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00145_empty_likes.reference rename to tests/queries/0_stateless/00145_empty_likes.reference diff --git a/dbms/tests/queries/0_stateless/00145_empty_likes.sql b/tests/queries/0_stateless/00145_empty_likes.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00145_empty_likes.sql rename to 
tests/queries/0_stateless/00145_empty_likes.sql diff --git a/dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference b/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference rename to tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference diff --git a/dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql b/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql rename to tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql diff --git a/dbms/tests/queries/0_stateless/00147_alter_nested_default.reference b/tests/queries/0_stateless/00147_alter_nested_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00147_alter_nested_default.reference rename to tests/queries/0_stateless/00147_alter_nested_default.reference diff --git a/dbms/tests/queries/0_stateless/00147_alter_nested_default.sql b/tests/queries/0_stateless/00147_alter_nested_default.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00147_alter_nested_default.sql rename to tests/queries/0_stateless/00147_alter_nested_default.sql diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference b/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference rename to tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql b/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql rename to tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference b/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference rename to tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql b/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql rename to tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql diff --git a/dbms/tests/queries/0_stateless/00149_function_url_hash.reference b/tests/queries/0_stateless/00149_function_url_hash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00149_function_url_hash.reference rename to tests/queries/0_stateless/00149_function_url_hash.reference diff --git a/dbms/tests/queries/0_stateless/00149_function_url_hash.sql b/tests/queries/0_stateless/00149_function_url_hash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00149_function_url_hash.sql rename to tests/queries/0_stateless/00149_function_url_hash.sql diff --git 
a/dbms/tests/queries/0_stateless/00150_with_totals_and_join.reference b/tests/queries/0_stateless/00150_with_totals_and_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00150_with_totals_and_join.reference rename to tests/queries/0_stateless/00150_with_totals_and_join.reference diff --git a/dbms/tests/queries/0_stateless/00150_with_totals_and_join.sql b/tests/queries/0_stateless/00150_with_totals_and_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00150_with_totals_and_join.sql rename to tests/queries/0_stateless/00150_with_totals_and_join.sql diff --git a/dbms/tests/queries/0_stateless/00151_tuple_with_array.reference b/tests/queries/0_stateless/00151_tuple_with_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00151_tuple_with_array.reference rename to tests/queries/0_stateless/00151_tuple_with_array.reference diff --git a/dbms/tests/queries/0_stateless/00151_tuple_with_array.sql b/tests/queries/0_stateless/00151_tuple_with_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00151_tuple_with_array.sql rename to tests/queries/0_stateless/00151_tuple_with_array.sql diff --git a/dbms/tests/queries/0_stateless/00152_totals_in_subquery.reference b/tests/queries/0_stateless/00152_totals_in_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00152_totals_in_subquery.reference rename to tests/queries/0_stateless/00152_totals_in_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00152_totals_in_subquery.sql b/tests/queries/0_stateless/00152_totals_in_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00152_totals_in_subquery.sql rename to tests/queries/0_stateless/00152_totals_in_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00153_transform.reference b/tests/queries/0_stateless/00153_transform.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00153_transform.reference rename to tests/queries/0_stateless/00153_transform.reference diff --git a/dbms/tests/queries/0_stateless/00153_transform.sql b/tests/queries/0_stateless/00153_transform.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00153_transform.sql rename to tests/queries/0_stateless/00153_transform.sql diff --git a/dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference b/tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference rename to tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference diff --git a/dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql b/tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql rename to tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql diff --git a/dbms/tests/queries/0_stateless/00155_long_merges.reference b/tests/queries/0_stateless/00155_long_merges.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00155_long_merges.reference rename to tests/queries/0_stateless/00155_long_merges.reference diff --git a/dbms/tests/queries/0_stateless/00155_long_merges.sh b/tests/queries/0_stateless/00155_long_merges.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00155_long_merges.sh rename to 
tests/queries/0_stateless/00155_long_merges.sh diff --git a/dbms/tests/queries/0_stateless/00156_array_map_to_constant.reference b/tests/queries/0_stateless/00156_array_map_to_constant.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00156_array_map_to_constant.reference rename to tests/queries/0_stateless/00156_array_map_to_constant.reference diff --git a/dbms/tests/queries/0_stateless/00156_array_map_to_constant.sql b/tests/queries/0_stateless/00156_array_map_to_constant.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00156_array_map_to_constant.sql rename to tests/queries/0_stateless/00156_array_map_to_constant.sql diff --git a/dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference b/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference rename to tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference diff --git a/dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql b/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql rename to tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql diff --git a/dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference b/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference rename to tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference diff --git a/dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql b/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql rename to tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql diff --git a/dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.reference b/tests/queries/0_stateless/00159_whitespace_in_columns_list.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.reference rename to tests/queries/0_stateless/00159_whitespace_in_columns_list.reference diff --git a/dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.sql b/tests/queries/0_stateless/00159_whitespace_in_columns_list.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.sql rename to tests/queries/0_stateless/00159_whitespace_in_columns_list.sql diff --git a/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.reference b/tests/queries/0_stateless/00160_merge_and_index_in_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.reference rename to tests/queries/0_stateless/00160_merge_and_index_in_in.reference diff --git a/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql b/tests/queries/0_stateless/00160_merge_and_index_in_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql rename to tests/queries/0_stateless/00160_merge_and_index_in_in.sql diff --git a/dbms/tests/queries/0_stateless/00161_rounding_functions.reference b/tests/queries/0_stateless/00161_rounding_functions.reference similarity index 100% rename
from dbms/tests/queries/0_stateless/00161_rounding_functions.reference rename to tests/queries/0_stateless/00161_rounding_functions.reference diff --git a/dbms/tests/queries/0_stateless/00161_rounding_functions.sql b/tests/queries/0_stateless/00161_rounding_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00161_rounding_functions.sql rename to tests/queries/0_stateless/00161_rounding_functions.sql diff --git a/dbms/tests/queries/0_stateless/00162_shard_global_join.reference b/tests/queries/0_stateless/00162_shard_global_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00162_shard_global_join.reference rename to tests/queries/0_stateless/00162_shard_global_join.reference diff --git a/dbms/tests/queries/0_stateless/00162_shard_global_join.sql b/tests/queries/0_stateless/00162_shard_global_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00162_shard_global_join.sql rename to tests/queries/0_stateless/00162_shard_global_join.sql diff --git a/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.reference b/tests/queries/0_stateless/00163_shard_join_with_empty_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.reference rename to tests/queries/0_stateless/00163_shard_join_with_empty_table.reference diff --git a/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql b/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql rename to tests/queries/0_stateless/00163_shard_join_with_empty_table.sql diff --git a/dbms/tests/queries/0_stateless/00164_not_chain.reference b/tests/queries/0_stateless/00164_not_chain.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00164_not_chain.reference rename to tests/queries/0_stateless/00164_not_chain.reference diff --git a/dbms/tests/queries/0_stateless/00164_not_chain.sql b/tests/queries/0_stateless/00164_not_chain.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00164_not_chain.sql rename to tests/queries/0_stateless/00164_not_chain.sql diff --git a/dbms/tests/queries/0_stateless/00165_transform_non_const_default.reference b/tests/queries/0_stateless/00165_transform_non_const_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00165_transform_non_const_default.reference rename to tests/queries/0_stateless/00165_transform_non_const_default.reference diff --git a/dbms/tests/queries/0_stateless/00165_transform_non_const_default.sql b/tests/queries/0_stateless/00165_transform_non_const_default.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00165_transform_non_const_default.sql rename to tests/queries/0_stateless/00165_transform_non_const_default.sql diff --git a/dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.reference b/tests/queries/0_stateless/00166_functions_of_aggregation_states.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.reference rename to tests/queries/0_stateless/00166_functions_of_aggregation_states.reference diff --git a/dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql b/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql rename to 
tests/queries/0_stateless/00166_functions_of_aggregation_states.sql diff --git a/dbms/tests/queries/0_stateless/00167_settings_inside_query.reference b/tests/queries/0_stateless/00167_settings_inside_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00167_settings_inside_query.reference rename to tests/queries/0_stateless/00167_settings_inside_query.reference diff --git a/dbms/tests/queries/0_stateless/00167_settings_inside_query.sql b/tests/queries/0_stateless/00167_settings_inside_query.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00167_settings_inside_query.sql rename to tests/queries/0_stateless/00167_settings_inside_query.sql diff --git a/dbms/tests/queries/0_stateless/00168_buffer_defaults.reference b/tests/queries/0_stateless/00168_buffer_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00168_buffer_defaults.reference rename to tests/queries/0_stateless/00168_buffer_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00168_buffer_defaults.sql b/tests/queries/0_stateless/00168_buffer_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00168_buffer_defaults.sql rename to tests/queries/0_stateless/00168_buffer_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00169_join_constant_keys.reference b/tests/queries/0_stateless/00169_join_constant_keys.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00169_join_constant_keys.reference rename to tests/queries/0_stateless/00169_join_constant_keys.reference diff --git a/dbms/tests/queries/0_stateless/00169_join_constant_keys.sql b/tests/queries/0_stateless/00169_join_constant_keys.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00169_join_constant_keys.sql rename to tests/queries/0_stateless/00169_join_constant_keys.sql diff --git a/dbms/tests/queries/0_stateless/00170_lower_upper_utf8.reference b/tests/queries/0_stateless/00170_lower_upper_utf8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00170_lower_upper_utf8.reference rename to tests/queries/0_stateless/00170_lower_upper_utf8.reference diff --git a/dbms/tests/queries/0_stateless/00170_lower_upper_utf8.sql b/tests/queries/0_stateless/00170_lower_upper_utf8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00170_lower_upper_utf8.sql rename to tests/queries/0_stateless/00170_lower_upper_utf8.sql diff --git a/dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference b/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference rename to tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference diff --git a/dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql b/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql rename to tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql diff --git a/dbms/tests/queries/0_stateless/00172_constexprs_in_set.reference b/tests/queries/0_stateless/00172_constexprs_in_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00172_constexprs_in_set.reference rename to tests/queries/0_stateless/00172_constexprs_in_set.reference diff --git a/dbms/tests/queries/0_stateless/00172_constexprs_in_set.sql 
b/tests/queries/0_stateless/00172_constexprs_in_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00172_constexprs_in_set.sql rename to tests/queries/0_stateless/00172_constexprs_in_set.sql diff --git a/dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference b/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference rename to tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference diff --git a/dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql b/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql rename to tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql diff --git a/dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference b/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference rename to tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference diff --git a/dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql b/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql rename to tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql diff --git a/dbms/tests/queries/0_stateless/00175_if_num_arrays.reference b/tests/queries/0_stateless/00175_if_num_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00175_if_num_arrays.reference rename to tests/queries/0_stateless/00175_if_num_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00175_if_num_arrays.sql b/tests/queries/0_stateless/00175_if_num_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00175_if_num_arrays.sql rename to tests/queries/0_stateless/00175_if_num_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00176_if_string_arrays.reference b/tests/queries/0_stateless/00176_if_string_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00176_if_string_arrays.reference rename to tests/queries/0_stateless/00176_if_string_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00176_if_string_arrays.sql b/tests/queries/0_stateless/00176_if_string_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00176_if_string_arrays.sql rename to tests/queries/0_stateless/00176_if_string_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.reference b/tests/queries/0_stateless/00177_inserts_through_http_parts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.reference rename to tests/queries/0_stateless/00177_inserts_through_http_parts.reference diff --git a/dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.sh b/tests/queries/0_stateless/00177_inserts_through_http_parts.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.sh rename to 
tests/queries/0_stateless/00177_inserts_through_http_parts.sh diff --git a/dbms/tests/queries/0_stateless/00178_function_replicate.reference b/tests/queries/0_stateless/00178_function_replicate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00178_function_replicate.reference rename to tests/queries/0_stateless/00178_function_replicate.reference diff --git a/dbms/tests/queries/0_stateless/00178_function_replicate.sql b/tests/queries/0_stateless/00178_function_replicate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00178_function_replicate.sql rename to tests/queries/0_stateless/00178_function_replicate.sql diff --git a/dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference b/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference rename to tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference diff --git a/dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql b/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql rename to tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql diff --git a/dbms/tests/queries/0_stateless/00180_attach_materialized_view.reference b/tests/queries/0_stateless/00180_attach_materialized_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00180_attach_materialized_view.reference rename to tests/queries/0_stateless/00180_attach_materialized_view.reference diff --git a/dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql b/tests/queries/0_stateless/00180_attach_materialized_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql rename to tests/queries/0_stateless/00180_attach_materialized_view.sql diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.reference b/tests/queries/0_stateless/00181_aggregate_functions_statistics.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.reference rename to tests/queries/0_stateless/00181_aggregate_functions_statistics.reference diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql b/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql rename to tests/queries/0_stateless/00181_aggregate_functions_statistics.sql diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference b/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference rename to tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql b/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql rename to tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql diff --git
a/dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference b/tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference rename to tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference diff --git a/dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql b/tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql rename to tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql diff --git a/dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.reference b/tests/queries/0_stateless/00183_skip_unavailable_shards.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.reference rename to tests/queries/0_stateless/00183_skip_unavailable_shards.reference diff --git a/dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.sql b/tests/queries/0_stateless/00183_skip_unavailable_shards.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.sql rename to tests/queries/0_stateless/00183_skip_unavailable_shards.sql diff --git a/dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference rename to tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference diff --git a/dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql rename to tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql diff --git a/dbms/tests/queries/0_stateless/00185_array_literals.reference b/tests/queries/0_stateless/00185_array_literals.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00185_array_literals.reference rename to tests/queries/0_stateless/00185_array_literals.reference diff --git a/dbms/tests/queries/0_stateless/00185_array_literals.sql b/tests/queries/0_stateless/00185_array_literals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00185_array_literals.sql rename to tests/queries/0_stateless/00185_array_literals.sql diff --git a/dbms/tests/queries/0_stateless/00186_very_long_arrays.reference b/tests/queries/0_stateless/00186_very_long_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00186_very_long_arrays.reference rename to tests/queries/0_stateless/00186_very_long_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00186_very_long_arrays.sh b/tests/queries/0_stateless/00186_very_long_arrays.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00186_very_long_arrays.sh rename to tests/queries/0_stateless/00186_very_long_arrays.sh diff --git a/dbms/tests/queries/0_stateless/00187_like_regexp_prefix.reference b/tests/queries/0_stateless/00187_like_regexp_prefix.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00187_like_regexp_prefix.reference rename to 
tests/queries/0_stateless/00187_like_regexp_prefix.reference diff --git a/dbms/tests/queries/0_stateless/00187_like_regexp_prefix.sql b/tests/queries/0_stateless/00187_like_regexp_prefix.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00187_like_regexp_prefix.sql rename to tests/queries/0_stateless/00187_like_regexp_prefix.sql diff --git a/dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference b/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference rename to tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference diff --git a/dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql b/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql rename to tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql diff --git a/dbms/tests/queries/0_stateless/00189_time_zones.reference b/tests/queries/0_stateless/00189_time_zones.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00189_time_zones.reference rename to tests/queries/0_stateless/00189_time_zones.reference diff --git a/dbms/tests/queries/0_stateless/00189_time_zones.sql b/tests/queries/0_stateless/00189_time_zones.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00189_time_zones.sql rename to tests/queries/0_stateless/00189_time_zones.sql diff --git a/dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference b/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference rename to tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference diff --git a/dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql b/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql rename to tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql diff --git a/dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference b/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference rename to tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference diff --git a/dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql b/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql rename to tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql diff --git a/dbms/tests/queries/0_stateless/00192_least_greatest.reference b/tests/queries/0_stateless/00192_least_greatest.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00192_least_greatest.reference rename to tests/queries/0_stateless/00192_least_greatest.reference diff --git a/dbms/tests/queries/0_stateless/00192_least_greatest.sql 
b/tests/queries/0_stateless/00192_least_greatest.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00192_least_greatest.sql rename to tests/queries/0_stateless/00192_least_greatest.sql diff --git a/dbms/tests/queries/0_stateless/00193_parallel_replicas.reference b/tests/queries/0_stateless/00193_parallel_replicas.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00193_parallel_replicas.reference rename to tests/queries/0_stateless/00193_parallel_replicas.reference diff --git a/dbms/tests/queries/0_stateless/00193_parallel_replicas.sql b/tests/queries/0_stateless/00193_parallel_replicas.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00193_parallel_replicas.sql rename to tests/queries/0_stateless/00193_parallel_replicas.sql diff --git a/dbms/tests/queries/0_stateless/00194_identity.reference b/tests/queries/0_stateless/00194_identity.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00194_identity.reference rename to tests/queries/0_stateless/00194_identity.reference diff --git a/dbms/tests/queries/0_stateless/00194_identity.sql b/tests/queries/0_stateless/00194_identity.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00194_identity.sql rename to tests/queries/0_stateless/00194_identity.sql diff --git a/dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference b/tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference rename to tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference diff --git a/dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql b/tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql rename to tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql diff --git a/dbms/tests/queries/0_stateless/00196_float32_formatting.reference b/tests/queries/0_stateless/00196_float32_formatting.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00196_float32_formatting.reference rename to tests/queries/0_stateless/00196_float32_formatting.reference diff --git a/dbms/tests/queries/0_stateless/00196_float32_formatting.sql b/tests/queries/0_stateless/00196_float32_formatting.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00196_float32_formatting.sql rename to tests/queries/0_stateless/00196_float32_formatting.sql diff --git a/dbms/tests/queries/0_stateless/00197_if_fixed_string.reference b/tests/queries/0_stateless/00197_if_fixed_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00197_if_fixed_string.reference rename to tests/queries/0_stateless/00197_if_fixed_string.reference diff --git a/dbms/tests/queries/0_stateless/00197_if_fixed_string.sql b/tests/queries/0_stateless/00197_if_fixed_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00197_if_fixed_string.sql rename to tests/queries/0_stateless/00197_if_fixed_string.sql diff --git a/dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.reference b/tests/queries/0_stateless/00198_group_by_empty_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.reference rename to tests/queries/0_stateless/00198_group_by_empty_arrays.reference diff --git 
a/dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.sql b/tests/queries/0_stateless/00198_group_by_empty_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.sql rename to tests/queries/0_stateless/00198_group_by_empty_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.reference b/tests/queries/0_stateless/00199_ternary_operator_type_check.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.reference rename to tests/queries/0_stateless/00199_ternary_operator_type_check.reference diff --git a/dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.sql b/tests/queries/0_stateless/00199_ternary_operator_type_check.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.sql rename to tests/queries/0_stateless/00199_ternary_operator_type_check.sql diff --git a/dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference b/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference rename to tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql b/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql rename to tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql diff --git a/dbms/tests/queries/0_stateless/00201_array_uniq.reference b/tests/queries/0_stateless/00201_array_uniq.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00201_array_uniq.reference rename to tests/queries/0_stateless/00201_array_uniq.reference diff --git a/dbms/tests/queries/0_stateless/00201_array_uniq.sql b/tests/queries/0_stateless/00201_array_uniq.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00201_array_uniq.sql rename to tests/queries/0_stateless/00201_array_uniq.sql diff --git a/dbms/tests/queries/0_stateless/00202_cross_join.reference b/tests/queries/0_stateless/00202_cross_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00202_cross_join.reference rename to tests/queries/0_stateless/00202_cross_join.reference diff --git a/dbms/tests/queries/0_stateless/00202_cross_join.sql b/tests/queries/0_stateless/00202_cross_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00202_cross_join.sql rename to tests/queries/0_stateless/00202_cross_join.sql diff --git a/dbms/tests/queries/0_stateless/00203_full_join.reference b/tests/queries/0_stateless/00203_full_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00203_full_join.reference rename to tests/queries/0_stateless/00203_full_join.reference diff --git a/dbms/tests/queries/0_stateless/00203_full_join.sql b/tests/queries/0_stateless/00203_full_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00203_full_join.sql rename to tests/queries/0_stateless/00203_full_join.sql diff --git a/dbms/tests/queries/0_stateless/00204_extract_url_parameter.reference b/tests/queries/0_stateless/00204_extract_url_parameter.reference similarity index 100% rename from 
diff --git a/dbms/tests/queries/0_stateless/00204_extract_url_parameter.sql b/tests/queries/0_stateless/00204_extract_url_parameter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00204_extract_url_parameter.sql
rename to tests/queries/0_stateless/00204_extract_url_parameter.sql
diff --git a/dbms/tests/queries/0_stateless/00205_scalar_subqueries.reference b/tests/queries/0_stateless/00205_scalar_subqueries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00205_scalar_subqueries.reference
rename to tests/queries/0_stateless/00205_scalar_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00205_scalar_subqueries.sql b/tests/queries/0_stateless/00205_scalar_subqueries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00205_scalar_subqueries.sql
rename to tests/queries/0_stateless/00205_scalar_subqueries.sql
diff --git a/dbms/tests/queries/0_stateless/00206_empty_array_to_single.reference b/tests/queries/0_stateless/00206_empty_array_to_single.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00206_empty_array_to_single.reference
rename to tests/queries/0_stateless/00206_empty_array_to_single.reference
diff --git a/dbms/tests/queries/0_stateless/00206_empty_array_to_single.sql b/tests/queries/0_stateless/00206_empty_array_to_single.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00206_empty_array_to_single.sql
rename to tests/queries/0_stateless/00206_empty_array_to_single.sql
diff --git a/dbms/tests/queries/0_stateless/00207_left_array_join.reference b/tests/queries/0_stateless/00207_left_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00207_left_array_join.reference
rename to tests/queries/0_stateless/00207_left_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00207_left_array_join.sql b/tests/queries/0_stateless/00207_left_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00207_left_array_join.sql
rename to tests/queries/0_stateless/00207_left_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00208_agg_state_merge.reference b/tests/queries/0_stateless/00208_agg_state_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00208_agg_state_merge.reference
rename to tests/queries/0_stateless/00208_agg_state_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00208_agg_state_merge.sql b/tests/queries/0_stateless/00208_agg_state_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00208_agg_state_merge.sql
rename to tests/queries/0_stateless/00208_agg_state_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference b/tests/queries/0_stateless/00209_insert_select_extremes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference
rename to tests/queries/0_stateless/00209_insert_select_extremes.reference
diff --git a/dbms/tests/queries/0_stateless/00209_insert_select_extremes.sql b/tests/queries/0_stateless/00209_insert_select_extremes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00209_insert_select_extremes.sql
rename to tests/queries/0_stateless/00209_insert_select_extremes.sql
diff --git a/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.reference b/tests/queries/0_stateless/00210_insert_select_extremes_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.reference
rename to tests/queries/0_stateless/00210_insert_select_extremes_http.reference
diff --git a/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.sh b/tests/queries/0_stateless/00210_insert_select_extremes_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.sh
rename to tests/queries/0_stateless/00210_insert_select_extremes_http.sh
diff --git a/dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference b/tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference
rename to tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql b/tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql
rename to tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference b/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference
rename to tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference
diff --git a/dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql b/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql
rename to tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql
diff --git a/dbms/tests/queries/0_stateless/00213_multiple_global_in.reference b/tests/queries/0_stateless/00213_multiple_global_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00213_multiple_global_in.reference
rename to tests/queries/0_stateless/00213_multiple_global_in.reference
diff --git a/dbms/tests/queries/0_stateless/00213_multiple_global_in.sql b/tests/queries/0_stateless/00213_multiple_global_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00213_multiple_global_in.sql
rename to tests/queries/0_stateless/00213_multiple_global_in.sql
diff --git a/dbms/tests/queries/0_stateless/00214_primary_key_order.reference b/tests/queries/0_stateless/00214_primary_key_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00214_primary_key_order.reference
rename to tests/queries/0_stateless/00214_primary_key_order.reference
diff --git a/dbms/tests/queries/0_stateless/00214_primary_key_order.sql b/tests/queries/0_stateless/00214_primary_key_order.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00214_primary_key_order.sql
rename to tests/queries/0_stateless/00214_primary_key_order.sql
diff --git a/dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference b/tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference
rename to tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql b/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql
rename to tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00216_bit_test_function_family.reference b/tests/queries/0_stateless/00216_bit_test_function_family.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00216_bit_test_function_family.reference
rename to tests/queries/0_stateless/00216_bit_test_function_family.reference
diff --git a/dbms/tests/queries/0_stateless/00216_bit_test_function_family.sql b/tests/queries/0_stateless/00216_bit_test_function_family.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00216_bit_test_function_family.sql
rename to tests/queries/0_stateless/00216_bit_test_function_family.sql
diff --git a/dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference b/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference
rename to tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference
diff --git a/dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql b/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql
rename to tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql
diff --git a/dbms/tests/queries/0_stateless/00218_like_regexp_newline.reference b/tests/queries/0_stateless/00218_like_regexp_newline.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00218_like_regexp_newline.reference
rename to tests/queries/0_stateless/00218_like_regexp_newline.reference
diff --git a/dbms/tests/queries/0_stateless/00218_like_regexp_newline.sql b/tests/queries/0_stateless/00218_like_regexp_newline.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00218_like_regexp_newline.sql
rename to tests/queries/0_stateless/00218_like_regexp_newline.sql
diff --git a/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.reference b/tests/queries/0_stateless/00219_full_right_join_column_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00219_full_right_join_column_order.reference
rename to tests/queries/0_stateless/00219_full_right_join_column_order.reference
diff --git a/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql b/tests/queries/0_stateless/00219_full_right_join_column_order.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql
rename to tests/queries/0_stateless/00219_full_right_join_column_order.sql
diff --git a/dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference b/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference
rename to tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql b/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql
rename to tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference b/tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference
rename to tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference
diff --git a/dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql b/tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql
rename to tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql
diff --git a/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference b/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference
rename to tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference
diff --git a/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql b/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql
rename to tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql
diff --git a/dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference b/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference
rename to tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference
diff --git a/dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql b/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql
rename to tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql
diff --git a/dbms/tests/queries/0_stateless/00225_join_duplicate_columns.reference b/tests/queries/0_stateless/00225_join_duplicate_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00225_join_duplicate_columns.reference
rename to tests/queries/0_stateless/00225_join_duplicate_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00225_join_duplicate_columns.sql b/tests/queries/0_stateless/00225_join_duplicate_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00225_join_duplicate_columns.sql
rename to tests/queries/0_stateless/00225_join_duplicate_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference b/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference
rename to tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference
diff --git a/dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql b/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql
rename to tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql
diff --git a/dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference b/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference
rename to tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference
diff --git a/dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql b/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql
rename to tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql
diff --git a/dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference b/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference
rename to tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql b/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql
rename to tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00229_prewhere_column_missing.reference b/tests/queries/0_stateless/00229_prewhere_column_missing.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00229_prewhere_column_missing.reference
rename to tests/queries/0_stateless/00229_prewhere_column_missing.reference
diff --git a/dbms/tests/queries/0_stateless/00229_prewhere_column_missing.sql b/tests/queries/0_stateless/00229_prewhere_column_missing.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00229_prewhere_column_missing.sql
rename to tests/queries/0_stateless/00229_prewhere_column_missing.sql
diff --git a/dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference b/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference
rename to tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference
diff --git a/dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql b/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql
rename to tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql
diff --git a/dbms/tests/queries/0_stateless/00231_format_vertical_raw.reference b/tests/queries/0_stateless/00231_format_vertical_raw.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00231_format_vertical_raw.reference
rename to tests/queries/0_stateless/00231_format_vertical_raw.reference
diff --git a/dbms/tests/queries/0_stateless/00231_format_vertical_raw.sql b/tests/queries/0_stateless/00231_format_vertical_raw.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00231_format_vertical_raw.sql
rename to tests/queries/0_stateless/00231_format_vertical_raw.sql
diff --git a/dbms/tests/queries/0_stateless/00232_format_readable_size.reference b/tests/queries/0_stateless/00232_format_readable_size.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00232_format_readable_size.reference
rename to tests/queries/0_stateless/00232_format_readable_size.reference
diff --git a/dbms/tests/queries/0_stateless/00232_format_readable_size.sql b/tests/queries/0_stateless/00232_format_readable_size.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00232_format_readable_size.sql
rename to tests/queries/0_stateless/00232_format_readable_size.sql
diff --git a/dbms/tests/queries/0_stateless/00233_position_function_family.reference b/tests/queries/0_stateless/00233_position_function_family.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00233_position_function_family.reference
rename to tests/queries/0_stateless/00233_position_function_family.reference
diff --git a/dbms/tests/queries/0_stateless/00233_position_function_family.sql b/tests/queries/0_stateless/00233_position_function_family.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00233_position_function_family.sql
rename to tests/queries/0_stateless/00233_position_function_family.sql
diff --git a/dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference b/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference
rename to tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference
diff --git a/dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql b/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql
rename to tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql
diff --git a/dbms/tests/queries/0_stateless/00235_create_temporary_table_as.reference b/tests/queries/0_stateless/00235_create_temporary_table_as.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00235_create_temporary_table_as.reference
rename to tests/queries/0_stateless/00235_create_temporary_table_as.reference
diff --git a/dbms/tests/queries/0_stateless/00235_create_temporary_table_as.sql b/tests/queries/0_stateless/00235_create_temporary_table_as.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00235_create_temporary_table_as.sql
rename to tests/queries/0_stateless/00235_create_temporary_table_as.sql
diff --git a/dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference b/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference
rename to tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql b/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql
rename to tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00237_group_by_arrays.reference b/tests/queries/0_stateless/00237_group_by_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00237_group_by_arrays.reference
rename to tests/queries/0_stateless/00237_group_by_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00237_group_by_arrays.sql b/tests/queries/0_stateless/00237_group_by_arrays.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00237_group_by_arrays.sql
rename to tests/queries/0_stateless/00237_group_by_arrays.sql
diff --git a/dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.reference b/tests/queries/0_stateless/00238_removal_of_temporary_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.reference
rename to tests/queries/0_stateless/00238_removal_of_temporary_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.sql b/tests/queries/0_stateless/00238_removal_of_temporary_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.sql
rename to tests/queries/0_stateless/00238_removal_of_temporary_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00239_type_conversion_in_in.reference b/tests/queries/0_stateless/00239_type_conversion_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00239_type_conversion_in_in.reference
rename to tests/queries/0_stateless/00239_type_conversion_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00239_type_conversion_in_in.sql b/tests/queries/0_stateless/00239_type_conversion_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00239_type_conversion_in_in.sql
rename to tests/queries/0_stateless/00239_type_conversion_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00240_replace_substring_loop.reference b/tests/queries/0_stateless/00240_replace_substring_loop.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00240_replace_substring_loop.reference
rename to tests/queries/0_stateless/00240_replace_substring_loop.reference
diff --git a/dbms/tests/queries/0_stateless/00240_replace_substring_loop.sql b/tests/queries/0_stateless/00240_replace_substring_loop.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00240_replace_substring_loop.sql
rename to tests/queries/0_stateless/00240_replace_substring_loop.sql
diff --git a/dbms/tests/queries/0_stateless/00250_tuple_comparison.reference b/tests/queries/0_stateless/00250_tuple_comparison.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00250_tuple_comparison.reference
rename to tests/queries/0_stateless/00250_tuple_comparison.reference
diff --git a/dbms/tests/queries/0_stateless/00250_tuple_comparison.sql b/tests/queries/0_stateless/00250_tuple_comparison.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00250_tuple_comparison.sql
rename to tests/queries/0_stateless/00250_tuple_comparison.sql
diff --git a/dbms/tests/queries/0_stateless/00251_has_types.reference b/tests/queries/0_stateless/00251_has_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00251_has_types.reference
rename to tests/queries/0_stateless/00251_has_types.reference
diff --git a/dbms/tests/queries/0_stateless/00251_has_types.sql b/tests/queries/0_stateless/00251_has_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00251_has_types.sql
rename to tests/queries/0_stateless/00251_has_types.sql
diff --git a/dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference b/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference
rename to tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference
diff --git a/dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql b/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql
rename to tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql
diff --git a/dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.reference b/tests/queries/0_stateless/00253_insert_recursive_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.reference
rename to tests/queries/0_stateless/00253_insert_recursive_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.sql b/tests/queries/0_stateless/00253_insert_recursive_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.sql
rename to tests/queries/0_stateless/00253_insert_recursive_defaults.sql
diff --git a/dbms/tests/queries/0_stateless/00254_tuple_extremes.reference b/tests/queries/0_stateless/00254_tuple_extremes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00254_tuple_extremes.reference
rename to tests/queries/0_stateless/00254_tuple_extremes.reference
diff --git a/dbms/tests/queries/0_stateless/00254_tuple_extremes.sql b/tests/queries/0_stateless/00254_tuple_extremes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00254_tuple_extremes.sql
rename to tests/queries/0_stateless/00254_tuple_extremes.sql
diff --git a/dbms/tests/queries/0_stateless/00255_array_concat_string.reference b/tests/queries/0_stateless/00255_array_concat_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00255_array_concat_string.reference
rename to tests/queries/0_stateless/00255_array_concat_string.reference
diff --git a/dbms/tests/queries/0_stateless/00255_array_concat_string.sql b/tests/queries/0_stateless/00255_array_concat_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00255_array_concat_string.sql
rename to tests/queries/0_stateless/00255_array_concat_string.sql
diff --git a/dbms/tests/queries/0_stateless/00256_reverse.reference b/tests/queries/0_stateless/00256_reverse.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00256_reverse.reference
rename to tests/queries/0_stateless/00256_reverse.reference
diff --git a/dbms/tests/queries/0_stateless/00256_reverse.sql b/tests/queries/0_stateless/00256_reverse.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00256_reverse.sql
rename to tests/queries/0_stateless/00256_reverse.sql
diff --git a/dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference
rename to tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference
diff --git a/dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql
rename to tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql
diff --git a/dbms/tests/queries/0_stateless/00258_materializing_tuples.reference b/tests/queries/0_stateless/00258_materializing_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00258_materializing_tuples.reference
rename to tests/queries/0_stateless/00258_materializing_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00258_materializing_tuples.sql b/tests/queries/0_stateless/00258_materializing_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00258_materializing_tuples.sql
rename to tests/queries/0_stateless/00258_materializing_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00259_hashing_tuples.reference b/tests/queries/0_stateless/00259_hashing_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00259_hashing_tuples.reference
rename to tests/queries/0_stateless/00259_hashing_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00259_hashing_tuples.sql b/tests/queries/0_stateless/00259_hashing_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00259_hashing_tuples.sql
rename to tests/queries/0_stateless/00259_hashing_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00260_like_and_curly_braces.reference b/tests/queries/0_stateless/00260_like_and_curly_braces.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00260_like_and_curly_braces.reference
rename to tests/queries/0_stateless/00260_like_and_curly_braces.reference
diff --git a/dbms/tests/queries/0_stateless/00260_like_and_curly_braces.sql b/tests/queries/0_stateless/00260_like_and_curly_braces.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00260_like_and_curly_braces.sql
rename to tests/queries/0_stateless/00260_like_and_curly_braces.sql
diff --git a/dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference b/tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference
rename to tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql b/tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql
rename to tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00262_alter_alias.reference b/tests/queries/0_stateless/00262_alter_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00262_alter_alias.reference
rename to tests/queries/0_stateless/00262_alter_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00262_alter_alias.sql b/tests/queries/0_stateless/00262_alter_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00262_alter_alias.sql
rename to tests/queries/0_stateless/00262_alter_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference b/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference
rename to tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql b/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql
rename to tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00264_uniq_many_args.reference b/tests/queries/0_stateless/00264_uniq_many_args.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00264_uniq_many_args.reference
rename to tests/queries/0_stateless/00264_uniq_many_args.reference
diff --git a/dbms/tests/queries/0_stateless/00264_uniq_many_args.sql b/tests/queries/0_stateless/00264_uniq_many_args.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00264_uniq_many_args.sql
rename to tests/queries/0_stateless/00264_uniq_many_args.sql
diff --git a/dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.reference b/tests/queries/0_stateless/00265_http_content_type_format_timezone.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.reference
rename to tests/queries/0_stateless/00265_http_content_type_format_timezone.reference
diff --git a/dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh b/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh
rename to tests/queries/0_stateless/00265_http_content_type_format_timezone.sh
diff --git a/dbms/tests/queries/0_stateless/00266_read_overflow_mode.reference b/tests/queries/0_stateless/00266_read_overflow_mode.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_read_overflow_mode.reference
rename to tests/queries/0_stateless/00266_read_overflow_mode.reference
diff --git a/dbms/tests/queries/0_stateless/00266_read_overflow_mode.sql b/tests/queries/0_stateless/00266_read_overflow_mode.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_read_overflow_mode.sql
rename to tests/queries/0_stateless/00266_read_overflow_mode.sql
diff --git a/dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference b/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference
rename to tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql b/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql
rename to tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference b/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference
rename to tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference
diff --git a/dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql b/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql
rename to tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql
diff --git a/dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.reference b/tests/queries/0_stateless/00268_aliases_without_as_keyword.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.reference
rename to tests/queries/0_stateless/00268_aliases_without_as_keyword.reference
diff --git a/dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.sql b/tests/queries/0_stateless/00268_aliases_without_as_keyword.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.sql
rename to tests/queries/0_stateless/00268_aliases_without_as_keyword.sql
diff --git a/dbms/tests/queries/0_stateless/00269_database_table_whitespace.reference b/tests/queries/0_stateless/00269_database_table_whitespace.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00269_database_table_whitespace.reference
rename to tests/queries/0_stateless/00269_database_table_whitespace.reference
diff --git a/dbms/tests/queries/0_stateless/00269_database_table_whitespace.sql b/tests/queries/0_stateless/00269_database_table_whitespace.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00269_database_table_whitespace.sql
rename to tests/queries/0_stateless/00269_database_table_whitespace.sql
diff --git a/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.reference b/tests/queries/0_stateless/00270_views_query_processing_stage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00270_views_query_processing_stage.reference
rename to tests/queries/0_stateless/00270_views_query_processing_stage.reference
diff --git a/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql b/tests/queries/0_stateless/00270_views_query_processing_stage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql
rename to tests/queries/0_stateless/00270_views_query_processing_stage.sql
diff --git a/dbms/tests/queries/0_stateless/00271_agg_state_and_totals.reference b/tests/queries/0_stateless/00271_agg_state_and_totals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00271_agg_state_and_totals.reference
rename to tests/queries/0_stateless/00271_agg_state_and_totals.reference
diff --git a/dbms/tests/queries/0_stateless/00271_agg_state_and_totals.sql b/tests/queries/0_stateless/00271_agg_state_and_totals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00271_agg_state_and_totals.sql
rename to tests/queries/0_stateless/00271_agg_state_and_totals.sql
diff --git a/dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.reference b/tests/queries/0_stateless/00272_union_all_and_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.reference
rename to tests/queries/0_stateless/00272_union_all_and_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.sql b/tests/queries/0_stateless/00272_union_all_and_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.sql
rename to tests/queries/0_stateless/00272_union_all_and_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00273_quantiles.reference b/tests/queries/0_stateless/00273_quantiles.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00273_quantiles.reference
rename to tests/queries/0_stateless/00273_quantiles.reference
diff --git a/dbms/tests/queries/0_stateless/00273_quantiles.sql b/tests/queries/0_stateless/00273_quantiles.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00273_quantiles.sql
rename to tests/queries/0_stateless/00273_quantiles.sql
diff --git a/dbms/tests/queries/0_stateless/00274_shard_group_array.reference b/tests/queries/0_stateless/00274_shard_group_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00274_shard_group_array.reference
rename to tests/queries/0_stateless/00274_shard_group_array.reference
diff --git a/dbms/tests/queries/0_stateless/00274_shard_group_array.sql b/tests/queries/0_stateless/00274_shard_group_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00274_shard_group_array.sql
rename to tests/queries/0_stateless/00274_shard_group_array.sql
diff --git a/dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.reference b/tests/queries/0_stateless/00275_shard_quantiles_weighted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.reference
rename to tests/queries/0_stateless/00275_shard_quantiles_weighted.reference
diff --git a/dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.sql b/tests/queries/0_stateless/00275_shard_quantiles_weighted.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.sql
rename to tests/queries/0_stateless/00275_shard_quantiles_weighted.sql
diff --git a/dbms/tests/queries/0_stateless/00276_sample.reference b/tests/queries/0_stateless/00276_sample.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00276_sample.reference
rename to tests/queries/0_stateless/00276_sample.reference
diff --git a/dbms/tests/queries/0_stateless/00276_sample.sql b/tests/queries/0_stateless/00276_sample.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00276_sample.sql
rename to tests/queries/0_stateless/00276_sample.sql
diff --git a/dbms/tests/queries/0_stateless/00277_array_filter.reference b/tests/queries/0_stateless/00277_array_filter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00277_array_filter.reference
rename to tests/queries/0_stateless/00277_array_filter.reference
diff --git a/dbms/tests/queries/0_stateless/00277_array_filter.sql b/tests/queries/0_stateless/00277_array_filter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00277_array_filter.sql
rename to tests/queries/0_stateless/00277_array_filter.sql
diff --git a/dbms/tests/queries/0_stateless/00278_insert_already_sorted.reference b/tests/queries/0_stateless/00278_insert_already_sorted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00278_insert_already_sorted.reference
rename to tests/queries/0_stateless/00278_insert_already_sorted.reference
diff --git a/dbms/tests/queries/0_stateless/00278_insert_already_sorted.sql b/tests/queries/0_stateless/00278_insert_already_sorted.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00278_insert_already_sorted.sql
rename to tests/queries/0_stateless/00278_insert_already_sorted.sql
diff --git a/dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.reference b/tests/queries/0_stateless/00279_quantiles_permuted_args.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.reference
rename to tests/queries/0_stateless/00279_quantiles_permuted_args.reference
diff --git a/dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.sql b/tests/queries/0_stateless/00279_quantiles_permuted_args.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.sql
rename to tests/queries/0_stateless/00279_quantiles_permuted_args.sql
diff --git a/dbms/tests/queries/0_stateless/00280_hex_escape_sequence.reference b/tests/queries/0_stateless/00280_hex_escape_sequence.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00280_hex_escape_sequence.reference
rename to tests/queries/0_stateless/00280_hex_escape_sequence.reference
diff --git a/dbms/tests/queries/0_stateless/00280_hex_escape_sequence.sql b/tests/queries/0_stateless/00280_hex_escape_sequence.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00280_hex_escape_sequence.sql
rename to tests/queries/0_stateless/00280_hex_escape_sequence.sql
diff --git a/dbms/tests/queries/0_stateless/00477_parsing_data_types.reference b/tests/queries/0_stateless/00281_compile_sizeof_packed.re
similarity index 100%
rename from dbms/tests/queries/0_stateless/00477_parsing_data_types.reference
rename to tests/queries/0_stateless/00281_compile_sizeof_packed.re
diff --git a/dbms/tests/queries/0_stateless/00282_merging.reference b/tests/queries/0_stateless/00282_merging.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00282_merging.reference
rename to tests/queries/0_stateless/00282_merging.reference
diff --git a/dbms/tests/queries/0_stateless/00282_merging.sql b/tests/queries/0_stateless/00282_merging.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00282_merging.sql
rename to tests/queries/0_stateless/00282_merging.sql
diff --git a/dbms/tests/queries/0_stateless/00283_column_cut.reference b/tests/queries/0_stateless/00283_column_cut.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00283_column_cut.reference
rename to tests/queries/0_stateless/00283_column_cut.reference
diff --git a/dbms/tests/queries/0_stateless/00283_column_cut.sql b/tests/queries/0_stateless/00283_column_cut.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00283_column_cut.sql
rename to tests/queries/0_stateless/00283_column_cut.sql
diff --git a/dbms/tests/queries/0_stateless/00284_external_aggregation.reference b/tests/queries/0_stateless/00284_external_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00284_external_aggregation.reference
rename to tests/queries/0_stateless/00284_external_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00284_external_aggregation.sql b/tests/queries/0_stateless/00284_external_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00284_external_aggregation.sql
rename to tests/queries/0_stateless/00284_external_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.reference b/tests/queries/0_stateless/00285_not_all_data_in_totals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.reference
rename to tests/queries/0_stateless/00285_not_all_data_in_totals.reference
diff --git a/dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.sql b/tests/queries/0_stateless/00285_not_all_data_in_totals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.sql
rename to tests/queries/0_stateless/00285_not_all_data_in_totals.sql
diff --git a/dbms/tests/queries/0_stateless/00286_format_long_negative_float.reference b/tests/queries/0_stateless/00286_format_long_negative_float.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00286_format_long_negative_float.reference
rename to tests/queries/0_stateless/00286_format_long_negative_float.reference
diff --git a/dbms/tests/queries/0_stateless/00286_format_long_negative_float.sql b/tests/queries/0_stateless/00286_format_long_negative_float.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00286_format_long_negative_float.sql
rename to tests/queries/0_stateless/00286_format_long_negative_float.sql
diff --git a/dbms/tests/queries/0_stateless/00287_column_const_with_nan.reference b/tests/queries/0_stateless/00287_column_const_with_nan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00287_column_const_with_nan.reference
rename to tests/queries/0_stateless/00287_column_const_with_nan.reference
diff --git a/dbms/tests/queries/0_stateless/00287_column_const_with_nan.sql b/tests/queries/0_stateless/00287_column_const_with_nan.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00287_column_const_with_nan.sql
rename to tests/queries/0_stateless/00287_column_const_with_nan.sql
diff --git a/dbms/tests/queries/0_stateless/00288_empty_stripelog.reference b/tests/queries/0_stateless/00288_empty_stripelog.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00288_empty_stripelog.reference
rename to tests/queries/0_stateless/00288_empty_stripelog.reference
diff --git a/dbms/tests/queries/0_stateless/00288_empty_stripelog.sql b/tests/queries/0_stateless/00288_empty_stripelog.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00288_empty_stripelog.sql
rename to tests/queries/0_stateless/00288_empty_stripelog.sql
diff --git a/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference b/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference
rename to tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference
diff --git a/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql b/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
rename to tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
diff --git a/dbms/tests/queries/0_stateless/00291_array_reduce.reference b/tests/queries/0_stateless/00291_array_reduce.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00291_array_reduce.reference
rename to tests/queries/0_stateless/00291_array_reduce.reference
diff --git a/dbms/tests/queries/0_stateless/00291_array_reduce.sql b/tests/queries/0_stateless/00291_array_reduce.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00291_array_reduce.sql
rename to tests/queries/0_stateless/00291_array_reduce.sql
diff --git a/dbms/tests/queries/0_stateless/00292_parser_tuple_element.reference b/tests/queries/0_stateless/00292_parser_tuple_element.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00292_parser_tuple_element.reference
rename to tests/queries/0_stateless/00292_parser_tuple_element.reference
diff --git a/dbms/tests/queries/0_stateless/00292_parser_tuple_element.sql b/tests/queries/0_stateless/00292_parser_tuple_element.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00292_parser_tuple_element.sql
rename to tests/queries/0_stateless/00292_parser_tuple_element.sql
diff --git a/dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.reference b/tests/queries/0_stateless/00293_shard_max_subquery_depth.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.reference
rename to tests/queries/0_stateless/00293_shard_max_subquery_depth.reference
diff --git a/dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.sql b/tests/queries/0_stateless/00293_shard_max_subquery_depth.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.sql
rename to tests/queries/0_stateless/00293_shard_max_subquery_depth.sql
diff --git a/dbms/tests/queries/0_stateless/00294_shard_enums.reference b/tests/queries/0_stateless/00294_shard_enums.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00294_shard_enums.reference
rename to tests/queries/0_stateless/00294_shard_enums.reference
diff --git a/dbms/tests/queries/0_stateless/00294_shard_enums.sql b/tests/queries/0_stateless/00294_shard_enums.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00294_shard_enums.sql
rename to tests/queries/0_stateless/00294_shard_enums.sql
diff --git a/dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference b/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference
rename to tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql b/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql
rename to tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00296_url_parameters.reference b/tests/queries/0_stateless/00296_url_parameters.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00296_url_parameters.reference
rename to tests/queries/0_stateless/00296_url_parameters.reference
diff --git a/dbms/tests/queries/0_stateless/00296_url_parameters.sql b/tests/queries/0_stateless/00296_url_parameters.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00296_url_parameters.sql
rename to tests/queries/0_stateless/00296_url_parameters.sql
diff --git a/dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference b/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference
rename to tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled b/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled
rename to tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled
diff --git a/dbms/tests/queries/0_stateless/00298_enum_width_and_cast.reference b/tests/queries/0_stateless/00298_enum_width_and_cast.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00298_enum_width_and_cast.reference
rename to tests/queries/0_stateless/00298_enum_width_and_cast.reference
diff --git a/dbms/tests/queries/0_stateless/00298_enum_width_and_cast.sql b/tests/queries/0_stateless/00298_enum_width_and_cast.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00298_enum_width_and_cast.sql
rename to tests/queries/0_stateless/00298_enum_width_and_cast.sql
diff --git a/dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference b/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference
rename to tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference
diff --git a/dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql b/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql
rename to tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql
diff --git a/dbms/tests/queries/0_stateless/00300_csv.reference b/tests/queries/0_stateless/00300_csv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00300_csv.reference
rename to tests/queries/0_stateless/00300_csv.reference
diff --git a/dbms/tests/queries/0_stateless/00300_csv.sql b/tests/queries/0_stateless/00300_csv.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00300_csv.sql
rename to tests/queries/0_stateless/00300_csv.sql
diff --git a/dbms/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00301_csv.reference
rename to tests/queries/0_stateless/00301_csv.reference
diff --git a/dbms/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00301_csv.sh
rename to tests/queries/0_stateless/00301_csv.sh
diff --git a/dbms/tests/queries/0_stateless/00302_http_compression.reference b/tests/queries/0_stateless/00302_http_compression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00302_http_compression.reference
rename to tests/queries/0_stateless/00302_http_compression.reference
diff --git a/dbms/tests/queries/0_stateless/00302_http_compression.sh b/tests/queries/0_stateless/00302_http_compression.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00302_http_compression.sh
rename to tests/queries/0_stateless/00302_http_compression.sh
diff --git a/dbms/tests/queries/0_stateless/00304_http_external_data.reference b/tests/queries/0_stateless/00304_http_external_data.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00304_http_external_data.reference
rename to tests/queries/0_stateless/00304_http_external_data.reference
diff --git a/dbms/tests/queries/0_stateless/00304_http_external_data.sh b/tests/queries/0_stateless/00304_http_external_data.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00304_http_external_data.sh
rename to tests/queries/0_stateless/00304_http_external_data.sh
diff --git a/dbms/tests/queries/0_stateless/00305_http_and_readonly.reference b/tests/queries/0_stateless/00305_http_and_readonly.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00305_http_and_readonly.reference
rename to tests/queries/0_stateless/00305_http_and_readonly.reference
diff --git a/dbms/tests/queries/0_stateless/00305_http_and_readonly.sh b/tests/queries/0_stateless/00305_http_and_readonly.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00305_http_and_readonly.sh
rename to tests/queries/0_stateless/00305_http_and_readonly.sh
diff --git a/dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.reference b/tests/queries/0_stateless/00306_insert_values_and_expressions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.reference
rename to tests/queries/0_stateless/00306_insert_values_and_expressions.reference
diff --git a/dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.sql b/tests/queries/0_stateless/00306_insert_values_and_expressions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.sql
rename to tests/queries/0_stateless/00306_insert_values_and_expressions.sql
diff --git a/dbms/tests/queries/0_stateless/00307_format_xml.reference b/tests/queries/0_stateless/00307_format_xml.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00307_format_xml.reference
rename to tests/queries/0_stateless/00307_format_xml.reference
diff --git a/dbms/tests/queries/0_stateless/00307_format_xml.sql b/tests/queries/0_stateless/00307_format_xml.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00307_format_xml.sql
rename to tests/queries/0_stateless/00307_format_xml.sql
diff --git a/dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference b/tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference
rename to tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference
--git a/dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql b/tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql rename to tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql diff --git a/dbms/tests/queries/0_stateless/00309_formats.reference b/tests/queries/0_stateless/00309_formats.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00309_formats.reference rename to tests/queries/0_stateless/00309_formats.reference diff --git a/dbms/tests/queries/0_stateless/00309_formats.sql b/tests/queries/0_stateless/00309_formats.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00309_formats.sql rename to tests/queries/0_stateless/00309_formats.sql diff --git a/dbms/tests/queries/0_stateless/00310_tskv.reference b/tests/queries/0_stateless/00310_tskv.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00310_tskv.reference rename to tests/queries/0_stateless/00310_tskv.reference diff --git a/dbms/tests/queries/0_stateless/00310_tskv.sh b/tests/queries/0_stateless/00310_tskv.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00310_tskv.sh rename to tests/queries/0_stateless/00310_tskv.sh diff --git a/dbms/tests/queries/0_stateless/00311_array_primary_key.reference b/tests/queries/0_stateless/00311_array_primary_key.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00311_array_primary_key.reference rename to tests/queries/0_stateless/00311_array_primary_key.reference diff --git a/dbms/tests/queries/0_stateless/00311_array_primary_key.sql b/tests/queries/0_stateless/00311_array_primary_key.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00311_array_primary_key.sql rename to tests/queries/0_stateless/00311_array_primary_key.sql diff --git a/dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference b/tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference rename to tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference diff --git a/dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql b/tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql rename to tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql diff --git a/dbms/tests/queries/0_stateless/00313_const_totals_extremes.reference b/tests/queries/0_stateless/00313_const_totals_extremes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00313_const_totals_extremes.reference rename to tests/queries/0_stateless/00313_const_totals_extremes.reference diff --git a/dbms/tests/queries/0_stateless/00313_const_totals_extremes.sh b/tests/queries/0_stateless/00313_const_totals_extremes.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00313_const_totals_extremes.sh rename to tests/queries/0_stateless/00313_const_totals_extremes.sh diff --git a/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.reference b/tests/queries/0_stateless/00314_sample_factor_virtual_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.reference rename to 
tests/queries/0_stateless/00314_sample_factor_virtual_column.reference diff --git a/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql b/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql rename to tests/queries/0_stateless/00314_sample_factor_virtual_column.sql diff --git a/dbms/tests/queries/0_stateless/00315_quantile_off_by_one.reference b/tests/queries/0_stateless/00315_quantile_off_by_one.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00315_quantile_off_by_one.reference rename to tests/queries/0_stateless/00315_quantile_off_by_one.reference diff --git a/dbms/tests/queries/0_stateless/00315_quantile_off_by_one.sql b/tests/queries/0_stateless/00315_quantile_off_by_one.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00315_quantile_off_by_one.sql rename to tests/queries/0_stateless/00315_quantile_off_by_one.sql diff --git a/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference b/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference rename to tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference diff --git a/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql b/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql rename to tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql diff --git a/dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference b/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference rename to tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference diff --git a/dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql b/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql rename to tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql diff --git a/dbms/tests/queries/0_stateless/00318_pk_tuple_order.reference b/tests/queries/0_stateless/00318_pk_tuple_order.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00318_pk_tuple_order.reference rename to tests/queries/0_stateless/00318_pk_tuple_order.reference diff --git a/dbms/tests/queries/0_stateless/00318_pk_tuple_order.sql b/tests/queries/0_stateless/00318_pk_tuple_order.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00318_pk_tuple_order.sql rename to tests/queries/0_stateless/00318_pk_tuple_order.sql diff --git a/dbms/tests/queries/0_stateless/00319_index_for_like.reference b/tests/queries/0_stateless/00319_index_for_like.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00319_index_for_like.reference rename to tests/queries/0_stateless/00319_index_for_like.reference diff --git a/dbms/tests/queries/0_stateless/00319_index_for_like.sql b/tests/queries/0_stateless/00319_index_for_like.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00319_index_for_like.sql rename to 
tests/queries/0_stateless/00319_index_for_like.sql diff --git a/dbms/tests/queries/0_stateless/00320_between.reference b/tests/queries/0_stateless/00320_between.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00320_between.reference rename to tests/queries/0_stateless/00320_between.reference diff --git a/dbms/tests/queries/0_stateless/00320_between.sql b/tests/queries/0_stateless/00320_between.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00320_between.sql rename to tests/queries/0_stateless/00320_between.sql diff --git a/dbms/tests/queries/0_stateless/00321_pk_set.reference b/tests/queries/0_stateless/00321_pk_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00321_pk_set.reference rename to tests/queries/0_stateless/00321_pk_set.reference diff --git a/dbms/tests/queries/0_stateless/00321_pk_set.sql b/tests/queries/0_stateless/00321_pk_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00321_pk_set.sql rename to tests/queries/0_stateless/00321_pk_set.sql diff --git a/dbms/tests/queries/0_stateless/00322_disable_checksumming.reference b/tests/queries/0_stateless/00322_disable_checksumming.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00322_disable_checksumming.reference rename to tests/queries/0_stateless/00322_disable_checksumming.reference diff --git a/dbms/tests/queries/0_stateless/00322_disable_checksumming.sh b/tests/queries/0_stateless/00322_disable_checksumming.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00322_disable_checksumming.sh rename to tests/queries/0_stateless/00322_disable_checksumming.sh diff --git a/dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.reference b/tests/queries/0_stateless/00323_quantiles_timing_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.reference rename to tests/queries/0_stateless/00323_quantiles_timing_bug.reference diff --git a/dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.sql b/tests/queries/0_stateless/00323_quantiles_timing_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.sql rename to tests/queries/0_stateless/00323_quantiles_timing_bug.sql diff --git a/dbms/tests/queries/0_stateless/00324_hashing_enums.reference b/tests/queries/0_stateless/00324_hashing_enums.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00324_hashing_enums.reference rename to tests/queries/0_stateless/00324_hashing_enums.reference diff --git a/dbms/tests/queries/0_stateless/00324_hashing_enums.sql b/tests/queries/0_stateless/00324_hashing_enums.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00324_hashing_enums.sql rename to tests/queries/0_stateless/00324_hashing_enums.sql diff --git a/dbms/tests/queries/0_stateless/00325_replacing_merge_tree.reference b/tests/queries/0_stateless/00325_replacing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00325_replacing_merge_tree.reference rename to tests/queries/0_stateless/00325_replacing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled b/tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled similarity index 100% rename from dbms/tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled rename to tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled diff --git 
diff --git a/dbms/tests/queries/0_stateless/00326_long_function_multi_if.reference b/tests/queries/0_stateless/00326_long_function_multi_if.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00326_long_function_multi_if.reference
rename to tests/queries/0_stateless/00326_long_function_multi_if.reference
diff --git a/dbms/tests/queries/0_stateless/00326_long_function_multi_if.sql b/tests/queries/0_stateless/00326_long_function_multi_if.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00326_long_function_multi_if.sql
rename to tests/queries/0_stateless/00326_long_function_multi_if.sql
diff --git a/dbms/tests/queries/0_stateless/00327_summing_composite_nested.reference b/tests/queries/0_stateless/00327_summing_composite_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00327_summing_composite_nested.reference
rename to tests/queries/0_stateless/00327_summing_composite_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00327_summing_composite_nested.sql b/tests/queries/0_stateless/00327_summing_composite_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00327_summing_composite_nested.sql
rename to tests/queries/0_stateless/00327_summing_composite_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00328_long_case_construction.reference b/tests/queries/0_stateless/00328_long_case_construction.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00328_long_case_construction.reference
rename to tests/queries/0_stateless/00328_long_case_construction.reference
diff --git a/dbms/tests/queries/0_stateless/00328_long_case_construction.sql b/tests/queries/0_stateless/00328_long_case_construction.sql
similarity index 99%
rename from dbms/tests/queries/0_stateless/00328_long_case_construction.sql
rename to tests/queries/0_stateless/00328_long_case_construction.sql
index d0fc90d0884..aa653d667f7 100644
--- a/dbms/tests/queries/0_stateless/00328_long_case_construction.sql
+++ b/tests/queries/0_stateless/00328_long_case_construction.sql
@@ -1,4 +1,3 @@
-
 /* Trivial case */
 
 SELECT CASE WHEN 1 THEN 2 WHEN 3 THEN 4 ELSE 5 END;
diff --git a/dbms/tests/queries/0_stateless/00330_view_subqueries.reference b/tests/queries/0_stateless/00330_view_subqueries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00330_view_subqueries.reference
rename to tests/queries/0_stateless/00330_view_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00330_view_subqueries.sql b/tests/queries/0_stateless/00330_view_subqueries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00330_view_subqueries.sql
rename to tests/queries/0_stateless/00330_view_subqueries.sql
diff --git a/dbms/tests/queries/0_stateless/00331_final_and_prewhere.reference b/tests/queries/0_stateless/00331_final_and_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00331_final_and_prewhere.reference
rename to tests/queries/0_stateless/00331_final_and_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00331_final_and_prewhere.sql b/tests/queries/0_stateless/00331_final_and_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00331_final_and_prewhere.sql
rename to tests/queries/0_stateless/00331_final_and_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference b/tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference
rename to tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference
diff --git a/dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql b/tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql
rename to tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql
diff --git a/dbms/tests/queries/0_stateless/00333_parser_number_bug.reference b/tests/queries/0_stateless/00333_parser_number_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00333_parser_number_bug.reference
rename to tests/queries/0_stateless/00333_parser_number_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00333_parser_number_bug.sql b/tests/queries/0_stateless/00333_parser_number_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00333_parser_number_bug.sql
rename to tests/queries/0_stateless/00333_parser_number_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.reference b/tests/queries/0_stateless/00334_column_aggregate_function_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.reference
rename to tests/queries/0_stateless/00334_column_aggregate_function_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.sql b/tests/queries/0_stateless/00334_column_aggregate_function_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.sql
rename to tests/queries/0_stateless/00334_column_aggregate_function_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00335_bom.reference b/tests/queries/0_stateless/00335_bom.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00335_bom.reference
rename to tests/queries/0_stateless/00335_bom.reference
diff --git a/dbms/tests/queries/0_stateless/00335_bom.sh b/tests/queries/0_stateless/00335_bom.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00335_bom.sh
rename to tests/queries/0_stateless/00335_bom.sh
diff --git a/dbms/tests/queries/0_stateless/00336_shard_stack_trace.reference b/tests/queries/0_stateless/00336_shard_stack_trace.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00336_shard_stack_trace.reference
rename to tests/queries/0_stateless/00336_shard_stack_trace.reference
diff --git a/dbms/tests/queries/0_stateless/00336_shard_stack_trace.sh b/tests/queries/0_stateless/00336_shard_stack_trace.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00336_shard_stack_trace.sh
rename to tests/queries/0_stateless/00336_shard_stack_trace.sh
diff --git a/dbms/tests/queries/0_stateless/00337_shard_any_heavy.reference b/tests/queries/0_stateless/00337_shard_any_heavy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00337_shard_any_heavy.reference
rename to tests/queries/0_stateless/00337_shard_any_heavy.reference
diff --git a/dbms/tests/queries/0_stateless/00337_shard_any_heavy.sql b/tests/queries/0_stateless/00337_shard_any_heavy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00337_shard_any_heavy.sql
rename to tests/queries/0_stateless/00337_shard_any_heavy.sql
diff --git a/dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.reference b/tests/queries/0_stateless/00338_replicate_array_of_strings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.reference
rename to tests/queries/0_stateless/00338_replicate_array_of_strings.reference
diff --git a/dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.sql b/tests/queries/0_stateless/00338_replicate_array_of_strings.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.sql
rename to tests/queries/0_stateless/00338_replicate_array_of_strings.sql
diff --git a/dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.reference b/tests/queries/0_stateless/00339_parsing_bad_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.reference
rename to tests/queries/0_stateless/00339_parsing_bad_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.sh b/tests/queries/0_stateless/00339_parsing_bad_arrays.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.sh
rename to tests/queries/0_stateless/00339_parsing_bad_arrays.sh
diff --git a/dbms/tests/queries/0_stateless/00340_squashing_insert_select.reference b/tests/queries/0_stateless/00340_squashing_insert_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00340_squashing_insert_select.reference
rename to tests/queries/0_stateless/00340_squashing_insert_select.reference
diff --git a/dbms/tests/queries/0_stateless/00340_squashing_insert_select.sql b/tests/queries/0_stateless/00340_squashing_insert_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00340_squashing_insert_select.sql
rename to tests/queries/0_stateless/00340_squashing_insert_select.sql
diff --git a/dbms/tests/queries/0_stateless/00341_squashing_insert_select2.reference b/tests/queries/0_stateless/00341_squashing_insert_select2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00341_squashing_insert_select2.reference
rename to tests/queries/0_stateless/00341_squashing_insert_select2.reference
diff --git a/dbms/tests/queries/0_stateless/00341_squashing_insert_select2.sql b/tests/queries/0_stateless/00341_squashing_insert_select2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00341_squashing_insert_select2.sql
rename to tests/queries/0_stateless/00341_squashing_insert_select2.sql
diff --git a/dbms/tests/queries/0_stateless/00342_escape_sequences.reference b/tests/queries/0_stateless/00342_escape_sequences.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00342_escape_sequences.reference
rename to tests/queries/0_stateless/00342_escape_sequences.reference
diff --git a/dbms/tests/queries/0_stateless/00342_escape_sequences.sql b/tests/queries/0_stateless/00342_escape_sequences.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00342_escape_sequences.sql
rename to tests/queries/0_stateless/00342_escape_sequences.sql
diff --git a/dbms/tests/queries/0_stateless/00343_array_element_generic.reference b/tests/queries/0_stateless/00343_array_element_generic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00343_array_element_generic.reference
rename to tests/queries/0_stateless/00343_array_element_generic.reference
diff --git a/dbms/tests/queries/0_stateless/00343_array_element_generic.sql b/tests/queries/0_stateless/00343_array_element_generic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00343_array_element_generic.sql
rename to tests/queries/0_stateless/00343_array_element_generic.sql
diff --git a/dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.reference b/tests/queries/0_stateless/00344_row_number_in_all_blocks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.reference
rename to tests/queries/0_stateless/00344_row_number_in_all_blocks.reference
diff --git a/dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.sql b/tests/queries/0_stateless/00344_row_number_in_all_blocks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.sql
rename to tests/queries/0_stateless/00344_row_number_in_all_blocks.sql
diff --git a/dbms/tests/queries/0_stateless/00345_index_accurate_comparison.reference b/tests/queries/0_stateless/00345_index_accurate_comparison.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00345_index_accurate_comparison.reference
rename to tests/queries/0_stateless/00345_index_accurate_comparison.reference
diff --git a/dbms/tests/queries/0_stateless/00345_index_accurate_comparison.sql b/tests/queries/0_stateless/00345_index_accurate_comparison.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00345_index_accurate_comparison.sql
rename to tests/queries/0_stateless/00345_index_accurate_comparison.sql
diff --git a/dbms/tests/queries/0_stateless/00346_if_tuple.reference b/tests/queries/0_stateless/00346_if_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00346_if_tuple.reference
rename to tests/queries/0_stateless/00346_if_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00346_if_tuple.sql b/tests/queries/0_stateless/00346_if_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00346_if_tuple.sql
rename to tests/queries/0_stateless/00346_if_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00347_has_tuple.reference b/tests/queries/0_stateless/00347_has_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00347_has_tuple.reference
rename to tests/queries/0_stateless/00347_has_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00347_has_tuple.sql b/tests/queries/0_stateless/00347_has_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00347_has_tuple.sql
rename to tests/queries/0_stateless/00347_has_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00348_tuples.reference b/tests/queries/0_stateless/00348_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00348_tuples.reference
rename to tests/queries/0_stateless/00348_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00348_tuples.sql b/tests/queries/0_stateless/00348_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00348_tuples.sql
rename to tests/queries/0_stateless/00348_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00349_visible_width.reference b/tests/queries/0_stateless/00349_visible_width.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00349_visible_width.reference
rename to tests/queries/0_stateless/00349_visible_width.reference
diff --git a/dbms/tests/queries/0_stateless/00349_visible_width.sql b/tests/queries/0_stateless/00349_visible_width.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00349_visible_width.sql
rename to tests/queries/0_stateless/00349_visible_width.sql
diff --git a/dbms/tests/queries/0_stateless/00350_count_distinct.reference b/tests/queries/0_stateless/00350_count_distinct.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00350_count_distinct.reference
rename to tests/queries/0_stateless/00350_count_distinct.reference
diff --git a/dbms/tests/queries/0_stateless/00350_count_distinct.sql b/tests/queries/0_stateless/00350_count_distinct.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00350_count_distinct.sql
rename to tests/queries/0_stateless/00350_count_distinct.sql
diff --git a/dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference b/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference
rename to tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql b/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql
rename to tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.reference b/tests/queries/0_stateless/00352_external_sorting_and_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.reference
rename to tests/queries/0_stateless/00352_external_sorting_and_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.sql b/tests/queries/0_stateless/00352_external_sorting_and_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.sql
rename to tests/queries/0_stateless/00352_external_sorting_and_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00353_join_by_tuple.reference b/tests/queries/0_stateless/00353_join_by_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00353_join_by_tuple.reference
rename to tests/queries/0_stateless/00353_join_by_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00353_join_by_tuple.sql b/tests/queries/0_stateless/00353_join_by_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00353_join_by_tuple.sql
rename to tests/queries/0_stateless/00353_join_by_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00354_host_command_line_option.reference b/tests/queries/0_stateless/00354_host_command_line_option.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00354_host_command_line_option.reference
rename to tests/queries/0_stateless/00354_host_command_line_option.reference
diff --git a/dbms/tests/queries/0_stateless/00354_host_command_line_option.sh b/tests/queries/0_stateless/00354_host_command_line_option.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00354_host_command_line_option.sh
rename to tests/queries/0_stateless/00354_host_command_line_option.sh
diff --git a/dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference b/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference
rename to tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference
diff --git a/dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql b/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql
rename to tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql
diff --git a/dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference b/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference
rename to tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql b/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql
rename to tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00357_to_string_complex_types.reference b/tests/queries/0_stateless/00357_to_string_complex_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00357_to_string_complex_types.reference
rename to tests/queries/0_stateless/00357_to_string_complex_types.reference
diff --git a/dbms/tests/queries/0_stateless/00357_to_string_complex_types.sql b/tests/queries/0_stateless/00357_to_string_complex_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00357_to_string_complex_types.sql
rename to tests/queries/0_stateless/00357_to_string_complex_types.sql
diff --git a/dbms/tests/queries/0_stateless/00358_from_string_complex_types.reference b/tests/queries/0_stateless/00358_from_string_complex_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00358_from_string_complex_types.reference
rename to tests/queries/0_stateless/00358_from_string_complex_types.reference
diff --git a/dbms/tests/queries/0_stateless/00358_from_string_complex_types.sql b/tests/queries/0_stateless/00358_from_string_complex_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00358_from_string_complex_types.sql
rename to tests/queries/0_stateless/00358_from_string_complex_types.sql
diff --git a/dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.reference b/tests/queries/0_stateless/00359_convert_or_zero_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.reference
rename to tests/queries/0_stateless/00359_convert_or_zero_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.sql b/tests/queries/0_stateless/00359_convert_or_zero_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.sql
rename to tests/queries/0_stateless/00359_convert_or_zero_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference b/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference
rename to tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql b/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql
rename to tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference b/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference
rename to tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference
diff --git a/dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql b/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql
rename to tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql
diff --git a/dbms/tests/queries/0_stateless/00362_great_circle_distance.reference b/tests/queries/0_stateless/00362_great_circle_distance.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00362_great_circle_distance.reference
rename to tests/queries/0_stateless/00362_great_circle_distance.reference
diff --git a/dbms/tests/queries/0_stateless/00362_great_circle_distance.sql b/tests/queries/0_stateless/00362_great_circle_distance.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00362_great_circle_distance.sql
rename to tests/queries/0_stateless/00362_great_circle_distance.sql
diff --git a/dbms/tests/queries/0_stateless/00363_defaults.reference b/tests/queries/0_stateless/00363_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00363_defaults.reference
rename to tests/queries/0_stateless/00363_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00363_defaults.sql b/tests/queries/0_stateless/00363_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00363_defaults.sql
rename to tests/queries/0_stateless/00363_defaults.sql
diff --git a/dbms/tests/queries/0_stateless/00364_java_style_denormals.reference b/tests/queries/0_stateless/00364_java_style_denormals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00364_java_style_denormals.reference
rename to tests/queries/0_stateless/00364_java_style_denormals.reference
diff --git a/dbms/tests/queries/0_stateless/00364_java_style_denormals.sql b/tests/queries/0_stateless/00364_java_style_denormals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00364_java_style_denormals.sql
rename to tests/queries/0_stateless/00364_java_style_denormals.sql
diff --git a/dbms/tests/queries/0_stateless/00365_statistics_in_formats.reference b/tests/queries/0_stateless/00365_statistics_in_formats.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00365_statistics_in_formats.reference
rename to tests/queries/0_stateless/00365_statistics_in_formats.reference
diff --git a/dbms/tests/queries/0_stateless/00365_statistics_in_formats.sh b/tests/queries/0_stateless/00365_statistics_in_formats.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00365_statistics_in_formats.sh
rename to tests/queries/0_stateless/00365_statistics_in_formats.sh
diff --git a/dbms/tests/queries/0_stateless/00366_multi_statements.reference b/tests/queries/0_stateless/00366_multi_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00366_multi_statements.reference
rename to tests/queries/0_stateless/00366_multi_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00366_multi_statements.sh
rename to tests/queries/0_stateless/00366_multi_statements.sh
diff --git a/dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference b/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference
rename to tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference
diff --git a/dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql b/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql
rename to tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql
diff --git a/dbms/tests/queries/0_stateless/00368_format_option_collision.reference b/tests/queries/0_stateless/00368_format_option_collision.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00368_format_option_collision.reference
rename to tests/queries/0_stateless/00368_format_option_collision.reference
diff --git a/dbms/tests/queries/0_stateless/00368_format_option_collision.sh b/tests/queries/0_stateless/00368_format_option_collision.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00368_format_option_collision.sh
rename to tests/queries/0_stateless/00368_format_option_collision.sh
diff --git a/dbms/tests/queries/0_stateless/00369_int_div_of_float.reference b/tests/queries/0_stateless/00369_int_div_of_float.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00369_int_div_of_float.reference
rename to tests/queries/0_stateless/00369_int_div_of_float.reference
diff --git a/dbms/tests/queries/0_stateless/00369_int_div_of_float.sql b/tests/queries/0_stateless/00369_int_div_of_float.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00369_int_div_of_float.sql
rename to tests/queries/0_stateless/00369_int_div_of_float.sql
diff --git a/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference b/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference
rename to tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql b/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql
rename to tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql
diff --git a/dbms/tests/queries/0_stateless/00371_union_all.reference b/tests/queries/0_stateless/00371_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00371_union_all.reference
rename to tests/queries/0_stateless/00371_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00371_union_all.sql b/tests/queries/0_stateless/00371_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00371_union_all.sql
rename to tests/queries/0_stateless/00371_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00372_cors_header.reference b/tests/queries/0_stateless/00372_cors_header.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00372_cors_header.reference
rename to tests/queries/0_stateless/00372_cors_header.reference
diff --git a/dbms/tests/queries/0_stateless/00372_cors_header.sh b/tests/queries/0_stateless/00372_cors_header.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00372_cors_header.sh
rename to tests/queries/0_stateless/00372_cors_header.sh
diff --git a/dbms/tests/queries/0_stateless/00373_group_by_tuple.reference b/tests/queries/0_stateless/00373_group_by_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00373_group_by_tuple.reference
rename to tests/queries/0_stateless/00373_group_by_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00373_group_by_tuple.sql b/tests/queries/0_stateless/00373_group_by_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00373_group_by_tuple.sql
rename to tests/queries/0_stateless/00373_group_by_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00481_create_view_for_null.reference b/tests/queries/0_stateless/00374_any_last_if_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_create_view_for_null.reference
rename to tests/queries/0_stateless/00374_any_last_if_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00374_any_last_if_merge.sql b/tests/queries/0_stateless/00374_any_last_if_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00374_any_last_if_merge.sql
rename to tests/queries/0_stateless/00374_any_last_if_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference b/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference
rename to tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference
diff --git a/dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh b/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh
rename to tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh
diff --git a/dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference
rename to tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference
diff --git a/dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql
rename to tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql
diff --git a/dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference
rename to tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference
diff --git a/dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql
rename to tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql
diff --git a/dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference
rename to tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference
diff --git a/dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql
rename to tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql
diff --git a/dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.reference b/tests/queries/0_stateless/00378_json_quote_64bit_integers.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.reference
rename to tests/queries/0_stateless/00378_json_quote_64bit_integers.reference
diff --git a/dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql b/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql
rename to tests/queries/0_stateless/00378_json_quote_64bit_integers.sql
diff --git a/dbms/tests/queries/0_stateless/00379_system_processes_port.reference b/tests/queries/0_stateless/00379_system_processes_port.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00379_system_processes_port.reference
rename to tests/queries/0_stateless/00379_system_processes_port.reference
diff --git a/dbms/tests/queries/0_stateless/00379_system_processes_port.sh b/tests/queries/0_stateless/00379_system_processes_port.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00379_system_processes_port.sh
rename to tests/queries/0_stateless/00379_system_processes_port.sh
diff --git a/dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference
rename to tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference
diff --git a/dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh
rename to tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh
diff --git a/dbms/tests/queries/0_stateless/00381_first_significant_subdomain.reference b/tests/queries/0_stateless/00381_first_significant_subdomain.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00381_first_significant_subdomain.reference
rename to tests/queries/0_stateless/00381_first_significant_subdomain.reference
diff --git a/dbms/tests/queries/0_stateless/00381_first_significant_subdomain.sql b/tests/queries/0_stateless/00381_first_significant_subdomain.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00381_first_significant_subdomain.sql
rename to tests/queries/0_stateless/00381_first_significant_subdomain.sql
diff --git a/dbms/tests/queries/0_stateless/00383_utf8_validation.reference b/tests/queries/0_stateless/00383_utf8_validation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00383_utf8_validation.reference
rename to tests/queries/0_stateless/00383_utf8_validation.reference
diff --git a/dbms/tests/queries/0_stateless/00383_utf8_validation.sql b/tests/queries/0_stateless/00383_utf8_validation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00383_utf8_validation.sql
rename to tests/queries/0_stateless/00383_utf8_validation.sql
diff --git a/dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference b/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference
rename to tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference
diff --git a/dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql b/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql
rename to tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql
diff --git a/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference
rename to tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference
diff --git a/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh
rename to tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh
diff --git a/dbms/tests/queries/0_stateless/00386_enum_in_pk.reference b/tests/queries/0_stateless/00386_enum_in_pk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00386_enum_in_pk.reference
rename to tests/queries/0_stateless/00386_enum_in_pk.reference
diff --git a/dbms/tests/queries/0_stateless/00386_enum_in_pk.sql b/tests/queries/0_stateless/00386_enum_in_pk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00386_enum_in_pk.sql
rename to tests/queries/0_stateless/00386_enum_in_pk.sql
diff --git a/dbms/tests/queries/0_stateless/00386_has_column_in_table.reference b/tests/queries/0_stateless/00386_has_column_in_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00386_has_column_in_table.reference
rename to tests/queries/0_stateless/00386_has_column_in_table.reference
diff --git a/dbms/tests/queries/0_stateless/00386_has_column_in_table.sql b/tests/queries/0_stateless/00386_has_column_in_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00386_has_column_in_table.sql
rename to tests/queries/0_stateless/00386_has_column_in_table.sql
diff --git a/dbms/tests/queries/0_stateless/00386_long_in_pk.python b/tests/queries/0_stateless/00386_long_in_pk.python
similarity index 100%
rename from dbms/tests/queries/0_stateless/00386_long_in_pk.python
rename to tests/queries/0_stateless/00386_long_in_pk.python
diff --git a/dbms/tests/queries/0_stateless/00483_reading_from_array_structure.reference b/tests/queries/0_stateless/00386_long_in_pk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_reading_from_array_structure.reference
rename to tests/queries/0_stateless/00386_long_in_pk.reference
diff --git a/dbms/tests/queries/0_stateless/00386_long_in_pk.sh b/tests/queries/0_stateless/00386_long_in_pk.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00386_long_in_pk.sh
rename to tests/queries/0_stateless/00386_long_in_pk.sh
diff --git a/dbms/tests/queries/0_stateless/00387_use_client_time_zone.reference b/tests/queries/0_stateless/00387_use_client_time_zone.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00387_use_client_time_zone.reference
rename to tests/queries/0_stateless/00387_use_client_time_zone.reference
diff --git a/dbms/tests/queries/0_stateless/00387_use_client_time_zone.sh b/tests/queries/0_stateless/00387_use_client_time_zone.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00387_use_client_time_zone.sh
rename to tests/queries/0_stateless/00387_use_client_time_zone.sh
diff --git a/dbms/tests/queries/0_stateless/00388_enum_with_totals.reference b/tests/queries/0_stateless/00388_enum_with_totals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00388_enum_with_totals.reference
rename to tests/queries/0_stateless/00388_enum_with_totals.reference
diff --git a/dbms/tests/queries/0_stateless/00388_enum_with_totals.sql b/tests/queries/0_stateless/00388_enum_with_totals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00388_enum_with_totals.sql
rename to tests/queries/0_stateless/00388_enum_with_totals.sql
diff --git a/dbms/tests/queries/0_stateless/00389_concat_operator.reference b/tests/queries/0_stateless/00389_concat_operator.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00389_concat_operator.reference
rename to tests/queries/0_stateless/00389_concat_operator.reference
diff --git a/dbms/tests/queries/0_stateless/00389_concat_operator.sql b/tests/queries/0_stateless/00389_concat_operator.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00389_concat_operator.sql
rename to tests/queries/0_stateless/00389_concat_operator.sql
diff --git a/dbms/tests/queries/0_stateless/00390_array_sort.reference b/tests/queries/0_stateless/00390_array_sort.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00390_array_sort.reference
rename to tests/queries/0_stateless/00390_array_sort.reference
diff --git a/dbms/tests/queries/0_stateless/00390_array_sort.sql b/tests/queries/0_stateless/00390_array_sort.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00390_array_sort.sql
rename to tests/queries/0_stateless/00390_array_sort.sql
diff --git a/dbms/tests/queries/0_stateless/00392_enum_nested_alter.reference b/tests/queries/0_stateless/00392_enum_nested_alter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00392_enum_nested_alter.reference
rename to tests/queries/0_stateless/00392_enum_nested_alter.reference
diff --git a/dbms/tests/queries/0_stateless/00392_enum_nested_alter.sql b/tests/queries/0_stateless/00392_enum_nested_alter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00392_enum_nested_alter.sql
rename to tests/queries/0_stateless/00392_enum_nested_alter.sql
diff --git a/dbms/tests/queries/0_stateless/00393_if_with_constant_condition.reference b/tests/queries/0_stateless/00393_if_with_constant_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00393_if_with_constant_condition.reference
rename to tests/queries/0_stateless/00393_if_with_constant_condition.reference
diff --git a/dbms/tests/queries/0_stateless/00393_if_with_constant_condition.sql b/tests/queries/0_stateless/00393_if_with_constant_condition.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00393_if_with_constant_condition.sql
rename to tests/queries/0_stateless/00393_if_with_constant_condition.sql
diff --git a/dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference b/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference
rename to tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference
diff --git a/dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql b/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql
rename to tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql
diff --git a/dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.reference b/tests/queries/0_stateless/00394_replaceall_vector_fixed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.reference
rename to tests/queries/0_stateless/00394_replaceall_vector_fixed.reference
diff --git a/dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.sql b/tests/queries/0_stateless/00394_replaceall_vector_fixed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.sql
rename to tests/queries/0_stateless/00394_replaceall_vector_fixed.sql
diff --git a/dbms/tests/queries/0_stateless/00395_nullable.reference b/tests/queries/0_stateless/00395_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00395_nullable.reference
rename to tests/queries/0_stateless/00395_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00395_nullable.sql b/tests/queries/0_stateless/00395_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00395_nullable.sql
rename to tests/queries/0_stateless/00395_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00396_uuid.reference b/tests/queries/0_stateless/00396_uuid.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00396_uuid.reference
rename to tests/queries/0_stateless/00396_uuid.reference
diff --git a/dbms/tests/queries/0_stateless/00396_uuid.sql b/tests/queries/0_stateless/00396_uuid.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00396_uuid.sql
rename to tests/queries/0_stateless/00396_uuid.sql
diff --git a/dbms/tests/queries/0_stateless/00397_tsv_format_synonym.reference b/tests/queries/0_stateless/00397_tsv_format_synonym.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00397_tsv_format_synonym.reference
rename to tests/queries/0_stateless/00397_tsv_format_synonym.reference
diff --git a/dbms/tests/queries/0_stateless/00397_tsv_format_synonym.sql b/tests/queries/0_stateless/00397_tsv_format_synonym.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00397_tsv_format_synonym.sql
rename to tests/queries/0_stateless/00397_tsv_format_synonym.sql
diff --git a/dbms/tests/queries/0_stateless/00398_url_functions.reference b/tests/queries/0_stateless/00398_url_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00398_url_functions.reference
rename to tests/queries/0_stateless/00398_url_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00398_url_functions.sql b/tests/queries/0_stateless/00398_url_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00398_url_functions.sql
rename to tests/queries/0_stateless/00398_url_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference b/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference
rename to tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql b/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql
rename to tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00400_client_external_options.reference b/tests/queries/0_stateless/00400_client_external_options.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00400_client_external_options.reference
rename to tests/queries/0_stateless/00400_client_external_options.reference
diff --git a/dbms/tests/queries/0_stateless/00400_client_external_options.sh b/tests/queries/0_stateless/00400_client_external_options.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00400_client_external_options.sh
rename to tests/queries/0_stateless/00400_client_external_options.sh
diff --git a/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.reference b/tests/queries/0_stateless/00401_merge_and_stripelog.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00401_merge_and_stripelog.reference
rename to tests/queries/0_stateless/00401_merge_and_stripelog.reference
diff --git a/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql b/tests/queries/0_stateless/00401_merge_and_stripelog.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql
rename to tests/queries/0_stateless/00401_merge_and_stripelog.sql
diff --git a/dbms/tests/queries/0_stateless/00402_nan_and_extremes.reference b/tests/queries/0_stateless/00402_nan_and_extremes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00402_nan_and_extremes.reference
rename to tests/queries/0_stateless/00402_nan_and_extremes.reference
diff --git a/dbms/tests/queries/0_stateless/00402_nan_and_extremes.sql b/tests/queries/0_stateless/00402_nan_and_extremes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00402_nan_and_extremes.sql
rename to tests/queries/0_stateless/00402_nan_and_extremes.sql
diff --git a/dbms/tests/queries/0_stateless/00403_to_start_of_day.reference b/tests/queries/0_stateless/00403_to_start_of_day.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00403_to_start_of_day.reference
rename to tests/queries/0_stateless/00403_to_start_of_day.reference
diff --git a/dbms/tests/queries/0_stateless/00403_to_start_of_day.sql b/tests/queries/0_stateless/00403_to_start_of_day.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00403_to_start_of_day.sql
rename to tests/queries/0_stateless/00403_to_start_of_day.sql
diff --git a/dbms/tests/queries/0_stateless/00404_null_literal.reference b/tests/queries/0_stateless/00404_null_literal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00404_null_literal.reference
rename to tests/queries/0_stateless/00404_null_literal.reference
diff --git a/dbms/tests/queries/0_stateless/00404_null_literal.sql b/tests/queries/0_stateless/00404_null_literal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00404_null_literal.sql
rename to tests/queries/0_stateless/00404_null_literal.sql
diff --git a/dbms/tests/queries/0_stateless/00405_pretty_formats.reference b/tests/queries/0_stateless/00405_pretty_formats.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00405_pretty_formats.reference
rename to tests/queries/0_stateless/00405_pretty_formats.reference
diff --git a/dbms/tests/queries/0_stateless/00405_pretty_formats.sql b/tests/queries/0_stateless/00405_pretty_formats.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00405_pretty_formats.sql
rename to tests/queries/0_stateless/00405_pretty_formats.sql
diff --git a/dbms/tests/queries/0_stateless/00406_tuples_with_nulls.reference b/tests/queries/0_stateless/00406_tuples_with_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00406_tuples_with_nulls.reference
rename to tests/queries/0_stateless/00406_tuples_with_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00406_tuples_with_nulls.sql b/tests/queries/0_stateless/00406_tuples_with_nulls.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00406_tuples_with_nulls.sql
rename to tests/queries/0_stateless/00406_tuples_with_nulls.sql
diff --git a/dbms/tests/queries/0_stateless/00407_parsing_nulls.reference b/tests/queries/0_stateless/00407_parsing_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00407_parsing_nulls.reference
rename to tests/queries/0_stateless/00407_parsing_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00407_parsing_nulls.sh b/tests/queries/0_stateless/00407_parsing_nulls.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00407_parsing_nulls.sh
rename to tests/queries/0_stateless/00407_parsing_nulls.sh
diff --git a/dbms/tests/queries/0_stateless/00408_http_keep_alive.reference b/tests/queries/0_stateless/00408_http_keep_alive.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00408_http_keep_alive.reference
rename to tests/queries/0_stateless/00408_http_keep_alive.reference
diff --git a/dbms/tests/queries/0_stateless/00408_http_keep_alive.sh b/tests/queries/0_stateless/00408_http_keep_alive.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00408_http_keep_alive.sh
rename to tests/queries/0_stateless/00408_http_keep_alive.sh
diff --git a/dbms/tests/queries/0_stateless/00409_shard_limit_by.reference b/tests/queries/0_stateless/00409_shard_limit_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00409_shard_limit_by.reference
rename to tests/queries/0_stateless/00409_shard_limit_by.reference
diff --git a/dbms/tests/queries/0_stateless/00409_shard_limit_by.sql b/tests/queries/0_stateless/00409_shard_limit_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00409_shard_limit_by.sql
rename to tests/queries/0_stateless/00409_shard_limit_by.sql
diff --git a/dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference b/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference
rename to tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference
diff --git a/dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql b/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql
rename to tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql
diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison.python b/tests/queries/0_stateless/00411_long_accurate_number_comparison.python
similarity index 100%
rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison.python
rename to tests/queries/0_stateless/00411_long_accurate_number_comparison.python
diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference
rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference
diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh
rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh
diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference
rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference
diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh
rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh
diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference
rename to
tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh diff --git a/dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference b/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference rename to tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference diff --git a/dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql b/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql rename to tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql diff --git a/dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.reference b/tests/queries/0_stateless/00412_logical_expressions_optimizer.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.reference rename to tests/queries/0_stateless/00412_logical_expressions_optimizer.reference diff --git a/dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.sql b/tests/queries/0_stateless/00412_logical_expressions_optimizer.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.sql rename to tests/queries/0_stateless/00412_logical_expressions_optimizer.sql diff --git a/dbms/tests/queries/0_stateless/00413_distinct.reference b/tests/queries/0_stateless/00413_distinct.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00413_distinct.reference rename to 
tests/queries/0_stateless/00413_distinct.reference diff --git a/dbms/tests/queries/0_stateless/00413_distinct.sql b/tests/queries/0_stateless/00413_distinct.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00413_distinct.sql rename to tests/queries/0_stateless/00413_distinct.sql diff --git a/dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.reference b/tests/queries/0_stateless/00413_least_greatest_new_behavior.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.reference rename to tests/queries/0_stateless/00413_least_greatest_new_behavior.reference diff --git a/dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.sql b/tests/queries/0_stateless/00413_least_greatest_new_behavior.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.sql rename to tests/queries/0_stateless/00413_least_greatest_new_behavior.sql diff --git a/dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.reference b/tests/queries/0_stateless/00414_time_zones_direct_conversion.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.reference rename to tests/queries/0_stateless/00414_time_zones_direct_conversion.reference diff --git a/dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.sql b/tests/queries/0_stateless/00414_time_zones_direct_conversion.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.sql rename to tests/queries/0_stateless/00414_time_zones_direct_conversion.sql diff --git a/dbms/tests/queries/0_stateless/00415_into_outfile.reference b/tests/queries/0_stateless/00415_into_outfile.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00415_into_outfile.reference rename to tests/queries/0_stateless/00415_into_outfile.reference diff --git a/dbms/tests/queries/0_stateless/00415_into_outfile.sh b/tests/queries/0_stateless/00415_into_outfile.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00415_into_outfile.sh rename to tests/queries/0_stateless/00415_into_outfile.sh diff --git a/dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference rename to tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference diff --git a/dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh rename to tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh diff --git a/dbms/tests/queries/0_stateless/00417_kill_query.reference b/tests/queries/0_stateless/00417_kill_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00417_kill_query.reference rename to tests/queries/0_stateless/00417_kill_query.reference diff --git a/dbms/tests/queries/0_stateless/00417_kill_query.sh b/tests/queries/0_stateless/00417_kill_query.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00417_kill_query.sh rename to tests/queries/0_stateless/00417_kill_query.sh diff --git a/dbms/tests/queries/0_stateless/00417_system_build_options.reference 
b/tests/queries/0_stateless/00417_system_build_options.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00417_system_build_options.reference rename to tests/queries/0_stateless/00417_system_build_options.reference diff --git a/dbms/tests/queries/0_stateless/00417_system_build_options.sh b/tests/queries/0_stateless/00417_system_build_options.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00417_system_build_options.sh rename to tests/queries/0_stateless/00417_system_build_options.sh diff --git a/dbms/tests/queries/0_stateless/00418_input_format_allow_errors.reference b/tests/queries/0_stateless/00418_input_format_allow_errors.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00418_input_format_allow_errors.reference rename to tests/queries/0_stateless/00418_input_format_allow_errors.reference diff --git a/dbms/tests/queries/0_stateless/00418_input_format_allow_errors.sh b/tests/queries/0_stateless/00418_input_format_allow_errors.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00418_input_format_allow_errors.sh rename to tests/queries/0_stateless/00418_input_format_allow_errors.sh diff --git a/dbms/tests/queries/0_stateless/00495_reading_const_zero_column.reference b/tests/queries/0_stateless/00419_show_sql_queries.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00495_reading_const_zero_column.reference rename to tests/queries/0_stateless/00419_show_sql_queries.reference diff --git a/dbms/tests/queries/0_stateless/00419_show_sql_queries.sh b/tests/queries/0_stateless/00419_show_sql_queries.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00419_show_sql_queries.sh rename to tests/queries/0_stateless/00419_show_sql_queries.sh diff --git a/dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference b/tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference rename to tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference diff --git a/dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql b/tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql rename to tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql diff --git a/dbms/tests/queries/0_stateless/00421_storage_merge__table_index.reference b/tests/queries/0_stateless/00421_storage_merge__table_index.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00421_storage_merge__table_index.reference rename to tests/queries/0_stateless/00421_storage_merge__table_index.reference diff --git a/dbms/tests/queries/0_stateless/00421_storage_merge__table_index.sh b/tests/queries/0_stateless/00421_storage_merge__table_index.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00421_storage_merge__table_index.sh rename to tests/queries/0_stateless/00421_storage_merge__table_index.sh diff --git a/dbms/tests/queries/0_stateless/00422_hash_function_constexpr.reference b/tests/queries/0_stateless/00422_hash_function_constexpr.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00422_hash_function_constexpr.reference rename to tests/queries/0_stateless/00422_hash_function_constexpr.reference diff --git a/dbms/tests/queries/0_stateless/00422_hash_function_constexpr.sql 
b/tests/queries/0_stateless/00422_hash_function_constexpr.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00422_hash_function_constexpr.sql rename to tests/queries/0_stateless/00422_hash_function_constexpr.sql diff --git a/dbms/tests/queries/0_stateless/00423_storage_log_single_thread.reference b/tests/queries/0_stateless/00423_storage_log_single_thread.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00423_storage_log_single_thread.reference rename to tests/queries/0_stateless/00423_storage_log_single_thread.reference diff --git a/dbms/tests/queries/0_stateless/00423_storage_log_single_thread.sql b/tests/queries/0_stateless/00423_storage_log_single_thread.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00423_storage_log_single_thread.sql rename to tests/queries/0_stateless/00423_storage_log_single_thread.sql diff --git a/dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference b/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference rename to tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql b/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql rename to tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00425_count_nullable.reference b/tests/queries/0_stateless/00425_count_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00425_count_nullable.reference rename to tests/queries/0_stateless/00425_count_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00425_count_nullable.sql b/tests/queries/0_stateless/00425_count_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00425_count_nullable.sql rename to tests/queries/0_stateless/00425_count_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00426_nulls_sorting.reference b/tests/queries/0_stateless/00426_nulls_sorting.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00426_nulls_sorting.reference rename to tests/queries/0_stateless/00426_nulls_sorting.reference diff --git a/dbms/tests/queries/0_stateless/00426_nulls_sorting.sql b/tests/queries/0_stateless/00426_nulls_sorting.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00426_nulls_sorting.sql rename to tests/queries/0_stateless/00426_nulls_sorting.sql diff --git a/dbms/tests/queries/0_stateless/00427_alter_primary_key.reference b/tests/queries/0_stateless/00427_alter_primary_key.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00427_alter_primary_key.reference rename to tests/queries/0_stateless/00427_alter_primary_key.reference diff --git a/dbms/tests/queries/0_stateless/00427_alter_primary_key.sh b/tests/queries/0_stateless/00427_alter_primary_key.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00427_alter_primary_key.sh rename to tests/queries/0_stateless/00427_alter_primary_key.sh diff --git a/dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference b/tests/queries/0_stateless/00429_long_http_bufferization.reference similarity index 100% 
rename from dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference rename to tests/queries/0_stateless/00429_long_http_bufferization.reference diff --git a/dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh b/tests/queries/0_stateless/00429_long_http_bufferization.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh rename to tests/queries/0_stateless/00429_long_http_bufferization.sh diff --git a/dbms/tests/queries/0_stateless/00429_point_in_ellipses.reference b/tests/queries/0_stateless/00429_point_in_ellipses.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00429_point_in_ellipses.reference rename to tests/queries/0_stateless/00429_point_in_ellipses.reference diff --git a/dbms/tests/queries/0_stateless/00429_point_in_ellipses.sql b/tests/queries/0_stateless/00429_point_in_ellipses.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00429_point_in_ellipses.sql rename to tests/queries/0_stateless/00429_point_in_ellipses.sql diff --git a/dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference b/tests/queries/0_stateless/00430_https_server.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference rename to tests/queries/0_stateless/00430_https_server.reference diff --git a/dbms/tests/queries/0_stateless/00430_https_server.sh b/tests/queries/0_stateless/00430_https_server.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00430_https_server.sh rename to tests/queries/0_stateless/00430_https_server.sh diff --git a/dbms/tests/queries/0_stateless/00431_if_nulls.reference b/tests/queries/0_stateless/00431_if_nulls.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00431_if_nulls.reference rename to tests/queries/0_stateless/00431_if_nulls.reference diff --git a/dbms/tests/queries/0_stateless/00431_if_nulls.sql b/tests/queries/0_stateless/00431_if_nulls.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00431_if_nulls.sql rename to tests/queries/0_stateless/00431_if_nulls.sql diff --git a/dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference b/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference rename to tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference diff --git a/dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql b/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql rename to tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql diff --git a/dbms/tests/queries/0_stateless/00433_ifnull.reference b/tests/queries/0_stateless/00433_ifnull.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00433_ifnull.reference rename to tests/queries/0_stateless/00433_ifnull.reference diff --git a/dbms/tests/queries/0_stateless/00433_ifnull.sql b/tests/queries/0_stateless/00433_ifnull.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00433_ifnull.sql rename to tests/queries/0_stateless/00433_ifnull.sql diff --git 
a/dbms/tests/queries/0_stateless/00434_tonullable.reference b/tests/queries/0_stateless/00434_tonullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00434_tonullable.reference rename to tests/queries/0_stateless/00434_tonullable.reference diff --git a/dbms/tests/queries/0_stateless/00434_tonullable.sql b/tests/queries/0_stateless/00434_tonullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00434_tonullable.sql rename to tests/queries/0_stateless/00434_tonullable.sql diff --git a/dbms/tests/queries/0_stateless/00435_coalesce.reference b/tests/queries/0_stateless/00435_coalesce.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00435_coalesce.reference rename to tests/queries/0_stateless/00435_coalesce.reference diff --git a/dbms/tests/queries/0_stateless/00435_coalesce.sql b/tests/queries/0_stateless/00435_coalesce.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00435_coalesce.sql rename to tests/queries/0_stateless/00435_coalesce.sql diff --git a/dbms/tests/queries/0_stateless/00436_convert_charset.reference b/tests/queries/0_stateless/00436_convert_charset.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00436_convert_charset.reference rename to tests/queries/0_stateless/00436_convert_charset.reference diff --git a/dbms/tests/queries/0_stateless/00436_convert_charset.sql b/tests/queries/0_stateless/00436_convert_charset.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00436_convert_charset.sql rename to tests/queries/0_stateless/00436_convert_charset.sql diff --git a/dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference b/tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference rename to tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference diff --git a/dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql b/tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql rename to tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql diff --git a/dbms/tests/queries/0_stateless/00437_nulls_first_last.reference b/tests/queries/0_stateless/00437_nulls_first_last.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00437_nulls_first_last.reference rename to tests/queries/0_stateless/00437_nulls_first_last.reference diff --git a/dbms/tests/queries/0_stateless/00437_nulls_first_last.sql b/tests/queries/0_stateless/00437_nulls_first_last.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00437_nulls_first_last.sql rename to tests/queries/0_stateless/00437_nulls_first_last.sql diff --git a/dbms/tests/queries/0_stateless/00438_bit_rotate.reference b/tests/queries/0_stateless/00438_bit_rotate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00438_bit_rotate.reference rename to tests/queries/0_stateless/00438_bit_rotate.reference diff --git a/dbms/tests/queries/0_stateless/00438_bit_rotate.sql b/tests/queries/0_stateless/00438_bit_rotate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00438_bit_rotate.sql rename to tests/queries/0_stateless/00438_bit_rotate.sql diff --git a/dbms/tests/queries/0_stateless/00439_fixed_string_filter.reference 
b/tests/queries/0_stateless/00439_fixed_string_filter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00439_fixed_string_filter.reference rename to tests/queries/0_stateless/00439_fixed_string_filter.reference diff --git a/dbms/tests/queries/0_stateless/00439_fixed_string_filter.sql b/tests/queries/0_stateless/00439_fixed_string_filter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00439_fixed_string_filter.sql rename to tests/queries/0_stateless/00439_fixed_string_filter.sql diff --git a/dbms/tests/queries/0_stateless/00440_nulls_merge_tree.reference b/tests/queries/0_stateless/00440_nulls_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00440_nulls_merge_tree.reference rename to tests/queries/0_stateless/00440_nulls_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00440_nulls_merge_tree.sql b/tests/queries/0_stateless/00440_nulls_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00440_nulls_merge_tree.sql rename to tests/queries/0_stateless/00440_nulls_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00441_nulls_in.reference b/tests/queries/0_stateless/00441_nulls_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00441_nulls_in.reference rename to tests/queries/0_stateless/00441_nulls_in.reference diff --git a/dbms/tests/queries/0_stateless/00441_nulls_in.sql b/tests/queries/0_stateless/00441_nulls_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00441_nulls_in.sql rename to tests/queries/0_stateless/00441_nulls_in.sql diff --git a/dbms/tests/queries/0_stateless/00442_filter_by_nullable.reference b/tests/queries/0_stateless/00442_filter_by_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00442_filter_by_nullable.reference rename to tests/queries/0_stateless/00442_filter_by_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00442_filter_by_nullable.sql b/tests/queries/0_stateless/00442_filter_by_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00442_filter_by_nullable.sql rename to tests/queries/0_stateless/00442_filter_by_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference b/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference rename to tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference diff --git a/dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh b/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh rename to tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh diff --git a/dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference b/tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference rename to tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference diff --git a/dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh b/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh similarity index 100% rename from 
dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh rename to tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh diff --git a/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.reference b/tests/queries/0_stateless/00443_preferred_block_size_bytes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.reference rename to tests/queries/0_stateless/00443_preferred_block_size_bytes.reference diff --git a/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh rename to tests/queries/0_stateless/00443_preferred_block_size_bytes.sh diff --git a/dbms/tests/queries/0_stateless/00444_join_use_nulls.reference b/tests/queries/0_stateless/00444_join_use_nulls.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00444_join_use_nulls.reference rename to tests/queries/0_stateless/00444_join_use_nulls.reference diff --git a/dbms/tests/queries/0_stateless/00444_join_use_nulls.sql b/tests/queries/0_stateless/00444_join_use_nulls.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00444_join_use_nulls.sql rename to tests/queries/0_stateless/00444_join_use_nulls.sql diff --git a/dbms/tests/queries/0_stateless/00445_join_nullable_keys.reference b/tests/queries/0_stateless/00445_join_nullable_keys.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00445_join_nullable_keys.reference rename to tests/queries/0_stateless/00445_join_nullable_keys.reference diff --git a/dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql b/tests/queries/0_stateless/00445_join_nullable_keys.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql rename to tests/queries/0_stateless/00445_join_nullable_keys.sql diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference rename to tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh rename to tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference rename to tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql rename to tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql diff --git 
a/dbms/tests/queries/0_stateless/00447_foreach_modifier.reference b/tests/queries/0_stateless/00447_foreach_modifier.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00447_foreach_modifier.reference rename to tests/queries/0_stateless/00447_foreach_modifier.reference diff --git a/dbms/tests/queries/0_stateless/00447_foreach_modifier.sql b/tests/queries/0_stateless/00447_foreach_modifier.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00447_foreach_modifier.sql rename to tests/queries/0_stateless/00447_foreach_modifier.sql diff --git a/dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference b/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference rename to tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference diff --git a/dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql b/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql rename to tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql diff --git a/dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.reference b/tests/queries/0_stateless/00448_to_string_cut_to_zero.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.reference rename to tests/queries/0_stateless/00448_to_string_cut_to_zero.reference diff --git a/dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.sql b/tests/queries/0_stateless/00448_to_string_cut_to_zero.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.sql rename to tests/queries/0_stateless/00448_to_string_cut_to_zero.sql diff --git a/dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference b/tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference rename to tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference diff --git a/dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql b/tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql rename to tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql diff --git a/dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.reference b/tests/queries/0_stateless/00450_higher_order_and_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.reference rename to tests/queries/0_stateless/00450_higher_order_and_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.sql b/tests/queries/0_stateless/00450_higher_order_and_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.sql rename to tests/queries/0_stateless/00450_higher_order_and_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.reference b/tests/queries/0_stateless/00451_left_array_join_and_constants.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.reference rename to 
tests/queries/0_stateless/00451_left_array_join_and_constants.reference diff --git a/dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.sql b/tests/queries/0_stateless/00451_left_array_join_and_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.sql rename to tests/queries/0_stateless/00451_left_array_join_and_constants.sql diff --git a/dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.reference b/tests/queries/0_stateless/00452_left_array_join_and_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.reference rename to tests/queries/0_stateless/00452_left_array_join_and_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.sql b/tests/queries/0_stateless/00452_left_array_join_and_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.sql rename to tests/queries/0_stateless/00452_left_array_join_and_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00453_cast_enum.reference b/tests/queries/0_stateless/00453_cast_enum.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00453_cast_enum.reference rename to tests/queries/0_stateless/00453_cast_enum.reference diff --git a/dbms/tests/queries/0_stateless/00453_cast_enum.sql b/tests/queries/0_stateless/00453_cast_enum.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00453_cast_enum.sql rename to tests/queries/0_stateless/00453_cast_enum.sql diff --git a/dbms/tests/queries/0_stateless/00453_top_k.reference b/tests/queries/0_stateless/00453_top_k.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00453_top_k.reference rename to tests/queries/0_stateless/00453_top_k.reference diff --git a/dbms/tests/queries/0_stateless/00453_top_k.sql b/tests/queries/0_stateless/00453_top_k.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00453_top_k.sql rename to tests/queries/0_stateless/00453_top_k.sql diff --git a/dbms/tests/queries/0_stateless/00456_alter_nullable.reference b/tests/queries/0_stateless/00456_alter_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00456_alter_nullable.reference rename to tests/queries/0_stateless/00456_alter_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00456_alter_nullable.sql b/tests/queries/0_stateless/00456_alter_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00456_alter_nullable.sql rename to tests/queries/0_stateless/00456_alter_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference b/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference rename to tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql b/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql rename to tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00458_merge_type_cast.reference b/tests/queries/0_stateless/00458_merge_type_cast.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00458_merge_type_cast.reference rename to tests/queries/0_stateless/00458_merge_type_cast.reference diff --git a/dbms/tests/queries/0_stateless/00458_merge_type_cast.sql b/tests/queries/0_stateless/00458_merge_type_cast.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00458_merge_type_cast.sql rename to tests/queries/0_stateless/00458_merge_type_cast.sql diff --git a/dbms/tests/queries/0_stateless/00459_group_array_insert_at.reference b/tests/queries/0_stateless/00459_group_array_insert_at.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00459_group_array_insert_at.reference rename to tests/queries/0_stateless/00459_group_array_insert_at.reference diff --git a/dbms/tests/queries/0_stateless/00459_group_array_insert_at.sql b/tests/queries/0_stateless/00459_group_array_insert_at.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00459_group_array_insert_at.sql rename to tests/queries/0_stateless/00459_group_array_insert_at.sql diff --git a/dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference b/tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference rename to tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference diff --git a/dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql b/tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql rename to tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql diff --git a/dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.reference b/tests/queries/0_stateless/00461_default_value_of_argument_type.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.reference rename to tests/queries/0_stateless/00461_default_value_of_argument_type.reference diff --git a/dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.sql b/tests/queries/0_stateless/00461_default_value_of_argument_type.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.sql rename to tests/queries/0_stateless/00461_default_value_of_argument_type.sql diff --git a/dbms/tests/queries/0_stateless/00462_json_true_false_literals.reference b/tests/queries/0_stateless/00462_json_true_false_literals.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00462_json_true_false_literals.reference rename to tests/queries/0_stateless/00462_json_true_false_literals.reference diff --git a/dbms/tests/queries/0_stateless/00462_json_true_false_literals.sql b/tests/queries/0_stateless/00462_json_true_false_literals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00462_json_true_false_literals.sql rename to tests/queries/0_stateless/00462_json_true_false_literals.sql diff --git a/dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference rename to tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference diff --git a/dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh 
b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh rename to tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh diff --git a/dbms/tests/queries/0_stateless/00464_array_element_out_of_range.reference b/tests/queries/0_stateless/00464_array_element_out_of_range.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00464_array_element_out_of_range.reference rename to tests/queries/0_stateless/00464_array_element_out_of_range.reference diff --git a/dbms/tests/queries/0_stateless/00464_array_element_out_of_range.sql b/tests/queries/0_stateless/00464_array_element_out_of_range.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00464_array_element_out_of_range.sql rename to tests/queries/0_stateless/00464_array_element_out_of_range.sql diff --git a/dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.reference b/tests/queries/0_stateless/00464_sort_all_constant_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.reference rename to tests/queries/0_stateless/00464_sort_all_constant_columns.reference diff --git a/dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.sql b/tests/queries/0_stateless/00464_sort_all_constant_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.sql rename to tests/queries/0_stateless/00464_sort_all_constant_columns.sql diff --git a/dbms/tests/queries/0_stateless/00465_nullable_default.reference b/tests/queries/0_stateless/00465_nullable_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00465_nullable_default.reference rename to tests/queries/0_stateless/00465_nullable_default.reference diff --git a/dbms/tests/queries/0_stateless/00465_nullable_default.sql b/tests/queries/0_stateless/00465_nullable_default.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00465_nullable_default.sql rename to tests/queries/0_stateless/00465_nullable_default.sql diff --git a/dbms/tests/queries/0_stateless/00466_comments_in_keyword.reference b/tests/queries/0_stateless/00466_comments_in_keyword.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00466_comments_in_keyword.reference rename to tests/queries/0_stateless/00466_comments_in_keyword.reference diff --git a/dbms/tests/queries/0_stateless/00466_comments_in_keyword.sql b/tests/queries/0_stateless/00466_comments_in_keyword.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00466_comments_in_keyword.sql rename to tests/queries/0_stateless/00466_comments_in_keyword.sql diff --git a/dbms/tests/queries/0_stateless/00467_qualified_names.reference b/tests/queries/0_stateless/00467_qualified_names.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00467_qualified_names.reference rename to tests/queries/0_stateless/00467_qualified_names.reference diff --git a/dbms/tests/queries/0_stateless/00467_qualified_names.sql b/tests/queries/0_stateless/00467_qualified_names.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00467_qualified_names.sql rename to tests/queries/0_stateless/00467_qualified_names.sql diff --git a/dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference 
b/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference rename to tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference diff --git a/dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql b/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql rename to tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql diff --git a/dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference b/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference rename to tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference diff --git a/dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql b/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql rename to tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql diff --git a/dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference b/tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference rename to tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference diff --git a/dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql b/tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql rename to tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql diff --git a/dbms/tests/queries/0_stateless/00471_sql_style_quoting.reference b/tests/queries/0_stateless/00471_sql_style_quoting.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00471_sql_style_quoting.reference rename to tests/queries/0_stateless/00471_sql_style_quoting.reference diff --git a/dbms/tests/queries/0_stateless/00471_sql_style_quoting.sql b/tests/queries/0_stateless/00471_sql_style_quoting.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00471_sql_style_quoting.sql rename to tests/queries/0_stateless/00471_sql_style_quoting.sql diff --git a/dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference b/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference rename to tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference diff --git a/dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql b/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql rename to tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql diff --git 
a/dbms/tests/queries/0_stateless/00609_prewhere_and_default.reference b/tests/queries/0_stateless/00472_create_view_if_not_exists.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00609_prewhere_and_default.reference
rename to tests/queries/0_stateless/00472_create_view_if_not_exists.reference
diff --git a/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql b/tests/queries/0_stateless/00472_create_view_if_not_exists.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql
rename to tests/queries/0_stateless/00472_create_view_if_not_exists.sql
diff --git a/dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference b/tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference
rename to tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference
diff --git a/dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh b/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh
rename to tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh
diff --git a/dbms/tests/queries/0_stateless/00474_readonly_settings.reference b/tests/queries/0_stateless/00474_readonly_settings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00474_readonly_settings.reference
rename to tests/queries/0_stateless/00474_readonly_settings.reference
diff --git a/dbms/tests/queries/0_stateless/00474_readonly_settings.sh b/tests/queries/0_stateless/00474_readonly_settings.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00474_readonly_settings.sh
rename to tests/queries/0_stateless/00474_readonly_settings.sh
diff --git a/dbms/tests/queries/0_stateless/00475_in_join_db_table.reference b/tests/queries/0_stateless/00475_in_join_db_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00475_in_join_db_table.reference
rename to tests/queries/0_stateless/00475_in_join_db_table.reference
diff --git a/dbms/tests/queries/0_stateless/00475_in_join_db_table.sql b/tests/queries/0_stateless/00475_in_join_db_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00475_in_join_db_table.sql
rename to tests/queries/0_stateless/00475_in_join_db_table.sql
diff --git a/dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference b/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
rename to tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
diff --git a/dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql b/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql
rename to tests/queries/0_stateless/00476_pretty_formats_and_widths.sql
diff --git a/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.reference b/tests/queries/0_stateless/00477_parsing_data_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.reference
rename to tests/queries/0_stateless/00477_parsing_data_types.reference
diff --git a/dbms/tests/queries/0_stateless/00477_parsing_data_types.sql b/tests/queries/0_stateless/00477_parsing_data_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00477_parsing_data_types.sql
rename to tests/queries/0_stateless/00477_parsing_data_types.sql
diff --git a/dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.reference b/tests/queries/0_stateless/00479_date_and_datetime_to_number.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.reference
rename to tests/queries/0_stateless/00479_date_and_datetime_to_number.reference
diff --git a/dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.sql b/tests/queries/0_stateless/00479_date_and_datetime_to_number.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.sql
rename to tests/queries/0_stateless/00479_date_and_datetime_to_number.sql
diff --git a/dbms/tests/queries/0_stateless/00480_mac_addresses.reference b/tests/queries/0_stateless/00480_mac_addresses.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00480_mac_addresses.reference
rename to tests/queries/0_stateless/00480_mac_addresses.reference
diff --git a/dbms/tests/queries/0_stateless/00480_mac_addresses.sql b/tests/queries/0_stateless/00480_mac_addresses.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00480_mac_addresses.sql
rename to tests/queries/0_stateless/00480_mac_addresses.sql
diff --git a/dbms/tests/queries/0_stateless/00627_recursive_alias.reference b/tests/queries/0_stateless/00481_create_view_for_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00627_recursive_alias.reference
rename to tests/queries/0_stateless/00481_create_view_for_null.reference
diff --git a/dbms/tests/queries/0_stateless/00481_create_view_for_null.sql b/tests/queries/0_stateless/00481_create_view_for_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_create_view_for_null.sql
rename to tests/queries/0_stateless/00481_create_view_for_null.sql
diff --git a/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.reference b/tests/queries/0_stateless/00481_reading_from_last_granula.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_reading_from_last_granula.reference
rename to tests/queries/0_stateless/00481_reading_from_last_granula.reference
diff --git a/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql b/tests/queries/0_stateless/00481_reading_from_last_granula.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql
rename to tests/queries/0_stateless/00481_reading_from_last_granula.sql
diff --git a/dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.reference b/tests/queries/0_stateless/00482_subqueries_and_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.reference
rename to tests/queries/0_stateless/00482_subqueries_and_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.sql b/tests/queries/0_stateless/00482_subqueries_and_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.sql
rename to tests/queries/0_stateless/00482_subqueries_and_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00483_cast_syntax.reference b/tests/queries/0_stateless/00483_cast_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_cast_syntax.reference
rename to tests/queries/0_stateless/00483_cast_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/00483_cast_syntax.sql b/tests/queries/0_stateless/00483_cast_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_cast_syntax.sql
rename to tests/queries/0_stateless/00483_cast_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/00634_logging_shard.reference b/tests/queries/0_stateless/00483_reading_from_array_structure.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00634_logging_shard.reference
rename to tests/queries/0_stateless/00483_reading_from_array_structure.reference
diff --git a/dbms/tests/queries/0_stateless/00483_reading_from_array_structure.sql b/tests/queries/0_stateless/00483_reading_from_array_structure.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_reading_from_array_structure.sql
rename to tests/queries/0_stateless/00483_reading_from_array_structure.sql
diff --git a/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference b/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference
rename to tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference
diff --git a/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql b/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
rename to tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
diff --git a/dbms/tests/queries/0_stateless/00485_http_insert_format.reference b/tests/queries/0_stateless/00485_http_insert_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00485_http_insert_format.reference
rename to tests/queries/0_stateless/00485_http_insert_format.reference
diff --git a/dbms/tests/queries/0_stateless/00485_http_insert_format.sh b/tests/queries/0_stateless/00485_http_insert_format.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00485_http_insert_format.sh
rename to tests/queries/0_stateless/00485_http_insert_format.sh
diff --git a/dbms/tests/queries/0_stateless/00486_if_fixed_string.reference b/tests/queries/0_stateless/00486_if_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00486_if_fixed_string.reference
rename to tests/queries/0_stateless/00486_if_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/00486_if_fixed_string.sql b/tests/queries/0_stateless/00486_if_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00486_if_fixed_string.sql
rename to tests/queries/0_stateless/00486_if_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/00487_if_array_fixed_string.reference b/tests/queries/0_stateless/00487_if_array_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00487_if_array_fixed_string.reference
rename to tests/queries/0_stateless/00487_if_array_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/00487_if_array_fixed_string.sql b/tests/queries/0_stateless/00487_if_array_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00487_if_array_fixed_string.sql
rename to tests/queries/0_stateless/00487_if_array_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/00488_non_ascii_column_names.reference b/tests/queries/0_stateless/00488_non_ascii_column_names.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00488_non_ascii_column_names.reference
rename to tests/queries/0_stateless/00488_non_ascii_column_names.reference
diff --git a/dbms/tests/queries/0_stateless/00488_non_ascii_column_names.sql b/tests/queries/0_stateless/00488_non_ascii_column_names.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00488_non_ascii_column_names.sql
rename to tests/queries/0_stateless/00488_non_ascii_column_names.sql
diff --git a/dbms/tests/queries/0_stateless/00489_pk_subexpression.reference b/tests/queries/0_stateless/00489_pk_subexpression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00489_pk_subexpression.reference
rename to tests/queries/0_stateless/00489_pk_subexpression.reference
diff --git a/dbms/tests/queries/0_stateless/00489_pk_subexpression.sql b/tests/queries/0_stateless/00489_pk_subexpression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00489_pk_subexpression.sql
rename to tests/queries/0_stateless/00489_pk_subexpression.sql
diff --git a/dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference b/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference
rename to tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference
diff --git a/dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql b/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql
rename to tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql
diff --git a/dbms/tests/queries/0_stateless/00490_with_select.reference b/tests/queries/0_stateless/00490_with_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_with_select.reference
rename to tests/queries/0_stateless/00490_with_select.reference
diff --git a/dbms/tests/queries/0_stateless/00490_with_select.sql b/tests/queries/0_stateless/00490_with_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_with_select.sql
rename to tests/queries/0_stateless/00490_with_select.sql
diff --git a/dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference b/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference
rename to tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference
diff --git a/dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql b/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql
rename to tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql
diff --git a/dbms/tests/queries/0_stateless/00492_drop_temporary_table.reference b/tests/queries/0_stateless/00492_drop_temporary_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00492_drop_temporary_table.reference
rename to tests/queries/0_stateless/00492_drop_temporary_table.reference
diff --git a/dbms/tests/queries/0_stateless/00492_drop_temporary_table.sql b/tests/queries/0_stateless/00492_drop_temporary_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00492_drop_temporary_table.sql
rename to tests/queries/0_stateless/00492_drop_temporary_table.sql
diff --git a/dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.reference b/tests/queries/0_stateless/00493_substring_of_fixedstring.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.reference
rename to tests/queries/0_stateless/00493_substring_of_fixedstring.reference
diff --git a/dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.sql b/tests/queries/0_stateless/00493_substring_of_fixedstring.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.sql
rename to tests/queries/0_stateless/00493_substring_of_fixedstring.sql
diff --git a/dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference b/tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference
rename to tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql b/tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql
rename to tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference b/tests/queries/0_stateless/00495_reading_const_zero_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference
rename to tests/queries/0_stateless/00495_reading_const_zero_column.reference
diff --git a/dbms/tests/queries/0_stateless/00495_reading_const_zero_column.sql b/tests/queries/0_stateless/00495_reading_const_zero_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00495_reading_const_zero_column.sql
rename to tests/queries/0_stateless/00495_reading_const_zero_column.sql
diff --git a/dbms/tests/queries/0_stateless/00496_substring_negative_offset.reference b/tests/queries/0_stateless/00496_substring_negative_offset.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00496_substring_negative_offset.reference
rename to tests/queries/0_stateless/00496_substring_negative_offset.reference
diff --git a/dbms/tests/queries/0_stateless/00496_substring_negative_offset.sql b/tests/queries/0_stateless/00496_substring_negative_offset.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00496_substring_negative_offset.sql
rename to tests/queries/0_stateless/00496_substring_negative_offset.sql
diff --git a/dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.reference b/tests/queries/0_stateless/00497_whitespaces_in_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.reference
rename to tests/queries/0_stateless/00497_whitespaces_in_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.sh b/tests/queries/0_stateless/00497_whitespaces_in_insert.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.sh
rename to tests/queries/0_stateless/00497_whitespaces_in_insert.sh
diff --git a/dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference b/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference
rename to tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference
diff --git a/dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql b/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql
rename to tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql
diff --git a/dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference b/tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference
rename to tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql b/tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql
rename to tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00499_json_enum_insert.reference b/tests/queries/0_stateless/00499_json_enum_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00499_json_enum_insert.reference
rename to tests/queries/0_stateless/00499_json_enum_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00499_json_enum_insert.sql b/tests/queries/0_stateless/00499_json_enum_insert.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00499_json_enum_insert.sql
rename to tests/queries/0_stateless/00499_json_enum_insert.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon.reference b/tests/queries/0_stateless/00500_point_in_polygon.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon.reference
rename to tests/queries/0_stateless/00500_point_in_polygon.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon.sql b/tests/queries/0_stateless/00500_point_in_polygon.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon.sql
rename to tests/queries/0_stateless/00500_point_in_polygon.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.reference b/tests/queries/0_stateless/00500_point_in_polygon_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.reference
rename to tests/queries/0_stateless/00500_point_in_polygon_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.sql b/tests/queries/0_stateless/00500_point_in_polygon_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.sql
rename to tests/queries/0_stateless/00500_point_in_polygon_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference b/tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql b/tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference b/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql b/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql
diff --git a/dbms/tests/queries/0_stateless/00501_http_head.reference b/tests/queries/0_stateless/00501_http_head.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00501_http_head.reference
rename to tests/queries/0_stateless/00501_http_head.reference
diff --git a/dbms/tests/queries/0_stateless/00501_http_head.sh b/tests/queries/0_stateless/00501_http_head.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00501_http_head.sh
rename to tests/queries/0_stateless/00501_http_head.sh
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.reference b/tests/queries/0_stateless/00502_custom_partitioning_local.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_local.reference
rename to tests/queries/0_stateless/00502_custom_partitioning_local.reference
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql b/tests/queries/0_stateless/00502_custom_partitioning_local.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql
rename to tests/queries/0_stateless/00502_custom_partitioning_local.sql
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00502_string_concat_with_array.reference b/tests/queries/0_stateless/00502_string_concat_with_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_string_concat_with_array.reference
rename to tests/queries/0_stateless/00502_string_concat_with_array.reference
diff --git a/dbms/tests/queries/0_stateless/00502_string_concat_with_array.sql b/tests/queries/0_stateless/00502_string_concat_with_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_string_concat_with_array.sql
rename to tests/queries/0_stateless/00502_string_concat_with_array.sql
diff --git a/dbms/tests/queries/0_stateless/00502_sum_map.reference b/tests/queries/0_stateless/00502_sum_map.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_sum_map.reference
rename to tests/queries/0_stateless/00502_sum_map.reference
diff --git a/dbms/tests/queries/0_stateless/00502_sum_map.sql b/tests/queries/0_stateless/00502_sum_map.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_sum_map.sql
rename to tests/queries/0_stateless/00502_sum_map.sql
diff --git a/dbms/tests/queries/0_stateless/00503_cast_const_nullable.reference b/tests/queries/0_stateless/00503_cast_const_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00503_cast_const_nullable.reference
rename to tests/queries/0_stateless/00503_cast_const_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00503_cast_const_nullable.sql b/tests/queries/0_stateless/00503_cast_const_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00503_cast_const_nullable.sql
rename to tests/queries/0_stateless/00503_cast_const_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00504_insert_miss_columns.reference b/tests/queries/0_stateless/00504_insert_miss_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_insert_miss_columns.reference
rename to tests/queries/0_stateless/00504_insert_miss_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00504_insert_miss_columns.sh b/tests/queries/0_stateless/00504_insert_miss_columns.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_insert_miss_columns.sh
rename to tests/queries/0_stateless/00504_insert_miss_columns.sh
diff --git a/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.reference b/tests/queries/0_stateless/00504_mergetree_arrays_rw.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.reference
rename to tests/queries/0_stateless/00504_mergetree_arrays_rw.reference
diff --git a/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql b/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
rename to tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
diff --git a/dbms/tests/queries/0_stateless/00505_distributed_secure.data b/tests/queries/0_stateless/00505_distributed_secure.data
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_distributed_secure.data
rename to tests/queries/0_stateless/00505_distributed_secure.data
diff --git a/dbms/tests/queries/0_stateless/00505_secure.reference b/tests/queries/0_stateless/00505_secure.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_secure.reference
rename to tests/queries/0_stateless/00505_secure.reference
diff --git a/dbms/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_secure.sh
rename to tests/queries/0_stateless/00505_secure.sh
diff --git a/dbms/tests/queries/0_stateless/00505_shard_secure.reference b/tests/queries/0_stateless/00505_shard_secure.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_shard_secure.reference
rename to tests/queries/0_stateless/00505_shard_secure.reference
diff --git a/dbms/tests/queries/0_stateless/00505_shard_secure.sh b/tests/queries/0_stateless/00505_shard_secure.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_shard_secure.sh
rename to tests/queries/0_stateless/00505_shard_secure.sh
diff --git a/dbms/tests/queries/0_stateless/00506_shard_global_in_union.reference b/tests/queries/0_stateless/00506_shard_global_in_union.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_shard_global_in_union.reference
rename to tests/queries/0_stateless/00506_shard_global_in_union.reference
diff --git a/dbms/tests/queries/0_stateless/00506_shard_global_in_union.sql b/tests/queries/0_stateless/00506_shard_global_in_union.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_shard_global_in_union.sql
rename to tests/queries/0_stateless/00506_shard_global_in_union.sql
diff --git a/dbms/tests/queries/0_stateless/00506_union_distributed.reference b/tests/queries/0_stateless/00506_union_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_union_distributed.reference
rename to tests/queries/0_stateless/00506_union_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00506_union_distributed.sql b/tests/queries/0_stateless/00506_union_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_union_distributed.sql
rename to tests/queries/0_stateless/00506_union_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00507_array_no_params.reference b/tests/queries/0_stateless/00507_array_no_params.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_array_no_params.reference
rename to tests/queries/0_stateless/00507_array_no_params.reference
diff --git a/dbms/tests/queries/0_stateless/00507_array_no_params.sh b/tests/queries/0_stateless/00507_array_no_params.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_array_no_params.sh
rename to tests/queries/0_stateless/00507_array_no_params.sh
diff --git a/dbms/tests/queries/0_stateless/00507_sumwithoverflow.reference b/tests/queries/0_stateless/00507_sumwithoverflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_sumwithoverflow.reference
rename to tests/queries/0_stateless/00507_sumwithoverflow.reference
diff --git a/dbms/tests/queries/0_stateless/00507_sumwithoverflow.sql b/tests/queries/0_stateless/00507_sumwithoverflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_sumwithoverflow.sql
rename to tests/queries/0_stateless/00507_sumwithoverflow.sql
diff --git a/dbms/tests/queries/0_stateless/00508_materialized_view_to.reference b/tests/queries/0_stateless/00508_materialized_view_to.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00508_materialized_view_to.reference
rename to tests/queries/0_stateless/00508_materialized_view_to.reference
diff --git a/dbms/tests/queries/0_stateless/00508_materialized_view_to.sql b/tests/queries/0_stateless/00508_materialized_view_to.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00508_materialized_view_to.sql
rename to tests/queries/0_stateless/00508_materialized_view_to.sql
diff --git a/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference b/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
rename to tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql b/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql
rename to tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference
rename to tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
rename to tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00511_get_size_of_enum.reference b/tests/queries/0_stateless/00511_get_size_of_enum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00511_get_size_of_enum.reference
rename to tests/queries/0_stateless/00511_get_size_of_enum.reference
diff --git a/dbms/tests/queries/0_stateless/00511_get_size_of_enum.sql b/tests/queries/0_stateless/00511_get_size_of_enum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00511_get_size_of_enum.sql
rename to tests/queries/0_stateless/00511_get_size_of_enum.sql
diff --git a/dbms/tests/queries/0_stateless/00512_fractional_time_zones.reference b/tests/queries/0_stateless/00512_fractional_time_zones.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00512_fractional_time_zones.reference
rename to tests/queries/0_stateless/00512_fractional_time_zones.reference
diff --git a/dbms/tests/queries/0_stateless/00512_fractional_time_zones.sh b/tests/queries/0_stateless/00512_fractional_time_zones.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00512_fractional_time_zones.sh
rename to tests/queries/0_stateless/00512_fractional_time_zones.sh
diff --git a/dbms/tests/queries/0_stateless/00513_fractional_time_zones.reference b/tests/queries/0_stateless/00513_fractional_time_zones.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00513_fractional_time_zones.reference
rename to tests/queries/0_stateless/00513_fractional_time_zones.reference
diff --git a/dbms/tests/queries/0_stateless/00513_fractional_time_zones.sql b/tests/queries/0_stateless/00513_fractional_time_zones.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00513_fractional_time_zones.sql
rename to tests/queries/0_stateless/00513_fractional_time_zones.sql
diff --git a/dbms/tests/queries/0_stateless/00514_interval_operators.reference b/tests/queries/0_stateless/00514_interval_operators.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00514_interval_operators.reference
rename to tests/queries/0_stateless/00514_interval_operators.reference
diff --git a/dbms/tests/queries/0_stateless/00514_interval_operators.sql b/tests/queries/0_stateless/00514_interval_operators.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00514_interval_operators.sql
rename to tests/queries/0_stateless/00514_interval_operators.sql
diff --git a/dbms/tests/queries/0_stateless/00515_enhanced_time_zones.reference b/tests/queries/0_stateless/00515_enhanced_time_zones.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_enhanced_time_zones.reference
rename to tests/queries/0_stateless/00515_enhanced_time_zones.reference
diff --git a/dbms/tests/queries/0_stateless/00515_enhanced_time_zones.sql b/tests/queries/0_stateless/00515_enhanced_time_zones.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_enhanced_time_zones.sql
rename to tests/queries/0_stateless/00515_enhanced_time_zones.sql
diff --git a/dbms/tests/queries/0_stateless/00515_gcd_lcm.reference b/tests/queries/0_stateless/00515_gcd_lcm.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_gcd_lcm.reference
rename to tests/queries/0_stateless/00515_gcd_lcm.reference
diff --git a/dbms/tests/queries/0_stateless/00515_gcd_lcm.sql b/tests/queries/0_stateless/00515_gcd_lcm.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_gcd_lcm.sql
rename to tests/queries/0_stateless/00515_gcd_lcm.sql
diff --git a/dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference b/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference
rename to tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql b/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql
rename to tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql
diff --git a/dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference b/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference
rename to tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql b/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql
rename to tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00516_is_inf_nan.reference b/tests/queries/0_stateless/00516_is_inf_nan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_is_inf_nan.reference
rename to tests/queries/0_stateless/00516_is_inf_nan.reference
diff --git a/dbms/tests/queries/0_stateless/00516_is_inf_nan.sql b/tests/queries/0_stateless/00516_is_inf_nan.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_is_inf_nan.sql
rename to tests/queries/0_stateless/00516_is_inf_nan.sql
diff --git a/dbms/tests/queries/0_stateless/00516_modulo.reference b/tests/queries/0_stateless/00516_modulo.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_modulo.reference
rename to tests/queries/0_stateless/00516_modulo.reference
diff --git a/dbms/tests/queries/0_stateless/00516_modulo.sql b/tests/queries/0_stateless/00516_modulo.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_modulo.sql
rename to tests/queries/0_stateless/00516_modulo.sql
diff --git a/dbms/tests/queries/0_stateless/00517_date_parsing.reference b/tests/queries/0_stateless/00517_date_parsing.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00517_date_parsing.reference
rename to tests/queries/0_stateless/00517_date_parsing.reference
diff --git a/dbms/tests/queries/0_stateless/00517_date_parsing.sql b/tests/queries/0_stateless/00517_date_parsing.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00517_date_parsing.sql
rename to tests/queries/0_stateless/00517_date_parsing.sql
diff --git a/dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference b/tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference
rename to tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference
diff --git a/dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql b/tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql
rename to tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql
diff --git a/dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference b/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference
rename to tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference
diff --git a/dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql b/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql
rename to tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql
diff --git a/dbms/tests/queries/0_stateless/00520_http_nullable.reference b/tests/queries/0_stateless/00520_http_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00520_http_nullable.reference
rename to tests/queries/0_stateless/00520_http_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00520_http_nullable.sh b/tests/queries/0_stateless/00520_http_nullable.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00520_http_nullable.sh
rename to tests/queries/0_stateless/00520_http_nullable.sh
diff --git a/dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.reference b/tests/queries/0_stateless/00520_tuple_values_interpreter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.reference
rename to tests/queries/0_stateless/00520_tuple_values_interpreter.reference
diff --git a/dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.sql b/tests/queries/0_stateless/00520_tuple_values_interpreter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.sql
rename to tests/queries/0_stateless/00520_tuple_values_interpreter.sql
diff --git a/dbms/tests/queries/0_stateless/00521_multidimensional.reference b/tests/queries/0_stateless/00521_multidimensional.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00521_multidimensional.reference
rename to tests/queries/0_stateless/00521_multidimensional.reference
diff --git a/dbms/tests/queries/0_stateless/00521_multidimensional.sql b/tests/queries/0_stateless/00521_multidimensional.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00521_multidimensional.sql
rename to tests/queries/0_stateless/00521_multidimensional.sql
diff --git a/dbms/tests/queries/0_stateless/00522_multidimensional.reference b/tests/queries/0_stateless/00522_multidimensional.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00522_multidimensional.reference
rename to tests/queries/0_stateless/00522_multidimensional.reference
diff --git a/dbms/tests/queries/0_stateless/00522_multidimensional.sql b/tests/queries/0_stateless/00522_multidimensional.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00522_multidimensional.sql
rename to tests/queries/0_stateless/00522_multidimensional.sql
diff --git a/dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference b/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference
rename to tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference
diff --git a/dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql b/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql
rename to tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql
diff --git a/dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.reference b/tests/queries/0_stateless/00524_time_intervals_months_underflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.reference
rename to tests/queries/0_stateless/00524_time_intervals_months_underflow.reference
diff --git a/dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.sql b/tests/queries/0_stateless/00524_time_intervals_months_underflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.sql
rename to tests/queries/0_stateless/00524_time_intervals_months_underflow.sql
diff --git a/dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference b/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference
rename to tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql b/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql
rename to tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference b/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference
rename to tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql b/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql
rename to tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00527_totals_having_nullable.reference b/tests/queries/0_stateless/00527_totals_having_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00527_totals_having_nullable.reference
rename to tests/queries/0_stateless/00527_totals_having_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00527_totals_having_nullable.sql b/tests/queries/0_stateless/00527_totals_having_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00527_totals_having_nullable.sql
rename to tests/queries/0_stateless/00527_totals_having_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00528_const_of_nullable.reference b/tests/queries/0_stateless/00528_const_of_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00528_const_of_nullable.reference
rename to tests/queries/0_stateless/00528_const_of_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00528_const_of_nullable.sql b/tests/queries/0_stateless/00528_const_of_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00528_const_of_nullable.sql
rename to tests/queries/0_stateless/00528_const_of_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00529_orantius.reference b/tests/queries/0_stateless/00529_orantius.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00529_orantius.reference
rename to tests/queries/0_stateless/00529_orantius.reference
diff --git a/dbms/tests/queries/0_stateless/00529_orantius.sql b/tests/queries/0_stateless/00529_orantius.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00529_orantius.sql
rename to tests/queries/0_stateless/00529_orantius.sql
diff --git a/dbms/tests/queries/0_stateless/00530_arrays_of_nothing.reference b/tests/queries/0_stateless/00530_arrays_of_nothing.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00530_arrays_of_nothing.reference
rename to tests/queries/0_stateless/00530_arrays_of_nothing.reference
diff --git a/dbms/tests/queries/0_stateless/00530_arrays_of_nothing.sql b/tests/queries/0_stateless/00530_arrays_of_nothing.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00530_arrays_of_nothing.sql
rename to tests/queries/0_stateless/00530_arrays_of_nothing.sql
diff --git a/dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.reference b/tests/queries/0_stateless/00531_aggregate_over_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.reference
rename to tests/queries/0_stateless/00531_aggregate_over_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.sql b/tests/queries/0_stateless/00531_aggregate_over_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.sql
rename to tests/queries/0_stateless/00531_aggregate_over_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00531_client_ignore_error.reference b/tests/queries/0_stateless/00531_client_ignore_error.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00531_client_ignore_error.reference
rename to tests/queries/0_stateless/00531_client_ignore_error.reference
diff --git a/dbms/tests/queries/0_stateless/00531_client_ignore_error.sh b/tests/queries/0_stateless/00531_client_ignore_error.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00531_client_ignore_error.sh
rename to tests/queries/0_stateless/00531_client_ignore_error.sh
diff --git a/dbms/tests/queries/0_stateless/00532_topk_generic.reference b/tests/queries/0_stateless/00532_topk_generic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00532_topk_generic.reference
rename to tests/queries/0_stateless/00532_topk_generic.reference
diff --git a/dbms/tests/queries/0_stateless/00532_topk_generic.sql b/tests/queries/0_stateless/00532_topk_generic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00532_topk_generic.sql
rename to tests/queries/0_stateless/00532_topk_generic.sql
diff --git a/dbms/tests/queries/0_stateless/00533_uniq_array.reference b/tests/queries/0_stateless/00533_uniq_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00533_uniq_array.reference
rename to tests/queries/0_stateless/00533_uniq_array.reference
diff --git a/dbms/tests/queries/0_stateless/00533_uniq_array.sql b/tests/queries/0_stateless/00533_uniq_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00533_uniq_array.sql
rename to tests/queries/0_stateless/00533_uniq_array.sql
diff --git a/dbms/tests/queries/0_stateless/00534_client_ignore_error.reference b/tests/queries/0_stateless/00534_client_ignore_error.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_client_ignore_error.reference
rename to tests/queries/0_stateless/00534_client_ignore_error.reference
diff --git a/dbms/tests/queries/0_stateless/00534_client_ignore_error.sh b/tests/queries/0_stateless/00534_client_ignore_error.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_client_ignore_error.sh
rename to tests/queries/0_stateless/00534_client_ignore_error.sh
diff --git a/dbms/tests/queries/0_stateless/00534_exp10.reference b/tests/queries/0_stateless/00534_exp10.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_exp10.reference
rename to tests/queries/0_stateless/00534_exp10.reference
diff --git a/dbms/tests/queries/0_stateless/00534_exp10.sql b/tests/queries/0_stateless/00534_exp10.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_exp10.sql
rename to tests/queries/0_stateless/00534_exp10.sql
diff --git a/dbms/tests/queries/0_stateless/00534_filimonov.data b/tests/queries/0_stateless/00534_filimonov.data
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_filimonov.data
rename to tests/queries/0_stateless/00534_filimonov.data
diff --git a/dbms/tests/queries/0_stateless/00534_filimonov.reference b/tests/queries/0_stateless/00534_filimonov.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_filimonov.reference
rename to tests/queries/0_stateless/00534_filimonov.reference
diff --git a/dbms/tests/queries/0_stateless/00534_filimonov.sh b/tests/queries/0_stateless/00534_filimonov.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_filimonov.sh
rename to tests/queries/0_stateless/00534_filimonov.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments.lib b/tests/queries/0_stateless/00534_functions_bad_arguments.lib
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments.lib
rename to tests/queries/0_stateless/00534_functions_bad_arguments.lib
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.reference b/tests/queries/0_stateless/00534_functions_bad_arguments1.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments1.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.sh b/tests/queries/0_stateless/00534_functions_bad_arguments1.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments1.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.reference b/tests/queries/0_stateless/00534_functions_bad_arguments10.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments10.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.sh b/tests/queries/0_stateless/00534_functions_bad_arguments10.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments10.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.reference b/tests/queries/0_stateless/00534_functions_bad_arguments11.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments11.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.sh b/tests/queries/0_stateless/00534_functions_bad_arguments11.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments11.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.reference b/tests/queries/0_stateless/00534_functions_bad_arguments12.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments12.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.sh b/tests/queries/0_stateless/00534_functions_bad_arguments12.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments12.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.reference b/tests/queries/0_stateless/00534_functions_bad_arguments13.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments13.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.sh b/tests/queries/0_stateless/00534_functions_bad_arguments13.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments13.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.reference b/tests/queries/0_stateless/00534_functions_bad_arguments2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments2.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.sh b/tests/queries/0_stateless/00534_functions_bad_arguments2.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments2.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.reference b/tests/queries/0_stateless/00534_functions_bad_arguments3.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments3.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.sh b/tests/queries/0_stateless/00534_functions_bad_arguments3.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments3.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.reference b/tests/queries/0_stateless/00534_functions_bad_arguments4.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments4.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.sh b/tests/queries/0_stateless/00534_functions_bad_arguments4.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments4.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.reference b/tests/queries/0_stateless/00534_functions_bad_arguments5.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments5.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.sh b/tests/queries/0_stateless/00534_functions_bad_arguments5.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments5.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.reference b/tests/queries/0_stateless/00534_functions_bad_arguments6.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments6.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.sh b/tests/queries/0_stateless/00534_functions_bad_arguments6.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments6.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.reference b/tests/queries/0_stateless/00534_functions_bad_arguments7.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments7.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.sh b/tests/queries/0_stateless/00534_functions_bad_arguments7.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments7.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.reference b/tests/queries/0_stateless/00534_functions_bad_arguments8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments8.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.sh b/tests/queries/0_stateless/00534_functions_bad_arguments8.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments8.sh
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.reference b/tests/queries/0_stateless/00534_functions_bad_arguments9.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.reference
rename to tests/queries/0_stateless/00534_functions_bad_arguments9.reference
diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.sh b/tests/queries/0_stateless/00534_functions_bad_arguments9.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.sh
rename to tests/queries/0_stateless/00534_functions_bad_arguments9.sh
diff --git a/dbms/tests/queries/0_stateless/00535_parse_float_scientific.reference b/tests/queries/0_stateless/00535_parse_float_scientific.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00535_parse_float_scientific.reference
rename to tests/queries/0_stateless/00535_parse_float_scientific.reference
diff --git a/dbms/tests/queries/0_stateless/00535_parse_float_scientific.sql b/tests/queries/0_stateless/00535_parse_float_scientific.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00535_parse_float_scientific.sql
rename to tests/queries/0_stateless/00535_parse_float_scientific.sql
diff --git a/dbms/tests/queries/0_stateless/00536_int_exp.reference b/tests/queries/0_stateless/00536_int_exp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00536_int_exp.reference
rename to tests/queries/0_stateless/00536_int_exp.reference
diff --git a/dbms/tests/queries/0_stateless/00536_int_exp.sql b/tests/queries/0_stateless/00536_int_exp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00536_int_exp.sql
rename to tests/queries/0_stateless/00536_int_exp.sql
diff --git a/dbms/tests/queries/0_stateless/00537_quarters.reference b/tests/queries/0_stateless/00537_quarters.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00537_quarters.reference
rename to tests/queries/0_stateless/00537_quarters.reference
diff --git a/dbms/tests/queries/0_stateless/00537_quarters.sql b/tests/queries/0_stateless/00537_quarters.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00537_quarters.sql
rename to tests/queries/0_stateless/00537_quarters.sql
diff --git a/dbms/tests/queries/0_stateless/00538_datediff.reference b/tests/queries/0_stateless/00538_datediff.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00538_datediff.reference
rename to tests/queries/0_stateless/00538_datediff.reference
diff --git a/dbms/tests/queries/0_stateless/00538_datediff.sql b/tests/queries/0_stateless/00538_datediff.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00538_datediff.sql
rename to tests/queries/0_stateless/00538_datediff.sql
diff --git a/dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.reference b/tests/queries/0_stateless/00539_functions_for_working_with_json.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.reference
rename to tests/queries/0_stateless/00539_functions_for_working_with_json.reference
diff --git a/dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.sql b/tests/queries/0_stateless/00539_functions_for_working_with_json.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.sql
rename to tests/queries/0_stateless/00539_functions_for_working_with_json.sql
diff --git a/dbms/tests/queries/0_stateless/00540_bad_data_types.reference b/tests/queries/0_stateless/00540_bad_data_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00540_bad_data_types.reference
rename to tests/queries/0_stateless/00540_bad_data_types.reference
diff --git a/dbms/tests/queries/0_stateless/00540_bad_data_types.sh b/tests/queries/0_stateless/00540_bad_data_types.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00540_bad_data_types.sh
rename to tests/queries/0_stateless/00540_bad_data_types.sh
diff --git a/dbms/tests/queries/0_stateless/00541_kahan_sum.reference b/tests/queries/0_stateless/00541_kahan_sum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00541_kahan_sum.reference
rename to tests/queries/0_stateless/00541_kahan_sum.reference
diff --git a/dbms/tests/queries/0_stateless/00541_kahan_sum.sql b/tests/queries/0_stateless/00541_kahan_sum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00541_kahan_sum.sql
rename to tests/queries/0_stateless/00541_kahan_sum.sql
diff --git a/dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference b/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference
rename to tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference
diff --git a/dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql b/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql
rename to tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql
diff --git a/dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference b/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference
rename to tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference
diff --git a/dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql b/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql
rename to tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql
diff --git a/dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference b/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference
rename to tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference
diff --git a/dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql b/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql
rename to tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql
diff --git a/dbms/tests/queries/0_stateless/00694_max_block_size_zero.reference b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00694_max_block_size_zero.reference
rename to tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference
diff --git a/dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh
rename to tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh
diff --git a/dbms/tests/queries/0_stateless/00543_null_and_prewhere.reference b/tests/queries/0_stateless/00543_null_and_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00543_null_and_prewhere.reference
rename to tests/queries/0_stateless/00543_null_and_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00543_null_and_prewhere.sql b/tests/queries/0_stateless/00543_null_and_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00543_null_and_prewhere.sql
rename to tests/queries/0_stateless/00543_null_and_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference b/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference
rename to tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference
diff --git a/dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql b/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql
rename to tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql
diff --git a/dbms/tests/queries/0_stateless/00544_insert_with_select.reference b/tests/queries/0_stateless/00544_insert_with_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00544_insert_with_select.reference
rename to tests/queries/0_stateless/00544_insert_with_select.reference
diff --git a/dbms/tests/queries/0_stateless/00544_insert_with_select.sql b/tests/queries/0_stateless/00544_insert_with_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00544_insert_with_select.sql
rename to tests/queries/0_stateless/00544_insert_with_select.sql
diff --git a/dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.reference b/tests/queries/0_stateless/00545_weird_aggregate_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.reference
rename to tests/queries/0_stateless/00545_weird_aggregate_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.sql b/tests/queries/0_stateless/00545_weird_aggregate_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.sql
rename to tests/queries/0_stateless/00545_weird_aggregate_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference b/tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference
rename to tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference
diff --git a/dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql b/tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql
rename to tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql
diff --git a/dbms/tests/queries/0_stateless/00547_named_tuples.reference b/tests/queries/0_stateless/00547_named_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00547_named_tuples.reference
rename to tests/queries/0_stateless/00547_named_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00547_named_tuples.sql b/tests/queries/0_stateless/00547_named_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00547_named_tuples.sql
rename to tests/queries/0_stateless/00547_named_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00548_slice_of_nested.reference b/tests/queries/0_stateless/00548_slice_of_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00548_slice_of_nested.reference
rename to tests/queries/0_stateless/00548_slice_of_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00548_slice_of_nested.sql b/tests/queries/0_stateless/00548_slice_of_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00548_slice_of_nested.sql
rename to tests/queries/0_stateless/00548_slice_of_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00549_join_use_nulls.reference b/tests/queries/0_stateless/00549_join_use_nulls.reference
similarity index 100%
rename from
dbms/tests/queries/0_stateless/00549_join_use_nulls.reference rename to tests/queries/0_stateless/00549_join_use_nulls.reference diff --git a/dbms/tests/queries/0_stateless/00549_join_use_nulls.sql b/tests/queries/0_stateless/00549_join_use_nulls.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00549_join_use_nulls.sql rename to tests/queries/0_stateless/00549_join_use_nulls.sql diff --git a/dbms/tests/queries/0_stateless/00550_join_insert_select.reference b/tests/queries/0_stateless/00550_join_insert_select.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00550_join_insert_select.reference rename to tests/queries/0_stateless/00550_join_insert_select.reference diff --git a/dbms/tests/queries/0_stateless/00550_join_insert_select.sh b/tests/queries/0_stateless/00550_join_insert_select.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00550_join_insert_select.sh rename to tests/queries/0_stateless/00550_join_insert_select.sh diff --git a/dbms/tests/queries/0_stateless/00551_parse_or_null.reference b/tests/queries/0_stateless/00551_parse_or_null.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00551_parse_or_null.reference rename to tests/queries/0_stateless/00551_parse_or_null.reference diff --git a/dbms/tests/queries/0_stateless/00551_parse_or_null.sql b/tests/queries/0_stateless/00551_parse_or_null.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00551_parse_or_null.sql rename to tests/queries/0_stateless/00551_parse_or_null.sql diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_simple.reference b/tests/queries/0_stateless/00552_logical_functions_simple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_simple.reference rename to tests/queries/0_stateless/00552_logical_functions_simple.reference diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_simple.sql b/tests/queries/0_stateless/00552_logical_functions_simple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_simple.sql rename to tests/queries/0_stateless/00552_logical_functions_simple.sql diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_ternary.reference b/tests/queries/0_stateless/00552_logical_functions_ternary.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_ternary.reference rename to tests/queries/0_stateless/00552_logical_functions_ternary.reference diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_ternary.sql b/tests/queries/0_stateless/00552_logical_functions_ternary.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_ternary.sql rename to tests/queries/0_stateless/00552_logical_functions_ternary.sql diff --git a/dbms/tests/queries/0_stateless/00552_or_nullable.reference b/tests/queries/0_stateless/00552_or_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00552_or_nullable.reference rename to tests/queries/0_stateless/00552_or_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00552_or_nullable.sql b/tests/queries/0_stateless/00552_or_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00552_or_nullable.sql rename to tests/queries/0_stateless/00552_or_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.reference 
b/tests/queries/0_stateless/00553_buff_exists_materlized_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.reference rename to tests/queries/0_stateless/00553_buff_exists_materlized_column.reference diff --git a/dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.sql b/tests/queries/0_stateless/00553_buff_exists_materlized_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.sql rename to tests/queries/0_stateless/00553_buff_exists_materlized_column.sql diff --git a/dbms/tests/queries/0_stateless/00553_invalid_nested_name.reference b/tests/queries/0_stateless/00553_invalid_nested_name.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00553_invalid_nested_name.reference rename to tests/queries/0_stateless/00553_invalid_nested_name.reference diff --git a/dbms/tests/queries/0_stateless/00553_invalid_nested_name.sql b/tests/queries/0_stateless/00553_invalid_nested_name.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00553_invalid_nested_name.sql rename to tests/queries/0_stateless/00553_invalid_nested_name.sql diff --git a/dbms/tests/queries/0_stateless/00554_nested_and_table_engines.reference b/tests/queries/0_stateless/00554_nested_and_table_engines.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00554_nested_and_table_engines.reference rename to tests/queries/0_stateless/00554_nested_and_table_engines.reference diff --git a/dbms/tests/queries/0_stateless/00554_nested_and_table_engines.sql b/tests/queries/0_stateless/00554_nested_and_table_engines.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00554_nested_and_table_engines.sql rename to tests/queries/0_stateless/00554_nested_and_table_engines.sql diff --git a/dbms/tests/queries/0_stateless/00555_hasAll_hasAny.reference b/tests/queries/0_stateless/00555_hasAll_hasAny.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00555_hasAll_hasAny.reference rename to tests/queries/0_stateless/00555_hasAll_hasAny.reference diff --git a/dbms/tests/queries/0_stateless/00555_hasAll_hasAny.sql b/tests/queries/0_stateless/00555_hasAll_hasAny.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00555_hasAll_hasAny.sql rename to tests/queries/0_stateless/00555_hasAll_hasAny.sql diff --git a/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.reference b/tests/queries/0_stateless/00555_right_join_excessive_rows.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.reference rename to tests/queries/0_stateless/00555_right_join_excessive_rows.reference diff --git a/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql b/tests/queries/0_stateless/00555_right_join_excessive_rows.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql rename to tests/queries/0_stateless/00555_right_join_excessive_rows.sql diff --git a/dbms/tests/queries/0_stateless/00556_array_intersect.reference b/tests/queries/0_stateless/00556_array_intersect.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00556_array_intersect.reference rename to tests/queries/0_stateless/00556_array_intersect.reference diff --git a/dbms/tests/queries/0_stateless/00556_array_intersect.sql b/tests/queries/0_stateless/00556_array_intersect.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00556_array_intersect.sql rename to tests/queries/0_stateless/00556_array_intersect.sql diff --git a/dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.reference b/tests/queries/0_stateless/00556_remove_columns_from_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.reference rename to tests/queries/0_stateless/00556_remove_columns_from_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.sql b/tests/queries/0_stateless/00556_remove_columns_from_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.sql rename to tests/queries/0_stateless/00556_remove_columns_from_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.reference b/tests/queries/0_stateless/00557_alter_null_storage_tables.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.reference rename to tests/queries/0_stateless/00557_alter_null_storage_tables.reference diff --git a/dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.sql b/tests/queries/0_stateless/00557_alter_null_storage_tables.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.sql rename to tests/queries/0_stateless/00557_alter_null_storage_tables.sql diff --git a/dbms/tests/queries/0_stateless/00557_array_resize.reference b/tests/queries/0_stateless/00557_array_resize.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00557_array_resize.reference rename to tests/queries/0_stateless/00557_array_resize.reference diff --git a/dbms/tests/queries/0_stateless/00557_array_resize.sql b/tests/queries/0_stateless/00557_array_resize.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00557_array_resize.sql rename to tests/queries/0_stateless/00557_array_resize.sql diff --git a/dbms/tests/queries/0_stateless/00557_remote_port.reference b/tests/queries/0_stateless/00557_remote_port.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00557_remote_port.reference rename to tests/queries/0_stateless/00557_remote_port.reference diff --git a/dbms/tests/queries/0_stateless/00557_remote_port.sh b/tests/queries/0_stateless/00557_remote_port.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00557_remote_port.sh rename to tests/queries/0_stateless/00557_remote_port.sh diff --git a/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference b/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference rename to tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference diff --git a/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql b/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql rename to tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql diff --git a/dbms/tests/queries/0_stateless/00558_parse_floats.reference b/tests/queries/0_stateless/00558_parse_floats.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00558_parse_floats.reference rename to 
tests/queries/0_stateless/00558_parse_floats.reference diff --git a/dbms/tests/queries/0_stateless/00558_parse_floats.sql b/tests/queries/0_stateless/00558_parse_floats.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00558_parse_floats.sql rename to tests/queries/0_stateless/00558_parse_floats.sql diff --git a/dbms/tests/queries/0_stateless/00559_filter_array_generic.reference b/tests/queries/0_stateless/00559_filter_array_generic.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00559_filter_array_generic.reference rename to tests/queries/0_stateless/00559_filter_array_generic.reference diff --git a/dbms/tests/queries/0_stateless/00559_filter_array_generic.sql b/tests/queries/0_stateless/00559_filter_array_generic.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00559_filter_array_generic.sql rename to tests/queries/0_stateless/00559_filter_array_generic.sql diff --git a/dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference b/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference rename to tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference diff --git a/dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql b/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql rename to tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql diff --git a/dbms/tests/queries/0_stateless/00561_storage_join.reference b/tests/queries/0_stateless/00561_storage_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00561_storage_join.reference rename to tests/queries/0_stateless/00561_storage_join.reference diff --git a/dbms/tests/queries/0_stateless/00561_storage_join.sql b/tests/queries/0_stateless/00561_storage_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00561_storage_join.sql rename to tests/queries/0_stateless/00561_storage_join.sql diff --git a/dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.reference b/tests/queries/0_stateless/00562_in_subquery_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.reference rename to tests/queries/0_stateless/00562_in_subquery_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.sql b/tests/queries/0_stateless/00562_in_subquery_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.sql rename to tests/queries/0_stateless/00562_in_subquery_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference b/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference rename to tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference diff --git a/dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql b/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql rename to 
tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql diff --git a/dbms/tests/queries/0_stateless/00563_complex_in_expression.reference b/tests/queries/0_stateless/00563_complex_in_expression.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00563_complex_in_expression.reference rename to tests/queries/0_stateless/00563_complex_in_expression.reference diff --git a/dbms/tests/queries/0_stateless/00563_complex_in_expression.sql b/tests/queries/0_stateless/00563_complex_in_expression.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00563_complex_in_expression.sql rename to tests/queries/0_stateless/00563_complex_in_expression.sql diff --git a/dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference b/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference rename to tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql b/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql rename to tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.reference b/tests/queries/0_stateless/00563_shard_insert_into_remote.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.reference rename to tests/queries/0_stateless/00563_shard_insert_into_remote.reference diff --git a/dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.sql b/tests/queries/0_stateless/00563_shard_insert_into_remote.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.sql rename to tests/queries/0_stateless/00563_shard_insert_into_remote.sql diff --git a/dbms/tests/queries/0_stateless/00564_enum_order.reference b/tests/queries/0_stateless/00564_enum_order.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00564_enum_order.reference rename to tests/queries/0_stateless/00564_enum_order.reference diff --git a/dbms/tests/queries/0_stateless/00564_enum_order.sh b/tests/queries/0_stateless/00564_enum_order.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00564_enum_order.sh rename to tests/queries/0_stateless/00564_enum_order.sh diff --git a/dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference b/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference rename to tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference diff --git a/dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql b/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql rename to tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql diff --git a/tests/queries/0_stateless/00564_temporary_table_management.reference 
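Note: the new reference file above encodes the expected output of the temporary-table test; the renamed 00564_temporary_table_management.sql itself is not reproduced in this diff. The following is only a hedged reconstruction of queries that would plausibly produce those four reference lines, not the test's verbatim contents.

-- Hypothetical sketch (assumed statements; the real 00564 test file is not shown in this diff).
CREATE TEMPORARY TABLE temp_tab (number UInt64);
EXISTS TEMPORARY TABLE temp_tab;                    -- expected: 1
SHOW CREATE TEMPORARY TABLE temp_tab;               -- expected: the CREATE TEMPORARY TABLE ... ENGINE = Memory line
SELECT name FROM system.tables WHERE is_temporary;  -- expected: temp_tab
DROP TEMPORARY TABLE temp_tab;
EXISTS TEMPORARY TABLE temp_tab;                    -- expected: 0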
diff --git a/dbms/tests/queries/0_stateless/00564_temporary_table_management.sql b/tests/queries/0_stateless/00564_temporary_table_management.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_temporary_table_management.sql
rename to tests/queries/0_stateless/00564_temporary_table_management.sql
diff --git a/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference b/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference
rename to tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql b/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
rename to tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00565_enum_order.reference b/tests/queries/0_stateless/00565_enum_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00565_enum_order.reference
rename to tests/queries/0_stateless/00565_enum_order.reference
diff --git a/dbms/tests/queries/0_stateless/00565_enum_order.sh b/tests/queries/0_stateless/00565_enum_order.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00565_enum_order.sh
rename to tests/queries/0_stateless/00565_enum_order.sh
diff --git a/dbms/tests/queries/0_stateless/00566_enum_min_max.reference b/tests/queries/0_stateless/00566_enum_min_max.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00566_enum_min_max.reference
rename to tests/queries/0_stateless/00566_enum_min_max.reference
diff --git a/dbms/tests/queries/0_stateless/00566_enum_min_max.sql b/tests/queries/0_stateless/00566_enum_min_max.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00566_enum_min_max.sql
rename to tests/queries/0_stateless/00566_enum_min_max.sql
diff --git a/dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference b/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference
rename to tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference
diff --git a/dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql b/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql
rename to tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql
diff --git a/dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference b/tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference
rename to tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql b/tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql
rename to tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
rename to tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
diff --git a/dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql b/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql
rename to tests/queries/0_stateless/00569_parse_date_time_best_effort.sql
diff --git a/dbms/tests/queries/0_stateless/00570_empty_array_is_const.reference b/tests/queries/0_stateless/00570_empty_array_is_const.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00570_empty_array_is_const.reference
rename to tests/queries/0_stateless/00570_empty_array_is_const.reference
diff --git a/dbms/tests/queries/0_stateless/00570_empty_array_is_const.sql b/tests/queries/0_stateless/00570_empty_array_is_const.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00570_empty_array_is_const.sql
rename to tests/queries/0_stateless/00570_empty_array_is_const.sql
diff --git a/dbms/tests/queries/0_stateless/00571_alter_nullable.reference b/tests/queries/0_stateless/00571_alter_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_alter_nullable.reference
rename to tests/queries/0_stateless/00571_alter_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00571_alter_nullable.sql b/tests/queries/0_stateless/00571_alter_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_alter_nullable.sql
rename to tests/queries/0_stateless/00571_alter_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference b/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference
rename to tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference
diff --git a/dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql b/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql
rename to tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql
diff --git a/dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.reference b/tests/queries/0_stateless/00572_aggregation_by_empty_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.reference
rename to tests/queries/0_stateless/00572_aggregation_by_empty_set.reference
diff --git a/dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.sql b/tests/queries/0_stateless/00572_aggregation_by_empty_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.sql
rename to tests/queries/0_stateless/00572_aggregation_by_empty_set.sql
diff --git a/dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference b/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference
rename to tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference
diff --git a/dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql b/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql
rename to tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql
diff --git a/dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.reference b/tests/queries/0_stateless/00574_empty_strings_deserialization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.reference
rename to tests/queries/0_stateless/00574_empty_strings_deserialization.reference
diff --git a/dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.sh b/tests/queries/0_stateless/00574_empty_strings_deserialization.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.sh
rename to tests/queries/0_stateless/00574_empty_strings_deserialization.sh
diff --git a/dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference
rename to tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference
diff --git a/dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
rename to tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
diff --git a/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference b/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference
rename to tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql b/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
rename to tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00576_nested_and_prewhere.reference b/tests/queries/0_stateless/00576_nested_and_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00576_nested_and_prewhere.reference
rename to tests/queries/0_stateless/00576_nested_and_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00576_nested_and_prewhere.sql b/tests/queries/0_stateless/00576_nested_and_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00576_nested_and_prewhere.sql
rename to tests/queries/0_stateless/00576_nested_and_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00577_full_join_segfault.reference b/tests/queries/0_stateless/00577_full_join_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_full_join_segfault.reference
rename to tests/queries/0_stateless/00577_full_join_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00577_full_join_segfault.sql b/tests/queries/0_stateless/00577_full_join_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_full_join_segfault.sql
rename to tests/queries/0_stateless/00577_full_join_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference b/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference
rename to tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql b/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
rename to tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference b/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference
rename to tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql b/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql
rename to tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_sampling.reference b/tests/queries/0_stateless/00578_merge_table_sampling.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_sampling.reference
rename to tests/queries/0_stateless/00578_merge_table_sampling.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_sampling.sql b/tests/queries/0_stateless/00578_merge_table_sampling.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_sampling.sql
rename to tests/queries/0_stateless/00578_merge_table_sampling.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference b/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference
rename to tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql b/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql
rename to tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference b/tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference
rename to tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql b/tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql
rename to tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql
diff --git a/dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference b/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference
rename to tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference
diff --git a/dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql b/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql
rename to tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql
diff --git a/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.reference b/tests/queries/0_stateless/00579_virtual_column_and_lazy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.reference
rename to tests/queries/0_stateless/00579_virtual_column_and_lazy.reference
diff --git a/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql b/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
rename to tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
diff --git a/dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference b/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference
rename to tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql b/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql
rename to tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.reference b/tests/queries/0_stateless/00580_consistent_hashing_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.reference
rename to tests/queries/0_stateless/00580_consistent_hashing_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.sql b/tests/queries/0_stateless/00580_consistent_hashing_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.sql
rename to tests/queries/0_stateless/00580_consistent_hashing_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference b/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference
rename to tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql b/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql
rename to tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql
diff --git a/dbms/tests/queries/0_stateless/00582_not_aliasing_functions.reference b/tests/queries/0_stateless/00582_not_aliasing_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00582_not_aliasing_functions.reference
rename to tests/queries/0_stateless/00582_not_aliasing_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00582_not_aliasing_functions.sql b/tests/queries/0_stateless/00582_not_aliasing_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00582_not_aliasing_functions.sql
rename to tests/queries/0_stateless/00582_not_aliasing_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00583_limit_by_expressions.reference b/tests/queries/0_stateless/00583_limit_by_expressions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00583_limit_by_expressions.reference
rename to tests/queries/0_stateless/00583_limit_by_expressions.reference
diff --git a/dbms/tests/queries/0_stateless/00583_limit_by_expressions.sql b/tests/queries/0_stateless/00583_limit_by_expressions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00583_limit_by_expressions.sql
rename to tests/queries/0_stateless/00583_limit_by_expressions.sql
diff --git a/dbms/tests/queries/0_stateless/00584_view_union_all.reference b/tests/queries/0_stateless/00584_view_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00584_view_union_all.reference
rename to tests/queries/0_stateless/00584_view_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00584_view_union_all.sql b/tests/queries/0_stateless/00584_view_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00584_view_union_all.sql
rename to tests/queries/0_stateless/00584_view_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference
rename to tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference
diff --git a/dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql
rename to tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql
diff --git a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference b/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference
rename to tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql b/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql
rename to tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00587_union_all_type_conversions.reference b/tests/queries/0_stateless/00587_union_all_type_conversions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00587_union_all_type_conversions.reference
rename to tests/queries/0_stateless/00587_union_all_type_conversions.reference
diff --git a/dbms/tests/queries/0_stateless/00587_union_all_type_conversions.sql b/tests/queries/0_stateless/00587_union_all_type_conversions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00587_union_all_type_conversions.sql
rename to tests/queries/0_stateless/00587_union_all_type_conversions.sql
diff --git a/dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.reference b/tests/queries/0_stateless/00588_shard_distributed_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.reference
rename to tests/queries/0_stateless/00588_shard_distributed_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.sql b/tests/queries/0_stateless/00588_shard_distributed_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.sql
rename to tests/queries/0_stateless/00588_shard_distributed_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference b/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference
rename to tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql b/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql
rename to tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00590_limit_by_column_removal.reference b/tests/queries/0_stateless/00590_limit_by_column_removal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00590_limit_by_column_removal.reference
rename to tests/queries/0_stateless/00590_limit_by_column_removal.reference
diff --git a/dbms/tests/queries/0_stateless/00590_limit_by_column_removal.sql b/tests/queries/0_stateless/00590_limit_by_column_removal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00590_limit_by_column_removal.sql
rename to tests/queries/0_stateless/00590_limit_by_column_removal.sql
diff --git a/dbms/tests/queries/0_stateless/00591_columns_removal_union_all.reference b/tests/queries/0_stateless/00591_columns_removal_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00591_columns_removal_union_all.reference
rename to tests/queries/0_stateless/00591_columns_removal_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00591_columns_removal_union_all.sql b/tests/queries/0_stateless/00591_columns_removal_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00591_columns_removal_union_all.sql
rename to tests/queries/0_stateless/00591_columns_removal_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00592_union_all_different_aliases.reference b/tests/queries/0_stateless/00592_union_all_different_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00592_union_all_different_aliases.reference
rename to tests/queries/0_stateless/00592_union_all_different_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00592_union_all_different_aliases.sql b/tests/queries/0_stateless/00592_union_all_different_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00592_union_all_different_aliases.sql
rename to tests/queries/0_stateless/00592_union_all_different_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference b/tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference
rename to tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference
diff --git a/dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql b/tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql
rename to tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql
diff --git a/dbms/tests/queries/0_stateless/00594_alias_in_distributed.reference b/tests/queries/0_stateless/00594_alias_in_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00594_alias_in_distributed.reference
rename to tests/queries/0_stateless/00594_alias_in_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql b/tests/queries/0_stateless/00594_alias_in_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql
rename to tests/queries/0_stateless/00594_alias_in_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00595_insert_into_view.reference b/tests/queries/0_stateless/00595_insert_into_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00595_insert_into_view.reference
rename to tests/queries/0_stateless/00595_insert_into_view.reference
diff --git a/dbms/tests/queries/0_stateless/00595_insert_into_view.sh b/tests/queries/0_stateless/00595_insert_into_view.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00595_insert_into_view.sh
rename to tests/queries/0_stateless/00595_insert_into_view.sh
diff --git a/dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.reference b/tests/queries/0_stateless/00596_limit_on_expanded_ast.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.reference
rename to tests/queries/0_stateless/00596_limit_on_expanded_ast.reference
diff --git a/dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh b/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh
rename to tests/queries/0_stateless/00596_limit_on_expanded_ast.sh
diff --git a/dbms/tests/queries/0_stateless/00597_push_down_predicate.reference b/tests/queries/0_stateless/00597_push_down_predicate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_push_down_predicate.reference
rename to tests/queries/0_stateless/00597_push_down_predicate.reference
diff --git a/dbms/tests/queries/0_stateless/00597_push_down_predicate.sql b/tests/queries/0_stateless/00597_push_down_predicate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_push_down_predicate.sql
rename to tests/queries/0_stateless/00597_push_down_predicate.sql
diff --git a/dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.reference b/tests/queries/0_stateless/00597_with_totals_on_empty_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.reference
rename to tests/queries/0_stateless/00597_with_totals_on_empty_set.reference
diff --git a/dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.sql b/tests/queries/0_stateless/00597_with_totals_on_empty_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.sql
rename to tests/queries/0_stateless/00597_with_totals_on_empty_set.sql
diff --git a/dbms/tests/queries/0_stateless/00598_create_as_select_http.reference b/tests/queries/0_stateless/00598_create_as_select_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00598_create_as_select_http.reference
rename to tests/queries/0_stateless/00598_create_as_select_http.reference
diff --git a/dbms/tests/queries/0_stateless/00598_create_as_select_http.sh b/tests/queries/0_stateless/00598_create_as_select_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00598_create_as_select_http.sh
rename to tests/queries/0_stateless/00598_create_as_select_http.sh
diff --git a/tests/queries/0_stateless/00599_create_view_with_subquery.reference b/tests/queries/0_stateless/00599_create_view_with_subquery.reference
new file mode 100644
index 00000000000..d83d2837a18
--- /dev/null
+++ b/tests/queries/0_stateless/00599_create_view_with_subquery.reference
@@ -0,0 +1 @@
+CREATE VIEW default.test_view_00599\n(\n    `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = \n(\n    SELECT 1\n)
diff --git a/dbms/tests/queries/0_stateless/00599_create_view_with_subquery.sql b/tests/queries/0_stateless/00599_create_view_with_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00599_create_view_with_subquery.sql
rename to tests/queries/0_stateless/00599_create_view_with_subquery.sql
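Note: the one-line reference above pins the exact text SHOW CREATE VIEW should return for a view whose WHERE clause contains a scalar subquery. A minimal sketch that would exercise it, assuming a plain source table in the default database (the table engine here is an assumption, not taken from the diff):

-- Hypothetical sketch (not the actual 00599 test file, which is only renamed in this diff).
CREATE TABLE test_00599 (id UInt64) ENGINE = TinyLog;
CREATE VIEW test_view_00599 AS SELECT * FROM test_00599 WHERE id = (SELECT 1);
SHOW CREATE VIEW test_view_00599;  -- expected: the single reference line above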
diff --git a/dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference b/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference
rename to tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference
diff --git a/dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql b/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql
rename to tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql
diff --git a/dbms/tests/queries/0_stateless/00600_replace_running_query.reference b/tests/queries/0_stateless/00600_replace_running_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_replace_running_query.reference
rename to tests/queries/0_stateless/00600_replace_running_query.reference
diff --git a/dbms/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_replace_running_query.sh
rename to tests/queries/0_stateless/00600_replace_running_query.sh
diff --git a/dbms/tests/queries/0_stateless/00601_kill_running_query.reference b/tests/queries/0_stateless/00601_kill_running_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00601_kill_running_query.reference
rename to tests/queries/0_stateless/00601_kill_running_query.reference
diff --git a/dbms/tests/queries/0_stateless/00601_kill_running_query.sh b/tests/queries/0_stateless/00601_kill_running_query.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00601_kill_running_query.sh
rename to tests/queries/0_stateless/00601_kill_running_query.sh
diff --git a/dbms/tests/queries/0_stateless/00602_throw_if.reference b/tests/queries/0_stateless/00602_throw_if.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00602_throw_if.reference
rename to tests/queries/0_stateless/00602_throw_if.reference
diff --git a/dbms/tests/queries/0_stateless/00602_throw_if.sh b/tests/queries/0_stateless/00602_throw_if.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00602_throw_if.sh
rename to tests/queries/0_stateless/00602_throw_if.sh
diff --git a/dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference b/tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference
rename to tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference
diff --git a/dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql b/tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql
rename to tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql
diff --git a/dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference b/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference
rename to tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql b/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql
rename to tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql
diff --git a/tests/queries/0_stateless/00604_show_create_database.reference b/tests/queries/0_stateless/00604_show_create_database.reference
new file mode 100644
index 00000000000..a9ad6abea25
--- /dev/null
+++ b/tests/queries/0_stateless/00604_show_create_database.reference
@@ -0,0 +1 @@
+CREATE DATABASE test_00604\nENGINE = Ordinary
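Note: the reference above encodes what SHOW CREATE DATABASE should print. A minimal sketch, assuming Ordinary was the database engine in effect at the time (whether the test names the engine explicitly or relies on the then-default is not visible from this diff):

-- Hypothetical sketch (not the actual 00604 test file, which is only renamed in this diff).
CREATE DATABASE IF NOT EXISTS test_00604 ENGINE = Ordinary;
SHOW CREATE DATABASE test_00604;  -- expected: CREATE DATABASE test_00604\nENGINE = Ordinary
DROP DATABASE test_00604;         -- cleanup step; an assumption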
file mode 100644 index 00000000000..a9ad6abea25 --- /dev/null +++ b/tests/queries/0_stateless/00604_show_create_database.reference @@ -0,0 +1 @@ +CREATE DATABASE test_00604\nENGINE = Ordinary diff --git a/dbms/tests/queries/0_stateless/00604_show_create_database.sql b/tests/queries/0_stateless/00604_show_create_database.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00604_show_create_database.sql rename to tests/queries/0_stateless/00604_show_create_database.sql diff --git a/dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.reference b/tests/queries/0_stateless/00605_intersections_aggregate_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.reference rename to tests/queries/0_stateless/00605_intersections_aggregate_functions.reference diff --git a/dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.sql b/tests/queries/0_stateless/00605_intersections_aggregate_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.sql rename to tests/queries/0_stateless/00605_intersections_aggregate_functions.sql diff --git a/dbms/tests/queries/0_stateless/00606_quantiles_and_nans.reference b/tests/queries/0_stateless/00606_quantiles_and_nans.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00606_quantiles_and_nans.reference rename to tests/queries/0_stateless/00606_quantiles_and_nans.reference diff --git a/dbms/tests/queries/0_stateless/00606_quantiles_and_nans.sql b/tests/queries/0_stateless/00606_quantiles_and_nans.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00606_quantiles_and_nans.sql rename to tests/queries/0_stateless/00606_quantiles_and_nans.sql diff --git a/dbms/tests/queries/0_stateless/00607_index_in_in.reference b/tests/queries/0_stateless/00607_index_in_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00607_index_in_in.reference rename to tests/queries/0_stateless/00607_index_in_in.reference diff --git a/dbms/tests/queries/0_stateless/00607_index_in_in.sql b/tests/queries/0_stateless/00607_index_in_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00607_index_in_in.sql rename to tests/queries/0_stateless/00607_index_in_in.sql diff --git a/dbms/tests/queries/0_stateless/00608_uniq_array.reference b/tests/queries/0_stateless/00608_uniq_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00608_uniq_array.reference rename to tests/queries/0_stateless/00608_uniq_array.reference diff --git a/dbms/tests/queries/0_stateless/00608_uniq_array.sql b/tests/queries/0_stateless/00608_uniq_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00608_uniq_array.sql rename to tests/queries/0_stateless/00608_uniq_array.sql diff --git a/dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.reference b/tests/queries/0_stateless/00609_distributed_with_case_when_then.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.reference rename to tests/queries/0_stateless/00609_distributed_with_case_when_then.reference diff --git a/dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.sql b/tests/queries/0_stateless/00609_distributed_with_case_when_then.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.sql rename to 
tests/queries/0_stateless/00609_distributed_with_case_when_then.sql diff --git a/dbms/tests/queries/0_stateless/00609_mv_index_in_in.reference b/tests/queries/0_stateless/00609_mv_index_in_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00609_mv_index_in_in.reference rename to tests/queries/0_stateless/00609_mv_index_in_in.reference diff --git a/dbms/tests/queries/0_stateless/00609_mv_index_in_in.sql b/tests/queries/0_stateless/00609_mv_index_in_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00609_mv_index_in_in.sql rename to tests/queries/0_stateless/00609_mv_index_in_in.sql diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference b/tests/queries/0_stateless/00609_prewhere_and_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference rename to tests/queries/0_stateless/00609_prewhere_and_default.reference diff --git a/dbms/tests/queries/0_stateless/00609_prewhere_and_default.sql b/tests/queries/0_stateless/00609_prewhere_and_default.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00609_prewhere_and_default.sql rename to tests/queries/0_stateless/00609_prewhere_and_default.sql diff --git a/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference b/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference rename to tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference diff --git a/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql b/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql rename to tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql diff --git a/dbms/tests/queries/0_stateless/00612_count.reference b/tests/queries/0_stateless/00612_count.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00612_count.reference rename to tests/queries/0_stateless/00612_count.reference diff --git a/dbms/tests/queries/0_stateless/00612_count.sql b/tests/queries/0_stateless/00612_count.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00612_count.sql rename to tests/queries/0_stateless/00612_count.sql diff --git a/dbms/tests/queries/0_stateless/00612_http_max_query_size.reference b/tests/queries/0_stateless/00612_http_max_query_size.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00612_http_max_query_size.reference rename to tests/queries/0_stateless/00612_http_max_query_size.reference diff --git a/dbms/tests/queries/0_stateless/00612_http_max_query_size.sh b/tests/queries/0_stateless/00612_http_max_query_size.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00612_http_max_query_size.sh rename to tests/queries/0_stateless/00612_http_max_query_size.sh diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference b/tests/queries/0_stateless/00612_pk_in_tuple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference rename to 
tests/queries/0_stateless/00612_pk_in_tuple.reference diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql b/tests/queries/0_stateless/00612_pk_in_tuple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql rename to tests/queries/0_stateless/00612_pk_in_tuple.sql diff --git a/dbms/tests/queries/0_stateless/00612_shard_count.reference b/tests/queries/0_stateless/00612_shard_count.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00612_shard_count.reference rename to tests/queries/0_stateless/00612_shard_count.reference diff --git a/dbms/tests/queries/0_stateless/00612_shard_count.sql b/tests/queries/0_stateless/00612_shard_count.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00612_shard_count.sql rename to tests/queries/0_stateless/00612_shard_count.sql diff --git a/dbms/tests/queries/0_stateless/00612_union_query_with_subquery.reference b/tests/queries/0_stateless/00612_union_query_with_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00612_union_query_with_subquery.reference rename to tests/queries/0_stateless/00612_union_query_with_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00612_union_query_with_subquery.sql b/tests/queries/0_stateless/00612_union_query_with_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00612_union_query_with_subquery.sql rename to tests/queries/0_stateless/00612_union_query_with_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference b/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference rename to tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql b/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql rename to tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql diff --git a/dbms/tests/queries/0_stateless/00614_array_nullable.reference b/tests/queries/0_stateless/00614_array_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00614_array_nullable.reference rename to tests/queries/0_stateless/00614_array_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00614_array_nullable.sql b/tests/queries/0_stateless/00614_array_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00614_array_nullable.sql rename to tests/queries/0_stateless/00614_array_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference b/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference rename to tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference diff --git a/dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql b/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql 
similarity index 100% rename from dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql rename to tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql diff --git a/dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.reference b/tests/queries/0_stateless/00615_nullable_alter_optimize.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.reference rename to tests/queries/0_stateless/00615_nullable_alter_optimize.reference diff --git a/dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.sql b/tests/queries/0_stateless/00615_nullable_alter_optimize.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.sql rename to tests/queries/0_stateless/00615_nullable_alter_optimize.sql diff --git a/dbms/tests/queries/0_stateless/00616_final_single_part.reference b/tests/queries/0_stateless/00616_final_single_part.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00616_final_single_part.reference rename to tests/queries/0_stateless/00616_final_single_part.reference diff --git a/dbms/tests/queries/0_stateless/00616_final_single_part.sql b/tests/queries/0_stateless/00616_final_single_part.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00616_final_single_part.sql rename to tests/queries/0_stateless/00616_final_single_part.sql diff --git a/dbms/tests/queries/0_stateless/00617_array_in.reference b/tests/queries/0_stateless/00617_array_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00617_array_in.reference rename to tests/queries/0_stateless/00617_array_in.reference diff --git a/dbms/tests/queries/0_stateless/00617_array_in.sql b/tests/queries/0_stateless/00617_array_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00617_array_in.sql rename to tests/queries/0_stateless/00617_array_in.sql diff --git a/dbms/tests/queries/0_stateless/00618_nullable_in.reference b/tests/queries/0_stateless/00618_nullable_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00618_nullable_in.reference rename to tests/queries/0_stateless/00618_nullable_in.reference diff --git a/dbms/tests/queries/0_stateless/00618_nullable_in.sql b/tests/queries/0_stateless/00618_nullable_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00618_nullable_in.sql rename to tests/queries/0_stateless/00618_nullable_in.sql diff --git a/dbms/tests/queries/0_stateless/00619_extract.reference b/tests/queries/0_stateless/00619_extract.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00619_extract.reference rename to tests/queries/0_stateless/00619_extract.reference diff --git a/dbms/tests/queries/0_stateless/00619_extract.sql b/tests/queries/0_stateless/00619_extract.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00619_extract.sql rename to tests/queries/0_stateless/00619_extract.sql diff --git a/dbms/tests/queries/0_stateless/00619_union_highlite.reference b/tests/queries/0_stateless/00619_union_highlite.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00619_union_highlite.reference rename to tests/queries/0_stateless/00619_union_highlite.reference diff --git a/dbms/tests/queries/0_stateless/00619_union_highlite.sql b/tests/queries/0_stateless/00619_union_highlite.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00619_union_highlite.sql rename to tests/queries/0_stateless/00619_union_highlite.sql diff --git a/dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference b/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference rename to tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql b/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql rename to tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00621_regression_for_in_operator.reference b/tests/queries/0_stateless/00621_regression_for_in_operator.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00621_regression_for_in_operator.reference rename to tests/queries/0_stateless/00621_regression_for_in_operator.reference diff --git a/dbms/tests/queries/0_stateless/00621_regression_for_in_operator.sql b/tests/queries/0_stateless/00621_regression_for_in_operator.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00621_regression_for_in_operator.sql rename to tests/queries/0_stateless/00621_regression_for_in_operator.sql diff --git a/dbms/tests/queries/0_stateless/00622_select_in_parens.reference b/tests/queries/0_stateless/00622_select_in_parens.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00622_select_in_parens.reference rename to tests/queries/0_stateless/00622_select_in_parens.reference diff --git a/dbms/tests/queries/0_stateless/00622_select_in_parens.sql b/tests/queries/0_stateless/00622_select_in_parens.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00622_select_in_parens.sql rename to tests/queries/0_stateless/00622_select_in_parens.sql diff --git a/dbms/tests/queries/0_stateless/00623_in_partition_key.reference b/tests/queries/0_stateless/00623_in_partition_key.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00623_in_partition_key.reference rename to tests/queries/0_stateless/00623_in_partition_key.reference diff --git a/dbms/tests/queries/0_stateless/00623_in_partition_key.sql b/tests/queries/0_stateless/00623_in_partition_key.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00623_in_partition_key.sql rename to tests/queries/0_stateless/00623_in_partition_key.sql diff --git a/dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference b/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference rename to tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql b/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql rename to tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table.reference 
b/tests/queries/0_stateless/00623_truncate_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00623_truncate_table.reference rename to tests/queries/0_stateless/00623_truncate_table.reference diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table.sql b/tests/queries/0_stateless/00623_truncate_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00623_truncate_table.sql rename to tests/queries/0_stateless/00623_truncate_table.sql diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.reference b/tests/queries/0_stateless/00623_truncate_table_throw_exception.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.reference rename to tests/queries/0_stateless/00623_truncate_table_throw_exception.reference diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh b/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh rename to tests/queries/0_stateless/00623_truncate_table_throw_exception.sh diff --git a/dbms/tests/queries/0_stateless/00624_length_utf8.reference b/tests/queries/0_stateless/00624_length_utf8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00624_length_utf8.reference rename to tests/queries/0_stateless/00624_length_utf8.reference diff --git a/dbms/tests/queries/0_stateless/00624_length_utf8.sql b/tests/queries/0_stateless/00624_length_utf8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00624_length_utf8.sql rename to tests/queries/0_stateless/00624_length_utf8.sql diff --git a/dbms/tests/queries/0_stateless/00625_arrays_in_nested.reference b/tests/queries/0_stateless/00625_arrays_in_nested.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00625_arrays_in_nested.reference rename to tests/queries/0_stateless/00625_arrays_in_nested.reference diff --git a/dbms/tests/queries/0_stateless/00625_arrays_in_nested.sql b/tests/queries/0_stateless/00625_arrays_in_nested.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00625_arrays_in_nested.sql rename to tests/queries/0_stateless/00625_arrays_in_nested.sql diff --git a/dbms/tests/queries/0_stateless/00625_query_in_form_data.reference b/tests/queries/0_stateless/00625_query_in_form_data.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00625_query_in_form_data.reference rename to tests/queries/0_stateless/00625_query_in_form_data.reference diff --git a/dbms/tests/queries/0_stateless/00625_query_in_form_data.sh b/tests/queries/0_stateless/00625_query_in_form_data.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00625_query_in_form_data.sh rename to tests/queries/0_stateless/00625_query_in_form_data.sh diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference b/tests/queries/0_stateless/00625_summing_merge_tree_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference rename to tests/queries/0_stateless/00625_summing_merge_tree_merge.reference diff --git a/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql b/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql rename to 
tests/queries/0_stateless/00625_summing_merge_tree_merge.sql diff --git a/dbms/tests/queries/0_stateless/00626_in_syntax.reference b/tests/queries/0_stateless/00626_in_syntax.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00626_in_syntax.reference rename to tests/queries/0_stateless/00626_in_syntax.reference diff --git a/dbms/tests/queries/0_stateless/00626_in_syntax.sql b/tests/queries/0_stateless/00626_in_syntax.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00626_in_syntax.sql rename to tests/queries/0_stateless/00626_in_syntax.sql diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table.reference b/tests/queries/0_stateless/00626_replace_partition_from_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table.reference rename to tests/queries/0_stateless/00626_replace_partition_from_table.reference diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table.sql b/tests/queries/0_stateless/00626_replace_partition_from_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table.sql rename to tests/queries/0_stateless/00626_replace_partition_from_table.sql diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference rename to tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh rename to tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00716_allow_ddl.reference b/tests/queries/0_stateless/00627_recursive_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00716_allow_ddl.reference rename to tests/queries/0_stateless/00627_recursive_alias.reference diff --git a/dbms/tests/queries/0_stateless/00627_recursive_alias.sql b/tests/queries/0_stateless/00627_recursive_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00627_recursive_alias.sql rename to tests/queries/0_stateless/00627_recursive_alias.sql diff --git a/dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference b/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference rename to tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference diff --git a/dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql b/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql rename to tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql diff --git a/dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference b/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference rename to 
tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference diff --git a/dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh b/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh rename to tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh diff --git a/dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.reference b/tests/queries/0_stateless/00632_aggregation_window_funnel.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.reference rename to tests/queries/0_stateless/00632_aggregation_window_funnel.reference diff --git a/dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.sql b/tests/queries/0_stateless/00632_aggregation_window_funnel.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.sql rename to tests/queries/0_stateless/00632_aggregation_window_funnel.sql diff --git a/dbms/tests/queries/0_stateless/00632_get_sample_block_cache.reference b/tests/queries/0_stateless/00632_get_sample_block_cache.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00632_get_sample_block_cache.reference rename to tests/queries/0_stateless/00632_get_sample_block_cache.reference diff --git a/dbms/tests/queries/0_stateless/00632_get_sample_block_cache.sql b/tests/queries/0_stateless/00632_get_sample_block_cache.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00632_get_sample_block_cache.sql rename to tests/queries/0_stateless/00632_get_sample_block_cache.sql diff --git a/dbms/tests/queries/0_stateless/00633_func_or_in.reference b/tests/queries/0_stateless/00633_func_or_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00633_func_or_in.reference rename to tests/queries/0_stateless/00633_func_or_in.reference diff --git a/dbms/tests/queries/0_stateless/00633_func_or_in.sql b/tests/queries/0_stateless/00633_func_or_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00633_func_or_in.sql rename to tests/queries/0_stateless/00633_func_or_in.sql diff --git a/dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference rename to tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh rename to tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_db.reference b/tests/queries/0_stateless/00634_logging_shard.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_db.reference rename to tests/queries/0_stateless/00634_logging_shard.reference diff --git a/dbms/tests/queries/0_stateless/00634_logging_shard.sh b/tests/queries/0_stateless/00634_logging_shard.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00634_logging_shard.sh rename to 
tests/queries/0_stateless/00634_logging_shard.sh diff --git a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference b/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference rename to tests/queries/0_stateless/00634_performance_introspection_and_logging.reference diff --git a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh rename to tests/queries/0_stateless/00634_performance_introspection_and_logging.sh diff --git a/dbms/tests/queries/0_stateless/00634_rename_view.reference b/tests/queries/0_stateless/00634_rename_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00634_rename_view.reference rename to tests/queries/0_stateless/00634_rename_view.reference diff --git a/dbms/tests/queries/0_stateless/00634_rename_view.sql b/tests/queries/0_stateless/00634_rename_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00634_rename_view.sql rename to tests/queries/0_stateless/00634_rename_view.sql diff --git a/dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.reference b/tests/queries/0_stateless/00635_shard_distinct_order_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.reference rename to tests/queries/0_stateless/00635_shard_distinct_order_by.reference diff --git a/dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.sql b/tests/queries/0_stateless/00635_shard_distinct_order_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.sql rename to tests/queries/0_stateless/00635_shard_distinct_order_by.sql diff --git a/dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.reference b/tests/queries/0_stateless/00636_partition_key_parts_pruning.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.reference rename to tests/queries/0_stateless/00636_partition_key_parts_pruning.reference diff --git a/dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh rename to tests/queries/0_stateless/00636_partition_key_parts_pruning.sh diff --git a/dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference b/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference rename to tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference diff --git a/dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh b/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh rename to tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh diff --git a/dbms/tests/queries/0_stateless/00638_remote_ssrf.reference b/tests/queries/0_stateless/00638_remote_ssrf.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/00638_remote_ssrf.reference rename to tests/queries/0_stateless/00638_remote_ssrf.reference diff --git a/dbms/tests/queries/0_stateless/00638_remote_ssrf.sh.disabled b/tests/queries/0_stateless/00638_remote_ssrf.sh.disabled similarity index 100% rename from dbms/tests/queries/0_stateless/00638_remote_ssrf.sh.disabled rename to tests/queries/0_stateless/00638_remote_ssrf.sh.disabled diff --git a/dbms/tests/queries/0_stateless/00639_startsWith.reference b/tests/queries/0_stateless/00639_startsWith.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00639_startsWith.reference rename to tests/queries/0_stateless/00639_startsWith.reference diff --git a/dbms/tests/queries/0_stateless/00639_startsWith.sql b/tests/queries/0_stateless/00639_startsWith.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00639_startsWith.sql rename to tests/queries/0_stateless/00639_startsWith.sql diff --git a/dbms/tests/queries/0_stateless/00640_endsWith.reference b/tests/queries/0_stateless/00640_endsWith.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00640_endsWith.reference rename to tests/queries/0_stateless/00640_endsWith.reference diff --git a/dbms/tests/queries/0_stateless/00640_endsWith.sql b/tests/queries/0_stateless/00640_endsWith.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00640_endsWith.sql rename to tests/queries/0_stateless/00640_endsWith.sql diff --git a/tests/queries/0_stateless/00642_cast.reference b/tests/queries/0_stateless/00642_cast.reference new file mode 100644 index 00000000000..907861c1784 --- /dev/null +++ b/tests/queries/0_stateless/00642_cast.reference @@ -0,0 +1,20 @@ +hello +hello +hello +hello +hello +hello +hello +hello +1970-01-01 00:00:01 +CREATE TABLE default.cast +( + `x` UInt8, + `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)') +) +ENGINE = MergeTree +ORDER BY e +SETTINGS index_granularity = 8192 +x UInt8 +e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\') +1 hello diff --git a/dbms/tests/queries/0_stateless/00642_cast.sql b/tests/queries/0_stateless/00642_cast.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00642_cast.sql rename to tests/queries/0_stateless/00642_cast.sql diff --git a/tests/queries/0_stateless/00643_cast_zookeeper.reference b/tests/queries/0_stateless/00643_cast_zookeeper.reference new file mode 100644 index 00000000000..b79eb07aee3 --- /dev/null +++ b/tests/queries/0_stateless/00643_cast_zookeeper.reference @@ -0,0 +1,12 @@ +CREATE TABLE test.cast1 +( + `x` UInt8, + `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)') +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') +ORDER BY e +SETTINGS index_granularity = 8192 +x UInt8 +e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\') +1 hello +1 hello diff --git a/dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql b/tests/queries/0_stateless/00643_cast_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql rename to tests/queries/0_stateless/00643_cast_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference b/tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference rename to tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference diff --git a/dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql b/tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql rename to tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql diff --git a/dbms/tests/queries/0_stateless/00645_date_time_input_format.reference b/tests/queries/0_stateless/00645_date_time_input_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00645_date_time_input_format.reference rename to tests/queries/0_stateless/00645_date_time_input_format.reference diff --git a/dbms/tests/queries/0_stateless/00645_date_time_input_format.sql b/tests/queries/0_stateless/00645_date_time_input_format.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00645_date_time_input_format.sql rename to tests/queries/0_stateless/00645_date_time_input_format.sql diff --git a/dbms/tests/queries/0_stateless/00646_url_engine.python b/tests/queries/0_stateless/00646_url_engine.python similarity index 95% rename from dbms/tests/queries/0_stateless/00646_url_engine.python rename to tests/queries/0_stateless/00646_url_engine.python index 960048dbb8f..494eb12b0ef 100644 --- a/dbms/tests/queries/0_stateless/00646_url_engine.python +++ b/tests/queries/0_stateless/00646_url_engine.python @@ -2,6 +2,7 @@ from __future__ import print_function import csv import sys +import time import tempfile import threading import os, urllib @@ -180,7 +181,15 @@ def main(): if __name__ == "__main__": - try: - main() - except: + exception_text = '' + for i in range(1, 5): + try: + main() + break + except Exception as ex: + exception_text = str(ex) + time.sleep(0.1) + + if exception_text: + print("Exception: {}".format(exception_text), file=sys.stderr) os._exit(1) diff --git a/dbms/tests/queries/0_stateless/00646_url_engine.reference b/tests/queries/0_stateless/00646_url_engine.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00646_url_engine.reference rename to tests/queries/0_stateless/00646_url_engine.reference diff --git a/dbms/tests/queries/0_stateless/00646_url_engine.sh b/tests/queries/0_stateless/00646_url_engine.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00646_url_engine.sh rename to tests/queries/0_stateless/00646_url_engine.sh diff --git a/dbms/tests/queries/0_stateless/00646_weird_mmx.reference b/tests/queries/0_stateless/00646_weird_mmx.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00646_weird_mmx.reference rename to tests/queries/0_stateless/00646_weird_mmx.reference diff --git a/dbms/tests/queries/0_stateless/00646_weird_mmx.sql b/tests/queries/0_stateless/00646_weird_mmx.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00646_weird_mmx.sql rename to tests/queries/0_stateless/00646_weird_mmx.sql diff --git a/dbms/tests/queries/0_stateless/00647_histogram.reference b/tests/queries/0_stateless/00647_histogram.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00647_histogram.reference rename to tests/queries/0_stateless/00647_histogram.reference diff --git a/dbms/tests/queries/0_stateless/00647_histogram.sql b/tests/queries/0_stateless/00647_histogram.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00647_histogram.sql rename to tests/queries/0_stateless/00647_histogram.sql diff --git a/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.reference b/tests/queries/0_stateless/00647_multiply_aggregation_state.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.reference rename to tests/queries/0_stateless/00647_multiply_aggregation_state.reference diff --git a/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.sql b/tests/queries/0_stateless/00647_multiply_aggregation_state.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.sql rename to tests/queries/0_stateless/00647_multiply_aggregation_state.sql diff --git a/dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.reference b/tests/queries/0_stateless/00647_select_numbers_with_offset.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.reference rename to tests/queries/0_stateless/00647_select_numbers_with_offset.reference diff --git a/dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.sql b/tests/queries/0_stateless/00647_select_numbers_with_offset.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.sql rename to tests/queries/0_stateless/00647_select_numbers_with_offset.sql diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_table.reference b/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_table.reference rename to tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference diff --git a/dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql b/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql rename to tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql diff --git a/dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.reference b/tests/queries/0_stateless/00649_quantile_tdigest_negative.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.reference rename to tests/queries/0_stateless/00649_quantile_tdigest_negative.reference diff --git a/dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.sql b/tests/queries/0_stateless/00649_quantile_tdigest_negative.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.sql rename to tests/queries/0_stateless/00649_quantile_tdigest_negative.sql diff --git a/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference b/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference rename to tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference diff --git a/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql b/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql rename to tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql diff --git 
a/dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference b/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference rename to tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference diff --git a/dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh b/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh rename to tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh diff --git a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference b/tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference rename to tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference diff --git a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh b/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh rename to tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh diff --git a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference b/tests/queries/0_stateless/00652_mergetree_mutations.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference rename to tests/queries/0_stateless/00652_mergetree_mutations.reference diff --git a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh b/tests/queries/0_stateless/00652_mergetree_mutations.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh rename to tests/queries/0_stateless/00652_mergetree_mutations.sh diff --git a/dbms/tests/queries/0_stateless/00652_mutations_alter_update.reference b/tests/queries/0_stateless/00652_mutations_alter_update.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_alter_update.reference rename to tests/queries/0_stateless/00652_mutations_alter_update.reference diff --git a/dbms/tests/queries/0_stateless/00652_mutations_alter_update.sh b/tests/queries/0_stateless/00652_mutations_alter_update.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_alter_update.sh rename to tests/queries/0_stateless/00652_mutations_alter_update.sh diff --git a/dbms/tests/queries/0_stateless/00652_mutations_default_database.reference b/tests/queries/0_stateless/00652_mutations_default_database.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_default_database.reference rename to tests/queries/0_stateless/00652_mutations_default_database.reference diff --git a/dbms/tests/queries/0_stateless/00652_mutations_default_database.sh b/tests/queries/0_stateless/00652_mutations_default_database.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_default_database.sh rename to tests/queries/0_stateless/00652_mutations_default_database.sh diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference rename to tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh rename to tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference rename to tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh rename to tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.reference b/tests/queries/0_stateless/00653_monotonic_integer_cast.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.reference rename to tests/queries/0_stateless/00653_monotonic_integer_cast.reference diff --git a/dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.sql b/tests/queries/0_stateless/00653_monotonic_integer_cast.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.sql rename to tests/queries/0_stateless/00653_monotonic_integer_cast.sql diff --git a/dbms/tests/queries/0_stateless/00653_running_difference.reference b/tests/queries/0_stateless/00653_running_difference.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00653_running_difference.reference rename to tests/queries/0_stateless/00653_running_difference.reference diff --git a/dbms/tests/queries/0_stateless/00653_running_difference.sql b/tests/queries/0_stateless/00653_running_difference.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00653_running_difference.sql rename to tests/queries/0_stateless/00653_running_difference.sql diff --git a/dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.reference b/tests/queries/0_stateless/00653_verification_monotonic_data_load.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.reference rename to tests/queries/0_stateless/00653_verification_monotonic_data_load.reference diff --git a/dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh rename to tests/queries/0_stateless/00653_verification_monotonic_data_load.sh diff --git a/dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.reference b/tests/queries/0_stateless/00660_optimize_final_without_partition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.reference rename to 
tests/queries/0_stateless/00660_optimize_final_without_partition.reference diff --git a/dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.sql b/tests/queries/0_stateless/00660_optimize_final_without_partition.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.sql rename to tests/queries/0_stateless/00660_optimize_final_without_partition.sql diff --git a/dbms/tests/queries/0_stateless/00661_array_has_silviucpp.reference b/tests/queries/0_stateless/00661_array_has_silviucpp.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00661_array_has_silviucpp.reference rename to tests/queries/0_stateless/00661_array_has_silviucpp.reference diff --git a/dbms/tests/queries/0_stateless/00661_array_has_silviucpp.sql b/tests/queries/0_stateless/00661_array_has_silviucpp.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00661_array_has_silviucpp.sql rename to tests/queries/0_stateless/00661_array_has_silviucpp.sql diff --git a/dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference b/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference rename to tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql b/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql rename to tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00662_array_has_nullable.reference b/tests/queries/0_stateless/00662_array_has_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00662_array_has_nullable.reference rename to tests/queries/0_stateless/00662_array_has_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00662_array_has_nullable.sql b/tests/queries/0_stateless/00662_array_has_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00662_array_has_nullable.sql rename to tests/queries/0_stateless/00662_array_has_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.reference b/tests/queries/0_stateless/00663_tiny_log_empty_insert.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.reference rename to tests/queries/0_stateless/00663_tiny_log_empty_insert.reference diff --git a/dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.sql b/tests/queries/0_stateless/00663_tiny_log_empty_insert.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.sql rename to tests/queries/0_stateless/00663_tiny_log_empty_insert.sql diff --git a/dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference b/tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference rename to tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference diff --git 
a/dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql b/tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql
rename to tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference b/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference
rename to tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference
diff --git a/dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql b/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql
rename to tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql
diff --git a/dbms/tests/queries/0_stateless/00666_uniq_complex_types.reference b/tests/queries/0_stateless/00666_uniq_complex_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00666_uniq_complex_types.reference
rename to tests/queries/0_stateless/00666_uniq_complex_types.reference
diff --git a/dbms/tests/queries/0_stateless/00666_uniq_complex_types.sql b/tests/queries/0_stateless/00666_uniq_complex_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00666_uniq_complex_types.sql
rename to tests/queries/0_stateless/00666_uniq_complex_types.sql
diff --git a/dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference b/tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference
rename to tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference
diff --git a/dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql b/tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql
rename to tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql
diff --git a/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.reference b/tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00738_lock_for_inner_table.reference
rename to tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference
diff --git a/dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql b/tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql
rename to tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql
diff --git a/dbms/tests/queries/0_stateless/00670_truncate_temporary_table.reference b/tests/queries/0_stateless/00670_truncate_temporary_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00670_truncate_temporary_table.reference
rename to tests/queries/0_stateless/00670_truncate_temporary_table.reference
diff --git a/dbms/tests/queries/0_stateless/00670_truncate_temporary_table.sql b/tests/queries/0_stateless/00670_truncate_temporary_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00670_truncate_temporary_table.sql
rename to tests/queries/0_stateless/00670_truncate_temporary_table.sql
diff --git a/dbms/tests/queries/0_stateless/00671_max_intersections.reference b/tests/queries/0_stateless/00671_max_intersections.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00671_max_intersections.reference
rename to tests/queries/0_stateless/00671_max_intersections.reference
diff --git a/dbms/tests/queries/0_stateless/00671_max_intersections.sql b/tests/queries/0_stateless/00671_max_intersections.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00671_max_intersections.sql
rename to tests/queries/0_stateless/00671_max_intersections.sql
diff --git a/dbms/tests/queries/0_stateless/00672_arrayDistinct.reference b/tests/queries/0_stateless/00672_arrayDistinct.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00672_arrayDistinct.reference
rename to tests/queries/0_stateless/00672_arrayDistinct.reference
diff --git a/dbms/tests/queries/0_stateless/00672_arrayDistinct.sql b/tests/queries/0_stateless/00672_arrayDistinct.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00672_arrayDistinct.sql
rename to tests/queries/0_stateless/00672_arrayDistinct.sql
diff --git a/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference
rename to tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference
diff --git a/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql
rename to tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql
diff --git a/dbms/tests/queries/0_stateless/00674_has_array_enum.reference b/tests/queries/0_stateless/00674_has_array_enum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00674_has_array_enum.reference
rename to tests/queries/0_stateless/00674_has_array_enum.reference
diff --git a/dbms/tests/queries/0_stateless/00674_has_array_enum.sql b/tests/queries/0_stateless/00674_has_array_enum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00674_has_array_enum.sql
rename to tests/queries/0_stateless/00674_has_array_enum.sql
diff --git a/dbms/tests/queries/0_stateless/00674_join_on_syntax.reference b/tests/queries/0_stateless/00674_join_on_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00674_join_on_syntax.reference
rename to tests/queries/0_stateless/00674_join_on_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/00674_join_on_syntax.sql b/tests/queries/0_stateless/00674_join_on_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00674_join_on_syntax.sql
rename to tests/queries/0_stateless/00674_join_on_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference b/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference
rename to tests/queries/0_stateless/00675_shard_remote_with_table_function.reference
diff --git a/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql b/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql
rename to tests/queries/0_stateless/00675_shard_remote_with_table_function.sql
diff --git a/dbms/tests/queries/0_stateless/00676_group_by_in.reference b/tests/queries/0_stateless/00676_group_by_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00676_group_by_in.reference
rename to tests/queries/0_stateless/00676_group_by_in.reference
diff --git a/dbms/tests/queries/0_stateless/00676_group_by_in.sql b/tests/queries/0_stateless/00676_group_by_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00676_group_by_in.sql
rename to tests/queries/0_stateless/00676_group_by_in.sql
diff --git a/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference b/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference
rename to tests/queries/0_stateless/00677_shard_any_heavy_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql b/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql
rename to tests/queries/0_stateless/00677_shard_any_heavy_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00678_murmurhash.reference b/tests/queries/0_stateless/00678_murmurhash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00678_murmurhash.reference
rename to tests/queries/0_stateless/00678_murmurhash.reference
diff --git a/dbms/tests/queries/0_stateless/00678_murmurhash.sql b/tests/queries/0_stateless/00678_murmurhash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00678_murmurhash.sql
rename to tests/queries/0_stateless/00678_murmurhash.sql
diff --git a/dbms/tests/queries/0_stateless/00678_shard_funnel_window.reference b/tests/queries/0_stateless/00678_shard_funnel_window.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00678_shard_funnel_window.reference
rename to tests/queries/0_stateless/00678_shard_funnel_window.reference
diff --git a/dbms/tests/queries/0_stateless/00678_shard_funnel_window.sql b/tests/queries/0_stateless/00678_shard_funnel_window.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00678_shard_funnel_window.sql
rename to tests/queries/0_stateless/00678_shard_funnel_window.sql
diff --git a/dbms/tests/queries/0_stateless/00679_replace_asterisk.reference b/tests/queries/0_stateless/00679_replace_asterisk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00679_replace_asterisk.reference
rename to tests/queries/0_stateless/00679_replace_asterisk.reference
diff --git a/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql b/tests/queries/0_stateless/00679_replace_asterisk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00679_replace_asterisk.sql
rename to tests/queries/0_stateless/00679_replace_asterisk.sql
diff --git a/dbms/tests/queries/0_stateless/00679_uuid_in_key.reference b/tests/queries/0_stateless/00679_uuid_in_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00679_uuid_in_key.reference
rename to tests/queries/0_stateless/00679_uuid_in_key.reference
diff --git a/dbms/tests/queries/0_stateless/00679_uuid_in_key.sql b/tests/queries/0_stateless/00679_uuid_in_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00679_uuid_in_key.sql
rename to tests/queries/0_stateless/00679_uuid_in_key.sql
diff --git a/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference b/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference
rename to tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql b/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql
rename to tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference b/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference
rename to tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference
diff --git a/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql b/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql
rename to tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql
diff --git a/dbms/tests/queries/0_stateless/00682_empty_parts_merge.reference b/tests/queries/0_stateless/00682_empty_parts_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00682_empty_parts_merge.reference
rename to tests/queries/0_stateless/00682_empty_parts_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00682_empty_parts_merge.sh b/tests/queries/0_stateless/00682_empty_parts_merge.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00682_empty_parts_merge.sh
rename to tests/queries/0_stateless/00682_empty_parts_merge.sh
diff --git a/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference b/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference
rename to tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference
diff --git a/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql b/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql
rename to tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql
diff --git a/dbms/tests/queries/0_stateless/00686_client_exit_code.reference b/tests/queries/0_stateless/00686_client_exit_code.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00686_client_exit_code.reference
rename to tests/queries/0_stateless/00686_client_exit_code.reference
diff --git a/dbms/tests/queries/0_stateless/00686_client_exit_code.sh b/tests/queries/0_stateless/00686_client_exit_code.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00686_client_exit_code.sh
rename to tests/queries/0_stateless/00686_client_exit_code.sh
diff --git a/dbms/tests/queries/0_stateless/00687_insert_into_mv.reference b/tests/queries/0_stateless/00687_insert_into_mv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00687_insert_into_mv.reference
rename to tests/queries/0_stateless/00687_insert_into_mv.reference
diff --git a/dbms/tests/queries/0_stateless/00687_insert_into_mv.sql b/tests/queries/0_stateless/00687_insert_into_mv.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00687_insert_into_mv.sql
rename to tests/queries/0_stateless/00687_insert_into_mv.sql
diff --git a/dbms/tests/queries/0_stateless/00687_top_and_offset.reference b/tests/queries/0_stateless/00687_top_and_offset.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00687_top_and_offset.reference
rename to tests/queries/0_stateless/00687_top_and_offset.reference
diff --git a/dbms/tests/queries/0_stateless/00687_top_and_offset.sh b/tests/queries/0_stateless/00687_top_and_offset.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00687_top_and_offset.sh
rename to tests/queries/0_stateless/00687_top_and_offset.sh
diff --git a/dbms/tests/queries/0_stateless/00688_aggregation_retention.reference b/tests/queries/0_stateless/00688_aggregation_retention.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_aggregation_retention.reference
rename to tests/queries/0_stateless/00688_aggregation_retention.reference
diff --git a/dbms/tests/queries/0_stateless/00688_aggregation_retention.sql b/tests/queries/0_stateless/00688_aggregation_retention.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_aggregation_retention.sql
rename to tests/queries/0_stateless/00688_aggregation_retention.sql
diff --git a/dbms/tests/queries/0_stateless/00688_case_without_else.reference b/tests/queries/0_stateless/00688_case_without_else.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_case_without_else.reference
rename to tests/queries/0_stateless/00688_case_without_else.reference
diff --git a/dbms/tests/queries/0_stateless/00688_case_without_else.sql b/tests/queries/0_stateless/00688_case_without_else.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_case_without_else.sql
rename to tests/queries/0_stateless/00688_case_without_else.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference b/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference
rename to tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql b/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql
rename to tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.reference b/tests/queries/0_stateless/00688_low_cardinality_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.reference
rename to tests/queries/0_stateless/00688_low_cardinality_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.sql b/tests/queries/0_stateless/00688_low_cardinality_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.sql
rename to tests/queries/0_stateless/00688_low_cardinality_defaults.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference b/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference
rename to tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql b/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql
rename to tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_in.reference b/tests/queries/0_stateless/00688_low_cardinality_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_in.reference
rename to tests/queries/0_stateless/00688_low_cardinality_in.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql b/tests/queries/0_stateless/00688_low_cardinality_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql
rename to tests/queries/0_stateless/00688_low_cardinality_in.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference b/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference
rename to tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql b/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql
rename to tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.reference b/tests/queries/0_stateless/00688_low_cardinality_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.reference
rename to tests/queries/0_stateless/00688_low_cardinality_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql b/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql
rename to tests/queries/0_stateless/00688_low_cardinality_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.reference b/tests/queries/0_stateless/00688_low_cardinality_serialization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.reference
rename to tests/queries/0_stateless/00688_low_cardinality_serialization.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.sql b/tests/queries/0_stateless/00688_low_cardinality_serialization.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.sql
rename to tests/queries/0_stateless/00688_low_cardinality_serialization.sql
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.reference b/tests/queries/0_stateless/00688_low_cardinality_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.reference
rename to tests/queries/0_stateless/00688_low_cardinality_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.sql b/tests/queries/0_stateless/00688_low_cardinality_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.sql
rename to tests/queries/0_stateless/00688_low_cardinality_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/00689_file.txt b/tests/queries/0_stateless/00689_file.txt
similarity index 100%
rename from dbms/tests/queries/0_stateless/00689_file.txt
rename to tests/queries/0_stateless/00689_file.txt
diff --git a/dbms/tests/queries/0_stateless/00689_join_table_function.reference b/tests/queries/0_stateless/00689_join_table_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00689_join_table_function.reference
rename to tests/queries/0_stateless/00689_join_table_function.reference
diff --git a/dbms/tests/queries/0_stateless/00689_join_table_function.sql b/tests/queries/0_stateless/00689_join_table_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00689_join_table_function.sql
rename to tests/queries/0_stateless/00689_join_table_function.sql
diff --git a/dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference b/tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference
rename to tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference
diff --git a/dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh b/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh
rename to tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh
diff --git a/dbms/tests/queries/0_stateless/00691_array_distinct.reference b/tests/queries/0_stateless/00691_array_distinct.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00691_array_distinct.reference
rename to tests/queries/0_stateless/00691_array_distinct.reference
diff --git a/dbms/tests/queries/0_stateless/00691_array_distinct.sql b/tests/queries/0_stateless/00691_array_distinct.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00691_array_distinct.sql
rename to tests/queries/0_stateless/00691_array_distinct.sql
diff --git a/dbms/tests/queries/0_stateless/00692_if_exception_code.reference b/tests/queries/0_stateless/00692_if_exception_code.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00692_if_exception_code.reference
rename to tests/queries/0_stateless/00692_if_exception_code.reference
diff --git a/dbms/tests/queries/0_stateless/00692_if_exception_code.sql b/tests/queries/0_stateless/00692_if_exception_code.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00692_if_exception_code.sql
rename to tests/queries/0_stateless/00692_if_exception_code.sql
diff --git a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference b/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference
rename to tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql b/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql
rename to tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00741_client_comment_multiline.reference b/tests/queries/0_stateless/00694_max_block_size_zero.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00741_client_comment_multiline.reference
rename to tests/queries/0_stateless/00694_max_block_size_zero.reference
diff --git a/dbms/tests/queries/0_stateless/00694_max_block_size_zero.sql b/tests/queries/0_stateless/00694_max_block_size_zero.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00694_max_block_size_zero.sql
rename to tests/queries/0_stateless/00694_max_block_size_zero.sql
diff --git a/dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference b/tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference
rename to tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference
diff --git a/dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql b/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql
rename to tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql
diff --git a/dbms/tests/queries/0_stateless/00696_system_columns_limit.reference b/tests/queries/0_stateless/00696_system_columns_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00696_system_columns_limit.reference
rename to tests/queries/0_stateless/00696_system_columns_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00696_system_columns_limit.sql b/tests/queries/0_stateless/00696_system_columns_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00696_system_columns_limit.sql
rename to tests/queries/0_stateless/00696_system_columns_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00697_in_subquery_shard.reference b/tests/queries/0_stateless/00697_in_subquery_shard.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00697_in_subquery_shard.reference
rename to tests/queries/0_stateless/00697_in_subquery_shard.reference
diff --git a/dbms/tests/queries/0_stateless/00697_in_subquery_shard.sql b/tests/queries/0_stateless/00697_in_subquery_shard.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00697_in_subquery_shard.sql
rename to tests/queries/0_stateless/00697_in_subquery_shard.sql
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00742_require_join_strictness.reference b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00742_require_join_strictness.reference
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql
diff --git a/dbms/tests/queries/0_stateless/00699_materialized_view_mutations.reference b/tests/queries/0_stateless/00699_materialized_view_mutations.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00699_materialized_view_mutations.reference
rename to tests/queries/0_stateless/00699_materialized_view_mutations.reference
diff --git a/dbms/tests/queries/0_stateless/00699_materialized_view_mutations.sh b/tests/queries/0_stateless/00699_materialized_view_mutations.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00699_materialized_view_mutations.sh
rename to tests/queries/0_stateless/00699_materialized_view_mutations.sh
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_aggregates.reference b/tests/queries/0_stateless/00700_decimal_aggregates.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_aggregates.reference
rename to tests/queries/0_stateless/00700_decimal_aggregates.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql b/tests/queries/0_stateless/00700_decimal_aggregates.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql
rename to tests/queries/0_stateless/00700_decimal_aggregates.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_arithm.reference b/tests/queries/0_stateless/00700_decimal_arithm.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_arithm.reference
rename to tests/queries/0_stateless/00700_decimal_arithm.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_arithm.sql b/tests/queries/0_stateless/00700_decimal_arithm.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_arithm.sql
rename to tests/queries/0_stateless/00700_decimal_arithm.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_array_functions.reference b/tests/queries/0_stateless/00700_decimal_array_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_array_functions.reference
rename to tests/queries/0_stateless/00700_decimal_array_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_array_functions.sql b/tests/queries/0_stateless/00700_decimal_array_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_array_functions.sql
rename to tests/queries/0_stateless/00700_decimal_array_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_bounds.reference b/tests/queries/0_stateless/00700_decimal_bounds.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_bounds.reference
rename to tests/queries/0_stateless/00700_decimal_bounds.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_bounds.sql b/tests/queries/0_stateless/00700_decimal_bounds.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_bounds.sql
rename to tests/queries/0_stateless/00700_decimal_bounds.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_casts.reference b/tests/queries/0_stateless/00700_decimal_casts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_casts.reference
rename to tests/queries/0_stateless/00700_decimal_casts.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_casts.sql b/tests/queries/0_stateless/00700_decimal_casts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_casts.sql
rename to tests/queries/0_stateless/00700_decimal_casts.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_compare.reference b/tests/queries/0_stateless/00700_decimal_compare.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_compare.reference
rename to tests/queries/0_stateless/00700_decimal_compare.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_compare.sql b/tests/queries/0_stateless/00700_decimal_compare.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_compare.sql
rename to tests/queries/0_stateless/00700_decimal_compare.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_complex_types.reference b/tests/queries/0_stateless/00700_decimal_complex_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_complex_types.reference
rename to tests/queries/0_stateless/00700_decimal_complex_types.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql b/tests/queries/0_stateless/00700_decimal_complex_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql
rename to tests/queries/0_stateless/00700_decimal_complex_types.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_defaults.reference b/tests/queries/0_stateless/00700_decimal_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_defaults.reference
rename to tests/queries/0_stateless/00700_decimal_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_defaults.sql b/tests/queries/0_stateless/00700_decimal_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_defaults.sql
rename to tests/queries/0_stateless/00700_decimal_defaults.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.reference b/tests/queries/0_stateless/00700_decimal_empty_aggregates.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.reference
rename to tests/queries/0_stateless/00700_decimal_empty_aggregates.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.sql b/tests/queries/0_stateless/00700_decimal_empty_aggregates.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.sql
rename to tests/queries/0_stateless/00700_decimal_empty_aggregates.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_formats.reference b/tests/queries/0_stateless/00700_decimal_formats.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_formats.reference
rename to tests/queries/0_stateless/00700_decimal_formats.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_formats.sql b/tests/queries/0_stateless/00700_decimal_formats.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_formats.sql
rename to tests/queries/0_stateless/00700_decimal_formats.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_gathers.reference b/tests/queries/0_stateless/00700_decimal_gathers.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_gathers.reference
rename to tests/queries/0_stateless/00700_decimal_gathers.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_gathers.sql b/tests/queries/0_stateless/00700_decimal_gathers.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_gathers.sql
rename to tests/queries/0_stateless/00700_decimal_gathers.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_in_keys.reference b/tests/queries/0_stateless/00700_decimal_in_keys.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_in_keys.reference
rename to tests/queries/0_stateless/00700_decimal_in_keys.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql b/tests/queries/0_stateless/00700_decimal_in_keys.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql
rename to tests/queries/0_stateless/00700_decimal_in_keys.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_math.reference b/tests/queries/0_stateless/00700_decimal_math.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_math.reference
rename to tests/queries/0_stateless/00700_decimal_math.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_math.sql b/tests/queries/0_stateless/00700_decimal_math.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_math.sql
rename to tests/queries/0_stateless/00700_decimal_math.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_null.reference b/tests/queries/0_stateless/00700_decimal_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_null.reference
rename to tests/queries/0_stateless/00700_decimal_null.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_null.sql b/tests/queries/0_stateless/00700_decimal_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_null.sql
rename to tests/queries/0_stateless/00700_decimal_null.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_round.reference b/tests/queries/0_stateless/00700_decimal_round.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_round.reference
rename to tests/queries/0_stateless/00700_decimal_round.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_round.sql b/tests/queries/0_stateless/00700_decimal_round.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_round.sql
rename to tests/queries/0_stateless/00700_decimal_round.sql
diff --git a/dbms/tests/queries/0_stateless/00700_to_decimal_or_something.reference b/tests/queries/0_stateless/00700_to_decimal_or_something.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_to_decimal_or_something.reference
rename to tests/queries/0_stateless/00700_to_decimal_or_something.reference
diff --git a/dbms/tests/queries/0_stateless/00700_to_decimal_or_something.sql b/tests/queries/0_stateless/00700_to_decimal_or_something.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_to_decimal_or_something.sql
rename to tests/queries/0_stateless/00700_to_decimal_or_something.sql
diff --git a/dbms/tests/queries/0_stateless/00701_context_use_after_free.reference b/tests/queries/0_stateless/00701_context_use_after_free.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_context_use_after_free.reference
rename to tests/queries/0_stateless/00701_context_use_after_free.reference
diff --git a/dbms/tests/queries/0_stateless/00701_context_use_after_free.sql b/tests/queries/0_stateless/00701_context_use_after_free.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_context_use_after_free.sql
rename to tests/queries/0_stateless/00701_context_use_after_free.sql
diff --git a/dbms/tests/queries/0_stateless/00701_join_default_strictness.reference b/tests/queries/0_stateless/00701_join_default_strictness.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_join_default_strictness.reference
rename to tests/queries/0_stateless/00701_join_default_strictness.reference
diff --git a/dbms/tests/queries/0_stateless/00701_join_default_strictness.sql b/tests/queries/0_stateless/00701_join_default_strictness.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_join_default_strictness.sql
rename to tests/queries/0_stateless/00701_join_default_strictness.sql
diff --git a/dbms/tests/queries/0_stateless/00701_rollup.reference b/tests/queries/0_stateless/00701_rollup.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_rollup.reference
rename to tests/queries/0_stateless/00701_rollup.reference
diff --git a/dbms/tests/queries/0_stateless/00701_rollup.sql b/tests/queries/0_stateless/00701_rollup.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_rollup.sql
rename to tests/queries/0_stateless/00701_rollup.sql
diff --git a/dbms/tests/queries/0_stateless/00702_join_on_dups.reference b/tests/queries/0_stateless/00702_join_on_dups.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_on_dups.reference
rename to tests/queries/0_stateless/00702_join_on_dups.reference
diff --git a/dbms/tests/queries/0_stateless/00702_join_on_dups.sql b/tests/queries/0_stateless/00702_join_on_dups.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_on_dups.sql
rename to tests/queries/0_stateless/00702_join_on_dups.sql
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using.reference b/tests/queries/0_stateless/00702_join_with_using.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using.reference
rename to tests/queries/0_stateless/00702_join_with_using.reference
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using.sql b/tests/queries/0_stateless/00702_join_with_using.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using.sql
rename to tests/queries/0_stateless/00702_join_with_using.sql
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using_dups.reference b/tests/queries/0_stateless/00702_join_with_using_dups.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using_dups.reference
rename to tests/queries/0_stateless/00702_join_with_using_dups.reference
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using_dups.sql b/tests/queries/0_stateless/00702_join_with_using_dups.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using_dups.sql
rename to tests/queries/0_stateless/00702_join_with_using_dups.sql
diff --git a/dbms/tests/queries/0_stateless/00702_where_with_quailified_names.reference b/tests/queries/0_stateless/00702_where_with_quailified_names.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_where_with_quailified_names.reference
rename to tests/queries/0_stateless/00702_where_with_quailified_names.reference
diff --git a/dbms/tests/queries/0_stateless/00702_where_with_quailified_names.sql b/tests/queries/0_stateless/00702_where_with_quailified_names.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_where_with_quailified_names.sql
rename to tests/queries/0_stateless/00702_where_with_quailified_names.sql
diff --git a/dbms/tests/queries/0_stateless/00703_join_crash.reference b/tests/queries/0_stateless/00703_join_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00703_join_crash.reference
rename to tests/queries/0_stateless/00703_join_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00703_join_crash.sql b/tests/queries/0_stateless/00703_join_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00703_join_crash.sql
rename to tests/queries/0_stateless/00703_join_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference b/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference
rename to tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference
diff --git a/dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql b/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql
rename to tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql
diff --git a/dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.reference b/tests/queries/0_stateless/00704_drop_truncate_memory_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.reference
rename to tests/queries/0_stateless/00704_drop_truncate_memory_table.reference
diff --git a/dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh b/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh
rename to tests/queries/0_stateless/00704_drop_truncate_memory_table.sh
diff --git a/dbms/tests/queries/0_stateless/00705_aggregate_states_addition.reference b/tests/queries/0_stateless/00705_aggregate_states_addition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_aggregate_states_addition.reference
rename to tests/queries/0_stateless/00705_aggregate_states_addition.reference
diff --git a/dbms/tests/queries/0_stateless/00705_aggregate_states_addition.sql b/tests/queries/0_stateless/00705_aggregate_states_addition.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_aggregate_states_addition.sql
rename to tests/queries/0_stateless/00705_aggregate_states_addition.sql
diff --git a/dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.reference b/tests/queries/0_stateless/00705_drop_create_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.reference
rename to tests/queries/0_stateless/00705_drop_create_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.sh
rename to tests/queries/0_stateless/00705_drop_create_merge_tree.sh
diff --git a/dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference b/tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference
rename to tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference
diff --git a/dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql b/tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql
rename to tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql
diff --git a/dbms/tests/queries/0_stateless/00707_float_csv_delimiter.reference b/tests/queries/0_stateless/00707_float_csv_delimiter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00707_float_csv_delimiter.reference
rename to tests/queries/0_stateless/00707_float_csv_delimiter.reference
diff --git a/dbms/tests/queries/0_stateless/00707_float_csv_delimiter.sql b/tests/queries/0_stateless/00707_float_csv_delimiter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00707_float_csv_delimiter.sql
rename to tests/queries/0_stateless/00707_float_csv_delimiter.sql
diff --git a/dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.reference b/tests/queries/0_stateless/00709_virtual_column_partition_id.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.reference
rename to tests/queries/0_stateless/00709_virtual_column_partition_id.reference
diff --git a/dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.sql b/tests/queries/0_stateless/00709_virtual_column_partition_id.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.sql
rename to tests/queries/0_stateless/00709_virtual_column_partition_id.sql
diff --git a/dbms/tests/queries/0_stateless/00710_array_enumerate_dense.reference b/tests/queries/0_stateless/00710_array_enumerate_dense.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00710_array_enumerate_dense.reference
rename to tests/queries/0_stateless/00710_array_enumerate_dense.reference
diff --git a/dbms/tests/queries/0_stateless/00710_array_enumerate_dense.sql b/tests/queries/0_stateless/00710_array_enumerate_dense.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00710_array_enumerate_dense.sql
rename to tests/queries/0_stateless/00710_array_enumerate_dense.sql
diff --git a/dbms/tests/queries/0_stateless/00711_array_enumerate_variants.reference b/tests/queries/0_stateless/00711_array_enumerate_variants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00711_array_enumerate_variants.reference
rename to tests/queries/0_stateless/00711_array_enumerate_variants.reference
diff --git a/dbms/tests/queries/0_stateless/00711_array_enumerate_variants.sql b/tests/queries/0_stateless/00711_array_enumerate_variants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00711_array_enumerate_variants.sql
rename to tests/queries/0_stateless/00711_array_enumerate_variants.sql
diff --git a/dbms/tests/queries/0_stateless/00712_nan_comparison.reference b/tests/queries/0_stateless/00712_nan_comparison.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_nan_comparison.reference
rename to tests/queries/0_stateless/00712_nan_comparison.reference
diff --git a/dbms/tests/queries/0_stateless/00712_nan_comparison.sql b/tests/queries/0_stateless/00712_nan_comparison.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_nan_comparison.sql
rename to tests/queries/0_stateless/00712_nan_comparison.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.reference b/tests/queries/0_stateless/00712_prewhere_with_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.sql b/tests/queries/0_stateless/00712_prewhere_with_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference b/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql b/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference b/tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql b/tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00763_lock_buffer.reference b/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00763_lock_buffer.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql b/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_final.reference b/tests/queries/0_stateless/00712_prewhere_with_final.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_final.reference
rename to tests/queries/0_stateless/00712_prewhere_with_final.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_final.sql b/tests/queries/0_stateless/00712_prewhere_with_final.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_final.sql
rename to tests/queries/0_stateless/00712_prewhere_with_final.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference b/tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql b/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference b/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql b/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.reference b/tests/queries/0_stateless/00712_prewhere_with_sampling.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.reference
rename to tests/queries/0_stateless/00712_prewhere_with_sampling.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql b/tests/queries/0_stateless/00712_prewhere_with_sampling.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql
rename to tests/queries/0_stateless/00712_prewhere_with_sampling.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference b/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference
rename to tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql b/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
rename to tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.reference b/tests/queries/0_stateless/00713_collapsing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.reference
rename to tests/queries/0_stateless/00713_collapsing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.sql b/tests/queries/0_stateless/00713_collapsing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.sql
rename to tests/queries/0_stateless/00713_collapsing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00714_alter_uuid.reference b/tests/queries/0_stateless/00714_alter_uuid.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_alter_uuid.reference
rename to tests/queries/0_stateless/00714_alter_uuid.reference
diff --git a/dbms/tests/queries/0_stateless/00714_alter_uuid.sql b/tests/queries/0_stateless/00714_alter_uuid.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_alter_uuid.sql
rename to tests/queries/0_stateless/00714_alter_uuid.sql
diff --git a/dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference b/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference
rename to tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference
diff --git a/dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql b/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql
rename to tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql
diff --git a/dbms/tests/queries/0_stateless/00715_bounding_ratio.reference b/tests/queries/0_stateless/00715_bounding_ratio.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_bounding_ratio.reference
rename to tests/queries/0_stateless/00715_bounding_ratio.reference
diff --git a/dbms/tests/queries/0_stateless/00715_bounding_ratio.sql b/tests/queries/0_stateless/00715_bounding_ratio.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_bounding_ratio.sql
rename to tests/queries/0_stateless/00715_bounding_ratio.sql
diff --git a/dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference
rename to tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh
rename to tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.reference b/tests/queries/0_stateless/00715_json_each_row_input_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.reference
rename to tests/queries/0_stateless/00715_json_each_row_input_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.sh b/tests/queries/0_stateless/00715_json_each_row_input_nested.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.sh
rename to tests/queries/0_stateless/00715_json_each_row_input_nested.sh
diff --git a/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference b/tests/queries/0_stateless/00716_allow_ddl.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference
rename to tests/queries/0_stateless/00716_allow_ddl.reference
diff --git a/dbms/tests/queries/0_stateless/00716_allow_ddl.sql b/tests/queries/0_stateless/00716_allow_ddl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00716_allow_ddl.sql
rename to tests/queries/0_stateless/00716_allow_ddl.sql
diff --git a/dbms/tests/queries/0_stateless/00717_default_join_type.reference b/tests/queries/0_stateless/00717_default_join_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_default_join_type.reference
rename to tests/queries/0_stateless/00717_default_join_type.reference
diff --git a/dbms/tests/queries/0_stateless/00717_default_join_type.sql b/tests/queries/0_stateless/00717_default_join_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_default_join_type.sql
rename to tests/queries/0_stateless/00717_default_join_type.sql
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference b/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference
rename to tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql b/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql
rename to tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference b/tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference
rename to tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql b/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
rename to tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
a/dbms/tests/queries/0_stateless/00717_merge_and_distributed.reference b/tests/queries/0_stateless/00717_merge_and_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00717_merge_and_distributed.reference rename to tests/queries/0_stateless/00717_merge_and_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00717_merge_and_distributed.sql b/tests/queries/0_stateless/00717_merge_and_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00717_merge_and_distributed.sql rename to tests/queries/0_stateless/00717_merge_and_distributed.sql diff --git a/dbms/tests/queries/0_stateless/00718_format_datetime.reference b/tests/queries/0_stateless/00718_format_datetime.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00718_format_datetime.reference rename to tests/queries/0_stateless/00718_format_datetime.reference diff --git a/dbms/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00718_format_datetime.sql rename to tests/queries/0_stateless/00718_format_datetime.sql diff --git a/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.reference b/tests/queries/0_stateless/00718_low_cardinaliry_alter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.reference rename to tests/queries/0_stateless/00718_low_cardinaliry_alter.reference diff --git a/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql b/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql rename to tests/queries/0_stateless/00718_low_cardinaliry_alter.sql diff --git a/dbms/tests/queries/0_stateless/00719_format_datetime_rand.reference b/tests/queries/0_stateless/00719_format_datetime_rand.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00719_format_datetime_rand.reference rename to tests/queries/0_stateless/00719_format_datetime_rand.reference diff --git a/dbms/tests/queries/0_stateless/00719_format_datetime_rand.sql b/tests/queries/0_stateless/00719_format_datetime_rand.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00719_format_datetime_rand.sql rename to tests/queries/0_stateless/00719_format_datetime_rand.sql diff --git a/dbms/tests/queries/0_stateless/00719_insert_block_without_column.reference b/tests/queries/0_stateless/00719_insert_block_without_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00719_insert_block_without_column.reference rename to tests/queries/0_stateless/00719_insert_block_without_column.reference diff --git a/dbms/tests/queries/0_stateless/00719_insert_block_without_column.sh b/tests/queries/0_stateless/00719_insert_block_without_column.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00719_insert_block_without_column.sh rename to tests/queries/0_stateless/00719_insert_block_without_column.sh diff --git a/dbms/tests/queries/0_stateless/00833_sleep_overflow.reference b/tests/queries/0_stateless/00719_parallel_ddl_db.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00833_sleep_overflow.reference rename to tests/queries/0_stateless/00719_parallel_ddl_db.reference diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_db.sh b/tests/queries/0_stateless/00719_parallel_ddl_db.sh similarity index 100% 
rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_db.sh rename to tests/queries/0_stateless/00719_parallel_ddl_db.sh diff --git a/dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference b/tests/queries/0_stateless/00719_parallel_ddl_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference rename to tests/queries/0_stateless/00719_parallel_ddl_table.reference diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_table.sh b/tests/queries/0_stateless/00719_parallel_ddl_table.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_table.sh rename to tests/queries/0_stateless/00719_parallel_ddl_table.sh diff --git a/dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference b/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference rename to tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference diff --git a/dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql b/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql rename to tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql diff --git a/dbms/tests/queries/0_stateless/00720_with_cube.reference b/tests/queries/0_stateless/00720_with_cube.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00720_with_cube.reference rename to tests/queries/0_stateless/00720_with_cube.reference diff --git a/dbms/tests/queries/0_stateless/00720_with_cube.sql b/tests/queries/0_stateless/00720_with_cube.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00720_with_cube.sql rename to tests/queries/0_stateless/00720_with_cube.sql diff --git a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference b/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference rename to tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql b/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql rename to tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00722_inner_join.reference b/tests/queries/0_stateless/00722_inner_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00722_inner_join.reference rename to tests/queries/0_stateless/00722_inner_join.reference diff --git a/dbms/tests/queries/0_stateless/00722_inner_join.sql b/tests/queries/0_stateless/00722_inner_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00722_inner_join.sql rename to tests/queries/0_stateless/00722_inner_join.sql diff --git a/dbms/tests/queries/0_stateless/00723_remerge_sort.reference 
b/tests/queries/0_stateless/00723_remerge_sort.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00723_remerge_sort.reference
rename to tests/queries/0_stateless/00723_remerge_sort.reference
diff --git a/dbms/tests/queries/0_stateless/00723_remerge_sort.sql b/tests/queries/0_stateless/00723_remerge_sort.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00723_remerge_sort.sql
rename to tests/queries/0_stateless/00723_remerge_sort.sql
diff --git a/dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference b/tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference
rename to tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference
diff --git a/dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql b/tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql
rename to tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql
diff --git a/tests/queries/0_stateless/00725_comment_columns.reference b/tests/queries/0_stateless/00725_comment_columns.reference
new file mode 100644
index 00000000000..86794581daf
--- /dev/null
+++ b/tests/queries/0_stateless/00725_comment_columns.reference
@@ -0,0 +1,38 @@
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\', \n `fourth_column` UInt8 COMMENT \'comment 4\', \n `fifth_column` UInt8\n)\nENGINE = TinyLog
+first_column UInt8 DEFAULT 1 comment 1
+second_column UInt8 MATERIALIZED first_column comment 2
+third_column UInt8 ALIAS second_column comment 3
+fourth_column UInt8 comment 4
+fifth_column UInt8
+┌─table──────────────────────┬─name──────────┬─comment───┐
+│ check_query_comment_column │ first_column │ comment 1 │
+│ check_query_comment_column │ second_column │ comment 2 │
+│ check_query_comment_column │ third_column │ comment 3 │
+│ check_query_comment_column │ fourth_column │ comment 4 │
+│ check_query_comment_column │ fifth_column │ │
+└────────────────────────────┴───────────────┴───────────┘
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\', \n `fourth_column` UInt8 COMMENT \'comment 4_1\', \n `fifth_column` UInt8 COMMENT \'comment 5_1\'\n)\nENGINE = TinyLog
+┌─table──────────────────────┬─name──────────┬─comment─────┐
+│ check_query_comment_column │ first_column │ comment 1_2 │
+│ check_query_comment_column │ second_column │ comment 2_2 │
+│ check_query_comment_column │ third_column │ comment 3_2 │
+│ check_query_comment_column │ fourth_column │ comment 4_2 │
+│ check_query_comment_column │ fifth_column │ comment 5_2 │
+└────────────────────────────┴───────────────┴─────────────┘
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\', \n `fourth_column` UInt8 COMMENT \'comment 4_2\', \n `fifth_column` UInt8 COMMENT \'comment 5_2\'\n)\nENGINE = TinyLog
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1\', \n `second_column` UInt8 COMMENT \'comment 2\', \n `third_column` UInt8 COMMENT \'comment 3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192
+first_column UInt8 comment 1
+second_column UInt8 comment 2
+third_column UInt8 comment 3
+┌─table──────────────────────┬─name──────────┬─comment───┐
+│ check_query_comment_column │ first_column │ comment 1 │
+│ check_query_comment_column │ second_column │ comment 2 │
+│ check_query_comment_column │ third_column │ comment 3 │
+└────────────────────────────┴───────────────┴───────────┘
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1_2\', \n `second_column` UInt8 COMMENT \'comment 2_2\', \n `third_column` UInt8 COMMENT \'comment 3_2\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1_3\', \n `second_column` UInt8 COMMENT \'comment 2_3\', \n `third_column` UInt8 COMMENT \'comment 3_3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192
+┌─table──────────────────────┬─name──────────┬─comment─────┐
+│ check_query_comment_column │ first_column │ comment 1_3 │
+│ check_query_comment_column │ second_column │ comment 2_3 │
+│ check_query_comment_column │ third_column │ comment 3_3 │
+└────────────────────────────┴───────────────┴─────────────┘
diff --git a/dbms/tests/queries/0_stateless/00725_comment_columns.sql b/tests/queries/0_stateless/00725_comment_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_comment_columns.sql
rename to tests/queries/0_stateless/00725_comment_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
similarity index 85%
rename from dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
rename to tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
index a31b4bd7308..28051d15f65 100644
--- a/dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
+++ b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
@@ -1,4 +1,4 @@
-CREATE TABLE default.ipv4_test (`ipv4_` IPv4) ENGINE = Memory
+CREATE TABLE default.ipv4_test\n(\n `ipv4_` IPv4\n)\nENGINE = Memory
 0.0.0.0 00
 8.8.8.8 08080808
 127.0.0.1 7F000001
@@ -10,7 +10,7 @@ CREATE TABLE default.ipv4_test (`ipv4_` IPv4) ENGINE = Memory
 > 127.0.0.1 255.255.255.255
 = 127.0.0.1 127.0.0.1
 euqality of IPv4-mapped IPv6 value and IPv4 promoted to IPv6 with function: 1
-CREATE TABLE default.ipv6_test (`ipv6_` IPv6) ENGINE = Memory
+CREATE TABLE default.ipv6_test\n(\n `ipv6_` IPv6\n)\nENGINE = Memory
 :: 00000000000000000000000000000000
 :: 00000000000000000000000000000000
 ::ffff:8.8.8.8 00000000000000000000FFFF08080808
diff --git a/dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql
rename to tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_1.reference
b/tests/queries/0_stateless/00725_join_on_bug_1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_1.reference rename to tests/queries/0_stateless/00725_join_on_bug_1.reference diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_1.sql b/tests/queries/0_stateless/00725_join_on_bug_1.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_1.sql rename to tests/queries/0_stateless/00725_join_on_bug_1.sql diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_2.reference b/tests/queries/0_stateless/00725_join_on_bug_2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_2.reference rename to tests/queries/0_stateless/00725_join_on_bug_2.reference diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql b/tests/queries/0_stateless/00725_join_on_bug_2.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql rename to tests/queries/0_stateless/00725_join_on_bug_2.sql diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_3.reference b/tests/queries/0_stateless/00725_join_on_bug_3.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_3.reference rename to tests/queries/0_stateless/00725_join_on_bug_3.reference diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql b/tests/queries/0_stateless/00725_join_on_bug_3.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql rename to tests/queries/0_stateless/00725_join_on_bug_3.sql diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_4.reference b/tests/queries/0_stateless/00725_join_on_bug_4.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_4.reference rename to tests/queries/0_stateless/00725_join_on_bug_4.reference diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql b/tests/queries/0_stateless/00725_join_on_bug_4.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql rename to tests/queries/0_stateless/00725_join_on_bug_4.sql diff --git a/dbms/tests/queries/0_stateless/00725_memory_tracking.reference b/tests/queries/0_stateless/00725_memory_tracking.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00725_memory_tracking.reference rename to tests/queries/0_stateless/00725_memory_tracking.reference diff --git a/dbms/tests/queries/0_stateless/00725_memory_tracking.sql b/tests/queries/0_stateless/00725_memory_tracking.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00725_memory_tracking.sql rename to tests/queries/0_stateless/00725_memory_tracking.sql diff --git a/dbms/tests/queries/0_stateless/00725_quantiles_shard.reference b/tests/queries/0_stateless/00725_quantiles_shard.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00725_quantiles_shard.reference rename to tests/queries/0_stateless/00725_quantiles_shard.reference diff --git a/dbms/tests/queries/0_stateless/00725_quantiles_shard.sql b/tests/queries/0_stateless/00725_quantiles_shard.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00725_quantiles_shard.sql rename to tests/queries/0_stateless/00725_quantiles_shard.sql diff --git a/dbms/tests/queries/0_stateless/00726_length_aliases.reference b/tests/queries/0_stateless/00726_length_aliases.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00726_length_aliases.reference rename to tests/queries/0_stateless/00726_length_aliases.reference diff --git a/dbms/tests/queries/0_stateless/00726_length_aliases.sql b/tests/queries/0_stateless/00726_length_aliases.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00726_length_aliases.sql rename to tests/queries/0_stateless/00726_length_aliases.sql diff --git a/dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.reference b/tests/queries/0_stateless/00726_materialized_view_concurrent.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.reference rename to tests/queries/0_stateless/00726_materialized_view_concurrent.reference diff --git a/dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.sql b/tests/queries/0_stateless/00726_materialized_view_concurrent.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.sql rename to tests/queries/0_stateless/00726_materialized_view_concurrent.sql diff --git a/dbms/tests/queries/0_stateless/00726_modulo_for_date.reference b/tests/queries/0_stateless/00726_modulo_for_date.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00726_modulo_for_date.reference rename to tests/queries/0_stateless/00726_modulo_for_date.reference diff --git a/dbms/tests/queries/0_stateless/00726_modulo_for_date.sql b/tests/queries/0_stateless/00726_modulo_for_date.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00726_modulo_for_date.sql rename to tests/queries/0_stateless/00726_modulo_for_date.sql diff --git a/dbms/tests/queries/0_stateless/00727_concat.reference b/tests/queries/0_stateless/00727_concat.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00727_concat.reference rename to tests/queries/0_stateless/00727_concat.reference diff --git a/dbms/tests/queries/0_stateless/00727_concat.sql b/tests/queries/0_stateless/00727_concat.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00727_concat.sql rename to tests/queries/0_stateless/00727_concat.sql diff --git a/dbms/tests/queries/0_stateless/00728_json_each_row_parsing.reference b/tests/queries/0_stateless/00728_json_each_row_parsing.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00728_json_each_row_parsing.reference rename to tests/queries/0_stateless/00728_json_each_row_parsing.reference diff --git a/dbms/tests/queries/0_stateless/00728_json_each_row_parsing.sh b/tests/queries/0_stateless/00728_json_each_row_parsing.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00728_json_each_row_parsing.sh rename to tests/queries/0_stateless/00728_json_each_row_parsing.sh diff --git a/dbms/tests/queries/0_stateless/00729_prewhere_array_join.reference b/tests/queries/0_stateless/00729_prewhere_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00729_prewhere_array_join.reference rename to tests/queries/0_stateless/00729_prewhere_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00729_prewhere_array_join.sql b/tests/queries/0_stateless/00729_prewhere_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00729_prewhere_array_join.sql rename to tests/queries/0_stateless/00729_prewhere_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.reference b/tests/queries/0_stateless/00730_unicode_terminal_format.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/00730_unicode_terminal_format.reference rename to tests/queries/0_stateless/00730_unicode_terminal_format.reference diff --git a/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql b/tests/queries/0_stateless/00730_unicode_terminal_format.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql rename to tests/queries/0_stateless/00730_unicode_terminal_format.sql diff --git a/dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference rename to tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference diff --git a/dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh rename to tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh diff --git a/dbms/tests/queries/0_stateless/00732_base64_functions.reference b/tests/queries/0_stateless/00732_base64_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00732_base64_functions.reference rename to tests/queries/0_stateless/00732_base64_functions.reference diff --git a/dbms/tests/queries/0_stateless/00732_base64_functions.sql b/tests/queries/0_stateless/00732_base64_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_base64_functions.sql rename to tests/queries/0_stateless/00732_base64_functions.sql diff --git a/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference b/tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference rename to tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql b/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql rename to tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference diff --git 
a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00733_if_datetime.reference b/tests/queries/0_stateless/00733_if_datetime.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00733_if_datetime.reference rename to tests/queries/0_stateless/00733_if_datetime.reference diff --git a/dbms/tests/queries/0_stateless/00733_if_datetime.sql b/tests/queries/0_stateless/00733_if_datetime.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00733_if_datetime.sql rename to tests/queries/0_stateless/00733_if_datetime.sql diff --git a/dbms/tests/queries/0_stateless/00734_timeslot.reference b/tests/queries/0_stateless/00734_timeslot.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00734_timeslot.reference rename to tests/queries/0_stateless/00734_timeslot.reference diff --git a/dbms/tests/queries/0_stateless/00734_timeslot.sql b/tests/queries/0_stateless/00734_timeslot.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00734_timeslot.sql rename to tests/queries/0_stateless/00734_timeslot.sql diff --git a/dbms/tests/queries/0_stateless/00735_conditional.reference b/tests/queries/0_stateless/00735_conditional.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00735_conditional.reference rename to tests/queries/0_stateless/00735_conditional.reference diff --git a/dbms/tests/queries/0_stateless/00735_conditional.sql b/tests/queries/0_stateless/00735_conditional.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00735_conditional.sql rename to tests/queries/0_stateless/00735_conditional.sql diff --git a/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.reference b/tests/queries/0_stateless/00735_or_expr_optimize_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.reference rename to tests/queries/0_stateless/00735_or_expr_optimize_bug.reference diff --git a/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql b/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql rename to tests/queries/0_stateless/00735_or_expr_optimize_bug.sql diff --git a/dbms/tests/queries/0_stateless/00736_disjunction_optimisation.reference b/tests/queries/0_stateless/00736_disjunction_optimisation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00736_disjunction_optimisation.reference rename to tests/queries/0_stateless/00736_disjunction_optimisation.reference diff --git a/dbms/tests/queries/0_stateless/00736_disjunction_optimisation.sql b/tests/queries/0_stateless/00736_disjunction_optimisation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00736_disjunction_optimisation.sql rename to tests/queries/0_stateless/00736_disjunction_optimisation.sql diff --git a/dbms/tests/queries/0_stateless/00737_decimal_group_by.reference b/tests/queries/0_stateless/00737_decimal_group_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00737_decimal_group_by.reference rename to 
tests/queries/0_stateless/00737_decimal_group_by.reference diff --git a/dbms/tests/queries/0_stateless/00737_decimal_group_by.sql b/tests/queries/0_stateless/00737_decimal_group_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00737_decimal_group_by.sql rename to tests/queries/0_stateless/00737_decimal_group_by.sql diff --git a/dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.reference b/tests/queries/0_stateless/00738_lock_for_inner_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.reference rename to tests/queries/0_stateless/00738_lock_for_inner_table.reference diff --git a/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/tests/queries/0_stateless/00738_lock_for_inner_table.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh rename to tests/queries/0_stateless/00738_lock_for_inner_table.sh diff --git a/dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference rename to tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference diff --git a/dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql rename to tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql diff --git a/dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference b/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference rename to tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference diff --git a/dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql b/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql rename to tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql diff --git a/dbms/tests/queries/0_stateless/00740_database_in_nested_view.reference b/tests/queries/0_stateless/00740_database_in_nested_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00740_database_in_nested_view.reference rename to tests/queries/0_stateless/00740_database_in_nested_view.reference diff --git a/dbms/tests/queries/0_stateless/00740_database_in_nested_view.sql b/tests/queries/0_stateless/00740_database_in_nested_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00740_database_in_nested_view.sql rename to tests/queries/0_stateless/00740_database_in_nested_view.sql diff --git a/dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.reference b/tests/queries/0_stateless/00740_optimize_predicate_expression.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.reference rename to tests/queries/0_stateless/00740_optimize_predicate_expression.reference diff --git 
a/dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.sql b/tests/queries/0_stateless/00740_optimize_predicate_expression.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.sql rename to tests/queries/0_stateless/00740_optimize_predicate_expression.sql diff --git a/dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.reference b/tests/queries/0_stateless/00741_client_comment_multiline.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.reference rename to tests/queries/0_stateless/00741_client_comment_multiline.reference diff --git a/dbms/tests/queries/0_stateless/00741_client_comment_multiline.sql b/tests/queries/0_stateless/00741_client_comment_multiline.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00741_client_comment_multiline.sql rename to tests/queries/0_stateless/00741_client_comment_multiline.sql diff --git a/dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.reference b/tests/queries/0_stateless/00742_require_join_strictness.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.reference rename to tests/queries/0_stateless/00742_require_join_strictness.reference diff --git a/dbms/tests/queries/0_stateless/00742_require_join_strictness.sql b/tests/queries/0_stateless/00742_require_join_strictness.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00742_require_join_strictness.sql rename to tests/queries/0_stateless/00742_require_join_strictness.sql diff --git a/dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.reference b/tests/queries/0_stateless/00743_limit_by_not_found_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.reference rename to tests/queries/0_stateless/00743_limit_by_not_found_column.reference diff --git a/dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.sql b/tests/queries/0_stateless/00743_limit_by_not_found_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.sql rename to tests/queries/0_stateless/00743_limit_by_not_found_column.sql diff --git a/dbms/tests/queries/0_stateless/00744_join_not_found_column.reference b/tests/queries/0_stateless/00744_join_not_found_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00744_join_not_found_column.reference rename to tests/queries/0_stateless/00744_join_not_found_column.reference diff --git a/dbms/tests/queries/0_stateless/00744_join_not_found_column.sql b/tests/queries/0_stateless/00744_join_not_found_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00744_join_not_found_column.sql rename to tests/queries/0_stateless/00744_join_not_found_column.sql diff --git a/dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.reference b/tests/queries/0_stateless/00745_compile_scalar_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.reference rename to tests/queries/0_stateless/00745_compile_scalar_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.sql b/tests/queries/0_stateless/00745_compile_scalar_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.sql rename to 
tests/queries/0_stateless/00745_compile_scalar_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.reference b/tests/queries/0_stateless/00746_compile_non_deterministic_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.reference rename to tests/queries/0_stateless/00746_compile_non_deterministic_function.reference diff --git a/dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.sql b/tests/queries/0_stateless/00746_compile_non_deterministic_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.sql rename to tests/queries/0_stateless/00746_compile_non_deterministic_function.sql diff --git a/dbms/tests/queries/0_stateless/00746_hashing_tuples.reference b/tests/queries/0_stateless/00746_hashing_tuples.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00746_hashing_tuples.reference rename to tests/queries/0_stateless/00746_hashing_tuples.reference diff --git a/dbms/tests/queries/0_stateless/00746_hashing_tuples.sql b/tests/queries/0_stateless/00746_hashing_tuples.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00746_hashing_tuples.sql rename to tests/queries/0_stateless/00746_hashing_tuples.sql diff --git a/dbms/tests/queries/0_stateless/00746_sql_fuzzy.pl b/tests/queries/0_stateless/00746_sql_fuzzy.pl similarity index 100% rename from dbms/tests/queries/0_stateless/00746_sql_fuzzy.pl rename to tests/queries/0_stateless/00746_sql_fuzzy.pl diff --git a/dbms/tests/queries/0_stateless/00746_sql_fuzzy.reference b/tests/queries/0_stateless/00746_sql_fuzzy.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00746_sql_fuzzy.reference rename to tests/queries/0_stateless/00746_sql_fuzzy.reference diff --git a/dbms/tests/queries/0_stateless/00746_sql_fuzzy.sh b/tests/queries/0_stateless/00746_sql_fuzzy.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00746_sql_fuzzy.sh rename to tests/queries/0_stateless/00746_sql_fuzzy.sh diff --git a/dbms/tests/queries/0_stateless/00747_contributors.reference b/tests/queries/0_stateless/00747_contributors.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00747_contributors.reference rename to tests/queries/0_stateless/00747_contributors.reference diff --git a/dbms/tests/queries/0_stateless/00747_contributors.sql b/tests/queries/0_stateless/00747_contributors.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00747_contributors.sql rename to tests/queries/0_stateless/00747_contributors.sql diff --git a/dbms/tests/queries/0_stateless/00748_insert_array_with_null.reference b/tests/queries/0_stateless/00748_insert_array_with_null.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00748_insert_array_with_null.reference rename to tests/queries/0_stateless/00748_insert_array_with_null.reference diff --git a/dbms/tests/queries/0_stateless/00748_insert_array_with_null.sql b/tests/queries/0_stateless/00748_insert_array_with_null.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00748_insert_array_with_null.sql rename to tests/queries/0_stateless/00748_insert_array_with_null.sql diff --git a/dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference b/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference
rename to tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql b/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql
rename to tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql
diff --git a/dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference b/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference
rename to tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference
diff --git a/dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql b/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql
rename to tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql
diff --git a/tests/queries/0_stateless/00751_default_databasename_for_view.reference b/tests/queries/0_stateless/00751_default_databasename_for_view.reference
new file mode 100644
index 00000000000..5ba1861e3ef
--- /dev/null
+++ b/tests/queries/0_stateless/00751_default_databasename_for_view.reference
@@ -0,0 +1,32 @@
+CREATE MATERIALIZED VIEW test_00751.t_mv_00751
+(
+ `date` Date,
+ `platform` Enum8('a' = 0, 'b' = 1),
+ `app` Enum8('a' = 0, 'b' = 1)
+)
+ENGINE = MergeTree
+ORDER BY date
+SETTINGS index_granularity = 8192 AS
+SELECT
+ date,
+ platform,
+ app
+FROM test_00751.t_00751
+WHERE (app =
+(
+ SELECT min(app)
+ FROM test_00751.u_00751
+)) AND (platform =
+(
+ SELECT
+ (
+ SELECT min(platform)
+ FROM test_00751.v_00751
+ )
+))
+2000-01-01 a a
+2000-01-02 b b
+2000-01-03 a a
+2000-01-04 b b
+2000-01-02 b b
+2000-01-03 a a
diff --git a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql b/tests/queries/0_stateless/00751_default_databasename_for_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql
rename to tests/queries/0_stateless/00751_default_databasename_for_view.sql
diff --git a/dbms/tests/queries/0_stateless/00751_hashing_ints.reference b/tests/queries/0_stateless/00751_hashing_ints.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00751_hashing_ints.reference
rename to tests/queries/0_stateless/00751_hashing_ints.reference
diff --git a/dbms/tests/queries/0_stateless/00751_hashing_ints.sql b/tests/queries/0_stateless/00751_hashing_ints.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00751_hashing_ints.sql
rename to tests/queries/0_stateless/00751_hashing_ints.sql
diff --git a/dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference b/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference
rename to tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql b/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql
similarity index 100%
rename from
dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql rename to tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.reference b/tests/queries/0_stateless/00752_low_cardinality_array_result.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.reference rename to tests/queries/0_stateless/00752_low_cardinality_array_result.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.sql b/tests/queries/0_stateless/00752_low_cardinality_array_result.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.sql rename to tests/queries/0_stateless/00752_low_cardinality_array_result.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference b/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference rename to tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql b/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql rename to tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference b/tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference rename to tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql b/tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql rename to tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.reference b/tests/queries/0_stateless/00752_low_cardinality_mv_1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.reference rename to tests/queries/0_stateless/00752_low_cardinality_mv_1.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql b/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql rename to tests/queries/0_stateless/00752_low_cardinality_mv_1.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.reference b/tests/queries/0_stateless/00752_low_cardinality_mv_2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.reference rename to tests/queries/0_stateless/00752_low_cardinality_mv_2.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.sql b/tests/queries/0_stateless/00752_low_cardinality_mv_2.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.sql rename to tests/queries/0_stateless/00752_low_cardinality_mv_2.sql diff --git 
a/dbms/tests/queries/0_stateless/00752_low_cardinality_permute.reference b/tests/queries/0_stateless/00752_low_cardinality_permute.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00752_low_cardinality_permute.reference
rename to tests/queries/0_stateless/00752_low_cardinality_permute.reference
diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_permute.sql b/tests/queries/0_stateless/00752_low_cardinality_permute.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00752_low_cardinality_permute.sql
rename to tests/queries/0_stateless/00752_low_cardinality_permute.sql
diff --git a/dbms/tests/queries/0_stateless/00753_alter_attach.reference b/tests/queries/0_stateless/00753_alter_attach.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_alter_attach.reference
rename to tests/queries/0_stateless/00753_alter_attach.reference
diff --git a/dbms/tests/queries/0_stateless/00753_alter_attach.sql b/tests/queries/0_stateless/00753_alter_attach.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_alter_attach.sql
rename to tests/queries/0_stateless/00753_alter_attach.sql
diff --git a/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference b/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference
rename to tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference
diff --git a/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql b/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql
rename to tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql
diff --git a/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference
new file mode 100644
index 00000000000..b5021d00f56
--- /dev/null
+++ b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference
@@ -0,0 +1,6 @@
+CREATE TABLE test.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'comment\', \n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192
+column_name1 UInt8 DEFAULT 1 comment
+column_name2 UInt8 non default comment
+CREATE TABLE test.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\', \n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192
+column_name1 UInt8 DEFAULT 1 another comment
+column_name2 UInt8 non default comment
diff --git a/dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql b/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql
rename to tests/queries/0_stateless/00753_comment_columns_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00753_quantile_format.reference b/tests/queries/0_stateless/00753_quantile_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_quantile_format.reference
rename to tests/queries/0_stateless/00753_quantile_format.reference
diff --git a/dbms/tests/queries/0_stateless/00753_quantile_format.sql b/tests/queries/0_stateless/00753_quantile_format.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_quantile_format.sql
rename to tests/queries/0_stateless/00753_quantile_format.sql
diff --git a/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference
rename to tests/queries/0_stateless/00753_system_columns_and_system_tables.reference
diff --git a/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql b/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql
rename to tests/queries/0_stateless/00753_system_columns_and_system_tables.sql
diff --git a/dbms/tests/queries/0_stateless/00753_with_with_single_alias.reference b/tests/queries/0_stateless/00753_with_with_single_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_with_with_single_alias.reference
rename to tests/queries/0_stateless/00753_with_with_single_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00753_with_with_single_alias.sql b/tests/queries/0_stateless/00753_with_with_single_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00753_with_with_single_alias.sql
rename to tests/queries/0_stateless/00753_with_with_single_alias.sql
diff --git a/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference b/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference
new file mode 100644
index 00000000000..a1493508b61
--- /dev/null
+++ b/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference
@@ -0,0 +1,106 @@
+*** Check SHOW CREATE TABLE ***
+CREATE TABLE default.alter_column\n(\n `x` UInt32, \n `y` Int32\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192
+*** Check parts ***
+0 0
+10 -10
+11 -11
+12 -12
+13 -13
+14 -14
+15 -15
+16 -16
+17 -17
+18 -18
+19 -19
+1 -1
+20 -20
+21 -21
+22 -22
+23 -23
+24 -24
+25 -25
+26 -26
+27 -27
+28 -28
+29 -29
+2 -2
+30 -30
+31 -31
+32 -32
+33 -33
+34 -34
+35 -35
+36 -36
+37 -37
+38 -38
+39 -39
+3 -3
+40 -40
+41 -41
+42 -42
+43 -43
+44 -44
+45 -45
+46 -46
+47 -47
+48 -48
+49 -49
+4 -4
+5 -5
+6 -6
+7 -7
+8 -8
+9 -9
+*** Check SHOW CREATE TABLE after ALTER MODIFY ***
+CREATE TABLE default.alter_column\n(\n `x` UInt32, \n `y` Int64\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192
+*** Check parts after ALTER MODIFY ***
+0 0
+10 -10
+11 -11
+12 -12
+13 -13
+14 -14
+15 -15
+16 -16
+17 -17
+18 -18
+19 -19
+1 -1
+20 -20
+21 -21
+22 -22
+23 -23
+24 -24
+25 -25
+26 -26
+27 -27
+28 -28
+29 -29
+2 -2
+30 -30
+31 -31
+32 -32
+33 -33
+34 -34
+35 -35
+36 -36
+37 -37
+38 -38
+39 -39
+3 -3
+40 -40
+41 -41
+42 -42
+43 -43
+44 -44
+45 -45
+46 -46
+47 -47
+48 -48
+49 -49
+4 -4
+5 -5
+6 -6
+7 -7
+8 -8
+9 -9
diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.sql b/tests/queries/0_stateless/00754_alter_modify_column_partitions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.sql
rename to tests/queries/0_stateless/00754_alter_modify_column_partitions.sql
diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by.reference b/tests/queries/0_stateless/00754_alter_modify_order_by.reference
new file mode 100644
index 00000000000..f0dc413a186
--- /dev/null
+++ b/tests/queries/0_stateless/00754_alter_modify_order_by.reference
@@ -0,0 +1,12 @@
+*** Check that the parts are sorted according to the new key. ***
+1 2 0 10
+1 2 0 20
+1 2 2 40
+1 2 2 50
+1 2 1 30
+*** Check that the rows are collapsed according to the new key. ***
+1 2 0 30
+1 2 1 30
+1 2 4 90
+*** Check SHOW CREATE TABLE ***
+CREATE TABLE default.summing\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `val` UInt32\n)\nENGINE = SummingMergeTree\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192
diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by.sql b/tests/queries/0_stateless/00754_alter_modify_order_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00754_alter_modify_order_by.sql
rename to tests/queries/0_stateless/00754_alter_modify_order_by.sql
diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference
new file mode 100644
index 00000000000..938a90a27b4
--- /dev/null
+++ b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference
@@ -0,0 +1,14 @@
+*** Check that the parts are sorted according to the new key. ***
+1 2 0 10
+1 2 0 20
+1 2 2 40
+1 2 2 50
+1 2 1 30
+*** Check that the rows are collapsed according to the new key. ***
+1 2 0 30
+1 2 1 30
+1 2 4 90
+*** Check SHOW CREATE TABLE ***
+CREATE TABLE test.summing_r2\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192
+*** Check SHOW CREATE TABLE after offline ALTER ***
+CREATE TABLE test.summing_r2\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `t` UInt32, \n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, t * t)\nSETTINGS index_granularity = 8192
diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference
rename to tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference
diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh
rename to tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh
diff --git
a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference rename to tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh rename to tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh diff --git a/dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.reference b/tests/queries/0_stateless/00754_first_significant_subdomain_more.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.reference rename to tests/queries/0_stateless/00754_first_significant_subdomain_more.reference diff --git a/dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.sql b/tests/queries/0_stateless/00754_first_significant_subdomain_more.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.sql rename to tests/queries/0_stateless/00754_first_significant_subdomain_more.sql diff --git a/dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference b/tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference rename to tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference diff --git a/dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql b/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql rename to tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql diff --git a/dbms/tests/queries/0_stateless/00756_power_alias.reference b/tests/queries/0_stateless/00756_power_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00756_power_alias.reference rename to tests/queries/0_stateless/00756_power_alias.reference diff --git a/dbms/tests/queries/0_stateless/00756_power_alias.sql b/tests/queries/0_stateless/00756_power_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00756_power_alias.sql rename to tests/queries/0_stateless/00756_power_alias.sql diff --git a/dbms/tests/queries/0_stateless/00757_enum_defaults.reference b/tests/queries/0_stateless/00757_enum_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00757_enum_defaults.reference rename to tests/queries/0_stateless/00757_enum_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00757_enum_defaults.sql b/tests/queries/0_stateless/00757_enum_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00757_enum_defaults.sql rename to tests/queries/0_stateless/00757_enum_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00758_array_reverse.reference 
b/tests/queries/0_stateless/00758_array_reverse.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00758_array_reverse.reference rename to tests/queries/0_stateless/00758_array_reverse.reference diff --git a/dbms/tests/queries/0_stateless/00758_array_reverse.sql b/tests/queries/0_stateless/00758_array_reverse.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00758_array_reverse.sql rename to tests/queries/0_stateless/00758_array_reverse.sql diff --git a/dbms/tests/queries/0_stateless/00759_kodieg.reference b/tests/queries/0_stateless/00759_kodieg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00759_kodieg.reference rename to tests/queries/0_stateless/00759_kodieg.reference diff --git a/dbms/tests/queries/0_stateless/00759_kodieg.sql b/tests/queries/0_stateless/00759_kodieg.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00759_kodieg.sql rename to tests/queries/0_stateless/00759_kodieg.sql diff --git a/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.reference b/tests/queries/0_stateless/00760_insert_json_with_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.reference rename to tests/queries/0_stateless/00760_insert_json_with_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql b/tests/queries/0_stateless/00760_insert_json_with_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql rename to tests/queries/0_stateless/00760_insert_json_with_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00760_url_functions_overflow.reference b/tests/queries/0_stateless/00760_url_functions_overflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00760_url_functions_overflow.reference rename to tests/queries/0_stateless/00760_url_functions_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00760_url_functions_overflow.sql b/tests/queries/0_stateless/00760_url_functions_overflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00760_url_functions_overflow.sql rename to tests/queries/0_stateless/00760_url_functions_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00761_lower_utf8_bug.reference b/tests/queries/0_stateless/00761_lower_utf8_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00761_lower_utf8_bug.reference rename to tests/queries/0_stateless/00761_lower_utf8_bug.reference diff --git a/dbms/tests/queries/0_stateless/00761_lower_utf8_bug.sql b/tests/queries/0_stateless/00761_lower_utf8_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00761_lower_utf8_bug.sql rename to tests/queries/0_stateless/00761_lower_utf8_bug.sql diff --git a/dbms/tests/queries/0_stateless/00762_date_comparsion.reference b/tests/queries/0_stateless/00762_date_comparsion.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00762_date_comparsion.reference rename to tests/queries/0_stateless/00762_date_comparsion.reference diff --git a/dbms/tests/queries/0_stateless/00762_date_comparsion.sql b/tests/queries/0_stateless/00762_date_comparsion.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00762_date_comparsion.sql rename to tests/queries/0_stateless/00762_date_comparsion.sql diff --git a/dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference 
b/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference rename to tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference diff --git a/dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql b/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql rename to tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql diff --git a/dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.reference b/tests/queries/0_stateless/00763_lock_buffer.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.reference rename to tests/queries/0_stateless/00763_lock_buffer.reference diff --git a/dbms/tests/queries/0_stateless/00763_lock_buffer.sh b/tests/queries/0_stateless/00763_lock_buffer.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00763_lock_buffer.sh rename to tests/queries/0_stateless/00763_lock_buffer.sh diff --git a/dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference rename to tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference diff --git a/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh rename to tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh diff --git a/dbms/tests/queries/0_stateless/00764_max_query_size_allocation.reference b/tests/queries/0_stateless/00764_max_query_size_allocation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00764_max_query_size_allocation.reference rename to tests/queries/0_stateless/00764_max_query_size_allocation.reference diff --git a/dbms/tests/queries/0_stateless/00764_max_query_size_allocation.sh b/tests/queries/0_stateless/00764_max_query_size_allocation.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00764_max_query_size_allocation.sh rename to tests/queries/0_stateless/00764_max_query_size_allocation.sh diff --git a/dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.reference b/tests/queries/0_stateless/00765_sql_compatibility_aliases.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.reference rename to tests/queries/0_stateless/00765_sql_compatibility_aliases.reference diff --git a/dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.sql b/tests/queries/0_stateless/00765_sql_compatibility_aliases.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.sql rename to tests/queries/0_stateless/00765_sql_compatibility_aliases.sql diff --git a/dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.reference b/tests/queries/0_stateless/00779_all_right_join_max_block_size.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.reference rename to 
tests/queries/0_stateless/00779_all_right_join_max_block_size.reference diff --git a/dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql b/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql rename to tests/queries/0_stateless/00779_all_right_join_max_block_size.sql diff --git a/dbms/tests/queries/0_stateless/00780_unaligned_array_join.reference b/tests/queries/0_stateless/00780_unaligned_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00780_unaligned_array_join.reference rename to tests/queries/0_stateless/00780_unaligned_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00780_unaligned_array_join.sql b/tests/queries/0_stateless/00780_unaligned_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00780_unaligned_array_join.sql rename to tests/queries/0_stateless/00780_unaligned_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference b/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference rename to tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql b/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql rename to tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00799_function_dry_run.reference b/tests/queries/0_stateless/00799_function_dry_run.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00799_function_dry_run.reference rename to tests/queries/0_stateless/00799_function_dry_run.reference diff --git a/dbms/tests/queries/0_stateless/00799_function_dry_run.sql b/tests/queries/0_stateless/00799_function_dry_run.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00799_function_dry_run.sql rename to tests/queries/0_stateless/00799_function_dry_run.sql diff --git a/dbms/tests/queries/0_stateless/00800_function_java_hash.reference b/tests/queries/0_stateless/00800_function_java_hash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_function_java_hash.reference rename to tests/queries/0_stateless/00800_function_java_hash.reference diff --git a/dbms/tests/queries/0_stateless/00800_function_java_hash.sql b/tests/queries/0_stateless/00800_function_java_hash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_function_java_hash.sql rename to tests/queries/0_stateless/00800_function_java_hash.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference b/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference rename to tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql b/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql rename to tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference b/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference rename to tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql b/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql rename to tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference b/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference rename to tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql b/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql rename to tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.reference b/tests/queries/0_stateless/00800_low_cardinality_empty_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.reference rename to tests/queries/0_stateless/00800_low_cardinality_empty_array.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql b/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql rename to tests/queries/0_stateless/00800_low_cardinality_empty_array.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_join.reference b/tests/queries/0_stateless/00800_low_cardinality_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_join.reference rename to tests/queries/0_stateless/00800_low_cardinality_join.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_join.sql b/tests/queries/0_stateless/00800_low_cardinality_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_join.sql rename to tests/queries/0_stateless/00800_low_cardinality_join.sql diff --git a/dbms/tests/queries/0_stateless/00800_versatile_storage_join.reference b/tests/queries/0_stateless/00800_versatile_storage_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_versatile_storage_join.reference rename to tests/queries/0_stateless/00800_versatile_storage_join.reference diff --git a/dbms/tests/queries/0_stateless/00800_versatile_storage_join.sql b/tests/queries/0_stateless/00800_versatile_storage_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_versatile_storage_join.sql rename to tests/queries/0_stateless/00800_versatile_storage_join.sql diff --git 
a/dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference b/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference rename to tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference diff --git a/dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql b/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql rename to tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql diff --git a/dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference b/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference rename to tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference diff --git a/dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql b/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql rename to tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql diff --git a/dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference b/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference rename to tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference diff --git a/dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql b/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql rename to tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql diff --git a/dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.reference b/tests/queries/0_stateless/00803_odbc_driver_2_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.reference rename to tests/queries/0_stateless/00803_odbc_driver_2_format.reference diff --git a/dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.sql b/tests/queries/0_stateless/00803_odbc_driver_2_format.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.sql rename to tests/queries/0_stateless/00803_odbc_driver_2_format.sql diff --git a/dbms/tests/queries/0_stateless/00803_xxhash.reference b/tests/queries/0_stateless/00803_xxhash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00803_xxhash.reference rename to tests/queries/0_stateless/00803_xxhash.reference diff --git a/dbms/tests/queries/0_stateless/00803_xxhash.sql b/tests/queries/0_stateless/00803_xxhash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00803_xxhash.sql rename to tests/queries/0_stateless/00803_xxhash.sql diff --git a/dbms/tests/queries/0_stateless/00804_rollup_with_having.reference 
b/tests/queries/0_stateless/00804_rollup_with_having.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_rollup_with_having.reference rename to tests/queries/0_stateless/00804_rollup_with_having.reference diff --git a/dbms/tests/queries/0_stateless/00804_rollup_with_having.sql b/tests/queries/0_stateless/00804_rollup_with_having.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_rollup_with_having.sql rename to tests/queries/0_stateless/00804_rollup_with_having.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference rename to tests/queries/0_stateless/00804_test_alter_compression_codecs.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql rename to tests/queries/0_stateless/00804_test_alter_compression_codecs.sql diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference new file mode 100644 index 00000000000..f778c4f5d90 --- /dev/null +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference @@ -0,0 +1,18 @@ +1 hello 2018-12-14 1.1 aaa 5 +2 world 2018-12-15 2.2 bbb 6 +3 ! 2018-12-16 3.3 ccc 7 +2 +1 world 2018-10-05 1.1 +2 hello 2018-10-01 2.2 +3 buy 2018-10-11 3.3 +10003 +10003 +274972506.6 +9175437371954010821 +CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), \n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), \n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), \n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +1.5555555555555 hello world! [77] ['John'] +7.1000000000000 xxxxxxxxxxxx [127] ['Henry'] +! 
+222 +!ZSTD +CREATE TABLE default.test_default_delta\n(\n `id` UInt64 CODEC(Delta(8)), \n `data` String CODEC(Delta(1)), \n `somedate` Date CODEC(Delta(2)), \n `somenum` Float64 CODEC(Delta(8)), \n `somestr` FixedString(3) CODEC(Delta(1)), \n `othernum` Int64 CODEC(Delta(8)), \n `yetothernum` Float32 CODEC(Delta(4)), \n `ddd.age` Array(UInt8) CODEC(Delta(1)), \n `ddd.Name` Array(String) CODEC(Delta(1)), \n `ddd.OName` Array(String) CODEC(Delta(1)), \n `ddd.BName` Array(String) CODEC(Delta(1))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql rename to tests/queries/0_stateless/00804_test_custom_compression_codecs.sql diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference new file mode 100644 index 00000000000..b33535364e5 --- /dev/null +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference @@ -0,0 +1,26 @@ +CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4), \n `data` String CODEC(ZSTD(1)), \n `ddd` Date CODEC(NONE), \n `somenum` Float64 CODEC(ZSTD(2)), \n `somestr` FixedString(3) CODEC(LZ4HC(7)), \n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = Log() +1 hello 2018-12-14 1.1 aaa 5 +2 world 2018-12-15 2.2 bbb 6 +3 ! 2018-12-16 3.3 ccc 7 +2 +CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), \n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), \n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), \n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = Log() +1 world 2018-10-05 1.1 +2 hello 2018-10-01 2.2 +3 buy 2018-10-11 3.3 +10003 +10003 +274972506.6 +9175437371954010821 +CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4), \n `data` String CODEC(ZSTD(1)), \n `ddd` Date CODEC(NONE), \n `somenum` Float64 CODEC(ZSTD(2)), \n `somestr` FixedString(3) CODEC(LZ4HC(7)), \n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = TinyLog() +1 hello 2018-12-14 1.1 aaa 5 +2 world 2018-12-15 2.2 bbb 6 +3 ! 
2018-12-16 3.3 ccc 7 +2 +CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), \n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), \n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), \n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = TinyLog() +1 world 2018-10-05 1.1 +2 hello 2018-10-01 2.2 +3 buy 2018-10-11 3.3 +10003 +10003 +274972506.6 +9175437371954010821 diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql rename to tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.reference b/tests/queries/0_stateless/00804_test_delta_codec_compression.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.reference rename to tests/queries/0_stateless/00804_test_delta_codec_compression.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.sql b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.sql rename to tests/queries/0_stateless/00804_test_delta_codec_compression.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference b/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference rename to tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql b/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql rename to tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql diff --git a/dbms/tests/queries/0_stateless/00805_round_down.reference b/tests/queries/0_stateless/00805_round_down.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00805_round_down.reference rename to tests/queries/0_stateless/00805_round_down.reference diff --git a/dbms/tests/queries/0_stateless/00805_round_down.sql b/tests/queries/0_stateless/00805_round_down.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00805_round_down.sql rename to tests/queries/0_stateless/00805_round_down.sql diff --git a/dbms/tests/queries/0_stateless/00806_alter_update.reference b/tests/queries/0_stateless/00806_alter_update.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00806_alter_update.reference rename to tests/queries/0_stateless/00806_alter_update.reference diff --git a/dbms/tests/queries/0_stateless/00806_alter_update.sql b/tests/queries/0_stateless/00806_alter_update.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00806_alter_update.sql rename to tests/queries/0_stateless/00806_alter_update.sql diff --git a/dbms/tests/queries/0_stateless/00807_regexp_quote_meta.reference b/tests/queries/0_stateless/00807_regexp_quote_meta.reference similarity index 100% 
rename from dbms/tests/queries/0_stateless/00807_regexp_quote_meta.reference rename to tests/queries/0_stateless/00807_regexp_quote_meta.reference diff --git a/dbms/tests/queries/0_stateless/00807_regexp_quote_meta.sql b/tests/queries/0_stateless/00807_regexp_quote_meta.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00807_regexp_quote_meta.sql rename to tests/queries/0_stateless/00807_regexp_quote_meta.sql diff --git a/dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.reference b/tests/queries/0_stateless/00808_array_enumerate_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.reference rename to tests/queries/0_stateless/00808_array_enumerate_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.sql b/tests/queries/0_stateless/00808_array_enumerate_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.sql rename to tests/queries/0_stateless/00808_array_enumerate_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00808_not_optimize_predicate.reference b/tests/queries/0_stateless/00808_not_optimize_predicate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00808_not_optimize_predicate.reference rename to tests/queries/0_stateless/00808_not_optimize_predicate.reference diff --git a/dbms/tests/queries/0_stateless/00808_not_optimize_predicate.sql b/tests/queries/0_stateless/00808_not_optimize_predicate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00808_not_optimize_predicate.sql rename to tests/queries/0_stateless/00808_not_optimize_predicate.sql diff --git a/dbms/tests/queries/0_stateless/00809_add_days_segfault.reference b/tests/queries/0_stateless/00809_add_days_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00809_add_days_segfault.reference rename to tests/queries/0_stateless/00809_add_days_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00809_add_days_segfault.sql b/tests/queries/0_stateless/00809_add_days_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00809_add_days_segfault.sql rename to tests/queries/0_stateless/00809_add_days_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00810_in_operators_segfault.reference b/tests/queries/0_stateless/00810_in_operators_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00810_in_operators_segfault.reference rename to tests/queries/0_stateless/00810_in_operators_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00810_in_operators_segfault.sql b/tests/queries/0_stateless/00810_in_operators_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00810_in_operators_segfault.sql rename to tests/queries/0_stateless/00810_in_operators_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00811_garbage.reference b/tests/queries/0_stateless/00811_garbage.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00811_garbage.reference rename to tests/queries/0_stateless/00811_garbage.reference diff --git a/dbms/tests/queries/0_stateless/00811_garbage.sql b/tests/queries/0_stateless/00811_garbage.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00811_garbage.sql rename to tests/queries/0_stateless/00811_garbage.sql diff --git a/dbms/tests/queries/0_stateless/00812_prewhere_alias_array.reference 
b/tests/queries/0_stateless/00812_prewhere_alias_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00812_prewhere_alias_array.reference rename to tests/queries/0_stateless/00812_prewhere_alias_array.reference diff --git a/dbms/tests/queries/0_stateless/00812_prewhere_alias_array.sql b/tests/queries/0_stateless/00812_prewhere_alias_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00812_prewhere_alias_array.sql rename to tests/queries/0_stateless/00812_prewhere_alias_array.sql diff --git a/dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference rename to tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference diff --git a/dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql rename to tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql diff --git a/dbms/tests/queries/0_stateless/00814_parsing_ub.reference b/tests/queries/0_stateless/00814_parsing_ub.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00814_parsing_ub.reference rename to tests/queries/0_stateless/00814_parsing_ub.reference diff --git a/dbms/tests/queries/0_stateless/00814_parsing_ub.sql b/tests/queries/0_stateless/00814_parsing_ub.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00814_parsing_ub.sql rename to tests/queries/0_stateless/00814_parsing_ub.sql diff --git a/dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference rename to tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql rename to tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.reference b/tests/queries/0_stateless/00815_left_join_on_stepanel.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.reference rename to tests/queries/0_stateless/00815_left_join_on_stepanel.reference diff --git a/dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.sql b/tests/queries/0_stateless/00815_left_join_on_stepanel.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.sql rename to tests/queries/0_stateless/00815_left_join_on_stepanel.sql diff --git a/dbms/tests/queries/0_stateless/00816_join_column_names_sarg.reference b/tests/queries/0_stateless/00816_join_column_names_sarg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00816_join_column_names_sarg.reference rename to 
tests/queries/0_stateless/00816_join_column_names_sarg.reference diff --git a/dbms/tests/queries/0_stateless/00816_join_column_names_sarg.sql b/tests/queries/0_stateless/00816_join_column_names_sarg.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00816_join_column_names_sarg.sql rename to tests/queries/0_stateless/00816_join_column_names_sarg.sql diff --git a/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.reference b/tests/queries/0_stateless/00816_long_concurrent_alter_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.reference rename to tests/queries/0_stateless/00816_long_concurrent_alter_column.reference diff --git a/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh b/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh rename to tests/queries/0_stateless/00816_long_concurrent_alter_column.sh diff --git a/dbms/tests/queries/0_stateless/00817_with_simple.reference b/tests/queries/0_stateless/00817_with_simple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00817_with_simple.reference rename to tests/queries/0_stateless/00817_with_simple.reference diff --git a/dbms/tests/queries/0_stateless/00817_with_simple.sql b/tests/queries/0_stateless/00817_with_simple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00817_with_simple.sql rename to tests/queries/0_stateless/00817_with_simple.sql diff --git a/dbms/tests/queries/0_stateless/00818_alias_bug_4110.reference b/tests/queries/0_stateless/00818_alias_bug_4110.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00818_alias_bug_4110.reference rename to tests/queries/0_stateless/00818_alias_bug_4110.reference diff --git a/dbms/tests/queries/0_stateless/00818_alias_bug_4110.sql b/tests/queries/0_stateless/00818_alias_bug_4110.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00818_alias_bug_4110.sql rename to tests/queries/0_stateless/00818_alias_bug_4110.sql diff --git a/tests/queries/0_stateless/00818_inner_join_bug_3567.reference b/tests/queries/0_stateless/00818_inner_join_bug_3567.reference new file mode 100644 index 00000000000..c0fe46ee963 --- /dev/null +++ b/tests/queries/0_stateless/00818_inner_join_bug_3567.reference @@ -0,0 +1,19 @@ +┌─a─┬──────────b─┐ +│ a │ 2018-01-01 │ +│ b │ 2018-01-01 │ +│ c │ 2018-01-01 │ +└───┴────────────┘ +┌─c─┬─a─┬──────────d─┬─a─┬──────────b─┐ +│ B │ b │ 2018-01-01 │ B │ 2018-01-01 │ +│ C │ c │ 2018-01-01 │ C │ 2018-01-01 │ +│ D │ d │ 2018-01-01 │ D │ 2018-01-01 │ +└───┴───┴────────────┴───┴────────────┘ +┌─a─┬──────────b─┬─c─┬──────────d─┐ +│ a │ 2018-01-01 │ │ 0000-00-00 │ +│ b │ 2018-01-01 │ B │ 2018-01-01 │ +│ c │ 2018-01-01 │ C │ 2018-01-01 │ +└───┴────────────┴───┴────────────┘ +┌─a─┬──────────b─┬─c─┬──────────d─┐ +│ b │ 2018-01-01 │ B │ 2018-01-01 │ +│ c │ 2018-01-01 │ C │ 2018-01-01 │ +└───┴────────────┴───┴────────────┘ diff --git a/tests/queries/0_stateless/00818_inner_join_bug_3567.sql b/tests/queries/0_stateless/00818_inner_join_bug_3567.sql new file mode 100644 index 00000000000..2058d2309e4 --- /dev/null +++ b/tests/queries/0_stateless/00818_inner_join_bug_3567.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1(a String, b Date) ENGINE MergeTree order by a; +CREATE TABLE table2(c String, a String, d Date) ENGINE 
MergeTree order by c; + +INSERT INTO table1 VALUES ('a', '2018-01-01') ('b', '2018-01-01') ('c', '2018-01-01'); +INSERT INTO table2 VALUES ('D', 'd', '2018-01-01') ('B', 'b', '2018-01-01') ('C', 'c', '2018-01-01'); + +SELECT * FROM table1 t1 FORMAT PrettyCompact; +SELECT *, c as a, d as b FROM table2 FORMAT PrettyCompact; +SELECT * FROM table1 t1 ALL LEFT JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d FORMAT PrettyCompact; +SELECT * FROM table1 t1 ALL INNER JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d FORMAT PrettyCompact; + +DROP TABLE table1; +DROP TABLE table2; diff --git a/dbms/tests/queries/0_stateless/00818_join_bug_4271.reference b/tests/queries/0_stateless/00818_join_bug_4271.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00818_join_bug_4271.reference rename to tests/queries/0_stateless/00818_join_bug_4271.reference diff --git a/dbms/tests/queries/0_stateless/00818_join_bug_4271.sql b/tests/queries/0_stateless/00818_join_bug_4271.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00818_join_bug_4271.sql rename to tests/queries/0_stateless/00818_join_bug_4271.sql diff --git a/dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.reference b/tests/queries/0_stateless/00819_ast_refactoring_bugs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.reference rename to tests/queries/0_stateless/00819_ast_refactoring_bugs.reference diff --git a/dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.sql b/tests/queries/0_stateless/00819_ast_refactoring_bugs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.sql rename to tests/queries/0_stateless/00819_ast_refactoring_bugs.sql diff --git a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference b/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference rename to tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference diff --git a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql b/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql rename to tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql diff --git a/dbms/tests/queries/0_stateless/00820_multiple_joins.reference b/tests/queries/0_stateless/00820_multiple_joins.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00820_multiple_joins.reference rename to tests/queries/0_stateless/00820_multiple_joins.reference diff --git a/dbms/tests/queries/0_stateless/00820_multiple_joins.sql b/tests/queries/0_stateless/00820_multiple_joins.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00820_multiple_joins.sql rename to tests/queries/0_stateless/00820_multiple_joins.sql diff --git a/dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference rename to tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference diff --git 
a/dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql rename to tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql diff --git a/dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference b/tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference rename to tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference diff --git a/dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql b/tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql rename to tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql diff --git a/dbms/tests/queries/0_stateless/00822_array_insert_default.reference b/tests/queries/0_stateless/00822_array_insert_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00822_array_insert_default.reference rename to tests/queries/0_stateless/00822_array_insert_default.reference diff --git a/dbms/tests/queries/0_stateless/00822_array_insert_default.sql b/tests/queries/0_stateless/00822_array_insert_default.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00822_array_insert_default.sql rename to tests/queries/0_stateless/00822_array_insert_default.sql diff --git a/dbms/tests/queries/0_stateless/00823_capnproto_input.reference b/tests/queries/0_stateless/00823_capnproto_input.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00823_capnproto_input.reference rename to tests/queries/0_stateless/00823_capnproto_input.reference diff --git a/dbms/tests/queries/0_stateless/00823_capnproto_input.sh b/tests/queries/0_stateless/00823_capnproto_input.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00823_capnproto_input.sh rename to tests/queries/0_stateless/00823_capnproto_input.sh diff --git a/dbms/tests/queries/0_stateless/00823_sequence_match_dfa.reference b/tests/queries/0_stateless/00823_sequence_match_dfa.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00823_sequence_match_dfa.reference rename to tests/queries/0_stateless/00823_sequence_match_dfa.reference diff --git a/dbms/tests/queries/0_stateless/00823_sequence_match_dfa.sql b/tests/queries/0_stateless/00823_sequence_match_dfa.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00823_sequence_match_dfa.sql rename to tests/queries/0_stateless/00823_sequence_match_dfa.sql diff --git a/dbms/tests/queries/0_stateless/00824_filesystem.reference b/tests/queries/0_stateless/00824_filesystem.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00824_filesystem.reference rename to tests/queries/0_stateless/00824_filesystem.reference diff --git a/dbms/tests/queries/0_stateless/00824_filesystem.sql b/tests/queries/0_stateless/00824_filesystem.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00824_filesystem.sql rename to tests/queries/0_stateless/00824_filesystem.sql diff --git a/dbms/tests/queries/0_stateless/00825_http_header_query_id.reference b/tests/queries/0_stateless/00825_http_header_query_id.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/00825_http_header_query_id.reference rename to tests/queries/0_stateless/00825_http_header_query_id.reference diff --git a/dbms/tests/queries/0_stateless/00825_http_header_query_id.sh b/tests/queries/0_stateless/00825_http_header_query_id.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00825_http_header_query_id.sh rename to tests/queries/0_stateless/00825_http_header_query_id.sh diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format.proto b/tests/queries/0_stateless/00825_protobuf_format.proto similarity index 100% rename from dbms/tests/queries/0_stateless/00825_protobuf_format.proto rename to tests/queries/0_stateless/00825_protobuf_format.proto diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.insh b/tests/queries/0_stateless/00825_protobuf_format_input.insh similarity index 100% rename from dbms/tests/queries/0_stateless/00825_protobuf_format_input.insh rename to tests/queries/0_stateless/00825_protobuf_format_input.insh diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.reference b/tests/queries/0_stateless/00825_protobuf_format_input.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00825_protobuf_format_input.reference rename to tests/queries/0_stateless/00825_protobuf_format_input.reference diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh b/tests/queries/0_stateless/00825_protobuf_format_input.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh rename to tests/queries/0_stateless/00825_protobuf_format_input.sh diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_output.reference b/tests/queries/0_stateless/00825_protobuf_format_output.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00825_protobuf_format_output.reference rename to tests/queries/0_stateless/00825_protobuf_format_output.reference diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh b/tests/queries/0_stateless/00825_protobuf_format_output.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh rename to tests/queries/0_stateless/00825_protobuf_format_output.sh diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto b/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto similarity index 100% rename from dbms/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto rename to tests/queries/0_stateless/00825_protobuf_format_syntax2.proto diff --git a/dbms/tests/queries/0_stateless/00826_cross_to_inner_join.reference b/tests/queries/0_stateless/00826_cross_to_inner_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00826_cross_to_inner_join.reference rename to tests/queries/0_stateless/00826_cross_to_inner_join.reference diff --git a/dbms/tests/queries/0_stateless/00826_cross_to_inner_join.sql b/tests/queries/0_stateless/00826_cross_to_inner_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00826_cross_to_inner_join.sql rename to tests/queries/0_stateless/00826_cross_to_inner_join.sql diff --git a/dbms/tests/queries/0_stateless/00829_bitmap_function.reference b/tests/queries/0_stateless/00829_bitmap_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00829_bitmap_function.reference rename to tests/queries/0_stateless/00829_bitmap_function.reference diff --git 
a/dbms/tests/queries/0_stateless/00829_bitmap_function.sql b/tests/queries/0_stateless/00829_bitmap_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00829_bitmap_function.sql rename to tests/queries/0_stateless/00829_bitmap_function.sql diff --git a/dbms/tests/queries/0_stateless/00830_join_overwrite.reference b/tests/queries/0_stateless/00830_join_overwrite.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00830_join_overwrite.reference rename to tests/queries/0_stateless/00830_join_overwrite.reference diff --git a/dbms/tests/queries/0_stateless/00830_join_overwrite.sql b/tests/queries/0_stateless/00830_join_overwrite.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00830_join_overwrite.sql rename to tests/queries/0_stateless/00830_join_overwrite.sql diff --git a/dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference b/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference rename to tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference diff --git a/dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql b/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql rename to tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql diff --git a/dbms/tests/queries/0_stateless/00832_storage_file_lock.reference b/tests/queries/0_stateless/00832_storage_file_lock.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00832_storage_file_lock.reference rename to tests/queries/0_stateless/00832_storage_file_lock.reference diff --git a/dbms/tests/queries/0_stateless/00832_storage_file_lock.sql b/tests/queries/0_stateless/00832_storage_file_lock.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00832_storage_file_lock.sql rename to tests/queries/0_stateless/00832_storage_file_lock.sql diff --git a/dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.reference b/tests/queries/0_stateless/00833_sleep_overflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.reference rename to tests/queries/0_stateless/00833_sleep_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00833_sleep_overflow.sql b/tests/queries/0_stateless/00833_sleep_overflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00833_sleep_overflow.sql rename to tests/queries/0_stateless/00833_sleep_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference rename to tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference diff --git a/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh rename to tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh diff --git 
a/dbms/tests/queries/0_stateless/00834_date_datetime_cmp.reference b/tests/queries/0_stateless/00834_date_datetime_cmp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_date_datetime_cmp.reference
rename to tests/queries/0_stateless/00834_date_datetime_cmp.reference
diff --git a/dbms/tests/queries/0_stateless/00834_date_datetime_cmp.sql b/tests/queries/0_stateless/00834_date_datetime_cmp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_date_datetime_cmp.sql
rename to tests/queries/0_stateless/00834_date_datetime_cmp.sql
diff --git a/dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference b/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference
rename to tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference
diff --git a/dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh b/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh
rename to tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh
diff --git a/dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference b/tests/queries/0_stateless/00834_hints_for_type_function_typos.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference
rename to tests/queries/0_stateless/00834_hints_for_type_function_typos.reference
diff --git a/dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh b/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh
rename to tests/queries/0_stateless/00834_hints_for_type_function_typos.sh
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation.reference b/tests/queries/0_stateless/00834_kill_mutation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation.reference
rename to tests/queries/0_stateless/00834_kill_mutation.reference
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation.sh b/tests/queries/0_stateless/00834_kill_mutation.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation.sh
rename to tests/queries/0_stateless/00834_kill_mutation.sh
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
rename to tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.reference b/tests/queries/0_stateless/00834_limit_with_constant_expressions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.reference
rename to tests/queries/0_stateless/00834_limit_with_constant_expressions.reference
diff --git a/dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.sql b/tests/queries/0_stateless/00834_limit_with_constant_expressions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.sql
rename to tests/queries/0_stateless/00834_limit_with_constant_expressions.sql
diff --git a/dbms/tests/queries/0_stateless/00834_not_between.reference b/tests/queries/0_stateless/00834_not_between.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_not_between.reference
rename to tests/queries/0_stateless/00834_not_between.reference
diff --git a/dbms/tests/queries/0_stateless/00834_not_between.sql b/tests/queries/0_stateless/00834_not_between.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_not_between.sql
rename to tests/queries/0_stateless/00834_not_between.sql
diff --git a/dbms/tests/queries/0_stateless/00835_if_generic_case.reference b/tests/queries/0_stateless/00835_if_generic_case.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00835_if_generic_case.reference
rename to tests/queries/0_stateless/00835_if_generic_case.reference
diff --git a/dbms/tests/queries/0_stateless/00835_if_generic_case.sql b/tests/queries/0_stateless/00835_if_generic_case.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00835_if_generic_case.sql
rename to tests/queries/0_stateless/00835_if_generic_case.sql
diff --git a/tests/queries/0_stateless/00836_indices_alter.reference b/tests/queries/0_stateless/00836_indices_alter.reference
new file mode 100644
index 00000000000..6efa25f47b7
--- /dev/null
+++ b/tests/queries/0_stateless/00836_indices_alter.reference
@@ -0,0 +1,28 @@
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 2
+1 2
+1 2
+1 2
+1 2
+1 2
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 2
+1 2
+1 2
+1 2
+1 2
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 2
+1 2
+1 2
+1 2
+1 2
+1 2
+1 2
+CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 2
diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter.sql b/tests/queries/0_stateless/00836_indices_alter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_indices_alter.sql
rename to tests/queries/0_stateless/00836_indices_alter.sql
diff --git a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
new file mode 100644
index 00000000000..ec9de160fcc
--- /dev/null
+++ b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
@@ -0,0 +1,58 @@
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 2
+1 2
+1 4
+1 5
+3 2
+19 9
+65 75
+1 2
+1 4
+1 5
+3 2
+19 9
+65 75
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 4
+1 5
+3 2
+19 9
+65 75
+1 2
+1 4
+1 5
+3 2
+19 9
+65 75
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 4
+1 5
+3 2
+19 9
+65 75
+1 2
+1 4
+1 5
+3 2
+19 9
+65 75
+CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 3
+1 2
+1 3
+CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+1 2
+1 3
+1 2
+1 3
diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.reference b/tests/queries/0_stateless/00836_numbers_table_function_zero.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.reference
rename to tests/queries/0_stateless/00836_numbers_table_function_zero.reference
diff --git a/dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.sql b/tests/queries/0_stateless/00836_numbers_table_function_zero.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.sql
rename to tests/queries/0_stateless/00836_numbers_table_function_zero.sql
diff --git a/dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference b/tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference
rename to tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference
diff --git a/dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql b/tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql
rename to tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index.reference b/tests/queries/0_stateless/00837_minmax_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index.reference
rename to tests/queries/0_stateless/00837_minmax_index.reference
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index.sh b/tests/queries/0_stateless/00837_minmax_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index.sh
rename to tests/queries/0_stateless/00837_minmax_index.sh
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference b/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql b/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference b/tests/queries/0_stateless/00838_system_tables_drop_table_race.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference
rename to tests/queries/0_stateless/00838_system_tables_drop_table_race.reference
diff --git a/dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
rename to tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
diff --git a/dbms/tests/queries/0_stateless/00838_unique_index.reference b/tests/queries/0_stateless/00838_unique_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00838_unique_index.reference
rename to tests/queries/0_stateless/00838_unique_index.reference
diff --git a/dbms/tests/queries/0_stateless/00838_unique_index.sh b/tests/queries/0_stateless/00838_unique_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00838_unique_index.sh
rename to tests/queries/0_stateless/00838_unique_index.sh
diff --git a/dbms/tests/queries/0_stateless/00839_bitmask_negative.reference b/tests/queries/0_stateless/00839_bitmask_negative.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00839_bitmask_negative.reference
rename to tests/queries/0_stateless/00839_bitmask_negative.reference
diff --git a/dbms/tests/queries/0_stateless/00839_bitmask_negative.sql b/tests/queries/0_stateless/00839_bitmask_negative.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00839_bitmask_negative.sql
rename to tests/queries/0_stateless/00839_bitmask_negative.sql
diff --git a/dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference
rename to tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
rename to tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
diff --git a/dbms/tests/queries/0_stateless/00840_top_k_weighted.reference b/tests/queries/0_stateless/00840_top_k_weighted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_top_k_weighted.reference
rename to tests/queries/0_stateless/00840_top_k_weighted.reference
diff --git a/dbms/tests/queries/0_stateless/00840_top_k_weighted.sql b/tests/queries/0_stateless/00840_top_k_weighted.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_top_k_weighted.sql
rename to tests/queries/0_stateless/00840_top_k_weighted.sql
diff --git a/dbms/tests/queries/0_stateless/00841_temporary_table_database.reference b/tests/queries/0_stateless/00841_temporary_table_database.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00841_temporary_table_database.reference
rename to tests/queries/0_stateless/00841_temporary_table_database.reference
diff --git a/dbms/tests/queries/0_stateless/00841_temporary_table_database.sql b/tests/queries/0_stateless/00841_temporary_table_database.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00841_temporary_table_database.sql
rename to tests/queries/0_stateless/00841_temporary_table_database.sql
diff --git a/dbms/tests/queries/0_stateless/00933_reserved_word.reference b/tests/queries/0_stateless/00842_array_with_constant_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_reserved_word.reference
rename to tests/queries/0_stateless/00842_array_with_constant_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.sql b/tests/queries/0_stateless/00842_array_with_constant_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.sql
rename to tests/queries/0_stateless/00842_array_with_constant_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference b/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference
rename to tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference
diff --git a/dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql b/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql
rename to tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql
diff --git a/dbms/tests/queries/0_stateless/00844_join_lightee2.reference b/tests/queries/0_stateless/00844_join_lightee2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00844_join_lightee2.reference
rename to tests/queries/0_stateless/00844_join_lightee2.reference
diff --git a/dbms/tests/queries/0_stateless/00844_join_lightee2.sql b/tests/queries/0_stateless/00844_join_lightee2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00844_join_lightee2.sql
rename to tests/queries/0_stateless/00844_join_lightee2.sql
diff --git a/dbms/tests/queries/0_stateless/00845_join_on_aliases.reference b/tests/queries/0_stateless/00845_join_on_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00845_join_on_aliases.reference
rename to tests/queries/0_stateless/00845_join_on_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00845_join_on_aliases.sql b/tests/queries/0_stateless/00845_join_on_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00845_join_on_aliases.sql
rename to tests/queries/0_stateless/00845_join_on_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.reference b/tests/queries/0_stateless/00846_join_using_tuple_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.reference
rename to tests/queries/0_stateless/00846_join_using_tuple_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql b/tests/queries/0_stateless/00846_join_using_tuple_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql
rename to tests/queries/0_stateless/00846_join_using_tuple_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00847_multiple_join_same_column.reference b/tests/queries/0_stateless/00847_multiple_join_same_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00847_multiple_join_same_column.reference
rename to tests/queries/0_stateless/00847_multiple_join_same_column.reference
diff --git a/dbms/tests/queries/0_stateless/00847_multiple_join_same_column.sql b/tests/queries/0_stateless/00847_multiple_join_same_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00847_multiple_join_same_column.sql
rename to tests/queries/0_stateless/00847_multiple_join_same_column.sql
diff --git a/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.reference b/tests/queries/0_stateless/00848_join_use_nulls_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.reference
rename to tests/queries/0_stateless/00848_join_use_nulls_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql b/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql
rename to tests/queries/0_stateless/00848_join_use_nulls_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join.reference b/tests/queries/0_stateless/00849_multiple_comma_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join.reference
rename to tests/queries/0_stateless/00849_multiple_comma_join.reference
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join.sql b/tests/queries/0_stateless/00849_multiple_comma_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join.sql
rename to tests/queries/0_stateless/00849_multiple_comma_join.sql
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.reference b/tests/queries/0_stateless/00849_multiple_comma_join_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.reference
rename to tests/queries/0_stateless/00849_multiple_comma_join_2.reference
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.sql b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.sql
rename to tests/queries/0_stateless/00849_multiple_comma_join_2.sql
diff --git a/dbms/tests/queries/0_stateless/00850_global_join_dups.reference b/tests/queries/0_stateless/00850_global_join_dups.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00850_global_join_dups.reference
rename to tests/queries/0_stateless/00850_global_join_dups.reference
diff --git a/dbms/tests/queries/0_stateless/00850_global_join_dups.sql b/tests/queries/0_stateless/00850_global_join_dups.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00850_global_join_dups.sql
rename to tests/queries/0_stateless/00850_global_join_dups.sql
diff --git a/dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.reference b/tests/queries/0_stateless/00851_http_insert_json_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.reference
rename to tests/queries/0_stateless/00851_http_insert_json_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.sh b/tests/queries/0_stateless/00851_http_insert_json_defaults.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.sh
rename to tests/queries/0_stateless/00851_http_insert_json_defaults.sh
diff --git a/dbms/tests/queries/0_stateless/00852_any_join_nulls.reference b/tests/queries/0_stateless/00852_any_join_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00852_any_join_nulls.reference
rename to tests/queries/0_stateless/00852_any_join_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00852_any_join_nulls.sql b/tests/queries/0_stateless/00852_any_join_nulls.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00852_any_join_nulls.sql
rename to tests/queries/0_stateless/00852_any_join_nulls.sql
diff --git a/dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.reference b/tests/queries/0_stateless/00853_join_with_nulls_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.reference
rename to tests/queries/0_stateless/00853_join_with_nulls_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.sql b/tests/queries/0_stateless/00853_join_with_nulls_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.sql
rename to tests/queries/0_stateless/00853_join_with_nulls_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.reference b/tests/queries/0_stateless/00854_multiple_join_asterisks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.reference
rename to tests/queries/0_stateless/00854_multiple_join_asterisks.reference
diff --git a/dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.sql b/tests/queries/0_stateless/00854_multiple_join_asterisks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.sql
rename to tests/queries/0_stateless/00854_multiple_join_asterisks.sql
diff --git a/dbms/tests/queries/0_stateless/00855_join_with_array_join.reference b/tests/queries/0_stateless/00855_join_with_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00855_join_with_array_join.reference
rename to tests/queries/0_stateless/00855_join_with_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00855_join_with_array_join.sql b/tests/queries/0_stateless/00855_join_with_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00855_join_with_array_join.sql
rename to tests/queries/0_stateless/00855_join_with_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00856_no_column_issue_4242.reference b/tests/queries/0_stateless/00856_no_column_issue_4242.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00856_no_column_issue_4242.reference
rename to tests/queries/0_stateless/00856_no_column_issue_4242.reference
diff --git a/dbms/tests/queries/0_stateless/00856_no_column_issue_4242.sql b/tests/queries/0_stateless/00856_no_column_issue_4242.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00856_no_column_issue_4242.sql
rename to tests/queries/0_stateless/00856_no_column_issue_4242.sql
diff --git a/dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference b/tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference
rename to tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql b/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql
rename to tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00858_issue_4756.reference b/tests/queries/0_stateless/00858_issue_4756.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00858_issue_4756.reference
rename to tests/queries/0_stateless/00858_issue_4756.reference
diff --git a/dbms/tests/queries/0_stateless/00858_issue_4756.sql b/tests/queries/0_stateless/00858_issue_4756.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00858_issue_4756.sql
rename to tests/queries/0_stateless/00858_issue_4756.sql
diff --git a/dbms/tests/queries/0_stateless/00859_distinct_with_join.reference b/tests/queries/0_stateless/00859_distinct_with_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00859_distinct_with_join.reference
rename to tests/queries/0_stateless/00859_distinct_with_join.reference
diff --git a/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql b/tests/queries/0_stateless/00859_distinct_with_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00859_distinct_with_join.sql
rename to tests/queries/0_stateless/00859_distinct_with_join.sql
diff --git a/dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.reference b/tests/queries/0_stateless/00860_unknown_identifier_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.reference
rename to tests/queries/0_stateless/00860_unknown_identifier_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.sql b/tests/queries/0_stateless/00860_unknown_identifier_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.sql
rename to tests/queries/0_stateless/00860_unknown_identifier_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.reference b/tests/queries/0_stateless/00861_decimal_quoted_csv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.reference
rename to tests/queries/0_stateless/00861_decimal_quoted_csv.reference
diff --git a/dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.sql b/tests/queries/0_stateless/00861_decimal_quoted_csv.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.sql
rename to tests/queries/0_stateless/00861_decimal_quoted_csv.sql
diff --git a/dbms/tests/queries/0_stateless/00862_decimal_in.reference b/tests/queries/0_stateless/00862_decimal_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00862_decimal_in.reference
rename to tests/queries/0_stateless/00862_decimal_in.reference
diff --git a/dbms/tests/queries/0_stateless/00862_decimal_in.sql b/tests/queries/0_stateless/00862_decimal_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00862_decimal_in.sql
rename to tests/queries/0_stateless/00862_decimal_in.sql
diff --git a/dbms/tests/queries/0_stateless/00863_comma_join_in.reference b/tests/queries/0_stateless/00863_comma_join_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00863_comma_join_in.reference
rename to tests/queries/0_stateless/00863_comma_join_in.reference
diff --git a/dbms/tests/queries/0_stateless/00863_comma_join_in.sql b/tests/queries/0_stateless/00863_comma_join_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00863_comma_join_in.sql
rename to tests/queries/0_stateless/00863_comma_join_in.sql
diff --git a/dbms/tests/queries/0_stateless/00864_union_all_supertype.reference b/tests/queries/0_stateless/00864_union_all_supertype.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00864_union_all_supertype.reference
rename to tests/queries/0_stateless/00864_union_all_supertype.reference
diff --git a/dbms/tests/queries/0_stateless/00864_union_all_supertype.sql b/tests/queries/0_stateless/00864_union_all_supertype.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00864_union_all_supertype.sql
rename to tests/queries/0_stateless/00864_union_all_supertype.sql
diff --git a/dbms/tests/queries/0_stateless/00870_t64_codec.reference b/tests/queries/0_stateless/00870_t64_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00870_t64_codec.reference
rename to tests/queries/0_stateless/00870_t64_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00870_t64_codec.sql b/tests/queries/0_stateless/00870_t64_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00870_t64_codec.sql
rename to tests/queries/0_stateless/00870_t64_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00871_t64_codec_signed.reference b/tests/queries/0_stateless/00871_t64_codec_signed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00871_t64_codec_signed.reference
rename to tests/queries/0_stateless/00871_t64_codec_signed.reference
diff --git a/dbms/tests/queries/0_stateless/00871_t64_codec_signed.sql b/tests/queries/0_stateless/00871_t64_codec_signed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00871_t64_codec_signed.sql
rename to tests/queries/0_stateless/00871_t64_codec_signed.sql
diff --git a/dbms/tests/queries/0_stateless/00872_t64_bit_codec.reference b/tests/queries/0_stateless/00872_t64_bit_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00872_t64_bit_codec.reference
rename to tests/queries/0_stateless/00872_t64_bit_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00872_t64_bit_codec.sql b/tests/queries/0_stateless/00872_t64_bit_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00872_t64_bit_codec.sql
rename to tests/queries/0_stateless/00872_t64_bit_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00874_issue_3495.reference b/tests/queries/0_stateless/00874_issue_3495.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00874_issue_3495.reference
rename to tests/queries/0_stateless/00874_issue_3495.reference
diff --git a/dbms/tests/queries/0_stateless/00874_issue_3495.sql b/tests/queries/0_stateless/00874_issue_3495.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00874_issue_3495.sql
rename to tests/queries/0_stateless/00874_issue_3495.sql
diff --git a/dbms/tests/queries/0_stateless/00875_join_right_nulls.reference b/tests/queries/0_stateless/00875_join_right_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00875_join_right_nulls.reference
rename to tests/queries/0_stateless/00875_join_right_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00875_join_right_nulls.sql b/tests/queries/0_stateless/00875_join_right_nulls.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00875_join_right_nulls.sql
rename to tests/queries/0_stateless/00875_join_right_nulls.sql
diff --git a/dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.reference b/tests/queries/0_stateless/00876_wrong_arraj_join_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.reference
rename to tests/queries/0_stateless/00876_wrong_arraj_join_column.reference
diff --git a/dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.sql b/tests/queries/0_stateless/00876_wrong_arraj_join_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.sql
rename to tests/queries/0_stateless/00876_wrong_arraj_join_column.sql
diff --git a/dbms/tests/queries/0_stateless/00941_system_columns_race_condition.reference b/tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00941_system_columns_race_condition.reference
rename to tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference
diff --git a/dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql b/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql
rename to tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql
diff --git a/dbms/tests/queries/0_stateless/00878_join_unexpected_results.reference b/tests/queries/0_stateless/00878_join_unexpected_results.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00878_join_unexpected_results.reference
rename to tests/queries/0_stateless/00878_join_unexpected_results.reference
diff --git a/dbms/tests/queries/0_stateless/00878_join_unexpected_results.sql b/tests/queries/0_stateless/00878_join_unexpected_results.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00878_join_unexpected_results.sql
rename to tests/queries/0_stateless/00878_join_unexpected_results.sql
diff --git a/dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference b/tests/queries/0_stateless/00879_cast_to_decimal_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference
rename to tests/queries/0_stateless/00879_cast_to_decimal_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.sql b/tests/queries/0_stateless/00879_cast_to_decimal_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.sql
rename to tests/queries/0_stateless/00879_cast_to_decimal_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00880_decimal_in_key.reference b/tests/queries/0_stateless/00880_decimal_in_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00880_decimal_in_key.reference
rename to tests/queries/0_stateless/00880_decimal_in_key.reference
diff --git a/dbms/tests/queries/0_stateless/00880_decimal_in_key.sql b/tests/queries/0_stateless/00880_decimal_in_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00880_decimal_in_key.sql
rename to tests/queries/0_stateless/00880_decimal_in_key.sql
diff --git a/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference b/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference
rename to tests/queries/0_stateless/00881_unknown_identifier_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql b/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql
rename to tests/queries/0_stateless/00881_unknown_identifier_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.reference b/tests/queries/0_stateless/00882_multiple_join_no_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.reference
rename to tests/queries/0_stateless/00882_multiple_join_no_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.sql b/tests/queries/0_stateless/00882_multiple_join_no_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.sql
rename to tests/queries/0_stateless/00882_multiple_join_no_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00897_flatten.reference b/tests/queries/0_stateless/00897_flatten.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00897_flatten.reference
rename to tests/queries/0_stateless/00897_flatten.reference
diff --git a/dbms/tests/queries/0_stateless/00897_flatten.sql b/tests/queries/0_stateless/00897_flatten.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00897_flatten.sql
rename to tests/queries/0_stateless/00897_flatten.sql
diff --git a/dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference b/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference
rename to tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference
diff --git a/dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh b/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh
rename to tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh
diff --git a/dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference b/tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference
rename to tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference
diff --git a/dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql b/tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql
rename to tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql
diff --git a/dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.reference b/tests/queries/0_stateless/00899_long_attach_memory_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.reference
rename to tests/queries/0_stateless/00899_long_attach_memory_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.sql b/tests/queries/0_stateless/00899_long_attach_memory_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.sql
rename to tests/queries/0_stateless/00899_long_attach_memory_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00900_entropy_shard.reference b/tests/queries/0_stateless/00900_entropy_shard.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_entropy_shard.reference
rename to tests/queries/0_stateless/00900_entropy_shard.reference
diff --git a/dbms/tests/queries/0_stateless/00900_entropy_shard.sql b/tests/queries/0_stateless/00900_entropy_shard.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_entropy_shard.sql
rename to tests/queries/0_stateless/00900_entropy_shard.sql
diff --git a/dbms/tests/queries/0_stateless/00900_orc_load.reference b/tests/queries/0_stateless/00900_orc_load.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_orc_load.reference
rename to tests/queries/0_stateless/00900_orc_load.reference
diff --git a/dbms/tests/queries/0_stateless/00900_orc_load.sh b/tests/queries/0_stateless/00900_orc_load.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_orc_load.sh
rename to tests/queries/0_stateless/00900_orc_load.sh
diff --git a/dbms/tests/queries/0_stateless/00900_parquet.reference b/tests/queries/0_stateless/00900_parquet.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet.reference
rename to tests/queries/0_stateless/00900_parquet.reference
diff --git a/dbms/tests/queries/0_stateless/00900_parquet.sh b/tests/queries/0_stateless/00900_parquet.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet.sh
rename to tests/queries/0_stateless/00900_parquet.sh
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_create_table_columns.pl b/tests/queries/0_stateless/00900_parquet_create_table_columns.pl
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_create_table_columns.pl
rename to tests/queries/0_stateless/00900_parquet_create_table_columns.pl
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_decimal.reference b/tests/queries/0_stateless/00900_parquet_decimal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_decimal.reference
rename to tests/queries/0_stateless/00900_parquet_decimal.reference
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_decimal.sh b/tests/queries/0_stateless/00900_parquet_decimal.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_decimal.sh
rename to tests/queries/0_stateless/00900_parquet_decimal.sh
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_load.reference b/tests/queries/0_stateless/00900_parquet_load.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_load.reference
rename to tests/queries/0_stateless/00900_parquet_load.reference
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_load.sh b/tests/queries/0_stateless/00900_parquet_load.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_load.sh
rename to tests/queries/0_stateless/00900_parquet_load.sh
diff --git a/dbms/tests/queries/0_stateless/00901_joint_entropy.reference b/tests/queries/0_stateless/00901_joint_entropy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00901_joint_entropy.reference
rename to tests/queries/0_stateless/00901_joint_entropy.reference
diff --git a/dbms/tests/queries/0_stateless/00901_joint_entropy.sql b/tests/queries/0_stateless/00901_joint_entropy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00901_joint_entropy.sql
rename to tests/queries/0_stateless/00901_joint_entropy.sql
diff --git a/dbms/tests/queries/0_stateless/00902_entropy.reference b/tests/queries/0_stateless/00902_entropy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00902_entropy.reference
rename to tests/queries/0_stateless/00902_entropy.reference
diff --git a/dbms/tests/queries/0_stateless/00902_entropy.sql b/tests/queries/0_stateless/00902_entropy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00902_entropy.sql
rename to tests/queries/0_stateless/00902_entropy.sql
diff --git a/dbms/tests/queries/0_stateless/00903_array_with_constant_function.reference b/tests/queries/0_stateless/00903_array_with_constant_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00903_array_with_constant_function.reference
rename to tests/queries/0_stateless/00903_array_with_constant_function.reference
diff --git a/dbms/tests/queries/0_stateless/00903_array_with_constant_function.sql b/tests/queries/0_stateless/00903_array_with_constant_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00903_array_with_constant_function.sql
rename to tests/queries/0_stateless/00903_array_with_constant_function.sql
diff --git a/dbms/tests/queries/0_stateless/00904_array_with_constant_2.reference b/tests/queries/0_stateless/00904_array_with_constant_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00904_array_with_constant_2.reference
rename to tests/queries/0_stateless/00904_array_with_constant_2.reference
diff --git a/dbms/tests/queries/0_stateless/00904_array_with_constant_2.sql b/tests/queries/0_stateless/00904_array_with_constant_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00904_array_with_constant_2.sql
rename to tests/queries/0_stateless/00904_array_with_constant_2.sql
diff --git a/dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference b/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference
rename to tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference
diff --git a/dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql b/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql
rename to tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql
diff --git a/dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference b/tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference
rename to tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference
diff --git a/dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql b/tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql
rename to tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.reference b/tests/queries/0_stateless/00906_low_cardinality_cache.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_cache.reference
rename to tests/queries/0_stateless/00906_low_cardinality_cache.reference
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql b/tests/queries/0_stateless/00906_low_cardinality_cache.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql
rename to tests/queries/0_stateless/00906_low_cardinality_cache.sql
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.reference b/tests/queries/0_stateless/00906_low_cardinality_const_argument.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.reference
rename to tests/queries/0_stateless/00906_low_cardinality_const_argument.reference
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.sql b/tests/queries/0_stateless/00906_low_cardinality_const_argument.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.sql
rename to tests/queries/0_stateless/00906_low_cardinality_const_argument.sql
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.reference b/tests/queries/0_stateless/00906_low_cardinality_rollup.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.reference
rename to tests/queries/0_stateless/00906_low_cardinality_rollup.reference
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.sql b/tests/queries/0_stateless/00906_low_cardinality_rollup.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.sql
rename to tests/queries/0_stateless/00906_low_cardinality_rollup.sql
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_max_rows.reference b/tests/queries/0_stateless/00907_set_index_max_rows.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_max_rows.reference
rename to tests/queries/0_stateless/00907_set_index_max_rows.reference
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_max_rows.sh b/tests/queries/0_stateless/00907_set_index_max_rows.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_max_rows.sh
rename to tests/queries/0_stateless/00907_set_index_max_rows.sh
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00908_analyze_query.reference b/tests/queries/0_stateless/00908_analyze_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_analyze_query.reference
rename to tests/queries/0_stateless/00908_analyze_query.reference
diff --git a/dbms/tests/queries/0_stateless/00908_analyze_query.sql b/tests/queries/0_stateless/00908_analyze_query.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_analyze_query.sql
rename to tests/queries/0_stateless/00908_analyze_query.sql
diff --git a/dbms/tests/queries/0_stateless/00908_bloom_filter_index.reference b/tests/queries/0_stateless/00908_bloom_filter_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_bloom_filter_index.reference
rename to tests/queries/0_stateless/00908_bloom_filter_index.reference
diff --git a/dbms/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_bloom_filter_index.sh
rename to tests/queries/0_stateless/00908_bloom_filter_index.sh
diff --git a/dbms/tests/queries/0_stateless/00908_long_http_insert.reference b/tests/queries/0_stateless/00908_long_http_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_long_http_insert.reference
rename to tests/queries/0_stateless/00908_long_http_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00908_long_http_insert.sh b/tests/queries/0_stateless/00908_long_http_insert.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_long_http_insert.sh
rename to tests/queries/0_stateless/00908_long_http_insert.sh
diff --git a/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference b/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference
rename to tests/queries/0_stateless/00909_arrayEnumerateUniq.reference
diff --git a/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.sql b/tests/queries/0_stateless/00909_arrayEnumerateUniq.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.sql
rename to tests/queries/0_stateless/00909_arrayEnumerateUniq.sql
diff --git a/dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.reference b/tests/queries/0_stateless/00909_kill_not_initialized_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.reference
rename to tests/queries/0_stateless/00909_kill_not_initialized_query.reference
diff --git a/dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.sh b/tests/queries/0_stateless/00909_kill_not_initialized_query.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.sh
rename to tests/queries/0_stateless/00909_kill_not_initialized_query.sh
diff --git a/dbms/tests/queries/0_stateless/00909_ngram_distance.reference b/tests/queries/0_stateless/00909_ngram_distance.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_ngram_distance.reference
rename to tests/queries/0_stateless/00909_ngram_distance.reference
diff --git a/dbms/tests/queries/0_stateless/00909_ngram_distance.sql b/tests/queries/0_stateless/00909_ngram_distance.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_ngram_distance.sql
rename to tests/queries/0_stateless/00909_ngram_distance.sql
diff --git a/dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference b/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference
rename to tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference
diff --git a/dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql b/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql
rename to tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql
diff --git a/dbms/tests/queries/0_stateless/00910_buffer_prewhere.reference b/tests/queries/0_stateless/00910_buffer_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_buffer_prewhere.reference
rename to tests/queries/0_stateless/00910_buffer_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00910_buffer_prewhere.sql b/tests/queries/0_stateless/00910_buffer_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_buffer_prewhere.sql
rename to tests/queries/0_stateless/00910_buffer_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00910_client_window_size_detection.reference b/tests/queries/0_stateless/00910_client_window_size_detection.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_client_window_size_detection.reference
rename to tests/queries/0_stateless/00910_client_window_size_detection.reference
diff --git a/dbms/tests/queries/0_stateless/00910_client_window_size_detection.sh b/tests/queries/0_stateless/00910_client_window_size_detection.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_client_window_size_detection.sh
rename to tests/queries/0_stateless/00910_client_window_size_detection.sh
diff --git a/dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference b/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference
rename to tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference
diff --git a/dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql b/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql
rename to tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql
diff --git a/dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference b/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference
rename to tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference
diff --git a/dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql b/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql
rename to tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql
diff --git a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
new file mode 100644
index 00000000000..ee481c88d89
--- /dev/null
+++ b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
@@ -0,0 +1,27 @@
+1 hello 2018-12-14 1.1 aaa 5
+2 world 2018-12-15 2.2 bbb 6
+3 ! 2018-12-16 3.3 ccc 7
+1 hello 2018-12-14 1.1 aaa 5
+2 world 2018-12-15 2.2 bbb 6
+3 ! 2018-12-16 3.3 ccc 7
+2
+2
+1 world 2018-10-05 1.1
+2 hello 2018-10-01 2.2
+3 buy 2018-10-11 3.3
+1 world 2018-10-05 1.1
+2 hello 2018-10-01 2.2
+3 buy 2018-10-11 3.3
+10003
+10003
+10003
+10003
+274972506.6
+274972506.6
+9175437371954010821
+9175437371954010821
+CREATE TABLE test.compression_codec_multiple_more_types_replicated\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), \n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), \n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), \n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192
+1.5555555555555 hello world! [77] ['John']
+7.1000000000000 xxxxxxxxxxxx [127] ['Henry']
+!
+222
diff --git a/dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql
rename to tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql
diff --git a/dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference b/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference
rename to tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference
diff --git a/dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql
rename to tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql
diff --git a/dbms/tests/queries/0_stateless/00911_tautological_compare.reference b/tests/queries/0_stateless/00911_tautological_compare.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00911_tautological_compare.reference
rename to tests/queries/0_stateless/00911_tautological_compare.reference
diff --git a/dbms/tests/queries/0_stateless/00911_tautological_compare.sql b/tests/queries/0_stateless/00911_tautological_compare.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00911_tautological_compare.sql
rename to tests/queries/0_stateless/00911_tautological_compare.sql
diff --git a/dbms/tests/queries/0_stateless/00912_string_comparison.reference b/tests/queries/0_stateless/00912_string_comparison.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00912_string_comparison.reference
rename to tests/queries/0_stateless/00912_string_comparison.reference
diff --git a/dbms/tests/queries/0_stateless/00912_string_comparison.sql b/tests/queries/0_stateless/00912_string_comparison.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00912_string_comparison.sql
rename to tests/queries/0_stateless/00912_string_comparison.sql
diff --git a/dbms/tests/queries/0_stateless/00913_many_threads.reference b/tests/queries/0_stateless/00913_many_threads.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00913_many_threads.reference
rename to tests/queries/0_stateless/00913_many_threads.reference
diff --git a/dbms/tests/queries/0_stateless/00913_many_threads.sql b/tests/queries/0_stateless/00913_many_threads.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00913_many_threads.sql
rename to tests/queries/0_stateless/00913_many_threads.sql
diff --git a/dbms/tests/queries/0_stateless/00914_join_bgranvea.reference b/tests/queries/0_stateless/00914_join_bgranvea.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00914_join_bgranvea.reference
rename to tests/queries/0_stateless/00914_join_bgranvea.reference
diff --git a/dbms/tests/queries/0_stateless/00914_join_bgranvea.sql b/tests/queries/0_stateless/00914_join_bgranvea.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00914_join_bgranvea.sql
rename to tests/queries/0_stateless/00914_join_bgranvea.sql
diff --git a/dbms/tests/queries/0_stateless/00914_replicate.reference b/tests/queries/0_stateless/00914_replicate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00914_replicate.reference
rename to tests/queries/0_stateless/00914_replicate.reference
diff --git a/dbms/tests/queries/0_stateless/00914_replicate.sql b/tests/queries/0_stateless/00914_replicate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00914_replicate.sql
rename to tests/queries/0_stateless/00914_replicate.sql
diff --git a/dbms/tests/queries/0_stateless/00915_simple_aggregate_function.reference b/tests/queries/0_stateless/00915_simple_aggregate_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00915_simple_aggregate_function.reference
rename to tests/queries/0_stateless/00915_simple_aggregate_function.reference
diff --git a/dbms/tests/queries/0_stateless/00915_simple_aggregate_function.sql b/tests/queries/0_stateless/00915_simple_aggregate_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00915_simple_aggregate_function.sql
rename to tests/queries/0_stateless/00915_simple_aggregate_function.sql
diff --git a/dbms/tests/queries/0_stateless/00915_tuple_orantius.reference b/tests/queries/0_stateless/00915_tuple_orantius.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00915_tuple_orantius.reference
rename to tests/queries/0_stateless/00915_tuple_orantius.reference
diff --git a/dbms/tests/queries/0_stateless/00915_tuple_orantius.sql b/tests/queries/0_stateless/00915_tuple_orantius.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00915_tuple_orantius.sql
rename to tests/queries/0_stateless/00915_tuple_orantius.sql
diff --git a/dbms/tests/queries/0_stateless/00916_add_materialized_column_after.reference b/tests/queries/0_stateless/00916_add_materialized_column_after.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00916_add_materialized_column_after.reference
rename to tests/queries/0_stateless/00916_add_materialized_column_after.reference
diff --git a/dbms/tests/queries/0_stateless/00916_add_materialized_column_after.sql b/tests/queries/0_stateless/00916_add_materialized_column_after.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00916_add_materialized_column_after.sql
rename to tests/queries/0_stateless/00916_add_materialized_column_after.sql
diff --git a/tests/queries/0_stateless/00916_create_or_replace_view.reference b/tests/queries/0_stateless/00916_create_or_replace_view.reference
new file mode 100644
index 00000000000..50323e47556
--- /dev/null
+++ b/tests/queries/0_stateless/00916_create_or_replace_view.reference
@@ -0,0 +1,2 @@
+CREATE VIEW default.t\n(\n `number` UInt64\n) AS\nSELECT number\nFROM system.numbers
+CREATE VIEW default.t\n(\n `next_number` UInt64\n) AS\nSELECT number + 1 AS next_number\nFROM system.numbers
diff --git a/dbms/tests/queries/0_stateless/00916_create_or_replace_view.sql b/tests/queries/0_stateless/00916_create_or_replace_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00916_create_or_replace_view.sql
rename to tests/queries/0_stateless/00916_create_or_replace_view.sql
diff --git a/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.reference b/tests/queries/0_stateless/00916_join_using_duplicate_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.reference
rename to tests/queries/0_stateless/00916_join_using_duplicate_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql b/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql
rename to tests/queries/0_stateless/00916_join_using_duplicate_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00917_least_sqr.reference b/tests/queries/0_stateless/00917_least_sqr.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00917_least_sqr.reference
rename to tests/queries/0_stateless/00917_least_sqr.reference
diff --git a/dbms/tests/queries/0_stateless/00917_least_sqr.sql b/tests/queries/0_stateless/00917_least_sqr.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00917_least_sqr.sql
rename to tests/queries/0_stateless/00917_least_sqr.sql
diff --git a/dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference b/tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference
rename to tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference
diff --git a/dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql b/tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql
rename to tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql
diff --git a/dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.reference b/tests/queries/0_stateless/00918_has_unsufficient_type_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.reference
rename to tests/queries/0_stateless/00918_has_unsufficient_type_check.reference
diff --git a/dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.sql b/tests/queries/0_stateless/00918_has_unsufficient_type_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.sql
rename to tests/queries/0_stateless/00918_has_unsufficient_type_check.sql
diff --git a/dbms/tests/queries/0_stateless/00918_json_functions.reference b/tests/queries/0_stateless/00918_json_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00918_json_functions.reference
rename to tests/queries/0_stateless/00918_json_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00918_json_functions.sql b/tests/queries/0_stateless/00918_json_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00918_json_functions.sql
rename to tests/queries/0_stateless/00918_json_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00919_histogram_merge.reference b/tests/queries/0_stateless/00919_histogram_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00919_histogram_merge.reference
rename to tests/queries/0_stateless/00919_histogram_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00919_histogram_merge.sql b/tests/queries/0_stateless/00919_histogram_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00919_histogram_merge.sql
rename to tests/queries/0_stateless/00919_histogram_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference b/tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference
rename to tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql b/tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql
rename to tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference b/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference
rename to tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql b/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql
rename to tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_basic.reference b/tests/queries/0_stateless/00921_datetime64_basic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00921_datetime64_basic.reference
rename to tests/queries/0_stateless/00921_datetime64_basic.reference
diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_basic.sql b/tests/queries/0_stateless/00921_datetime64_basic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00921_datetime64_basic.sql
rename to
tests/queries/0_stateless/00921_datetime64_basic.sql diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_compatibility.python b/tests/queries/0_stateless/00921_datetime64_compatibility.python similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_compatibility.python rename to tests/queries/0_stateless/00921_datetime64_compatibility.python diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_compatibility.reference b/tests/queries/0_stateless/00921_datetime64_compatibility.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_compatibility.reference rename to tests/queries/0_stateless/00921_datetime64_compatibility.reference diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_compatibility.sh b/tests/queries/0_stateless/00921_datetime64_compatibility.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_compatibility.sh rename to tests/queries/0_stateless/00921_datetime64_compatibility.sh diff --git a/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.reference b/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.reference rename to tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference diff --git a/dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql b/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql rename to tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference 
b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_geo_to_h3.reference b/tests/queries/0_stateless/00926_geo_to_h3.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_geo_to_h3.reference rename to tests/queries/0_stateless/00926_geo_to_h3.reference diff --git a/dbms/tests/queries/0_stateless/00926_geo_to_h3.sql b/tests/queries/0_stateless/00926_geo_to_h3.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_geo_to_h3.sql rename to tests/queries/0_stateless/00926_geo_to_h3.sql diff --git a/dbms/tests/queries/0_stateless/00926_multimatch.reference b/tests/queries/0_stateless/00926_multimatch.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_multimatch.reference rename to tests/queries/0_stateless/00926_multimatch.reference diff --git a/dbms/tests/queries/0_stateless/00926_multimatch.sql b/tests/queries/0_stateless/00926_multimatch.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_multimatch.sql rename to tests/queries/0_stateless/00926_multimatch.sql diff --git a/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference 
b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference rename to tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql rename to tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.reference b/tests/queries/0_stateless/00927_asof_join_correct_bt.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.reference rename to tests/queries/0_stateless/00927_asof_join_correct_bt.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.sql b/tests/queries/0_stateless/00927_asof_join_correct_bt.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.sql rename to tests/queries/0_stateless/00927_asof_join_correct_bt.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_long.reference b/tests/queries/0_stateless/00927_asof_join_long.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_long.reference rename to tests/queries/0_stateless/00927_asof_join_long.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_long.sql b/tests/queries/0_stateless/00927_asof_join_long.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_long.sql rename to tests/queries/0_stateless/00927_asof_join_long.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.reference b/tests/queries/0_stateless/00927_asof_join_noninclusive.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.reference rename to tests/queries/0_stateless/00927_asof_join_noninclusive.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.sql b/tests/queries/0_stateless/00927_asof_join_noninclusive.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.sql rename to tests/queries/0_stateless/00927_asof_join_noninclusive.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_other_types.reference b/tests/queries/0_stateless/00927_asof_join_other_types.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_other_types.reference rename to tests/queries/0_stateless/00927_asof_join_other_types.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_other_types.sh b/tests/queries/0_stateless/00927_asof_join_other_types.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_other_types.sh rename to tests/queries/0_stateless/00927_asof_join_other_types.sh diff --git a/dbms/tests/queries/0_stateless/00927_asof_joins.reference b/tests/queries/0_stateless/00927_asof_joins.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_joins.reference rename to tests/queries/0_stateless/00927_asof_joins.reference diff --git 
a/dbms/tests/queries/0_stateless/00927_asof_joins.sql b/tests/queries/0_stateless/00927_asof_joins.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_joins.sql rename to tests/queries/0_stateless/00927_asof_joins.sql diff --git a/dbms/tests/queries/0_stateless/00927_disable_hyperscan.reference b/tests/queries/0_stateless/00927_disable_hyperscan.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_disable_hyperscan.reference rename to tests/queries/0_stateless/00927_disable_hyperscan.reference diff --git a/dbms/tests/queries/0_stateless/00927_disable_hyperscan.sql b/tests/queries/0_stateless/00927_disable_hyperscan.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_disable_hyperscan.sql rename to tests/queries/0_stateless/00927_disable_hyperscan.sql diff --git a/dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.reference b/tests/queries/0_stateless/00928_multi_match_constant_constant.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.reference rename to tests/queries/0_stateless/00928_multi_match_constant_constant.reference diff --git a/dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.sql b/tests/queries/0_stateless/00928_multi_match_constant_constant.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.sql rename to tests/queries/0_stateless/00928_multi_match_constant_constant.sql diff --git a/dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.reference b/tests/queries/0_stateless/00929_multi_match_edit_distance.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.reference rename to tests/queries/0_stateless/00929_multi_match_edit_distance.reference diff --git a/dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.sql b/tests/queries/0_stateless/00929_multi_match_edit_distance.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.sql rename to tests/queries/0_stateless/00929_multi_match_edit_distance.sql diff --git a/dbms/tests/queries/0_stateless/00930_arrayIntersect.reference b/tests/queries/0_stateless/00930_arrayIntersect.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00930_arrayIntersect.reference rename to tests/queries/0_stateless/00930_arrayIntersect.reference diff --git a/dbms/tests/queries/0_stateless/00930_arrayIntersect.sql b/tests/queries/0_stateless/00930_arrayIntersect.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00930_arrayIntersect.sql rename to tests/queries/0_stateless/00930_arrayIntersect.sql diff --git a/dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference b/tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference rename to tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference diff --git a/dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql b/tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql rename to tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql diff --git 
a/dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference b/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference rename to tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql b/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql rename to tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql diff --git a/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference b/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference rename to tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql b/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql rename to tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference b/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference rename to tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql b/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql rename to tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql diff --git a/dbms/tests/queries/0_stateless/00932_array_intersect_bug.reference b/tests/queries/0_stateless/00932_array_intersect_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00932_array_intersect_bug.reference rename to tests/queries/0_stateless/00932_array_intersect_bug.reference diff --git a/dbms/tests/queries/0_stateless/00932_array_intersect_bug.sql b/tests/queries/0_stateless/00932_array_intersect_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00932_array_intersect_bug.sql rename to tests/queries/0_stateless/00932_array_intersect_bug.sql diff --git a/dbms/tests/queries/0_stateless/00932_geohash_support.reference b/tests/queries/0_stateless/00932_geohash_support.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00932_geohash_support.reference rename to tests/queries/0_stateless/00932_geohash_support.reference diff --git a/dbms/tests/queries/0_stateless/00932_geohash_support.sql b/tests/queries/0_stateless/00932_geohash_support.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00932_geohash_support.sql rename to tests/queries/0_stateless/00932_geohash_support.sql 
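Aside on the codec fixtures above: the new 00910_zookeeper_custom_compression_codecs_replicated.reference records SHOW CREATE output for replicated tables whose columns chain several compression codecs. As a minimal sketch of the kind of DDL that produces such output (the table and column names here are illustrative only, not taken from the test):

    -- illustrative sketch, not part of this patch
    CREATE TABLE codec_example
    (
        id UInt64 CODEC(Delta(8), LZ4),   -- codecs apply left to right on write
        data String CODEC(ZSTD(1))
    )
    ENGINE = MergeTree()
    ORDER BY id;

Codecs in a CODEC(...) chain are applied in the order listed when data is written and reversed when it is read back.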
diff --git a/tests/queries/0_stateless/00933_alter_ttl.reference b/tests/queries/0_stateless/00933_alter_ttl.reference
new file mode 100644
index 00000000000..9b5cec0f773
--- /dev/null
+++ b/tests/queries/0_stateless/00933_alter_ttl.reference
@@ -0,0 +1,5 @@
+CREATE TABLE default.ttl\n(\n `d` Date, \n `a` Int32\n)\nENGINE = MergeTree\nPARTITION BY toDayOfMonth(d)\nORDER BY a\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192
+2100-10-10 3
+2100-10-10 4
+d Date
+a Int32 d + toIntervalDay(1)
diff --git a/dbms/tests/queries/0_stateless/00933_alter_ttl.sql b/tests/queries/0_stateless/00933_alter_ttl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_alter_ttl.sql
rename to tests/queries/0_stateless/00933_alter_ttl.sql
diff --git a/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference b/tests/queries/0_stateless/00933_reserved_word.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference
rename to tests/queries/0_stateless/00933_reserved_word.reference
diff --git a/dbms/tests/queries/0_stateless/00933_reserved_word.sql b/tests/queries/0_stateless/00933_reserved_word.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_reserved_word.sql
rename to tests/queries/0_stateless/00933_reserved_word.sql
diff --git a/dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference
rename to tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference
diff --git a/dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh
rename to tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh
diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference
new file mode 100644
index 00000000000..629fbf2a4a3
--- /dev/null
+++ b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference
@@ -0,0 +1,3 @@
+200
+400
+CREATE TABLE test.ttl_repl2\n(\n `d` Date, \n `x` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/ttl_repl\', \'2\')\nPARTITION BY toDayOfMonth(d)\nORDER BY x\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192
diff --git a/dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql
diff --git a/tests/queries/0_stateless/00933_ttl_simple.reference b/tests/queries/0_stateless/00933_ttl_simple.reference
new file mode 100644
index 00000000000..102639947a3
--- /dev/null
+++ b/tests/queries/0_stateless/00933_ttl_simple.reference
@@ -0,0 +1,16 @@
+0 0
+0 0
+5 6
+2000-10-10 00:00:00 0
+2000-10-10 00:00:00 0
+2000-10-10 00:00:00 0
+2100-10-10 00:00:00 3
+2100-10-10 2
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
+1 0
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
+1 1
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
+1 0
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
+1 1
diff --git a/dbms/tests/queries/0_stateless/00933_ttl_simple.sql b/tests/queries/0_stateless/00933_ttl_simple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_ttl_simple.sql
rename to tests/queries/0_stateless/00933_ttl_simple.sql
diff --git a/dbms/tests/queries/0_stateless/00933_ttl_with_default.reference b/tests/queries/0_stateless/00933_ttl_with_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_ttl_with_default.reference
rename to tests/queries/0_stateless/00933_ttl_with_default.reference
diff --git a/dbms/tests/queries/0_stateless/00933_ttl_with_default.sql b/tests/queries/0_stateless/00933_ttl_with_default.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00933_ttl_with_default.sql
rename to tests/queries/0_stateless/00933_ttl_with_default.sql
diff --git a/dbms/tests/queries/0_stateless/00934_is_valid_utf8.reference b/tests/queries/0_stateless/00934_is_valid_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00934_is_valid_utf8.reference
rename to tests/queries/0_stateless/00934_is_valid_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/00934_is_valid_utf8.sql b/tests/queries/0_stateless/00934_is_valid_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00934_is_valid_utf8.sql
rename to tests/queries/0_stateless/00934_is_valid_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.reference b/tests/queries/0_stateless/00935_to_iso_week_first_year.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.reference
rename to tests/queries/0_stateless/00935_to_iso_week_first_year.reference
diff --git a/dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.sql b/tests/queries/0_stateless/00935_to_iso_week_first_year.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.sql
rename to tests/queries/0_stateless/00935_to_iso_week_first_year.sql
diff --git a/dbms/tests/queries/0_stateless/00936_crc_functions.reference b/tests/queries/0_stateless/00936_crc_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00936_crc_functions.reference
rename to tests/queries/0_stateless/00936_crc_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00936_crc_functions.sql b/tests/queries/0_stateless/00936_crc_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00936_crc_functions.sql
rename to tests/queries/0_stateless/00936_crc_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.reference b/tests/queries/0_stateless/00936_function_result_with_operator_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.reference
rename to tests/queries/0_stateless/00936_function_result_with_operator_in.reference
diff --git a/dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.sql b/tests/queries/0_stateless/00936_function_result_with_operator_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.sql
rename to tests/queries/0_stateless/00936_function_result_with_operator_in.sql
diff --git a/dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.reference b/tests/queries/0_stateless/00936_substring_utf8_non_const.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.reference
rename to tests/queries/0_stateless/00936_substring_utf8_non_const.reference
diff --git a/dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.sql b/tests/queries/0_stateless/00936_substring_utf8_non_const.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.sql
rename to tests/queries/0_stateless/00936_substring_utf8_non_const.sql
diff --git a/dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.reference b/tests/queries/0_stateless/00937_ipv4_cidr_range.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.reference
rename to tests/queries/0_stateless/00937_ipv4_cidr_range.reference
diff --git a/dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.sql b/tests/queries/0_stateless/00937_ipv4_cidr_range.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.sql
rename to tests/queries/0_stateless/00937_ipv4_cidr_range.sql
diff --git a/dbms/tests/queries/0_stateless/00937_template_output_format.reference b/tests/queries/0_stateless/00937_template_output_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_template_output_format.reference
rename to tests/queries/0_stateless/00937_template_output_format.reference
diff --git a/dbms/tests/queries/0_stateless/00937_template_output_format.sh b/tests/queries/0_stateless/00937_template_output_format.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_template_output_format.sh
rename to tests/queries/0_stateless/00937_template_output_format.sh
diff --git a/dbms/tests/queries/0_stateless/00937_test_use_header_csv.reference b/tests/queries/0_stateless/00937_test_use_header_csv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_test_use_header_csv.reference
rename to tests/queries/0_stateless/00937_test_use_header_csv.reference
diff --git a/dbms/tests/queries/0_stateless/00937_test_use_header_csv.sh b/tests/queries/0_stateless/00937_test_use_header_csv.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_test_use_header_csv.sh
rename to tests/queries/0_stateless/00937_test_use_header_csv.sh
diff --git a/dbms/tests/queries/0_stateless/00937_test_use_header_tsv.reference b/tests/queries/0_stateless/00937_test_use_header_tsv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_test_use_header_tsv.reference
rename to tests/queries/0_stateless/00937_test_use_header_tsv.reference
diff --git a/dbms/tests/queries/0_stateless/00937_test_use_header_tsv.sh b/tests/queries/0_stateless/00937_test_use_header_tsv.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00937_test_use_header_tsv.sh
rename to tests/queries/0_stateless/00937_test_use_header_tsv.sh
diff --git a/dbms/tests/queries/0_stateless/00938_basename.reference b/tests/queries/0_stateless/00938_basename.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_basename.reference
rename to tests/queries/0_stateless/00938_basename.reference
diff --git a/dbms/tests/queries/0_stateless/00938_basename.sql b/tests/queries/0_stateless/00938_basename.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_basename.sql
rename to tests/queries/0_stateless/00938_basename.sql
diff --git a/dbms/tests/queries/0_stateless/00938_dataset_test.reference b/tests/queries/0_stateless/00938_dataset_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_dataset_test.reference
rename to tests/queries/0_stateless/00938_dataset_test.reference
diff --git a/dbms/tests/queries/0_stateless/00938_dataset_test.sql b/tests/queries/0_stateless/00938_dataset_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_dataset_test.sql
rename to tests/queries/0_stateless/00938_dataset_test.sql
diff --git a/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference b/tests/queries/0_stateless/00938_fix_rwlock_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference
rename to tests/queries/0_stateless/00938_fix_rwlock_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh b/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh
rename to tests/queries/0_stateless/00938_fix_rwlock_segfault.sh
diff --git a/dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.reference b/tests/queries/0_stateless/00938_ipv6_cidr_range.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.reference
rename to tests/queries/0_stateless/00938_ipv6_cidr_range.reference
diff --git a/dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.sql b/tests/queries/0_stateless/00938_ipv6_cidr_range.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.sql
rename to tests/queries/0_stateless/00938_ipv6_cidr_range.sql
diff --git a/dbms/tests/queries/0_stateless/00938_template_input_format.reference b/tests/queries/0_stateless/00938_template_input_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_template_input_format.reference
rename to tests/queries/0_stateless/00938_template_input_format.reference
diff --git a/dbms/tests/queries/0_stateless/00938_template_input_format.sh b/tests/queries/0_stateless/00938_template_input_format.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_template_input_format.sh
rename to tests/queries/0_stateless/00938_template_input_format.sh
diff --git a/dbms/tests/queries/0_stateless/00938_test_retention_function.reference b/tests/queries/0_stateless/00938_test_retention_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_test_retention_function.reference
rename to tests/queries/0_stateless/00938_test_retention_function.reference
diff --git a/dbms/tests/queries/0_stateless/00938_test_retention_function.sql b/tests/queries/0_stateless/00938_test_retention_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00938_test_retention_function.sql
rename to tests/queries/0_stateless/00938_test_retention_function.sql
diff --git a/dbms/tests/queries/0_stateless/00939_limit_by_offset.reference b/tests/queries/0_stateless/00939_limit_by_offset.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00939_limit_by_offset.reference
rename to tests/queries/0_stateless/00939_limit_by_offset.reference
diff --git a/dbms/tests/queries/0_stateless/00939_limit_by_offset.sql b/tests/queries/0_stateless/00939_limit_by_offset.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00939_limit_by_offset.sql
rename to tests/queries/0_stateless/00939_limit_by_offset.sql
diff --git a/dbms/tests/queries/0_stateless/00939_test_null_in.reference b/tests/queries/0_stateless/00939_test_null_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00939_test_null_in.reference
rename to tests/queries/0_stateless/00939_test_null_in.reference
diff --git a/dbms/tests/queries/0_stateless/00939_test_null_in.sql b/tests/queries/0_stateless/00939_test_null_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00939_test_null_in.sql
rename to tests/queries/0_stateless/00939_test_null_in.sql
diff --git a/dbms/tests/queries/0_stateless/00940_max_parts_in_total.reference b/tests/queries/0_stateless/00940_max_parts_in_total.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00940_max_parts_in_total.reference
rename to tests/queries/0_stateless/00940_max_parts_in_total.reference
diff --git a/dbms/tests/queries/0_stateless/00940_max_parts_in_total.sql b/tests/queries/0_stateless/00940_max_parts_in_total.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00940_max_parts_in_total.sql
rename to tests/queries/0_stateless/00940_max_parts_in_total.sql
diff --git a/dbms/tests/queries/0_stateless/00940_order_by_read_in_order.reference b/tests/queries/0_stateless/00940_order_by_read_in_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00940_order_by_read_in_order.reference
rename to tests/queries/0_stateless/00940_order_by_read_in_order.reference
diff --git a/dbms/tests/queries/0_stateless/00940_order_by_read_in_order.sql b/tests/queries/0_stateless/00940_order_by_read_in_order.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00940_order_by_read_in_order.sql
rename to tests/queries/0_stateless/00940_order_by_read_in_order.sql
diff --git a/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference b/tests/queries/0_stateless/00941_system_columns_race_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference
rename to tests/queries/0_stateless/00941_system_columns_race_condition.reference
diff --git a/dbms/tests/queries/0_stateless/00941_system_columns_race_condition.sh b/tests/queries/0_stateless/00941_system_columns_race_condition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00941_system_columns_race_condition.sh
rename to tests/queries/0_stateless/00941_system_columns_race_condition.sh
diff --git a/dbms/tests/queries/0_stateless/00941_to_custom_week.reference b/tests/queries/0_stateless/00941_to_custom_week.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00941_to_custom_week.reference
rename to tests/queries/0_stateless/00941_to_custom_week.reference
diff --git a/dbms/tests/queries/0_stateless/00941_to_custom_week.sql b/tests/queries/0_stateless/00941_to_custom_week.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00941_to_custom_week.sql
rename to tests/queries/0_stateless/00941_to_custom_week.sql
diff --git a/dbms/tests/queries/0_stateless/00942_dataparts_500.reference b/tests/queries/0_stateless/00942_dataparts_500.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00942_dataparts_500.reference
rename to tests/queries/0_stateless/00942_dataparts_500.reference
diff --git a/dbms/tests/queries/0_stateless/00942_dataparts_500.sh b/tests/queries/0_stateless/00942_dataparts_500.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00942_dataparts_500.sh
rename to tests/queries/0_stateless/00942_dataparts_500.sh
diff --git a/dbms/tests/queries/0_stateless/00942_mutate_index.reference b/tests/queries/0_stateless/00942_mutate_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00942_mutate_index.reference
rename to tests/queries/0_stateless/00942_mutate_index.reference
diff --git a/dbms/tests/queries/0_stateless/00942_mutate_index.sh b/tests/queries/0_stateless/00942_mutate_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00942_mutate_index.sh
rename to tests/queries/0_stateless/00942_mutate_index.sh
diff --git a/dbms/tests/queries/0_stateless/00942_mv_rename_table.reference b/tests/queries/0_stateless/00942_mv_rename_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00942_mv_rename_table.reference
rename to tests/queries/0_stateless/00942_mv_rename_table.reference
diff --git a/dbms/tests/queries/0_stateless/00942_mv_rename_table.sql b/tests/queries/0_stateless/00942_mv_rename_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00942_mv_rename_table.sql
rename to tests/queries/0_stateless/00942_mv_rename_table.sql
diff --git a/dbms/tests/queries/0_stateless/00943_materialize_index.reference b/tests/queries/0_stateless/00943_materialize_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00943_materialize_index.reference
rename to tests/queries/0_stateless/00943_materialize_index.reference
diff --git a/dbms/tests/queries/0_stateless/00943_materialize_index.sh b/tests/queries/0_stateless/00943_materialize_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00943_materialize_index.sh
rename to tests/queries/0_stateless/00943_materialize_index.sh
diff --git a/dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference b/tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference
rename to tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference
diff --git a/dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql b/tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql
rename to tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql
diff --git a/dbms/tests/queries/0_stateless/00944_clear_index_in_partition.reference b/tests/queries/0_stateless/00944_clear_index_in_partition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_clear_index_in_partition.reference
rename to tests/queries/0_stateless/00944_clear_index_in_partition.reference
diff --git a/dbms/tests/queries/0_stateless/00944_clear_index_in_partition.sh b/tests/queries/0_stateless/00944_clear_index_in_partition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_clear_index_in_partition.sh
rename to tests/queries/0_stateless/00944_clear_index_in_partition.sh
diff --git a/dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference b/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference
rename to tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh b/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh
rename to tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh
diff --git a/dbms/tests/queries/0_stateless/00944_minmax_null.reference b/tests/queries/0_stateless/00944_minmax_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_minmax_null.reference
rename to tests/queries/0_stateless/00944_minmax_null.reference
diff --git a/dbms/tests/queries/0_stateless/00944_minmax_null.sql b/tests/queries/0_stateless/00944_minmax_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_minmax_null.sql
rename to tests/queries/0_stateless/00944_minmax_null.sql
diff --git a/dbms/tests/queries/0_stateless/00944_ml_test.reference b/tests/queries/0_stateless/00944_ml_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_ml_test.reference
rename to tests/queries/0_stateless/00944_ml_test.reference
diff --git a/dbms/tests/queries/0_stateless/00944_ml_test.sql b/tests/queries/0_stateless/00944_ml_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00944_ml_test.sql
rename to tests/queries/0_stateless/00944_ml_test.sql
diff --git a/dbms/tests/queries/0_stateless/00945_bloom_filter_index.reference b/tests/queries/0_stateless/00945_bloom_filter_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00945_bloom_filter_index.reference
rename to tests/queries/0_stateless/00945_bloom_filter_index.reference
diff --git a/dbms/tests/queries/0_stateless/00945_bloom_filter_index.sql b/tests/queries/0_stateless/00945_bloom_filter_index.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00945_bloom_filter_index.sql
rename to tests/queries/0_stateless/00945_bloom_filter_index.sql
diff --git a/dbms/tests/queries/0_stateless/00945_ml_test.reference b/tests/queries/0_stateless/00945_ml_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00945_ml_test.reference
rename to tests/queries/0_stateless/00945_ml_test.reference
diff --git a/dbms/tests/queries/0_stateless/00945_ml_test.sql b/tests/queries/0_stateless/00945_ml_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00945_ml_test.sql
rename to tests/queries/0_stateless/00945_ml_test.sql
diff --git a/dbms/tests/queries/0_stateless/00946_ml_test.reference b/tests/queries/0_stateless/00946_ml_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00946_ml_test.reference
rename to tests/queries/0_stateless/00946_ml_test.reference
diff --git a/dbms/tests/queries/0_stateless/00946_ml_test.sql b/tests/queries/0_stateless/00946_ml_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00946_ml_test.sql
rename to tests/queries/0_stateless/00946_ml_test.sql
diff --git a/dbms/tests/queries/0_stateless/00947_ml_test.reference b/tests/queries/0_stateless/00947_ml_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00947_ml_test.reference
rename to tests/queries/0_stateless/00947_ml_test.reference
diff --git a/dbms/tests/queries/0_stateless/00947_ml_test.sql b/tests/queries/0_stateless/00947_ml_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00947_ml_test.sql
rename to tests/queries/0_stateless/00947_ml_test.sql
diff --git a/dbms/tests/queries/0_stateless/00948_format_in_with_single_element.reference b/tests/queries/0_stateless/00948_format_in_with_single_element.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_format_in_with_single_element.reference
rename to tests/queries/0_stateless/00948_format_in_with_single_element.reference
diff --git a/dbms/tests/queries/0_stateless/00948_format_in_with_single_element.sh b/tests/queries/0_stateless/00948_format_in_with_single_element.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_format_in_with_single_element.sh
rename to tests/queries/0_stateless/00948_format_in_with_single_element.sh
diff --git a/dbms/tests/queries/0_stateless/00948_to_valid_utf8.reference b/tests/queries/0_stateless/00948_to_valid_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_to_valid_utf8.reference
rename to tests/queries/0_stateless/00948_to_valid_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/00948_to_valid_utf8.sql b/tests/queries/0_stateless/00948_to_valid_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_to_valid_utf8.sql
rename to tests/queries/0_stateless/00948_to_valid_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/00948_values_interpreter_template.reference b/tests/queries/0_stateless/00948_values_interpreter_template.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_values_interpreter_template.reference
rename to tests/queries/0_stateless/00948_values_interpreter_template.reference
diff --git a/dbms/tests/queries/0_stateless/00948_values_interpreter_template.sql b/tests/queries/0_stateless/00948_values_interpreter_template.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_values_interpreter_template.sql
rename to tests/queries/0_stateless/00948_values_interpreter_template.sql
diff --git a/dbms/tests/queries/0_stateless/00949_format.reference b/tests/queries/0_stateless/00949_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00949_format.reference
rename to tests/queries/0_stateless/00949_format.reference
diff --git a/dbms/tests/queries/0_stateless/00949_format.sql b/tests/queries/0_stateless/00949_format.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00949_format.sql
rename to tests/queries/0_stateless/00949_format.sql
diff --git a/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.reference b/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.reference
rename to tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference
diff --git a/dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql b/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql
rename to tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql
diff --git a/dbms/tests/queries/0_stateless/00950_default_prewhere.reference b/tests/queries/0_stateless/00950_default_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_default_prewhere.reference
rename to tests/queries/0_stateless/00950_default_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00950_default_prewhere.sql b/tests/queries/0_stateless/00950_default_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_default_prewhere.sql
rename to tests/queries/0_stateless/00950_default_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00950_dict_get.reference b/tests/queries/0_stateless/00950_dict_get.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_dict_get.reference
rename to tests/queries/0_stateless/00950_dict_get.reference
diff --git a/dbms/tests/queries/0_stateless/00950_dict_get.sql b/tests/queries/0_stateless/00950_dict_get.sql
similarity index 95%
rename from dbms/tests/queries/0_stateless/00950_dict_get.sql
rename to tests/queries/0_stateless/00950_dict_get.sql
index 2483a21c0d3..93bb8f9b813 100644
--- a/dbms/tests/queries/0_stateless/00950_dict_get.sql
+++ b/tests/queries/0_stateless/00950_dict_get.sql
@@ -1,6 +1,5 @@
--- Must use `test_00950` database and these tables - they're configured in dbms/tests/*_dictionary.xml
-create database if not exists test_00950;
-use test_00950;
+-- Must use `system` database and these tables - they're configured in tests/*_dictionary.xml
+use system;
 drop table if exists ints;
 drop table if exists strings;
 drop table if exists decimals;
@@ -270,7 +269,14 @@ select 'dictGetOrDefault', 'complex_cache_decimals' as dict_name, tuple(toUInt64
     dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)),
     dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1));
-drop table ints;
-drop table strings;
-drop table decimals;
-drop database test_00950;
+--
+-- Keep the tables, so that the dictionaries can be reloaded correctly and
+-- SYSTEM RELOAD DICTIONARIES doesn't break.
+-- We could also:
+-- * drop the dictionaries -- not possible, they are configured in a .xml;
+-- * switch dictionaries to DDL syntax so that they can be dropped -- tedious,
+-- because there are a couple dozen of them, and also we need to have some
+-- .xml dictionaries in tests so that we test backward compatibility with this
+-- format;
+-- * unload dictionaries -- no command for that.
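+-- (For illustration only, an editorial sketch that is not part of this test:
+-- the DDL syntax mentioned above would look roughly like the following, with
+-- hypothetical dictionary and column names, and would allow per-test cleanup:
+--   CREATE DICTIONARY hypothetical_dict (key UInt64, val String)
+--   PRIMARY KEY key
+--   SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' PASSWORD '' DB 'system' TABLE 'ints'))
+--   LAYOUT(FLAT())
+--   LIFETIME(MIN 0 MAX 0);
+--   DROP DICTIONARY hypothetical_dict;)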
+--
diff --git a/dbms/tests/queries/0_stateless/00950_test_double_delta_codec.reference b/tests/queries/0_stateless/00950_test_double_delta_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_double_delta_codec.reference
rename to tests/queries/0_stateless/00950_test_double_delta_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00950_test_double_delta_codec.sql b/tests/queries/0_stateless/00950_test_double_delta_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_double_delta_codec.sql
rename to tests/queries/0_stateless/00950_test_double_delta_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00950_test_gorilla_codec.reference b/tests/queries/0_stateless/00950_test_gorilla_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_gorilla_codec.reference
rename to tests/queries/0_stateless/00950_test_gorilla_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00950_test_gorilla_codec.sql b/tests/queries/0_stateless/00950_test_gorilla_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_gorilla_codec.sql
rename to tests/queries/0_stateless/00950_test_gorilla_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00951_ngram_search.reference b/tests/queries/0_stateless/00951_ngram_search.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00951_ngram_search.reference
rename to tests/queries/0_stateless/00951_ngram_search.reference
diff --git a/dbms/tests/queries/0_stateless/00951_ngram_search.sql b/tests/queries/0_stateless/00951_ngram_search.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00951_ngram_search.sql
rename to tests/queries/0_stateless/00951_ngram_search.sql
diff --git a/dbms/tests/queries/0_stateless/00952_basic_constraints.reference b/tests/queries/0_stateless/00952_basic_constraints.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_basic_constraints.reference
rename to tests/queries/0_stateless/00952_basic_constraints.reference
diff --git a/dbms/tests/queries/0_stateless/00952_basic_constraints.sh b/tests/queries/0_stateless/00952_basic_constraints.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_basic_constraints.sh
rename to tests/queries/0_stateless/00952_basic_constraints.sh
diff --git a/dbms/tests/queries/0_stateless/00952_input_function.reference b/tests/queries/0_stateless/00952_input_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_input_function.reference
rename to tests/queries/0_stateless/00952_input_function.reference
diff --git a/dbms/tests/queries/0_stateless/00952_input_function.sh b/tests/queries/0_stateless/00952_input_function.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_input_function.sh
rename to tests/queries/0_stateless/00952_input_function.sh
diff --git a/dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference b/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference
rename to tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference
diff --git a/dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql b/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql
rename to tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql
diff --git a/dbms/tests/queries/0_stateless/00952_part_frozen_info.reference b/tests/queries/0_stateless/00952_part_frozen_info.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_part_frozen_info.reference
rename to tests/queries/0_stateless/00952_part_frozen_info.reference
diff --git a/dbms/tests/queries/0_stateless/00952_part_frozen_info.sql b/tests/queries/0_stateless/00952_part_frozen_info.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_part_frozen_info.sql
rename to tests/queries/0_stateless/00952_part_frozen_info.sql
diff --git a/dbms/tests/queries/0_stateless/00953_constraints_operations.reference b/tests/queries/0_stateless/00953_constraints_operations.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_constraints_operations.reference
rename to tests/queries/0_stateless/00953_constraints_operations.reference
diff --git a/dbms/tests/queries/0_stateless/00953_constraints_operations.sh b/tests/queries/0_stateless/00953_constraints_operations.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_constraints_operations.sh
rename to tests/queries/0_stateless/00953_constraints_operations.sh
diff --git a/dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.reference b/tests/queries/0_stateless/00953_indices_alter_exceptions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.reference
rename to tests/queries/0_stateless/00953_indices_alter_exceptions.reference
diff --git a/dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.sh b/tests/queries/0_stateless/00953_indices_alter_exceptions.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.sh
rename to tests/queries/0_stateless/00953_indices_alter_exceptions.sh
diff --git a/dbms/tests/queries/0_stateless/00953_moving_functions.reference b/tests/queries/0_stateless/00953_moving_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_moving_functions.reference
rename to tests/queries/0_stateless/00953_moving_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00953_moving_functions.sql b/tests/queries/0_stateless/00953_moving_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_moving_functions.sql
rename to tests/queries/0_stateless/00953_moving_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference
rename to tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh
rename to tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh
diff --git a/dbms/tests/queries/0_stateless/00954_client_prepared_statements.reference b/tests/queries/0_stateless/00954_client_prepared_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_client_prepared_statements.reference
rename to tests/queries/0_stateless/00954_client_prepared_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00954_client_prepared_statements.sh b/tests/queries/0_stateless/00954_client_prepared_statements.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_client_prepared_statements.sh
rename to tests/queries/0_stateless/00954_client_prepared_statements.sh
diff --git a/dbms/tests/queries/0_stateless/00954_resample_combinator.reference b/tests/queries/0_stateless/00954_resample_combinator.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_resample_combinator.reference
rename to tests/queries/0_stateless/00954_resample_combinator.reference
diff --git a/dbms/tests/queries/0_stateless/00954_resample_combinator.sql b/tests/queries/0_stateless/00954_resample_combinator.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_resample_combinator.sql
rename to tests/queries/0_stateless/00954_resample_combinator.sql
diff --git a/dbms/tests/queries/0_stateless/00955_complex_prepared_statements.reference b/tests/queries/0_stateless/00955_complex_prepared_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_complex_prepared_statements.reference
rename to tests/queries/0_stateless/00955_complex_prepared_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00955_complex_prepared_statements.sh b/tests/queries/0_stateless/00955_complex_prepared_statements.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_complex_prepared_statements.sh
rename to tests/queries/0_stateless/00955_complex_prepared_statements.sh
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark.reference b/tests/queries/0_stateless/00955_test_final_mark.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark.reference
rename to tests/queries/0_stateless/00955_test_final_mark.reference
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark.sql b/tests/queries/0_stateless/00955_test_final_mark.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark.sql
rename to tests/queries/0_stateless/00955_test_final_mark.sql
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark_use.reference b/tests/queries/0_stateless/00955_test_final_mark_use.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark_use.reference
rename to tests/queries/0_stateless/00955_test_final_mark_use.reference
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark_use.sh b/tests/queries/0_stateless/00955_test_final_mark_use.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark_use.sh
rename to tests/queries/0_stateless/00955_test_final_mark_use.sh
diff --git a/dbms/tests/queries/0_stateless/00956_http_prepared_statements.reference b/tests/queries/0_stateless/00956_http_prepared_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_http_prepared_statements.reference
rename to tests/queries/0_stateless/00956_http_prepared_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00956_http_prepared_statements.sh
b/tests/queries/0_stateless/00956_http_prepared_statements.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_http_prepared_statements.sh
rename to tests/queries/0_stateless/00956_http_prepared_statements.sh
diff --git a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference b/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference
rename to tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference
diff --git a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql b/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql
rename to tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql
diff --git a/dbms/tests/queries/0_stateless/00956_sensitive_data_masking.reference b/tests/queries/0_stateless/00956_sensitive_data_masking.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_sensitive_data_masking.reference
rename to tests/queries/0_stateless/00956_sensitive_data_masking.reference
diff --git a/dbms/tests/queries/0_stateless/00956_sensitive_data_masking.sh b/tests/queries/0_stateless/00956_sensitive_data_masking.sh
similarity index 97%
rename from dbms/tests/queries/0_stateless/00956_sensitive_data_masking.sh
rename to tests/queries/0_stateless/00956_sensitive_data_masking.sh
index 0c5bd753f26..0f76c34eaff 100755
--- a/dbms/tests/queries/0_stateless/00956_sensitive_data_masking.sh
+++ b/tests/queries/0_stateless/00956_sensitive_data_masking.sh
@@ -2,9 +2,9 @@
 
 # Get all server logs
 export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL="trace"
-#export CLICKHOUSE_BINARY='../../../../build-vscode/Debug/dbms/programs/clickhouse'
+#export CLICKHOUSE_BINARY='../../../../build-vscode/Debug/programs/clickhouse'
 #export CLICKHOUSE_PORT_TCP=59000
-#export CLICKHOUSE_CLIENT_BINARY='../../../../cmake-build-debug/dbms/programs/clickhouse client'
+#export CLICKHOUSE_CLIENT_BINARY='../../../../cmake-build-debug/programs/clickhouse client'
 
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . $CURDIR/../shell_config.sh
diff --git a/dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference b/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference
rename to tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql b/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql
rename to tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00957_delta_diff_bug.reference b/tests/queries/0_stateless/00957_delta_diff_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_delta_diff_bug.reference
rename to tests/queries/0_stateless/00957_delta_diff_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00957_delta_diff_bug.sql b/tests/queries/0_stateless/00957_delta_diff_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_delta_diff_bug.sql
rename to tests/queries/0_stateless/00957_delta_diff_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference b/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference
rename to tests/queries/0_stateless/00957_format_with_clashed_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh b/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh
rename to tests/queries/0_stateless/00957_format_with_clashed_aliases.sh
diff --git a/dbms/tests/queries/0_stateless/00957_neighbor.reference b/tests/queries/0_stateless/00957_neighbor.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_neighbor.reference
rename to tests/queries/0_stateless/00957_neighbor.reference
diff --git a/dbms/tests/queries/0_stateless/00957_neighbor.sql b/tests/queries/0_stateless/00957_neighbor.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_neighbor.sql
rename to tests/queries/0_stateless/00957_neighbor.sql
diff --git a/dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference b/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference
rename to tests/queries/0_stateless/00958_format_of_tuple_array_element.reference
diff --git a/dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh b/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh
rename to tests/queries/0_stateless/00958_format_of_tuple_array_element.sh
diff --git a/dbms/tests/queries/0_stateless/00959_format_with_different_aliases.reference b/tests/queries/0_stateless/00959_format_with_different_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00959_format_with_different_aliases.reference
rename to tests/queries/0_stateless/00959_format_with_different_aliases.reference
diff --git
a/dbms/tests/queries/0_stateless/00959_format_with_different_aliases.sh b/tests/queries/0_stateless/00959_format_with_different_aliases.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00959_format_with_different_aliases.sh rename to tests/queries/0_stateless/00959_format_with_different_aliases.sh diff --git a/dbms/tests/queries/0_stateless/00960_eval_ml_method_const.reference b/tests/queries/0_stateless/00960_eval_ml_method_const.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00960_eval_ml_method_const.reference rename to tests/queries/0_stateless/00960_eval_ml_method_const.reference diff --git a/dbms/tests/queries/0_stateless/00960_eval_ml_method_const.sql b/tests/queries/0_stateless/00960_eval_ml_method_const.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00960_eval_ml_method_const.sql rename to tests/queries/0_stateless/00960_eval_ml_method_const.sql diff --git a/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.py b/tests/queries/0_stateless/00960_live_view_watch_events_live.py similarity index 100% rename from dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.py rename to tests/queries/0_stateless/00960_live_view_watch_events_live.py diff --git a/dbms/tests/queries/0_stateless/00967_live_view_watch_http.reference b/tests/queries/0_stateless/00960_live_view_watch_events_live.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00967_live_view_watch_http.reference rename to tests/queries/0_stateless/00960_live_view_watch_events_live.reference diff --git a/dbms/tests/queries/0_stateless/00961_check_table.reference b/tests/queries/0_stateless/00961_check_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00961_check_table.reference rename to tests/queries/0_stateless/00961_check_table.reference diff --git a/dbms/tests/queries/0_stateless/00961_check_table.sql b/tests/queries/0_stateless/00961_check_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00961_check_table.sql rename to tests/queries/0_stateless/00961_check_table.sql diff --git a/dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference rename to tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference diff --git a/dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql rename to tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql diff --git a/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.reference b/tests/queries/0_stateless/00961_temporary_live_view_watch.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.reference rename to tests/queries/0_stateless/00961_temporary_live_view_watch.reference diff --git a/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.sql b/tests/queries/0_stateless/00961_temporary_live_view_watch.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.sql rename to 
tests/queries/0_stateless/00961_temporary_live_view_watch.sql diff --git a/dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference b/tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference rename to tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference diff --git a/dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql b/tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql rename to tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql diff --git a/dbms/tests/queries/0_stateless/00962_enumNotExect.reference b/tests/queries/0_stateless/00962_enumNotExect.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00962_enumNotExect.reference rename to tests/queries/0_stateless/00962_enumNotExect.reference diff --git a/dbms/tests/queries/0_stateless/00962_enumNotExect.sql b/tests/queries/0_stateless/00962_enumNotExect.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00962_enumNotExect.sql rename to tests/queries/0_stateless/00962_enumNotExect.sql diff --git a/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py similarity index 100% rename from dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py rename to tests/queries/0_stateless/00962_temporary_live_view_watch_live.py diff --git a/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference rename to tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference diff --git a/dbms/tests/queries/0_stateless/00962_visit_param_various.reference b/tests/queries/0_stateless/00962_visit_param_various.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00962_visit_param_various.reference rename to tests/queries/0_stateless/00962_visit_param_various.reference diff --git a/dbms/tests/queries/0_stateless/00962_visit_param_various.sql b/tests/queries/0_stateless/00962_visit_param_various.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00962_visit_param_various.sql rename to tests/queries/0_stateless/00962_visit_param_various.sql diff --git a/dbms/tests/queries/0_stateless/00963_achimbab.reference b/tests/queries/0_stateless/00963_achimbab.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00963_achimbab.reference rename to tests/queries/0_stateless/00963_achimbab.reference diff --git a/dbms/tests/queries/0_stateless/00963_achimbab.sql b/tests/queries/0_stateless/00963_achimbab.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00963_achimbab.sql rename to tests/queries/0_stateless/00963_achimbab.sql diff --git a/dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.reference b/tests/queries/0_stateless/00963_startsWith_force_primary_key.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.reference rename to tests/queries/0_stateless/00963_startsWith_force_primary_key.reference diff --git 
a/dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.sql b/tests/queries/0_stateless/00963_startsWith_force_primary_key.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.sql rename to tests/queries/0_stateless/00963_startsWith_force_primary_key.sql diff --git a/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled b/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled similarity index 100% rename from dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled rename to tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled diff --git a/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference b/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference rename to tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference diff --git a/dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.reference b/tests/queries/0_stateless/00964_bloom_index_string_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.reference rename to tests/queries/0_stateless/00964_bloom_index_string_functions.reference diff --git a/dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.sh b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.sh rename to tests/queries/0_stateless/00964_bloom_index_string_functions.sh diff --git a/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py b/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py similarity index 100% rename from dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py rename to tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py diff --git a/dbms/tests/queries/0_stateless/00971_query_id_in_logs.reference b/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00971_query_id_in_logs.reference rename to tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference diff --git a/dbms/tests/queries/0_stateless/00964_os_thread_priority.reference b/tests/queries/0_stateless/00964_os_thread_priority.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00964_os_thread_priority.reference rename to tests/queries/0_stateless/00964_os_thread_priority.reference diff --git a/dbms/tests/queries/0_stateless/00964_os_thread_priority.sql b/tests/queries/0_stateless/00964_os_thread_priority.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00964_os_thread_priority.sql rename to tests/queries/0_stateless/00964_os_thread_priority.sql diff --git a/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py similarity index 100% rename from dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py rename to tests/queries/0_stateless/00965_live_view_watch_heartbeat.py diff --git a/dbms/tests/queries/0_stateless/00975_live_view_create.reference b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00975_live_view_create.reference rename to tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference diff --git a/dbms/tests/queries/0_stateless/00965_logs_level_bugfix.reference b/tests/queries/0_stateless/00965_logs_level_bugfix.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00965_logs_level_bugfix.reference rename to tests/queries/0_stateless/00965_logs_level_bugfix.reference diff --git a/dbms/tests/queries/0_stateless/00965_logs_level_bugfix.sh b/tests/queries/0_stateless/00965_logs_level_bugfix.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00965_logs_level_bugfix.sh rename to tests/queries/0_stateless/00965_logs_level_bugfix.sh diff --git a/dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.reference b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.reference rename to tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference diff --git a/dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh rename to tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh diff --git a/dbms/tests/queries/0_stateless/00965_set_index_string_functions.reference b/tests/queries/0_stateless/00965_set_index_string_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00965_set_index_string_functions.reference rename to tests/queries/0_stateless/00965_set_index_string_functions.reference diff --git a/dbms/tests/queries/0_stateless/00965_set_index_string_functions.sh b/tests/queries/0_stateless/00965_set_index_string_functions.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00965_set_index_string_functions.sh rename to tests/queries/0_stateless/00965_set_index_string_functions.sh diff --git a/dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference rename to tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference diff --git a/dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql rename to tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql diff --git a/dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference b/tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference rename to tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference diff --git a/dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql b/tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql rename to tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql diff --git a/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.py 
b/tests/queries/0_stateless/00966_live_view_watch_events_http.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.py
rename to tests/queries/0_stateless/00966_live_view_watch_events_http.py
diff --git a/dbms/tests/queries/0_stateless/00976_max_execution_speed.reference b/tests/queries/0_stateless/00966_live_view_watch_events_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_max_execution_speed.reference
rename to tests/queries/0_stateless/00966_live_view_watch_events_http.reference
diff --git a/dbms/tests/queries/0_stateless/00967_ubsan_bit_test.reference b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00967_ubsan_bit_test.reference
rename to tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference
diff --git a/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql
new file mode 100644
index 00000000000..33f16eb241c
--- /dev/null
+++ b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS dist_00967;
+DROP TABLE IF EXISTS underlying_00967;
+
+CREATE TABLE dist_00967 (key UInt64) Engine=Distributed('test_shard_localhost', currentDatabase(), underlying_00967);
+-- fails for TinyLog()/MergeTree()/... but not for Memory()
+CREATE TABLE underlying_00967 (key Nullable(UInt64)) Engine=TinyLog();
+INSERT INTO dist_00967 SELECT toUInt64(number) FROM system.numbers LIMIT 1;
+
+SELECT * FROM dist_00967;
diff --git a/dbms/tests/queries/0_stateless/00967_live_view_watch_http.py b/tests/queries/0_stateless/00967_live_view_watch_http.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00967_live_view_watch_http.py
rename to tests/queries/0_stateless/00967_live_view_watch_http.py
diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live.reference b/tests/queries/0_stateless/00967_live_view_watch_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live.reference
rename to tests/queries/0_stateless/00967_live_view_watch_http.reference
diff --git a/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference b/tests/queries/0_stateless/00967_ubsan_bit_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference
rename to tests/queries/0_stateless/00967_ubsan_bit_test.reference
diff --git a/dbms/tests/queries/0_stateless/00967_ubsan_bit_test.sql b/tests/queries/0_stateless/00967_ubsan_bit_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00967_ubsan_bit_test.sql
rename to tests/queries/0_stateless/00967_ubsan_bit_test.sql
diff --git a/dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.reference b/tests/queries/0_stateless/00968_file_engine_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.reference
rename to tests/queries/0_stateless/00968_file_engine_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.sql b/tests/queries/0_stateless/00968_file_engine_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.sql
rename to tests/queries/0_stateless/00968_file_engine_in_subquery.sql
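The new test 00967_insert_into_distributed_different_types.sql above pins down what happens when an INSERT goes through a Distributed table whose underlying table declares a different column type (Nullable(UInt64) vs. UInt64). Per the in-test comment, the mismatch fails for TinyLog()/MergeTree()/... but is tolerated by Memory(); a sketch of that tolerated variant, using the test's own table names with the engine swap as the only change:

DROP TABLE IF EXISTS dist_00967;
DROP TABLE IF EXISTS underlying_00967;

CREATE TABLE dist_00967 (key UInt64) Engine=Distributed('test_shard_localhost', currentDatabase(), underlying_00967);
-- with Memory() the UInt64 -> Nullable(UInt64) mismatch is tolerated and the INSERT succeeds
CREATE TABLE underlying_00967 (key Nullable(UInt64)) Engine=Memory();
INSERT INTO dist_00967 SELECT toUInt64(number) FROM system.numbers LIMIT 1;

SELECT * FROM dist_00967;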
diff --git a/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference b/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference
rename to tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference
diff --git a/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql b/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql
rename to tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql
diff --git a/dbms/tests/queries/0_stateless/00968_roundAge.reference b/tests/queries/0_stateless/00968_roundAge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_roundAge.reference
rename to tests/queries/0_stateless/00968_roundAge.reference
diff --git a/dbms/tests/queries/0_stateless/00968_roundAge.sql b/tests/queries/0_stateless/00968_roundAge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_roundAge.sql
rename to tests/queries/0_stateless/00968_roundAge.sql
diff --git a/dbms/tests/queries/0_stateless/00969_columns_clause.reference b/tests/queries/0_stateless/00969_columns_clause.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_columns_clause.reference
rename to tests/queries/0_stateless/00969_columns_clause.reference
diff --git a/dbms/tests/queries/0_stateless/00969_columns_clause.sql b/tests/queries/0_stateless/00969_columns_clause.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_columns_clause.sql
rename to tests/queries/0_stateless/00969_columns_clause.sql
diff --git a/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference b/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference
rename to tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference
diff --git a/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql b/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql
rename to tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql
diff --git a/dbms/tests/queries/0_stateless/00969_roundDuration.reference b/tests/queries/0_stateless/00969_roundDuration.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_roundDuration.reference
rename to tests/queries/0_stateless/00969_roundDuration.reference
diff --git a/dbms/tests/queries/0_stateless/00969_roundDuration.sql b/tests/queries/0_stateless/00969_roundDuration.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_roundDuration.sql
rename to tests/queries/0_stateless/00969_roundDuration.sql
diff --git a/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py b/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py
similarity index 100%
rename from
dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py rename to tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference b/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference rename to tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference diff --git a/dbms/tests/queries/0_stateless/00970_substring_arg_validation.reference b/tests/queries/0_stateless/00970_substring_arg_validation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00970_substring_arg_validation.reference rename to tests/queries/0_stateless/00970_substring_arg_validation.reference diff --git a/dbms/tests/queries/0_stateless/00970_substring_arg_validation.sql b/tests/queries/0_stateless/00970_substring_arg_validation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00970_substring_arg_validation.sql rename to tests/queries/0_stateless/00970_substring_arg_validation.sql diff --git a/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py b/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py similarity index 100% rename from dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py rename to tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference b/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference rename to tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference diff --git a/dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference b/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference rename to tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference diff --git a/dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql b/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql rename to tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql diff --git a/dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference b/tests/queries/0_stateless/00971_query_id_in_logs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference rename to tests/queries/0_stateless/00971_query_id_in_logs.reference diff --git a/dbms/tests/queries/0_stateless/00971_query_id_in_logs.sh b/tests/queries/0_stateless/00971_query_id_in_logs.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00971_query_id_in_logs.sh rename to tests/queries/0_stateless/00971_query_id_in_logs.sh diff --git a/dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.reference 
b/tests/queries/0_stateless/00972_desc_table_virtual_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.reference rename to tests/queries/0_stateless/00972_desc_table_virtual_columns.reference diff --git a/dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.sql b/tests/queries/0_stateless/00972_desc_table_virtual_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.sql rename to tests/queries/0_stateless/00972_desc_table_virtual_columns.sql diff --git a/dbms/tests/queries/0_stateless/00972_geohashesInBox.reference b/tests/queries/0_stateless/00972_geohashesInBox.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00972_geohashesInBox.reference rename to tests/queries/0_stateless/00972_geohashesInBox.reference diff --git a/dbms/tests/queries/0_stateless/00972_geohashesInBox.sql b/tests/queries/0_stateless/00972_geohashesInBox.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00972_geohashesInBox.sql rename to tests/queries/0_stateless/00972_geohashesInBox.sql diff --git a/dbms/tests/queries/0_stateless/00972_live_view_select_1.reference b/tests/queries/0_stateless/00972_live_view_select_1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00972_live_view_select_1.reference rename to tests/queries/0_stateless/00972_live_view_select_1.reference diff --git a/dbms/tests/queries/0_stateless/00972_live_view_select_1.sql b/tests/queries/0_stateless/00972_live_view_select_1.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00972_live_view_select_1.sql rename to tests/queries/0_stateless/00972_live_view_select_1.sql diff --git a/dbms/tests/queries/0_stateless/00973_create_table_as_table_function.reference b/tests/queries/0_stateless/00973_create_table_as_table_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_create_table_as_table_function.reference rename to tests/queries/0_stateless/00973_create_table_as_table_function.reference diff --git a/dbms/tests/queries/0_stateless/00973_create_table_as_table_function.sql b/tests/queries/0_stateless/00973_create_table_as_table_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_create_table_as_table_function.sql rename to tests/queries/0_stateless/00973_create_table_as_table_function.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_select.reference b/tests/queries/0_stateless/00973_live_view_select.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_select.reference rename to tests/queries/0_stateless/00973_live_view_select.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_select.sql b/tests/queries/0_stateless/00973_live_view_select.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_select.sql rename to tests/queries/0_stateless/00973_live_view_select.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select.sql 
similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference 
b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00973_uniq_non_associativity.reference 
b/tests/queries/0_stateless/00973_uniq_non_associativity.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00973_uniq_non_associativity.reference rename to tests/queries/0_stateless/00973_uniq_non_associativity.reference diff --git a/dbms/tests/queries/0_stateless/00973_uniq_non_associativity.sql b/tests/queries/0_stateless/00973_uniq_non_associativity.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00973_uniq_non_associativity.sql rename to tests/queries/0_stateless/00973_uniq_non_associativity.sql diff --git a/dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference b/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference rename to tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference diff --git a/dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql b/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql rename to tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql diff --git a/dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference b/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference rename to tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference diff --git a/dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql b/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql rename to tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql diff --git a/dbms/tests/queries/0_stateless/00974_distributed_join_on.reference b/tests/queries/0_stateless/00974_distributed_join_on.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_distributed_join_on.reference rename to tests/queries/0_stateless/00974_distributed_join_on.reference diff --git a/dbms/tests/queries/0_stateless/00974_distributed_join_on.sql b/tests/queries/0_stateless/00974_distributed_join_on.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_distributed_join_on.sql rename to tests/queries/0_stateless/00974_distributed_join_on.sql diff --git a/dbms/tests/queries/0_stateless/00974_final_predicate_push_down.reference b/tests/queries/0_stateless/00974_final_predicate_push_down.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_final_predicate_push_down.reference rename to tests/queries/0_stateless/00974_final_predicate_push_down.reference diff --git a/dbms/tests/queries/0_stateless/00974_final_predicate_push_down.sql b/tests/queries/0_stateless/00974_final_predicate_push_down.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_final_predicate_push_down.sql rename to tests/queries/0_stateless/00974_final_predicate_push_down.sql diff --git a/dbms/tests/queries/0_stateless/00974_fix_join_on.reference b/tests/queries/0_stateless/00974_fix_join_on.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_fix_join_on.reference rename to 
tests/queries/0_stateless/00974_fix_join_on.reference diff --git a/dbms/tests/queries/0_stateless/00974_fix_join_on.sql b/tests/queries/0_stateless/00974_fix_join_on.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_fix_join_on.sql rename to tests/queries/0_stateless/00974_fix_join_on.sql diff --git a/dbms/tests/queries/0_stateless/00974_full_outer_join.reference b/tests/queries/0_stateless/00974_full_outer_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_full_outer_join.reference rename to tests/queries/0_stateless/00974_full_outer_join.reference diff --git a/dbms/tests/queries/0_stateless/00974_full_outer_join.sql b/tests/queries/0_stateless/00974_full_outer_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_full_outer_join.sql rename to tests/queries/0_stateless/00974_full_outer_join.sql diff --git a/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference b/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference rename to tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference diff --git a/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql b/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql rename to tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql diff --git a/dbms/tests/queries/0_stateless/00974_low_cardinality_cast.reference b/tests/queries/0_stateless/00974_low_cardinality_cast.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_low_cardinality_cast.reference rename to tests/queries/0_stateless/00974_low_cardinality_cast.reference diff --git a/dbms/tests/queries/0_stateless/00974_low_cardinality_cast.sql b/tests/queries/0_stateless/00974_low_cardinality_cast.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_low_cardinality_cast.sql rename to tests/queries/0_stateless/00974_low_cardinality_cast.sql diff --git a/dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference rename to tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference diff --git a/dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh rename to tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh diff --git a/dbms/tests/queries/0_stateless/00974_query_profiler.reference b/tests/queries/0_stateless/00974_query_profiler.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_query_profiler.reference rename to tests/queries/0_stateless/00974_query_profiler.reference diff --git a/dbms/tests/queries/0_stateless/00974_query_profiler.sql b/tests/queries/0_stateless/00974_query_profiler.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00974_query_profiler.sql rename to tests/queries/0_stateless/00974_query_profiler.sql diff --git 
a/dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.reference b/tests/queries/0_stateless/00974_text_log_table_not_empty.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.reference rename to tests/queries/0_stateless/00974_text_log_table_not_empty.reference diff --git a/dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.sh b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.sh rename to tests/queries/0_stateless/00974_text_log_table_not_empty.sh diff --git a/dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference rename to tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh rename to tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00975_json_hang.reference b/tests/queries/0_stateless/00975_json_hang.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00975_json_hang.reference rename to tests/queries/0_stateless/00975_json_hang.reference diff --git a/dbms/tests/queries/0_stateless/00975_json_hang.sql b/tests/queries/0_stateless/00975_json_hang.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00975_json_hang.sql rename to tests/queries/0_stateless/00975_json_hang.sql diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference b/tests/queries/0_stateless/00975_live_view_create.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference rename to tests/queries/0_stateless/00975_live_view_create.reference diff --git a/dbms/tests/queries/0_stateless/00975_live_view_create.sql b/tests/queries/0_stateless/00975_live_view_create.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00975_live_view_create.sql rename to tests/queries/0_stateless/00975_live_view_create.sql diff --git a/dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.reference b/tests/queries/0_stateless/00975_move_partition_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.reference rename to tests/queries/0_stateless/00975_move_partition_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.sql b/tests/queries/0_stateless/00975_move_partition_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.sql rename to tests/queries/0_stateless/00975_move_partition_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00975_recursive_materialized_view.reference b/tests/queries/0_stateless/00975_recursive_materialized_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00975_recursive_materialized_view.reference rename to tests/queries/0_stateless/00975_recursive_materialized_view.reference diff --git 
diff --git a/dbms/tests/queries/0_stateless/00975_recursive_materialized_view.sql b/tests/queries/0_stateless/00975_recursive_materialized_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_recursive_materialized_view.sql
rename to tests/queries/0_stateless/00975_recursive_materialized_view.sql
diff --git a/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference b/tests/queries/0_stateless/00975_sample_prewhere_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference
rename to tests/queries/0_stateless/00975_sample_prewhere_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.sql b/tests/queries/0_stateless/00975_sample_prewhere_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.sql
rename to tests/queries/0_stateless/00975_sample_prewhere_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00975_values_list.reference b/tests/queries/0_stateless/00975_values_list.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_values_list.reference
rename to tests/queries/0_stateless/00975_values_list.reference
diff --git a/dbms/tests/queries/0_stateless/00975_values_list.sql b/tests/queries/0_stateless/00975_values_list.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_values_list.sql
rename to tests/queries/0_stateless/00975_values_list.sql
diff --git a/dbms/tests/queries/0_stateless/00976_asof_join_on.reference b/tests/queries/0_stateless/00976_asof_join_on.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_asof_join_on.reference
rename to tests/queries/0_stateless/00976_asof_join_on.reference
diff --git a/dbms/tests/queries/0_stateless/00976_asof_join_on.sql b/tests/queries/0_stateless/00976_asof_join_on.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_asof_join_on.sql
rename to tests/queries/0_stateless/00976_asof_join_on.sql
diff --git a/dbms/tests/queries/0_stateless/00976_live_view_select_version.reference b/tests/queries/0_stateless/00976_live_view_select_version.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_live_view_select_version.reference
rename to tests/queries/0_stateless/00976_live_view_select_version.reference
diff --git a/dbms/tests/queries/0_stateless/00976_live_view_select_version.sql b/tests/queries/0_stateless/00976_live_view_select_version.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_live_view_select_version.sql
rename to tests/queries/0_stateless/00976_live_view_select_version.sql
diff --git a/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.reference b/tests/queries/0_stateless/00976_max_execution_speed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00985_merge_stack_overflow.reference
rename to tests/queries/0_stateless/00976_max_execution_speed.reference
diff --git a/dbms/tests/queries/0_stateless/00976_max_execution_speed.sql b/tests/queries/0_stateless/00976_max_execution_speed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_max_execution_speed.sql
rename to tests/queries/0_stateless/00976_max_execution_speed.sql
diff --git a/dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference b/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference
rename to tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference
diff --git a/dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql b/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql
rename to tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql
diff --git a/dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.reference b/tests/queries/0_stateless/00976_system_stop_ttl_merges.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.reference
rename to tests/queries/0_stateless/00976_system_stop_ttl_merges.reference
diff --git a/dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql b/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql
rename to tests/queries/0_stateless/00976_system_stop_ttl_merges.sql
diff --git a/dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.reference b/tests/queries/0_stateless/00976_ttl_with_old_parts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.reference
rename to tests/queries/0_stateless/00976_ttl_with_old_parts.reference
diff --git a/dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.sql b/tests/queries/0_stateless/00976_ttl_with_old_parts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.sql
rename to tests/queries/0_stateless/00976_ttl_with_old_parts.sql
diff --git a/dbms/tests/queries/0_stateless/00977_int_div.reference b/tests/queries/0_stateless/00977_int_div.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00977_int_div.reference
rename to tests/queries/0_stateless/00977_int_div.reference
diff --git a/dbms/tests/queries/0_stateless/00977_int_div.sql b/tests/queries/0_stateless/00977_int_div.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00977_int_div.sql
rename to tests/queries/0_stateless/00977_int_div.sql
diff --git a/dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference b/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference
rename to tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference
diff --git a/dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql b/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql
rename to tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql
diff --git a/dbms/tests/queries/0_stateless/00977_live_view_watch_events.reference b/tests/queries/0_stateless/00977_live_view_watch_events.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00977_live_view_watch_events.reference
rename to tests/queries/0_stateless/00977_live_view_watch_events.reference
diff --git a/dbms/tests/queries/0_stateless/00977_live_view_watch_events.sql b/tests/queries/0_stateless/00977_live_view_watch_events.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00977_live_view_watch_events.sql
rename to tests/queries/0_stateless/00977_live_view_watch_events.sql
diff --git a/dbms/tests/queries/0_stateless/00978_live_view_watch.reference b/tests/queries/0_stateless/00978_live_view_watch.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_live_view_watch.reference
rename to tests/queries/0_stateless/00978_live_view_watch.reference
diff --git a/dbms/tests/queries/0_stateless/00978_live_view_watch.sql b/tests/queries/0_stateless/00978_live_view_watch.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_live_view_watch.sql
rename to tests/queries/0_stateless/00978_live_view_watch.sql
diff --git a/dbms/tests/queries/0_stateless/00978_ml_math.reference b/tests/queries/0_stateless/00978_ml_math.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_ml_math.reference
rename to tests/queries/0_stateless/00978_ml_math.reference
diff --git a/dbms/tests/queries/0_stateless/00978_ml_math.sql b/tests/queries/0_stateless/00978_ml_math.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_ml_math.sql
rename to tests/queries/0_stateless/00978_ml_math.sql
diff --git a/dbms/tests/queries/0_stateless/00978_sum_map_bugfix.reference b/tests/queries/0_stateless/00978_sum_map_bugfix.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_sum_map_bugfix.reference
rename to tests/queries/0_stateless/00978_sum_map_bugfix.reference
diff --git a/dbms/tests/queries/0_stateless/00978_sum_map_bugfix.sql b/tests/queries/0_stateless/00978_sum_map_bugfix.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_sum_map_bugfix.sql
rename to tests/queries/0_stateless/00978_sum_map_bugfix.sql
diff --git a/dbms/tests/queries/0_stateless/00978_table_function_values_alias.reference b/tests/queries/0_stateless/00978_table_function_values_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_table_function_values_alias.reference
rename to tests/queries/0_stateless/00978_table_function_values_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00978_table_function_values_alias.sql b/tests/queries/0_stateless/00978_table_function_values_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00978_table_function_values_alias.sql
rename to tests/queries/0_stateless/00978_table_function_values_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live.py b/tests/queries/0_stateless/00979_live_view_watch_live.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live.py
rename to tests/queries/0_stateless/00979_live_view_watch_live.py
diff --git a/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference b/tests/queries/0_stateless/00979_live_view_watch_live.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference
rename to tests/queries/0_stateless/00979_live_view_watch_live.reference
diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py b/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py
rename to tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py
diff --git a/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.reference b/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.reference
rename to tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference
diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py
rename to tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py
diff --git a/dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference
rename to tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference b/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference
rename to tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference
diff --git a/dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql b/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql
rename to tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql
diff --git a/dbms/tests/queries/0_stateless/00979_set_index_not.reference b/tests/queries/0_stateless/00979_set_index_not.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_set_index_not.reference
rename to tests/queries/0_stateless/00979_set_index_not.reference
diff --git a/dbms/tests/queries/0_stateless/00979_set_index_not.sql b/tests/queries/0_stateless/00979_set_index_not.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_set_index_not.sql
rename to tests/queries/0_stateless/00979_set_index_not.sql
diff --git a/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.reference b/tests/queries/0_stateless/00979_toFloat_monotonicity.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.reference
rename to tests/queries/0_stateless/00979_toFloat_monotonicity.reference
diff --git a/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.sql b/tests/queries/0_stateless/00979_toFloat_monotonicity.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.sql
rename to tests/queries/0_stateless/00979_toFloat_monotonicity.sql
diff --git a/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.reference b/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.reference
rename to tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference
diff --git a/dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql b/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql
rename to tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql
diff --git a/dbms/tests/queries/0_stateless/00980_alter_settings_race.reference b/tests/queries/0_stateless/00980_alter_settings_race.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_alter_settings_race.reference
rename to tests/queries/0_stateless/00980_alter_settings_race.reference
diff --git a/dbms/tests/queries/0_stateless/00980_alter_settings_race.sh b/tests/queries/0_stateless/00980_alter_settings_race.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_alter_settings_race.sh
rename to tests/queries/0_stateless/00980_alter_settings_race.sh
diff --git a/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.reference b/tests/queries/0_stateless/00980_crash_nullable_decimal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.reference
rename to tests/queries/0_stateless/00980_crash_nullable_decimal.reference
diff --git a/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.sql b/tests/queries/0_stateless/00980_crash_nullable_decimal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.sql
rename to tests/queries/0_stateless/00980_crash_nullable_decimal.sql
diff --git a/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.reference b/tests/queries/0_stateless/00980_create_temporary_live_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_create_temporary_live_view.reference
rename to tests/queries/0_stateless/00980_create_temporary_live_view.reference
diff --git a/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.sql b/tests/queries/0_stateless/00980_create_temporary_live_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_create_temporary_live_view.sql
rename to tests/queries/0_stateless/00980_create_temporary_live_view.sql
diff --git a/dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference b/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference
rename to tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference
diff --git a/dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql b/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql
rename to tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql
diff --git a/tests/queries/0_stateless/00980_merge_alter_settings.reference b/tests/queries/0_stateless/00980_merge_alter_settings.reference
new file mode 100644
index 00000000000..340cf29ce89
--- /dev/null
+++ b/tests/queries/0_stateless/00980_merge_alter_settings.reference
@@ -0,0 +1,6 @@
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 1, parts_to_delay_insert = 1
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100
+2
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 30
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 15
diff --git a/dbms/tests/queries/0_stateless/00980_merge_alter_settings.sql b/tests/queries/0_stateless/00980_merge_alter_settings.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_merge_alter_settings.sql
rename to tests/queries/0_stateless/00980_merge_alter_settings.sql
diff --git a/dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference b/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference
rename to tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference
diff --git a/dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql b/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql
rename to tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql
diff --git a/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference b/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference
rename to tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference
diff --git a/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql b/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql
rename to tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql
diff --git a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference
new file mode 100644
index 00000000000..ab006ea6931
--- /dev/null
+++ b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference
@@ -0,0 +1,12 @@
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192
+4
+4
+4
+4
+6
+6
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1
+CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1, check_delay_period = 15
+CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1
diff --git a/dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql
rename to tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql
diff --git a/dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.reference b/tests/queries/0_stateless/00981_in_subquery_with_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.reference
rename to tests/queries/0_stateless/00981_in_subquery_with_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh b/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh
rename to tests/queries/0_stateless/00981_in_subquery_with_tuple.sh
diff --git a/dbms/tests/queries/0_stateless/00981_no_virtual_columns.reference b/tests/queries/0_stateless/00981_no_virtual_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00981_no_virtual_columns.reference
rename to tests/queries/0_stateless/00981_no_virtual_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00981_no_virtual_columns.sql b/tests/queries/0_stateless/00981_no_virtual_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00981_no_virtual_columns.sql
rename to tests/queries/0_stateless/00981_no_virtual_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference b/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference
rename to tests/queries/0_stateless/00981_topK_topKWeighted_long.reference
diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql b/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql
rename to tests/queries/0_stateless/00981_topK_topKWeighted_long.sql
diff --git a/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference b/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference
rename to tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference
diff --git a/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql b/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql
rename to tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql
diff --git a/dbms/tests/queries/0_stateless/00990_request_splitting.reference b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_request_splitting.reference
rename to tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference
diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql
rename to tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql
diff --git a/dbms/tests/queries/0_stateless/00991_system_parts_race_condition.reference b/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_system_parts_race_condition.reference
rename to tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference
diff --git a/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql b/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql
rename to tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql
diff --git a/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference b/tests/queries/0_stateless/00984_materialized_view_to_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference
rename to tests/queries/0_stateless/00984_materialized_view_to_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql b/tests/queries/0_stateless/00984_materialized_view_to_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql
rename to tests/queries/0_stateless/00984_materialized_view_to_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference b/tests/queries/0_stateless/00984_parser_stack_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference
rename to tests/queries/0_stateless/00984_parser_stack_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/tests/queries/0_stateless/00984_parser_stack_overflow.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh
rename to tests/queries/0_stateless/00984_parser_stack_overflow.sh
diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference b/tests/queries/0_stateless/00985_merge_stack_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference
rename to tests/queries/0_stateless/00985_merge_stack_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql b/tests/queries/0_stateless/00985_merge_stack_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql
rename to tests/queries/0_stateless/00985_merge_stack_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference b/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference
rename to tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql b/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql
rename to tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference b/tests/queries/0_stateless/00987_distributed_stack_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference
rename to tests/queries/0_stateless/00987_distributed_stack_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql b/tests/queries/0_stateless/00987_distributed_stack_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql
rename to tests/queries/0_stateless/00987_distributed_stack_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00997_trim.reference b/tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00997_trim.reference
rename to tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql b/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql
rename to tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.reference b/tests/queries/0_stateless/00988_expansion_aliases_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.reference
rename to tests/queries/0_stateless/00988_expansion_aliases_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql b/tests/queries/0_stateless/00988_expansion_aliases_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql
rename to tests/queries/0_stateless/00988_expansion_aliases_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference b/tests/queries/0_stateless/00988_parallel_parts_removal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference
rename to tests/queries/0_stateless/00988_parallel_parts_removal.reference
diff --git a/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql b/tests/queries/0_stateless/00988_parallel_parts_removal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql
rename to tests/queries/0_stateless/00988_parallel_parts_removal.sql
diff --git a/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference b/tests/queries/0_stateless/00989_parallel_parts_loading.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference
rename to tests/queries/0_stateless/00989_parallel_parts_loading.reference
diff --git a/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql b/tests/queries/0_stateless/00989_parallel_parts_loading.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql
rename to tests/queries/0_stateless/00989_parallel_parts_loading.sql
diff --git a/dbms/tests/queries/0_stateless/00990_function_current_user.reference b/tests/queries/0_stateless/00990_function_current_user.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_function_current_user.reference
rename to tests/queries/0_stateless/00990_function_current_user.reference
diff --git a/dbms/tests/queries/0_stateless/00990_function_current_user.sql b/tests/queries/0_stateless/00990_function_current_user.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_function_current_user.sql
rename to tests/queries/0_stateless/00990_function_current_user.sql
diff --git a/dbms/tests/queries/0_stateless/00990_hasToken.python b/tests/queries/0_stateless/00990_hasToken.python
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_hasToken.python
rename to tests/queries/0_stateless/00990_hasToken.python
diff --git a/dbms/tests/queries/0_stateless/00990_hasToken.reference b/tests/queries/0_stateless/00990_hasToken.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_hasToken.reference
rename to tests/queries/0_stateless/00990_hasToken.reference
diff --git a/tests/queries/0_stateless/00990_hasToken.sh b/tests/queries/0_stateless/00990_hasToken.sh
new file mode 100755
index 00000000000..4ef62bc69c0
--- /dev/null
+++ b/tests/queries/0_stateless/00990_hasToken.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+# We should have correct env vars from shell_config.sh to run this test
+
+python $CURDIR/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -nm
diff --git a/dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference b/tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference
rename to tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference
diff --git a/dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql b/tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql
rename to tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql
diff --git a/dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.reference b/tests/queries/0_stateless/00990_metric_log_table_not_empty.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.reference
rename to tests/queries/0_stateless/00990_metric_log_table_not_empty.reference
diff --git a/dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.sql b/tests/queries/0_stateless/00990_metric_log_table_not_empty.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.sql
rename to tests/queries/0_stateless/00990_metric_log_table_not_empty.sql
diff --git a/dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference b/tests/queries/0_stateless/00990_request_splitting.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference
rename to tests/queries/0_stateless/00990_request_splitting.reference
diff --git a/dbms/tests/queries/0_stateless/00990_request_splitting.sql b/tests/queries/0_stateless/00990_request_splitting.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00990_request_splitting.sql
rename to tests/queries/0_stateless/00990_request_splitting.sql
diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.python b/tests/queries/0_stateless/00991_live_view_watch_event_live.python
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.python
rename to tests/queries/0_stateless/00991_live_view_watch_event_live.python
diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.reference b/tests/queries/0_stateless/00991_live_view_watch_event_live.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.reference
rename to tests/queries/0_stateless/00991_live_view_watch_event_live.reference
diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled b/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled
rename to tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled
diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.python b/tests/queries/0_stateless/00991_live_view_watch_http.python
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_live_view_watch_http.python
rename to tests/queries/0_stateless/00991_live_view_watch_http.python
diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.reference b/tests/queries/0_stateless/00991_live_view_watch_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_live_view_watch_http.reference
rename to tests/queries/0_stateless/00991_live_view_watch_http.reference
diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled b/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled
rename to tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled
diff --git a/dbms/tests/queries/0_stateless/01003_kill_query_race_condition.reference b/tests/queries/0_stateless/00991_system_parts_race_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01003_kill_query_race_condition.reference
rename to tests/queries/0_stateless/00991_system_parts_race_condition.reference
diff --git a/dbms/tests/queries/0_stateless/00991_system_parts_race_condition.sh b/tests/queries/0_stateless/00991_system_parts_race_condition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_system_parts_race_condition.sh
rename to tests/queries/0_stateless/00991_system_parts_race_condition.sh
diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python b/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python
rename to tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python
diff --git a/dbms/tests/queries/0_stateless/01004_rename_deadlock.reference b/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01004_rename_deadlock.reference
rename to tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference
diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled b/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled
rename to tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled
diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python b/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python
rename to tests/queries/0_stateless/00991_temporary_live_view_watch_live.python
diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference b/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference
rename to tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference
diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled b/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled
rename to tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled
diff --git a/dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.reference b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.reference
rename to tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh
rename to tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference
rename to tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh
rename to tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.reference b/tests/queries/0_stateless/00994_table_function_numbers_mt.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.reference
rename to tests/queries/0_stateless/00994_table_function_numbers_mt.reference
diff --git a/dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.sql b/tests/queries/0_stateless/00994_table_function_numbers_mt.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.sql
rename to tests/queries/0_stateless/00994_table_function_numbers_mt.sql
diff --git a/dbms/tests/queries/0_stateless/00995_exception_while_insert.reference b/tests/queries/0_stateless/00995_exception_while_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00995_exception_while_insert.reference
rename to tests/queries/0_stateless/00995_exception_while_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00995_exception_while_insert.sh b/tests/queries/0_stateless/00995_exception_while_insert.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00995_exception_while_insert.sh
rename to tests/queries/0_stateless/00995_exception_while_insert.sh
diff --git a/dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference b/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference
rename to tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql b/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql
rename to tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00995_order_by_with_fill.reference b/tests/queries/0_stateless/00995_order_by_with_fill.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00995_order_by_with_fill.reference
rename to tests/queries/0_stateless/00995_order_by_with_fill.reference
diff --git a/dbms/tests/queries/0_stateless/00995_order_by_with_fill.sql b/tests/queries/0_stateless/00995_order_by_with_fill.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00995_order_by_with_fill.sql
rename to tests/queries/0_stateless/00995_order_by_with_fill.sql
diff --git a/dbms/tests/queries/0_stateless/00996_limit_with_ties.reference b/tests/queries/0_stateless/00996_limit_with_ties.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00996_limit_with_ties.reference
rename to tests/queries/0_stateless/00996_limit_with_ties.reference
diff --git a/dbms/tests/queries/0_stateless/00996_limit_with_ties.sql b/tests/queries/0_stateless/00996_limit_with_ties.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00996_limit_with_ties.sql
rename to tests/queries/0_stateless/00996_limit_with_ties.sql
diff --git a/dbms/tests/queries/0_stateless/00996_neighbor.reference b/tests/queries/0_stateless/00996_neighbor.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00996_neighbor.reference
rename to tests/queries/0_stateless/00996_neighbor.reference
diff --git a/dbms/tests/queries/0_stateless/00996_neighbor.sql b/tests/queries/0_stateless/00996_neighbor.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00996_neighbor.sql
rename to tests/queries/0_stateless/00996_neighbor.sql
diff --git a/dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.reference b/tests/queries/0_stateless/00997_extract_all_crash_6627.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.reference
rename to tests/queries/0_stateless/00997_extract_all_crash_6627.reference
diff --git a/dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.sql b/tests/queries/0_stateless/00997_extract_all_crash_6627.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.sql
rename to tests/queries/0_stateless/00997_extract_all_crash_6627.sql
diff --git a/dbms/tests/queries/0_stateless/00997_set_index_array.reference b/tests/queries/0_stateless/00997_set_index_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00997_set_index_array.reference
rename to tests/queries/0_stateless/00997_set_index_array.reference
diff --git a/dbms/tests/queries/0_stateless/00997_set_index_array.sql b/tests/queries/0_stateless/00997_set_index_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00997_set_index_array.sql
rename to tests/queries/0_stateless/00997_set_index_array.sql
diff --git a/dbms/tests/queries/0_stateless/01009_global_array_join_names.reference b/tests/queries/0_stateless/00997_trim.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01009_global_array_join_names.reference
rename to tests/queries/0_stateless/00997_trim.reference
diff --git a/dbms/tests/queries/0_stateless/00997_trim.sql b/tests/queries/0_stateless/00997_trim.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00997_trim.sql
rename to tests/queries/0_stateless/00997_trim.sql
diff --git a/tests/queries/0_stateless/00998_constraints_all_tables.reference b/tests/queries/0_stateless/00998_constraints_all_tables.reference
new file mode 100644
index 00000000000..3de251daa71
--- /dev/null
+++ b/tests/queries/0_stateless/00998_constraints_all_tables.reference
@@ -0,0 +1,14 @@
+0
+0
+3
+0
+0
+3
+0
+0
+3
+0
+0
+3
+CREATE TABLE default.constrained\n(\n `URL` String, \n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\', \n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log
+CREATE TABLE default.constrained2\n(\n `URL` String, \n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\', \n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log
diff --git a/dbms/tests/queries/0_stateless/00998_constraints_all_tables.sql b/tests/queries/0_stateless/00998_constraints_all_tables.sql
similarity index 97%
rename from dbms/tests/queries/0_stateless/00998_constraints_all_tables.sql
rename to tests/queries/0_stateless/00998_constraints_all_tables.sql
index 66b93fca97b..e47b7eaf83c 100644
--- a/dbms/tests/queries/0_stateless/00998_constraints_all_tables.sql
+++ b/tests/queries/0_stateless/00998_constraints_all_tables.sql
@@ -45,8 +45,8 @@ DROP TABLE constrained;
 DROP TABLE IF EXISTS constrained2;
 CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log;
 CREATE TABLE constrained2 AS constrained;
-SHOW CREATE TABLE constrained FORMAT TSVRaw;
-SHOW CREATE TABLE constrained2 FORMAT TSVRaw;
+SHOW CREATE TABLE constrained;
+SHOW CREATE TABLE constrained2;
 INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 }
 INSERT INTO constrained2 VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 }
 DROP TABLE constrained;
diff --git a/dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference b/tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference
rename to tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql b/tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql
rename to tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.reference b/tests/queries/0_stateless/00999_join_not_nullable_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_join_not_nullable_types.reference
rename to tests/queries/0_stateless/00999_join_not_nullable_types.reference
diff --git a/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.sql b/tests/queries/0_stateless/00999_join_not_nullable_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_join_not_nullable_types.sql
rename to tests/queries/0_stateless/00999_join_not_nullable_types.sql
diff --git a/dbms/tests/queries/0_stateless/00999_join_on_expression.reference b/tests/queries/0_stateless/00999_join_on_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_join_on_expression.reference
rename to tests/queries/0_stateless/00999_join_on_expression.reference
diff --git a/dbms/tests/queries/0_stateless/00999_join_on_expression.sql b/tests/queries/0_stateless/00999_join_on_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_join_on_expression.sql
rename to tests/queries/0_stateless/00999_join_on_expression.sql
diff --git a/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference b/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference
rename to tests/queries/0_stateless/00999_nullable_nested_types_4877.reference
diff --git a/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql b/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql
rename to tests/queries/0_stateless/00999_nullable_nested_types_4877.sql
diff --git a/dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference b/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference
rename to tests/queries/0_stateless/00999_settings_no_extra_quotes.reference
diff --git a/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql b/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql
rename to tests/queries/0_stateless/00999_settings_no_extra_quotes.sql
diff --git a/dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference b/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference
rename to tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql b/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql
rename to tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql
diff --git a/dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference b/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference
rename to tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference
diff --git a/dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql b/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql
rename to tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql
diff --git a/dbms/tests/queries/0_stateless/01000_subquery_requires_alias.reference b/tests/queries/0_stateless/01000_subquery_requires_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_subquery_requires_alias.reference
rename to tests/queries/0_stateless/01000_subquery_requires_alias.reference
diff --git a/dbms/tests/queries/0_stateless/01000_subquery_requires_alias.sql b/tests/queries/0_stateless/01000_subquery_requires_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_subquery_requires_alias.sql
rename to tests/queries/0_stateless/01000_subquery_requires_alias.sql
diff --git a/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference b/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference
rename to tests/queries/0_stateless/01000_unneeded_substitutions_client.reference
diff --git a/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh b/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh
rename to tests/queries/0_stateless/01000_unneeded_substitutions_client.sh
diff --git a/dbms/tests/queries/0_stateless/01001_enums_in_in_section.reference b/tests/queries/0_stateless/01001_enums_in_in_section.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_enums_in_in_section.reference
rename to tests/queries/0_stateless/01001_enums_in_in_section.reference
diff --git a/dbms/tests/queries/0_stateless/01001_enums_in_in_section.sql b/tests/queries/0_stateless/01001_enums_in_in_section.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_enums_in_in_section.sql
rename to tests/queries/0_stateless/01001_enums_in_in_section.sql
diff --git a/dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.reference b/tests/queries/0_stateless/01001_rename_merge_race_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.reference
rename to tests/queries/0_stateless/01001_rename_merge_race_condition.reference
diff --git a/dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.sh b/tests/queries/0_stateless/01001_rename_merge_race_condition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.sh
rename to tests/queries/0_stateless/01001_rename_merge_race_condition.sh
diff --git a/dbms/tests/queries/0_stateless/01012_select_limit_x_0.reference b/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_select_limit_x_0.reference
rename to tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference
diff --git a/dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh b/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh
rename to tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh
diff --git a/dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.reference b/tests/queries/0_stateless/01003_kill_query_race_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.reference
rename to tests/queries/0_stateless/01003_kill_query_race_condition.reference
diff --git a/dbms/tests/queries/0_stateless/01003_kill_query_race_condition.sh b/tests/queries/0_stateless/01003_kill_query_race_condition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01003_kill_query_race_condition.sh
rename to tests/queries/0_stateless/01003_kill_query_race_condition.sh
diff --git a/dbms/tests/queries/0_stateless/01016_index_tuple_field_type.reference b/tests/queries/0_stateless/01004_rename_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_index_tuple_field_type.reference
rename to tests/queries/0_stateless/01004_rename_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/01004_rename_deadlock.sh b/tests/queries/0_stateless/01004_rename_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01004_rename_deadlock.sh
rename to tests/queries/0_stateless/01004_rename_deadlock.sh
diff --git a/dbms/tests/queries/0_stateless/01016_null_part_minmax.reference b/tests/queries/0_stateless/01005_rwr_shard_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_null_part_minmax.reference
rename to tests/queries/0_stateless/01005_rwr_shard_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh b/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
rename to tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
diff --git a/dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference b/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference
rename to tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference
diff --git a/dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh b/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh
tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh diff --git a/dbms/tests/queries/0_stateless/01006_ttl_with_default_2.reference b/tests/queries/0_stateless/01006_ttl_with_default_2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01006_ttl_with_default_2.reference rename to tests/queries/0_stateless/01006_ttl_with_default_2.reference diff --git a/dbms/tests/queries/0_stateless/01006_ttl_with_default_2.sql b/tests/queries/0_stateless/01006_ttl_with_default_2.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01006_ttl_with_default_2.sql rename to tests/queries/0_stateless/01006_ttl_with_default_2.sql diff --git a/dbms/tests/queries/0_stateless/01024__getScalar.reference b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01024__getScalar.reference rename to tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference diff --git a/dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh rename to tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh diff --git a/dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference b/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference rename to tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference diff --git a/dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql b/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql rename to tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql diff --git a/dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.reference b/tests/queries/0_stateless/01009_global_array_join_names.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.reference rename to tests/queries/0_stateless/01009_global_array_join_names.reference diff --git a/dbms/tests/queries/0_stateless/01009_global_array_join_names.sql b/tests/queries/0_stateless/01009_global_array_join_names.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01009_global_array_join_names.sql rename to tests/queries/0_stateless/01009_global_array_join_names.sql diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.reference b/tests/queries/0_stateless/01009_insert_select_data_loss.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01009_insert_select_data_loss.reference rename to tests/queries/0_stateless/01009_insert_select_data_loss.reference diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql b/tests/queries/0_stateless/01009_insert_select_data_loss.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql rename to tests/queries/0_stateless/01009_insert_select_data_loss.sql diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.reference b/tests/queries/0_stateless/01009_insert_select_nicelulu.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.reference rename to 
tests/queries/0_stateless/01009_insert_select_nicelulu.reference diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql b/tests/queries/0_stateless/01009_insert_select_nicelulu.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql rename to tests/queries/0_stateless/01009_insert_select_nicelulu.sql diff --git a/dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference b/tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference rename to tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference diff --git a/dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh b/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh rename to tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join.reference b/tests/queries/0_stateless/01010_partial_merge_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_partial_merge_join.reference rename to tests/queries/0_stateless/01010_partial_merge_join.reference diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join.sql b/tests/queries/0_stateless/01010_partial_merge_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01010_partial_merge_join.sql rename to tests/queries/0_stateless/01010_partial_merge_join.sql diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference b/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference rename to tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql b/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql rename to tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.reference b/tests/queries/0_stateless/01010_partial_merge_join_negative.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.reference rename to tests/queries/0_stateless/01010_partial_merge_join_negative.reference diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.sql b/tests/queries/0_stateless/01010_partial_merge_join_negative.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.sql rename to tests/queries/0_stateless/01010_partial_merge_join_negative.sql diff --git a/dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.reference b/tests/queries/0_stateless/01010_pm_join_all_join_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.reference rename to tests/queries/0_stateless/01010_pm_join_all_join_bug.reference diff --git a/dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.sql b/tests/queries/0_stateless/01010_pm_join_all_join_bug.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.sql rename to tests/queries/0_stateless/01010_pm_join_all_join_bug.sql diff --git a/dbms/tests/queries/0_stateless/01010_pmj_on_disk.reference b/tests/queries/0_stateless/01010_pmj_on_disk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_on_disk.reference rename to tests/queries/0_stateless/01010_pmj_on_disk.reference diff --git a/dbms/tests/queries/0_stateless/01010_pmj_on_disk.sql b/tests/queries/0_stateless/01010_pmj_on_disk.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_on_disk.sql rename to tests/queries/0_stateless/01010_pmj_on_disk.sql diff --git a/dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.reference b/tests/queries/0_stateless/01010_pmj_one_row_blocks.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.reference rename to tests/queries/0_stateless/01010_pmj_one_row_blocks.reference diff --git a/dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.sql b/tests/queries/0_stateless/01010_pmj_one_row_blocks.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.sql rename to tests/queries/0_stateless/01010_pmj_one_row_blocks.sql diff --git a/dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference rename to tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference diff --git a/dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql rename to tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql diff --git a/dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.reference b/tests/queries/0_stateless/01010_pmj_skip_blocks.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.reference rename to tests/queries/0_stateless/01010_pmj_skip_blocks.reference diff --git a/dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.sql b/tests/queries/0_stateless/01010_pmj_skip_blocks.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.sql rename to tests/queries/0_stateless/01010_pmj_skip_blocks.sql diff --git a/dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.reference b/tests/queries/0_stateless/01011_group_uniq_array_memsan.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.reference rename to tests/queries/0_stateless/01011_group_uniq_array_memsan.reference diff --git a/dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.sql b/tests/queries/0_stateless/01011_group_uniq_array_memsan.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.sql rename to tests/queries/0_stateless/01011_group_uniq_array_memsan.sql diff --git a/dbms/tests/queries/0_stateless/01030_storage_url_syntax.reference b/tests/queries/0_stateless/01011_test_create_as_skip_indices.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01030_storage_url_syntax.reference rename to tests/queries/0_stateless/01011_test_create_as_skip_indices.reference diff --git 
a/dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.sql b/tests/queries/0_stateless/01011_test_create_as_skip_indices.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.sql rename to tests/queries/0_stateless/01011_test_create_as_skip_indices.sql diff --git a/dbms/tests/queries/0_stateless/01012_reset_running_accumulate.reference b/tests/queries/0_stateless/01012_reset_running_accumulate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01012_reset_running_accumulate.reference rename to tests/queries/0_stateless/01012_reset_running_accumulate.reference diff --git a/dbms/tests/queries/0_stateless/01012_reset_running_accumulate.sql b/tests/queries/0_stateless/01012_reset_running_accumulate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01012_reset_running_accumulate.sql rename to tests/queries/0_stateless/01012_reset_running_accumulate.sql diff --git a/dbms/tests/queries/0_stateless/01035_prewhere_with_alias.reference b/tests/queries/0_stateless/01012_select_limit_x_0.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01035_prewhere_with_alias.reference rename to tests/queries/0_stateless/01012_select_limit_x_0.reference diff --git a/dbms/tests/queries/0_stateless/01012_select_limit_x_0.sql b/tests/queries/0_stateless/01012_select_limit_x_0.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01012_select_limit_x_0.sql rename to tests/queries/0_stateless/01012_select_limit_x_0.sql diff --git a/dbms/tests/queries/0_stateless/01036_union_different_columns.reference b/tests/queries/0_stateless/01012_serialize_array_memory_usage.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01036_union_different_columns.reference rename to tests/queries/0_stateless/01012_serialize_array_memory_usage.reference diff --git a/dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.sql b/tests/queries/0_stateless/01012_serialize_array_memory_usage.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.sql rename to tests/queries/0_stateless/01012_serialize_array_memory_usage.sql diff --git a/dbms/tests/queries/0_stateless/01012_show_tables_limit.reference b/tests/queries/0_stateless/01012_show_tables_limit.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01012_show_tables_limit.reference rename to tests/queries/0_stateless/01012_show_tables_limit.reference diff --git a/dbms/tests/queries/0_stateless/01012_show_tables_limit.sql b/tests/queries/0_stateless/01012_show_tables_limit.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01012_show_tables_limit.sql rename to tests/queries/0_stateless/01012_show_tables_limit.sql diff --git a/dbms/tests/queries/0_stateless/01013_hex_decimal.reference b/tests/queries/0_stateless/01013_hex_decimal.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01013_hex_decimal.reference rename to tests/queries/0_stateless/01013_hex_decimal.reference diff --git a/dbms/tests/queries/0_stateless/01013_hex_decimal.sql b/tests/queries/0_stateless/01013_hex_decimal.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01013_hex_decimal.sql rename to tests/queries/0_stateless/01013_hex_decimal.sql diff --git a/dbms/tests/queries/0_stateless/01013_hex_float.reference b/tests/queries/0_stateless/01013_hex_float.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/01013_hex_float.reference rename to tests/queries/0_stateless/01013_hex_float.reference diff --git a/dbms/tests/queries/0_stateless/01013_hex_float.sql b/tests/queries/0_stateless/01013_hex_float.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01013_hex_float.sql rename to tests/queries/0_stateless/01013_hex_float.sql diff --git a/dbms/tests/queries/0_stateless/01013_repeat_function.reference b/tests/queries/0_stateless/01013_repeat_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01013_repeat_function.reference rename to tests/queries/0_stateless/01013_repeat_function.reference diff --git a/dbms/tests/queries/0_stateless/01013_repeat_function.sql b/tests/queries/0_stateless/01013_repeat_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01013_repeat_function.sql rename to tests/queries/0_stateless/01013_repeat_function.sql diff --git a/dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference rename to tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh rename to tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/01013_totals_without_aggregation.reference b/tests/queries/0_stateless/01013_totals_without_aggregation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01013_totals_without_aggregation.reference rename to tests/queries/0_stateless/01013_totals_without_aggregation.reference diff --git a/dbms/tests/queries/0_stateless/01013_totals_without_aggregation.sql b/tests/queries/0_stateless/01013_totals_without_aggregation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01013_totals_without_aggregation.sql rename to tests/queries/0_stateless/01013_totals_without_aggregation.sql diff --git a/dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.reference b/tests/queries/0_stateless/01014_count_of_merges_metrics.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.reference rename to tests/queries/0_stateless/01014_count_of_merges_metrics.reference diff --git a/dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.sql b/tests/queries/0_stateless/01014_count_of_merges_metrics.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.sql rename to tests/queries/0_stateless/01014_count_of_merges_metrics.sql diff --git a/dbms/tests/queries/0_stateless/01014_format_custom_separated.reference b/tests/queries/0_stateless/01014_format_custom_separated.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01014_format_custom_separated.reference rename to tests/queries/0_stateless/01014_format_custom_separated.reference diff --git a/dbms/tests/queries/0_stateless/01014_format_custom_separated.sh b/tests/queries/0_stateless/01014_format_custom_separated.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01014_format_custom_separated.sh rename to 
tests/queries/0_stateless/01014_format_custom_separated.sh
diff --git a/dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.reference b/tests/queries/0_stateless/01014_function_repeat_corner_cases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.reference
rename to tests/queries/0_stateless/01014_function_repeat_corner_cases.reference
diff --git a/dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.sql b/tests/queries/0_stateless/01014_function_repeat_corner_cases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.sql
rename to tests/queries/0_stateless/01014_function_repeat_corner_cases.sql
diff --git a/dbms/tests/queries/0_stateless/01014_lazy_database_basic.reference b/tests/queries/0_stateless/01014_lazy_database_basic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_lazy_database_basic.reference
rename to tests/queries/0_stateless/01014_lazy_database_basic.reference
diff --git a/dbms/tests/queries/0_stateless/01014_lazy_database_basic.sh b/tests/queries/0_stateless/01014_lazy_database_basic.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_lazy_database_basic.sh
rename to tests/queries/0_stateless/01014_lazy_database_basic.sh
diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.reference b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.reference
new file mode 100755
index 00000000000..678f9a34e6f
--- /dev/null
+++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.reference
@@ -0,0 +1 @@
+Test OK
diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
new file mode 100755
index 00000000000..8bf21d3cb02
--- /dev/null
+++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+export CURR_DATABASE="test_lazy_01014_concurrent_${CLICKHOUSE_DATABASE}"
+
+
+function recreate_lazy_func1()
+{
+ $CLICKHOUSE_CLIENT -q "
+ CREATE TABLE $CURR_DATABASE.log (a UInt64, b UInt64) ENGINE = Log;
+ ";
+
+ while true; do
+ $CLICKHOUSE_CLIENT -q "
+ DETACH TABLE $CURR_DATABASE.log;
+ ";
+
+ $CLICKHOUSE_CLIENT -q "
+ ATTACH TABLE $CURR_DATABASE.log;
+ ";
+ done
+}
+
+function recreate_lazy_func2()
+{
+ while true; do
+ $CLICKHOUSE_CLIENT -q "
+ CREATE TABLE $CURR_DATABASE.tlog (a UInt64, b UInt64) ENGINE = TinyLog;
+ ";
+
+ $CLICKHOUSE_CLIENT -q "
+ DROP TABLE $CURR_DATABASE.tlog;
+ ";
+ done
+}
+
+function recreate_lazy_func3()
+{
+ $CLICKHOUSE_CLIENT -q "
+ CREATE TABLE $CURR_DATABASE.slog (a UInt64, b UInt64) ENGINE = StripeLog;
+ ";
+
+ while true; do
+ $CLICKHOUSE_CLIENT -q "
+ ATTACH TABLE $CURR_DATABASE.slog;
+ ";
+
+ $CLICKHOUSE_CLIENT -q "
+ DETACH TABLE $CURR_DATABASE.slog;
+ ";
+ done
+}
+
+function recreate_lazy_func4()
+{
+ while true; do
+ $CLICKHOUSE_CLIENT -q "
+ CREATE TABLE $CURR_DATABASE.tlog2 (a UInt64, b UInt64) ENGINE = TinyLog;
+ ";
+
+ $CLICKHOUSE_CLIENT -q "
+ DROP TABLE $CURR_DATABASE.tlog2;
+ ";
+ done
+}
+
+function show_tables_func()
+{
+ while true; do
+ $CLICKHOUSE_CLIENT -q "SELECT * FROM system.tables WHERE database = '$CURR_DATABASE' FORMAT Null";
+ done
+}
+
+
+export -f recreate_lazy_func1;
+export -f recreate_lazy_func2;
+export -f recreate_lazy_func3;
+export -f recreate_lazy_func4;
+export -f show_tables_func;
+
+
+${CLICKHOUSE_CLIENT} -n -q "
+ DROP DATABASE IF EXISTS $CURR_DATABASE;
+ CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1);
+"
+
+
+TIMEOUT=30
+
+timeout $TIMEOUT bash -c recreate_lazy_func1 2> /dev/null &
+timeout $TIMEOUT bash -c recreate_lazy_func2 2> /dev/null &
+timeout $TIMEOUT bash -c recreate_lazy_func3 2> /dev/null &
+timeout $TIMEOUT bash -c recreate_lazy_func4 2> /dev/null &
+timeout $TIMEOUT bash -c show_tables_func 2> /dev/null &
+
+wait
+sleep 1
+
+${CLICKHOUSE_CLIENT} -n -q "
+ DROP TABLE IF EXISTS $CURR_DATABASE.log;
+ DROP TABLE IF EXISTS $CURR_DATABASE.slog;
+ DROP TABLE IF EXISTS $CURR_DATABASE.tlog;
+ DROP TABLE IF EXISTS $CURR_DATABASE.tlog2;
+"
+# DROP DATABASE $CURR_DATABASE; -- This fails for some reason
+
+echo "Test OK"
diff --git a/dbms/tests/queries/0_stateless/01015_array_split.reference b/tests/queries/0_stateless/01015_array_split.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_array_split.reference
rename to tests/queries/0_stateless/01015_array_split.reference
diff --git a/dbms/tests/queries/0_stateless/01015_array_split.sql b/tests/queries/0_stateless/01015_array_split.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_array_split.sql
rename to tests/queries/0_stateless/01015_array_split.sql
diff --git a/dbms/tests/queries/0_stateless/01015_attach_part.reference b/tests/queries/0_stateless/01015_attach_part.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_attach_part.reference
rename to tests/queries/0_stateless/01015_attach_part.reference
diff --git a/dbms/tests/queries/0_stateless/01015_attach_part.sql b/tests/queries/0_stateless/01015_attach_part.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_attach_part.sql
rename to tests/queries/0_stateless/01015_attach_part.sql
diff --git a/dbms/tests/queries/0_stateless/01015_database_bad_tables.reference b/tests/queries/0_stateless/01015_database_bad_tables.reference
similarity index 100%
rename from 
dbms/tests/queries/0_stateless/01015_database_bad_tables.reference rename to tests/queries/0_stateless/01015_database_bad_tables.reference diff --git a/dbms/tests/queries/0_stateless/01015_database_bad_tables.sql b/tests/queries/0_stateless/01015_database_bad_tables.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01015_database_bad_tables.sql rename to tests/queries/0_stateless/01015_database_bad_tables.sql diff --git a/dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.reference b/tests/queries/0_stateless/01015_empty_in_inner_right_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.reference rename to tests/queries/0_stateless/01015_empty_in_inner_right_join.reference diff --git a/dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql b/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql rename to tests/queries/0_stateless/01015_empty_in_inner_right_join.sql diff --git a/dbms/tests/queries/0_stateless/01015_insert_values_parametrized.reference b/tests/queries/0_stateless/01015_insert_values_parametrized.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01015_insert_values_parametrized.reference rename to tests/queries/0_stateless/01015_insert_values_parametrized.reference diff --git a/dbms/tests/queries/0_stateless/01015_insert_values_parametrized.sh b/tests/queries/0_stateless/01015_insert_values_parametrized.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01015_insert_values_parametrized.sh rename to tests/queries/0_stateless/01015_insert_values_parametrized.sh diff --git a/dbms/tests/queries/0_stateless/01015_random_constant.reference b/tests/queries/0_stateless/01015_random_constant.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01015_random_constant.reference rename to tests/queries/0_stateless/01015_random_constant.reference diff --git a/dbms/tests/queries/0_stateless/01015_random_constant.sql b/tests/queries/0_stateless/01015_random_constant.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01015_random_constant.sql rename to tests/queries/0_stateless/01015_random_constant.sql diff --git a/dbms/tests/queries/0_stateless/01039_mergetree_exec_time.reference b/tests/queries/0_stateless/01016_index_tuple_field_type.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01039_mergetree_exec_time.reference rename to tests/queries/0_stateless/01016_index_tuple_field_type.reference diff --git a/dbms/tests/queries/0_stateless/01016_index_tuple_field_type.sql b/tests/queries/0_stateless/01016_index_tuple_field_type.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01016_index_tuple_field_type.sql rename to tests/queries/0_stateless/01016_index_tuple_field_type.sql diff --git a/dbms/tests/queries/0_stateless/01016_input_null_as_default.reference b/tests/queries/0_stateless/01016_input_null_as_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01016_input_null_as_default.reference rename to tests/queries/0_stateless/01016_input_null_as_default.reference diff --git a/dbms/tests/queries/0_stateless/01016_input_null_as_default.sh b/tests/queries/0_stateless/01016_input_null_as_default.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01016_input_null_as_default.sh rename to 
tests/queries/0_stateless/01016_input_null_as_default.sh
diff --git a/dbms/tests/queries/0_stateless/01016_macros.reference b/tests/queries/0_stateless/01016_macros.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_macros.reference
rename to tests/queries/0_stateless/01016_macros.reference
diff --git a/dbms/tests/queries/0_stateless/01016_macros.sql b/tests/queries/0_stateless/01016_macros.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_macros.sql
rename to tests/queries/0_stateless/01016_macros.sql
diff --git a/dbms/tests/queries/0_stateless/01039_row_policy_dcl.reference b/tests/queries/0_stateless/01016_null_part_minmax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01039_row_policy_dcl.reference
rename to tests/queries/0_stateless/01016_null_part_minmax.reference
diff --git a/dbms/tests/queries/0_stateless/01016_null_part_minmax.sql b/tests/queries/0_stateless/01016_null_part_minmax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_null_part_minmax.sql
rename to tests/queries/0_stateless/01016_null_part_minmax.sql
diff --git a/dbms/tests/queries/0_stateless/01016_uniqCombined64.reference b/tests/queries/0_stateless/01016_uniqCombined64.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_uniqCombined64.reference
rename to tests/queries/0_stateless/01016_uniqCombined64.reference
diff --git a/dbms/tests/queries/0_stateless/01016_uniqCombined64.sql b/tests/queries/0_stateless/01016_uniqCombined64.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_uniqCombined64.sql
rename to tests/queries/0_stateless/01016_uniqCombined64.sql
diff --git a/dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference b/tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference
rename to tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference
diff --git a/dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql b/tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql
rename to tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql
diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference
new file mode 100644
index 00000000000..6bf25043399
--- /dev/null
+++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference
@@ -0,0 +1,9 @@
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
new file mode 100755
index 00000000000..68cb5e0e760
--- /dev/null
+++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+
+R1=table_1017_1
+R2=table_1017_2
+T1=table_1017_merge
+
+${CLICKHOUSE_CLIENT} -n -q "
+ DROP TABLE IF EXISTS $R1;
+ DROP TABLE IF EXISTS $R2;
+ DROP TABLE IF EXISTS $T1;
+
+ DROP TABLE IF EXISTS lookup_table;
+ DROP TABLE IF EXISTS table_for_dict;
+ DROP DICTIONARY IF EXISTS dict1;
+
+ CREATE TABLE table_for_dict (y UInt64, y_new UInt32) ENGINE = Log;
+ INSERT INTO table_for_dict VALUES (3, 3003),(4,4004);
+
+ CREATE DICTIONARY dict1( y UInt64 DEFAULT 0, y_new UInt32 DEFAULT 0 ) PRIMARY KEY y
+ SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' PASSWORD '' DB '${CLICKHOUSE_DATABASE}'))
+ LIFETIME(MIN 1 MAX 10)
+ LAYOUT(FLAT());
+
+ CREATE TABLE lookup_table (y UInt32, y_new UInt32) ENGINE = Join(ANY, LEFT, y);
+ INSERT INTO lookup_table VALUES(1,1001),(2,1002);
+
+ CREATE TABLE $R1 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r1') ORDER BY x;
+ CREATE TABLE $R2 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r2') ORDER BY x;
+ CREATE TABLE $T1 (x UInt32, y UInt32) ENGINE MergeTree() ORDER BY x;
+
+ INSERT INTO $R1 VALUES (0, 1)(1, 2)(2, 3)(3, 4);
+ INSERT INTO $T1 VALUES (0, 1)(1, 2)(2, 3)(3, 4);
+"
+
+# Check that mutation predicates on replicated tables do not contain non-deterministic functions
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE ignore(rand())" 2>&1 \
+| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 \
+| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (x + y) % 2, range(y)) WHERE not ignore()" 2>&1 > /dev/null \
+&& echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (rand() + x) % 2, range(y)) WHERE not ignore()" 2>&1 \
+| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL'
+
+
+# For regular tables we do not enforce deterministic functions
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 DELETE WHERE rand() = 0" 2>&1 > /dev/null \
+&& echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 > /dev/null \
+&& echo 'OK' || echo 'FAIL'
+
+# hm... it looks like joinGet is considered deterministic
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = joinGet('${CLICKHOUSE_DATABASE}.lookup_table', 'y_new', y) WHERE x=1" 2>&1 > /dev/null \
+&& echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" 2>&1 \
+| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" --allow_nondeterministic_mutations=1 2>&1 \
+&& echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} -n -q "
+ DROP TABLE IF EXISTS $R2;
+ DROP TABLE IF EXISTS $R1;
+ DROP TABLE IF EXISTS $T1;
+ DROP TABLE IF EXISTS lookup_table;
+ DROP TABLE IF EXISTS table_for_dict;
+ DROP DICTIONARY IF EXISTS dict1;
+"
diff --git a/dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.reference b/tests/queries/0_stateless/01017_tsv_empty_as_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.reference
rename to tests/queries/0_stateless/01017_tsv_empty_as_default.reference
diff --git a/dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.sh b/tests/queries/0_stateless/01017_tsv_empty_as_default.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.sh
rename to tests/queries/0_stateless/01017_tsv_empty_as_default.sh
diff --git a/dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference
rename to tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference
diff --git a/dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql
rename to tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql
diff --git a/dbms/tests/queries/0_stateless/01018_Distributed__shard_num.reference b/tests/queries/0_stateless/01018_Distributed__shard_num.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_Distributed__shard_num.reference
rename to tests/queries/0_stateless/01018_Distributed__shard_num.reference
diff --git a/dbms/tests/queries/0_stateless/01018_Distributed__shard_num.sql b/tests/queries/0_stateless/01018_Distributed__shard_num.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_Distributed__shard_num.sql
rename to tests/queries/0_stateless/01018_Distributed__shard_num.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ambiguous_column.reference b/tests/queries/0_stateless/01018_ambiguous_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ambiguous_column.reference
rename to tests/queries/0_stateless/01018_ambiguous_column.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ambiguous_column.sql b/tests/queries/0_stateless/01018_ambiguous_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ambiguous_column.sql
rename to tests/queries/0_stateless/01018_ambiguous_column.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh
rename to tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
rename to tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
new file mode 100644
index 00000000000..ad16e8ae7f2
--- /dev/null
+++ b/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
@@ -0,0 +1,19 @@
+=DICTIONARY in Ordinary DB
+CREATE DICTIONARY ordinary_db.dict1\n(\n `key_column` UInt64 DEFAULT 0, \n `second_column` UInt8 DEFAULT 1, \n `third_column` String DEFAULT \'qqq\'\n)\nPRIMARY KEY key_column\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' PASSWORD \'\' DB \'database_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(FLAT())
+dict1
+1
+ordinary_db dict1
+==DETACH DICTIONARY
+0
+==ATTACH DICTIONARY
+dict1
+1
+ordinary_db dict1
+==DROP DICTIONARY
+0
+=DICTIONARY in Memory DB
+0
+=DICTIONARY in Lazy DB
+=DROP DATABASE WITH DICTIONARY
+dict4
+dict4
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql b/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
rename to tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_select.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql b/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
rename to tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_special.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_special.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.sql
b/tests/queries/0_stateless/01018_ddl_dictionaries_special.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.sql rename to tests/queries/0_stateless/01018_ddl_dictionaries_special.sql diff --git a/dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference b/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference rename to tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference diff --git a/dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql b/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql rename to tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql diff --git a/dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.reference b/tests/queries/0_stateless/01018_empty_aggregation_filling.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.reference rename to tests/queries/0_stateless/01018_empty_aggregation_filling.reference diff --git a/dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.sql b/tests/queries/0_stateless/01018_empty_aggregation_filling.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.sql rename to tests/queries/0_stateless/01018_empty_aggregation_filling.sql diff --git a/dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference b/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference rename to tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference diff --git a/dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh b/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh rename to tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh diff --git a/dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference b/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference rename to tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference diff --git a/dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql b/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql rename to tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql diff --git a/dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference b/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference rename to tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference diff --git a/dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql 
b/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql rename to tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference rename to tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh rename to tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference b/tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference rename to tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh b/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh rename to tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.reference b/tests/queries/0_stateless/01019_alter_materialized_view_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.reference rename to tests/queries/0_stateless/01019_alter_materialized_view_query.reference diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.sql b/tests/queries/0_stateless/01019_alter_materialized_view_query.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.sql rename to tests/queries/0_stateless/01019_alter_materialized_view_query.sql diff --git a/dbms/tests/queries/0_stateless/01019_array_fill.reference b/tests/queries/0_stateless/01019_array_fill.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01019_array_fill.reference rename to tests/queries/0_stateless/01019_array_fill.reference diff --git a/dbms/tests/queries/0_stateless/01019_array_fill.sql b/tests/queries/0_stateless/01019_array_fill.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01019_array_fill.sql rename to tests/queries/0_stateless/01019_array_fill.sql diff --git a/dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference b/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference rename to tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference diff --git a/dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql b/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql rename to tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql diff --git a/dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.reference b/tests/queries/0_stateless/01019_parallel_parsing_cancel.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.reference rename to tests/queries/0_stateless/01019_parallel_parsing_cancel.reference diff --git a/dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.sh b/tests/queries/0_stateless/01019_parallel_parsing_cancel.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.sh rename to tests/queries/0_stateless/01019_parallel_parsing_cancel.sh diff --git a/dbms/tests/queries/0_stateless/01020_function_array_compact.reference b/tests/queries/0_stateless/01020_function_array_compact.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01020_function_array_compact.reference rename to tests/queries/0_stateless/01020_function_array_compact.reference diff --git a/dbms/tests/queries/0_stateless/01020_function_array_compact.sql b/tests/queries/0_stateless/01020_function_array_compact.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01020_function_array_compact.sql rename to tests/queries/0_stateless/01020_function_array_compact.sql diff --git a/dbms/tests/queries/0_stateless/01020_function_char.reference b/tests/queries/0_stateless/01020_function_char.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01020_function_char.reference rename to tests/queries/0_stateless/01020_function_char.reference diff --git a/dbms/tests/queries/0_stateless/01020_function_char.sql b/tests/queries/0_stateless/01020_function_char.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01020_function_char.sql rename to tests/queries/0_stateless/01020_function_char.sql diff --git a/dbms/tests/queries/0_stateless/01020_having_without_group_by.reference b/tests/queries/0_stateless/01020_having_without_group_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01020_having_without_group_by.reference rename to tests/queries/0_stateless/01020_having_without_group_by.reference diff --git a/dbms/tests/queries/0_stateless/01020_having_without_group_by.sql b/tests/queries/0_stateless/01020_having_without_group_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01020_having_without_group_by.sql rename to tests/queries/0_stateless/01020_having_without_group_by.sql diff --git a/dbms/tests/queries/0_stateless/01021_create_as_select.reference b/tests/queries/0_stateless/01021_create_as_select.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01021_create_as_select.reference rename to tests/queries/0_stateless/01021_create_as_select.reference diff --git a/dbms/tests/queries/0_stateless/01021_create_as_select.sql b/tests/queries/0_stateless/01021_create_as_select.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01021_create_as_select.sql rename to tests/queries/0_stateless/01021_create_as_select.sql diff --git a/dbms/tests/queries/0_stateless/01021_only_tuple_columns.reference b/tests/queries/0_stateless/01021_only_tuple_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01021_only_tuple_columns.reference rename to tests/queries/0_stateless/01021_only_tuple_columns.reference diff --git 
a/dbms/tests/queries/0_stateless/01021_only_tuple_columns.sql b/tests/queries/0_stateless/01021_only_tuple_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_only_tuple_columns.sql
rename to tests/queries/0_stateless/01021_only_tuple_columns.sql
diff --git a/dbms/tests/queries/0_stateless/01021_tuple_parser.reference b/tests/queries/0_stateless/01021_tuple_parser.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_tuple_parser.reference
rename to tests/queries/0_stateless/01021_tuple_parser.reference
diff --git a/dbms/tests/queries/0_stateless/01021_tuple_parser.sql b/tests/queries/0_stateless/01021_tuple_parser.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_tuple_parser.sql
rename to tests/queries/0_stateless/01021_tuple_parser.sql
diff --git a/dbms/tests/queries/0_stateless/01023_materialized_view_query_context.reference b/tests/queries/0_stateless/01023_materialized_view_query_context.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01023_materialized_view_query_context.reference
rename to tests/queries/0_stateless/01023_materialized_view_query_context.reference
diff --git a/dbms/tests/queries/0_stateless/01023_materialized_view_query_context.sql b/tests/queries/0_stateless/01023_materialized_view_query_context.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01023_materialized_view_query_context.sql
rename to tests/queries/0_stateless/01023_materialized_view_query_context.sql
diff --git a/dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.reference b/tests/queries/0_stateless/01024__getScalar.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.reference
rename to tests/queries/0_stateless/01024__getScalar.reference
diff --git a/dbms/tests/queries/0_stateless/01024__getScalar.sql b/tests/queries/0_stateless/01024__getScalar.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01024__getScalar.sql
rename to tests/queries/0_stateless/01024__getScalar.sql
diff --git a/dbms/tests/queries/0_stateless/01025_array_compact_generic.reference b/tests/queries/0_stateless/01025_array_compact_generic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01025_array_compact_generic.reference
rename to tests/queries/0_stateless/01025_array_compact_generic.reference
diff --git a/dbms/tests/queries/0_stateless/01025_array_compact_generic.sql b/tests/queries/0_stateless/01025_array_compact_generic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01025_array_compact_generic.sql
rename to tests/queries/0_stateless/01025_array_compact_generic.sql
diff --git a/dbms/tests/queries/0_stateless/01026_char_utf8.reference b/tests/queries/0_stateless/01026_char_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01026_char_utf8.reference
rename to tests/queries/0_stateless/01026_char_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/01026_char_utf8.sql b/tests/queries/0_stateless/01026_char_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01026_char_utf8.sql
rename to tests/queries/0_stateless/01026_char_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/01029_early_constant_folding.reference b/tests/queries/0_stateless/01029_early_constant_folding.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01029_early_constant_folding.reference
rename to tests/queries/0_stateless/01029_early_constant_folding.reference
diff --git a/dbms/tests/queries/0_stateless/01029_early_constant_folding.sql b/tests/queries/0_stateless/01029_early_constant_folding.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01029_early_constant_folding.sql
rename to tests/queries/0_stateless/01029_early_constant_folding.sql
diff --git a/dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference b/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference
rename to tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference
diff --git a/dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql b/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql
rename to tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql
diff --git a/dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference b/tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference
rename to tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql b/tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql
rename to tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql
diff --git a/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference b/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference
rename to tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql b/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql
rename to tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference b/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference
rename to tests/queries/0_stateless/01030_limit_by_with_ties_error.reference
diff --git a/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh b/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
rename to tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
diff --git a/dbms/tests/queries/0_stateless/01052_array_reduce_exception.reference b/tests/queries/0_stateless/01030_storage_hdfs_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01052_array_reduce_exception.reference
rename to tests/queries/0_stateless/01030_storage_hdfs_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.sql b/tests/queries/0_stateless/01030_storage_hdfs_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.sql
rename to tests/queries/0_stateless/01030_storage_hdfs_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.reference b/tests/queries/0_stateless/01030_storage_set_supports_read.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_set_supports_read.reference
rename to tests/queries/0_stateless/01030_storage_set_supports_read.reference
diff --git a/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.sql b/tests/queries/0_stateless/01030_storage_set_supports_read.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_set_supports_read.sql
rename to tests/queries/0_stateless/01030_storage_set_supports_read.sql
diff --git a/dbms/tests/queries/0_stateless/01056_create_table_as.reference b/tests/queries/0_stateless/01030_storage_url_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01056_create_table_as.reference
rename to tests/queries/0_stateless/01030_storage_url_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/01030_storage_url_syntax.sql b/tests/queries/0_stateless/01030_storage_url_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_url_syntax.sql
rename to tests/queries/0_stateless/01030_storage_url_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference b/tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference
rename to tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference
diff --git a/dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh b/tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh
rename to tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh
diff --git a/dbms/tests/queries/0_stateless/01031_new_any_join.reference b/tests/queries/0_stateless/01031_new_any_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_new_any_join.reference
rename to tests/queries/0_stateless/01031_new_any_join.reference
diff --git a/dbms/tests/queries/0_stateless/01031_new_any_join.sql b/tests/queries/0_stateless/01031_new_any_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_new_any_join.sql
rename to tests/queries/0_stateless/01031_new_any_join.sql
diff --git a/dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference b/tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference
rename to tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference
diff --git a/dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql b/tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql
rename to tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql
diff --git a/dbms/tests/queries/0_stateless/01031_semi_anti_join.reference b/tests/queries/0_stateless/01031_semi_anti_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_semi_anti_join.reference
rename to tests/queries/0_stateless/01031_semi_anti_join.reference
diff --git a/dbms/tests/queries/0_stateless/01031_semi_anti_join.sql b/tests/queries/0_stateless/01031_semi_anti_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_semi_anti_join.sql
rename to tests/queries/0_stateless/01031_semi_anti_join.sql
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference b/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference
rename to tests/queries/0_stateless/01032_cityHash64_for_UUID.reference
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql b/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql
rename to tests/queries/0_stateless/01032_cityHash64_for_UUID.sql
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference b/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference
rename to tests/queries/0_stateless/01032_cityHash64_for_decimal.reference
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql b/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql
rename to tests/queries/0_stateless/01032_cityHash64_for_decimal.sql
diff --git a/dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.reference b/tests/queries/0_stateless/01032_duplicate_column_insert_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.reference
rename to tests/queries/0_stateless/01032_duplicate_column_insert_query.reference
diff --git a/dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.sql b/tests/queries/0_stateless/01032_duplicate_column_insert_query.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.sql
rename to tests/queries/0_stateless/01032_duplicate_column_insert_query.sql
diff --git a/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.reference b/tests/queries/0_stateless/01033_dictionaries_lifetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.reference
rename to tests/queries/0_stateless/01033_dictionaries_lifetime.reference
diff --git a/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.sql b/tests/queries/0_stateless/01033_dictionaries_lifetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.sql
rename to tests/queries/0_stateless/01033_dictionaries_lifetime.sql
diff --git a/tests/queries/0_stateless/01033_quota_dcl.reference b/tests/queries/0_stateless/01033_quota_dcl.reference
new file mode 100644
index 00000000000..7bd2d2923d2
--- /dev/null
+++ b/tests/queries/0_stateless/01033_quota_dcl.reference
@@ -0,0 +1,2 @@
+default
+CREATE QUOTA default KEYED BY \'user name\' FOR INTERVAL 1 HOUR TRACKING ONLY TO default, readonly
diff --git a/dbms/tests/queries/0_stateless/01033_quota_dcl.sql b/tests/queries/0_stateless/01033_quota_dcl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_quota_dcl.sql
rename to tests/queries/0_stateless/01033_quota_dcl.sql
diff --git a/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
new file mode 100644
index 00000000000..c2d7d849fae
--- /dev/null
+++ b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
@@ -0,0 +1 @@
+CREATE TABLE default.BannerDict\n(\n `BannerID` UInt64, \n `CompaignID` UInt64\n)\nENGINE = ODBC(\'DSN=pgconn;Database=postgres\', \'somedb\', \'bannerdict\')
diff --git a/dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql
rename to tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql
diff --git a/dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.reference b/tests/queries/0_stateless/01033_substr_negative_size_arg.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.reference
rename to tests/queries/0_stateless/01033_substr_negative_size_arg.reference
diff --git a/dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.sql b/tests/queries/0_stateless/01033_substr_negative_size_arg.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.sql
rename to tests/queries/0_stateless/01033_substr_negative_size_arg.sql
diff --git a/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.reference b/tests/queries/0_stateless/01034_JSONCompactEachRow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.reference
rename to tests/queries/0_stateless/01034_JSONCompactEachRow.reference
diff --git a/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.sql b/tests/queries/0_stateless/01034_JSONCompactEachRow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.sql
rename to tests/queries/0_stateless/01034_JSONCompactEachRow.sql
diff --git a/dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference
rename to tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh
rename to tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.reference b/tests/queries/0_stateless/01034_order_by_pk_prefix.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.reference
rename to tests/queries/0_stateless/01034_order_by_pk_prefix.reference
diff --git a/dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.sql b/tests/queries/0_stateless/01034_order_by_pk_prefix.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.sql
rename to tests/queries/0_stateless/01034_order_by_pk_prefix.sql
diff --git a/dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference b/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference
rename to tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql b/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql
rename to tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/01034_sample_final_distributed.reference b/tests/queries/0_stateless/01034_sample_final_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_sample_final_distributed.reference
rename to tests/queries/0_stateless/01034_sample_final_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/01034_sample_final_distributed.sql b/tests/queries/0_stateless/01034_sample_final_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_sample_final_distributed.sql
rename to tests/queries/0_stateless/01034_sample_final_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference b/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference
rename to tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference
diff --git a/dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql b/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql
rename to tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql
diff --git a/dbms/tests/queries/0_stateless/01034_values_parse_float_bug.reference b/tests/queries/0_stateless/01034_values_parse_float_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_values_parse_float_bug.reference
rename to tests/queries/0_stateless/01034_values_parse_float_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01034_values_parse_float_bug.sh b/tests/queries/0_stateless/01034_values_parse_float_bug.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_values_parse_float_bug.sh
rename to tests/queries/0_stateless/01034_values_parse_float_bug.sh
diff --git a/dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference b/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference
rename to tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference
diff --git a/dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql b/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql
rename to tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql
diff --git a/dbms/tests/queries/0_stateless/01035_avg_weighted.reference b/tests/queries/0_stateless/01035_avg_weighted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_avg_weighted.reference
rename to tests/queries/0_stateless/01035_avg_weighted.reference
diff --git a/dbms/tests/queries/0_stateless/01035_avg_weighted.sh b/tests/queries/0_stateless/01035_avg_weighted.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_avg_weighted.sh
rename to tests/queries/0_stateless/01035_avg_weighted.sh
diff --git a/dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference
rename to tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh
rename to tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.reference b/tests/queries/0_stateless/01035_enum_conversion_native_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.reference
rename to tests/queries/0_stateless/01035_enum_conversion_native_format.reference
diff --git a/dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.sh b/tests/queries/0_stateless/01035_enum_conversion_native_format.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.sh
rename to tests/queries/0_stateless/01035_enum_conversion_native_format.sh
diff --git a/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.reference b/tests/queries/0_stateless/01035_lc_empty_part_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.reference
rename to tests/queries/0_stateless/01035_lc_empty_part_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.sh b/tests/queries/0_stateless/01035_lc_empty_part_bug.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.sh
rename to tests/queries/0_stateless/01035_lc_empty_part_bug.sh
diff --git a/dbms/tests/queries/0_stateless/01070_template_empty_file.reference b/tests/queries/0_stateless/01035_prewhere_with_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_template_empty_file.reference
rename to tests/queries/0_stateless/01035_prewhere_with_alias.reference
diff --git a/dbms/tests/queries/0_stateless/01035_prewhere_with_alias.sql b/tests/queries/0_stateless/01035_prewhere_with_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_prewhere_with_alias.sql
rename to tests/queries/0_stateless/01035_prewhere_with_alias.sql
diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference
rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference
diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql
rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql
diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference
rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference
diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql
rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql
diff --git a/dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference b/tests/queries/0_stateless/01036_union_different_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference
rename to tests/queries/0_stateless/01036_union_different_columns.reference
diff --git a/dbms/tests/queries/0_stateless/01036_union_different_columns.sql b/tests/queries/0_stateless/01036_union_different_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01036_union_different_columns.sql
rename to tests/queries/0_stateless/01036_union_different_columns.sql
diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference b/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference
rename to tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference
diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql b/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql
rename to tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql
diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference b/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference
rename to tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference
diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql b/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql
rename to tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql
diff --git a/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference b/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference
rename to tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference
diff --git a/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql b/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql
rename to tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql
diff --git a/dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference b/tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference
rename to tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql b/tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql
rename to tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference b/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference
rename to tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference
diff --git a/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh b/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh
rename to tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh
diff --git a/dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.reference b/tests/queries/0_stateless/01039_mergetree_exec_time.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.reference
rename to tests/queries/0_stateless/01039_mergetree_exec_time.reference
diff --git a/dbms/tests/queries/0_stateless/01039_mergetree_exec_time.sql b/tests/queries/0_stateless/01039_mergetree_exec_time.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01039_mergetree_exec_time.sql
rename to tests/queries/0_stateless/01039_mergetree_exec_time.sql
diff --git a/dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference b/tests/queries/0_stateless/01039_row_policy_dcl.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference
rename to tests/queries/0_stateless/01039_row_policy_dcl.reference
diff --git a/dbms/tests/queries/0_stateless/01039_row_policy_dcl.sql b/tests/queries/0_stateless/01039_row_policy_dcl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01039_row_policy_dcl.sql
rename to tests/queries/0_stateless/01039_row_policy_dcl.sql
diff --git a/dbms/tests/queries/0_stateless/01039_test_setting_parse.reference b/tests/queries/0_stateless/01039_test_setting_parse.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01039_test_setting_parse.reference
rename to tests/queries/0_stateless/01039_test_setting_parse.reference
diff --git a/dbms/tests/queries/0_stateless/01039_test_setting_parse.sql b/tests/queries/0_stateless/01039_test_setting_parse.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01039_test_setting_parse.sql
rename to tests/queries/0_stateless/01039_test_setting_parse.sql
diff --git a/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference b/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference
rename to tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference
diff --git a/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh b/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh
rename to tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh
diff --git a/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference
rename to tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference
diff --git a/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql
rename to tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql
diff --git a/dbms/tests/queries/0_stateless/01040_h3_get_resolution.reference b/tests/queries/0_stateless/01040_h3_get_resolution.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01040_h3_get_resolution.reference
rename to tests/queries/0_stateless/01040_h3_get_resolution.reference
diff --git a/dbms/tests/queries/0_stateless/01040_h3_get_resolution.sql b/tests/queries/0_stateless/01040_h3_get_resolution.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01040_h3_get_resolution.sql
rename to tests/queries/0_stateless/01040_h3_get_resolution.sql
diff --git a/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference b/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference
rename to tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference
diff --git a/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql b/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql
rename to tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql
diff --git a/dbms/tests/queries/0_stateless/01041_h3_is_valid.reference b/tests/queries/0_stateless/01041_h3_is_valid.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01041_h3_is_valid.reference
rename to tests/queries/0_stateless/01041_h3_is_valid.reference
diff --git a/dbms/tests/queries/0_stateless/01041_h3_is_valid.sql b/tests/queries/0_stateless/01041_h3_is_valid.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01041_h3_is_valid.sql
rename to tests/queries/0_stateless/01041_h3_is_valid.sql
diff --git a/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference
rename to tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference
diff --git a/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
rename to tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
diff --git a/dbms/tests/queries/0_stateless/01042_h3_k_ring.reference b/tests/queries/0_stateless/01042_h3_k_ring.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01042_h3_k_ring.reference
rename to tests/queries/0_stateless/01042_h3_k_ring.reference
diff --git a/dbms/tests/queries/0_stateless/01042_h3_k_ring.sql b/tests/queries/0_stateless/01042_h3_k_ring.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01042_h3_k_ring.sql
rename to tests/queries/0_stateless/01042_h3_k_ring.sql
diff --git a/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference
rename to tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference
diff --git a/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh
rename to tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh
diff --git a/dbms/tests/queries/0_stateless/01043_categorical_iv.reference b/tests/queries/0_stateless/01043_categorical_iv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_categorical_iv.reference
rename to tests/queries/0_stateless/01043_categorical_iv.reference
diff --git a/dbms/tests/queries/0_stateless/01043_categorical_iv.sql b/tests/queries/0_stateless/01043_categorical_iv.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_categorical_iv.sql
rename to tests/queries/0_stateless/01043_categorical_iv.sql
diff --git a/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference b/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference
rename to tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference
diff --git a/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql b/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql
rename to tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql
diff --git a/dbms/tests/queries/0_stateless/01043_geo_distance.reference b/tests/queries/0_stateless/01043_geo_distance.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_geo_distance.reference
rename to tests/queries/0_stateless/01043_geo_distance.reference
diff --git a/dbms/tests/queries/0_stateless/01043_geo_distance.sql b/tests/queries/0_stateless/01043_geo_distance.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_geo_distance.sql
rename to tests/queries/0_stateless/01043_geo_distance.sql
diff --git a/dbms/tests/queries/0_stateless/01043_h3_edge_length_m.reference b/tests/queries/0_stateless/01043_h3_edge_length_m.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_h3_edge_length_m.reference
rename to tests/queries/0_stateless/01043_h3_edge_length_m.reference
diff --git a/dbms/tests/queries/0_stateless/01043_h3_edge_length_m.sql b/tests/queries/0_stateless/01043_h3_edge_length_m.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01043_h3_edge_length_m.sql
rename to tests/queries/0_stateless/01043_h3_edge_length_m.sql
diff --git a/dbms/tests/queries/0_stateless/01044_great_circle_angle.reference b/tests/queries/0_stateless/01044_great_circle_angle.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01044_great_circle_angle.reference
rename to tests/queries/0_stateless/01044_great_circle_angle.reference
diff --git a/dbms/tests/queries/0_stateless/01044_great_circle_angle.sql b/tests/queries/0_stateless/01044_great_circle_angle.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01044_great_circle_angle.sql
rename to tests/queries/0_stateless/01044_great_circle_angle.sql
diff --git a/dbms/tests/queries/0_stateless/01044_h3_edge_angle.reference b/tests/queries/0_stateless/01044_h3_edge_angle.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01044_h3_edge_angle.reference
rename to tests/queries/0_stateless/01044_h3_edge_angle.reference
diff --git a/dbms/tests/queries/0_stateless/01044_h3_edge_angle.sql b/tests/queries/0_stateless/01044_h3_edge_angle.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01044_h3_edge_angle.sql
rename to tests/queries/0_stateless/01044_h3_edge_angle.sql
diff --git a/dbms/tests/queries/0_stateless/01045_array_zip.reference b/tests/queries/0_stateless/01045_array_zip.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_array_zip.reference
rename to tests/queries/0_stateless/01045_array_zip.reference
diff --git a/dbms/tests/queries/0_stateless/01045_array_zip.sql b/tests/queries/0_stateless/01045_array_zip.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_array_zip.sql
rename to tests/queries/0_stateless/01045_array_zip.sql
diff --git a/dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.reference b/tests/queries/0_stateless/01045_bloom_filter_null_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.reference
rename to tests/queries/0_stateless/01045_bloom_filter_null_array.reference
diff --git a/dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.sql b/tests/queries/0_stateless/01045_bloom_filter_null_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.sql
rename to tests/queries/0_stateless/01045_bloom_filter_null_array.sql
diff --git a/dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.reference b/tests/queries/0_stateless/01045_dictionaries_restrictions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.reference
rename to tests/queries/0_stateless/01045_dictionaries_restrictions.reference
diff --git a/dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.sql b/tests/queries/0_stateless/01045_dictionaries_restrictions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.sql
rename to tests/queries/0_stateless/01045_dictionaries_restrictions.sql
diff --git a/dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.reference b/tests/queries/0_stateless/01045_order_by_pk_special_storages.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.reference
rename to tests/queries/0_stateless/01045_order_by_pk_special_storages.reference
diff --git a/dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh
rename to tests/queries/0_stateless/01045_order_by_pk_special_storages.sh
diff --git a/dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference
rename to tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference
diff --git a/dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh
rename to tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh
diff --git a/dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference b/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference
rename to tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql b/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql
rename to tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.reference b/tests/queries/0_stateless/01046_trivial_count_query_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.reference
rename to tests/queries/0_stateless/01046_trivial_count_query_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.sql b/tests/queries/0_stateless/01046_trivial_count_query_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.sql
rename to tests/queries/0_stateless/01046_trivial_count_query_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference b/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference
rename to tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql b/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql
rename to tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/01047_nullable_rand.reference b/tests/queries/0_stateless/01047_nullable_rand.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01047_nullable_rand.reference
rename to tests/queries/0_stateless/01047_nullable_rand.reference
diff --git a/dbms/tests/queries/0_stateless/01047_nullable_rand.sql b/tests/queries/0_stateless/01047_nullable_rand.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01047_nullable_rand.sql
rename to tests/queries/0_stateless/01047_nullable_rand.sql
diff --git a/dbms/tests/queries/0_stateless/01063_create_column_set.reference b/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01063_create_column_set.reference
rename to tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql b/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql
rename to tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01048_exists_query.reference b/tests/queries/0_stateless/01048_exists_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01048_exists_query.reference
rename to tests/queries/0_stateless/01048_exists_query.reference
diff --git a/tests/queries/0_stateless/01048_exists_query.sql b/tests/queries/0_stateless/01048_exists_query.sql
new file mode 100644
index 00000000000..9a4c0558b60
--- /dev/null
+++ b/tests/queries/0_stateless/01048_exists_query.sql
@@ -0,0 +1,44 @@
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+
+DROP DATABASE IF EXISTS db_01048;
+CREATE DATABASE db_01048 Engine = Ordinary;
+
+DROP TABLE IF EXISTS db_01048.t_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+
+CREATE TABLE db_01048.t_01048 (x UInt8) ENGINE = Memory;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+
+DROP TABLE db_01048.t_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+
+DROP DICTIONARY IF EXISTS t_01048;
+CREATE TEMPORARY TABLE t_01048 (x UInt8);
+EXISTS t_01048; -- Does not work for temporary tables. Maybe have to fix.
+EXISTS TABLE t_01048;
+EXISTS DICTIONARY t_01048;
+
+CREATE DICTIONARY db_01048.t_01048 (k UInt64, v String) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT TSV)) LIFETIME(1000);
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048; -- Dictionaries are tables as well. But not all tables are dictionaries.
+EXISTS DICTIONARY db_01048.t_01048;
+
+-- But dictionary-tables cannot be dropped as usual tables.
+DROP TABLE db_01048.t_01048; -- { serverError 60 }
+DROP DICTIONARY db_01048.t_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+
+DROP DATABASE db_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_bug.reference b/tests/queries/0_stateless/01049_join_low_card_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01049_join_low_card_bug.reference
rename to tests/queries/0_stateless/01049_join_low_card_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_bug.sql b/tests/queries/0_stateless/01049_join_low_card_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01049_join_low_card_bug.sql
rename to tests/queries/0_stateless/01049_join_low_card_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_crash.reference b/tests/queries/0_stateless/01049_join_low_card_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01049_join_low_card_crash.reference
rename to tests/queries/0_stateless/01049_join_low_card_crash.reference
diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_crash.sql b/tests/queries/0_stateless/01049_join_low_card_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01049_join_low_card_crash.sql
rename to tests/queries/0_stateless/01049_join_low_card_crash.sql
diff --git a/dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference b/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference
rename to tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference
diff --git a/dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql b/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql
rename to tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql
diff --git a/dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference b/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference
rename to tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql b/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql
rename to tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_crash.reference b/tests/queries/0_stateless/01050_engine_join_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_engine_join_crash.reference
rename to tests/queries/0_stateless/01050_engine_join_crash.reference
diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_crash.sql b/tests/queries/0_stateless/01050_engine_join_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_engine_join_crash.sql
rename to tests/queries/0_stateless/01050_engine_join_crash.sql
diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_view_crash.reference b/tests/queries/0_stateless/01050_engine_join_view_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_engine_join_view_crash.reference
rename to tests/queries/0_stateless/01050_engine_join_view_crash.reference
diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_view_crash.sql b/tests/queries/0_stateless/01050_engine_join_view_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_engine_join_view_crash.sql
rename to tests/queries/0_stateless/01050_engine_join_view_crash.sql
diff --git a/dbms/tests/queries/0_stateless/01050_group_array_sample.reference b/tests/queries/0_stateless/01050_group_array_sample.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_group_array_sample.reference
rename to tests/queries/0_stateless/01050_group_array_sample.reference
diff --git a/dbms/tests/queries/0_stateless/01050_group_array_sample.sql b/tests/queries/0_stateless/01050_group_array_sample.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01050_group_array_sample.sql
rename to tests/queries/0_stateless/01050_group_array_sample.sql
diff --git a/dbms/tests/queries/0_stateless/01051_aggregate_function_crash.reference b/tests/queries/0_stateless/01051_aggregate_function_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_aggregate_function_crash.reference
rename to tests/queries/0_stateless/01051_aggregate_function_crash.reference
diff --git a/dbms/tests/queries/0_stateless/01051_aggregate_function_crash.sql b/tests/queries/0_stateless/01051_aggregate_function_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_aggregate_function_crash.sql
rename to tests/queries/0_stateless/01051_aggregate_function_crash.sql
diff --git a/dbms/tests/queries/0_stateless/01051_all_join_engine.reference b/tests/queries/0_stateless/01051_all_join_engine.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_all_join_engine.reference
rename to tests/queries/0_stateless/01051_all_join_engine.reference
diff --git a/dbms/tests/queries/0_stateless/01051_all_join_engine.sql b/tests/queries/0_stateless/01051_all_join_engine.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_all_join_engine.sql
rename to tests/queries/0_stateless/01051_all_join_engine.sql
diff --git a/dbms/tests/queries/0_stateless/01051_new_any_join_engine.reference b/tests/queries/0_stateless/01051_new_any_join_engine.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_new_any_join_engine.reference
rename to tests/queries/0_stateless/01051_new_any_join_engine.reference
diff --git a/dbms/tests/queries/0_stateless/01051_new_any_join_engine.sql b/tests/queries/0_stateless/01051_new_any_join_engine.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_new_any_join_engine.sql
rename to tests/queries/0_stateless/01051_new_any_join_engine.sql
diff --git a/dbms/tests/queries/0_stateless/01051_random_printable_ascii.reference b/tests/queries/0_stateless/01051_random_printable_ascii.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_random_printable_ascii.reference
rename to tests/queries/0_stateless/01051_random_printable_ascii.reference
diff --git a/dbms/tests/queries/0_stateless/01051_random_printable_ascii.sql b/tests/queries/0_stateless/01051_random_printable_ascii.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_random_printable_ascii.sql
rename to tests/queries/0_stateless/01051_random_printable_ascii.sql
diff --git a/dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference b/tests/queries/0_stateless/01051_same_name_alias_with_joins.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference
rename to tests/queries/0_stateless/01051_same_name_alias_with_joins.reference
diff --git a/dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.sql b/tests/queries/0_stateless/01051_same_name_alias_with_joins.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.sql
rename to tests/queries/0_stateless/01051_same_name_alias_with_joins.sql
diff --git a/dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference b/tests/queries/0_stateless/01052_array_reduce_exception.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference
rename to tests/queries/0_stateless/01052_array_reduce_exception.reference
diff --git a/dbms/tests/queries/0_stateless/01052_array_reduce_exception.sql b/tests/queries/0_stateless/01052_array_reduce_exception.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01052_array_reduce_exception.sql
rename to tests/queries/0_stateless/01052_array_reduce_exception.sql
diff --git a/dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.reference b/tests/queries/0_stateless/01052_compression_buffer_overrun.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.reference
rename to tests/queries/0_stateless/01052_compression_buffer_overrun.reference
diff --git a/dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.sh b/tests/queries/0_stateless/01052_compression_buffer_overrun.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.sh
rename to tests/queries/0_stateless/01052_compression_buffer_overrun.sh
diff --git a/dbms/tests/queries/0_stateless/01053_drop_database_mat_view.reference b/tests/queries/0_stateless/01053_drop_database_mat_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01053_drop_database_mat_view.reference
rename to tests/queries/0_stateless/01053_drop_database_mat_view.reference
diff --git a/dbms/tests/queries/0_stateless/01053_drop_database_mat_view.sql b/tests/queries/0_stateless/01053_drop_database_mat_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01053_drop_database_mat_view.sql
rename to tests/queries/0_stateless/01053_drop_database_mat_view.sql
diff --git a/dbms/tests/queries/0_stateless/01053_if_chain_check.reference b/tests/queries/0_stateless/01053_if_chain_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01053_if_chain_check.reference
rename to tests/queries/0_stateless/01053_if_chain_check.reference
diff --git a/dbms/tests/queries/0_stateless/01053_if_chain_check.sql b/tests/queries/0_stateless/01053_if_chain_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01053_if_chain_check.sql
rename to tests/queries/0_stateless/01053_if_chain_check.sql
diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference b/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference
rename to tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference
diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh b/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh
rename to tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh
diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference b/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference
rename to tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference
diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql b/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql
rename to tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql
diff --git a/dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference b/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference
rename to tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference
diff --git a/dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh b/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh
rename to tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh
diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts.reference b/tests/queries/0_stateless/01055_compact_parts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_compact_parts.reference
rename to tests/queries/0_stateless/01055_compact_parts.reference
diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts.sql b/tests/queries/0_stateless/01055_compact_parts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_compact_parts.sql
rename to tests/queries/0_stateless/01055_compact_parts.sql
diff --git a/tests/queries/0_stateless/01055_compact_parts_1.reference b/tests/queries/0_stateless/01055_compact_parts_1.reference
new file mode 100644
index 00000000000..b99f336d3b0
--- /dev/null
+++ b/tests/queries/0_stateless/01055_compact_parts_1.reference
@@ -0,0 +1,2 @@
+CREATE TABLE default.mt_compact\n(\n `a` Int32, \n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, index_granularity = 8192
+CREATE TABLE default.mt_compact\n(\n `a` Int32, \n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, index_granularity = 8192, parts_to_delay_insert = 300
diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_1.sql b/tests/queries/0_stateless/01055_compact_parts_1.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_compact_parts_1.sql
rename to tests/queries/0_stateless/01055_compact_parts_1.sql
diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_granularity.reference b/tests/queries/0_stateless/01055_compact_parts_granularity.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_compact_parts_granularity.reference
rename to tests/queries/0_stateless/01055_compact_parts_granularity.reference
diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_granularity.sh b/tests/queries/0_stateless/01055_compact_parts_granularity.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_compact_parts_granularity.sh
rename to tests/queries/0_stateless/01055_compact_parts_granularity.sh
diff --git a/dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.reference b/tests/queries/0_stateless/01055_minmax_index_compact_parts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.reference
rename to tests/queries/0_stateless/01055_minmax_index_compact_parts.reference
diff --git a/dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh
rename to tests/queries/0_stateless/01055_minmax_index_compact_parts.sh
diff --git a/dbms/tests/queries/0_stateless/01055_prewhere_bugs.reference b/tests/queries/0_stateless/01055_prewhere_bugs.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_prewhere_bugs.reference
rename to tests/queries/0_stateless/01055_prewhere_bugs.reference
diff --git a/dbms/tests/queries/0_stateless/01055_prewhere_bugs.sql b/tests/queries/0_stateless/01055_prewhere_bugs.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01055_prewhere_bugs.sql
rename to tests/queries/0_stateless/01055_prewhere_bugs.sql
diff --git a/dbms/tests/queries/0_stateless/01074_h3_range_check.reference b/tests/queries/0_stateless/01056_create_table_as.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_h3_range_check.reference
rename to tests/queries/0_stateless/01056_create_table_as.reference
diff --git a/dbms/tests/queries/0_stateless/01056_create_table_as.sql b/tests/queries/0_stateless/01056_create_table_as.sql
similarity index 96%
rename from dbms/tests/queries/0_stateless/01056_create_table_as.sql
rename to tests/queries/0_stateless/01056_create_table_as.sql
index 868e1f082dd..f95df9b7906 100644
--- a/dbms/tests/queries/0_stateless/01056_create_table_as.sql
+++ b/tests/queries/0_stateless/01056_create_table_as.sql
@@ -15,6 +15,7 @@ CREATE TABLE t3 AS v; -- { serverError 80; }
 DROP TABLE v;
 
 -- dictionary
+DROP DICTIONARY IF EXISTS dict;
 DROP DATABASE if exists test_01056_dict_data;
 CREATE DATABASE test_01056_dict_data;
 CREATE TABLE test_01056_dict_data.dict_data (key Int, value UInt16) Engine=Memory();
diff --git a/dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.reference b/tests/queries/0_stateless/01056_negative_with_bloom_filter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.reference
rename to tests/queries/0_stateless/01056_negative_with_bloom_filter.reference
diff --git a/dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.sql b/tests/queries/0_stateless/01056_negative_with_bloom_filter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.sql
rename to tests/queries/0_stateless/01056_negative_with_bloom_filter.sql
diff --git a/dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference
rename to tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference
diff --git a/dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql
rename to tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql
diff --git a/dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference b/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference
rename to tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference
diff --git a/dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh b/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh
rename to tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh
diff --git a/dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference b/tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference
rename to tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference
diff --git a/dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh b/tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh
rename to tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh
diff --git a/dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference b/tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference
rename to tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh b/tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh
rename to tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh
diff --git a/dbms/tests/queries/0_stateless/01059_storage_file_brotli.reference b/tests/queries/0_stateless/01059_storage_file_brotli.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01059_storage_file_brotli.reference
rename to tests/queries/0_stateless/01059_storage_file_brotli.reference
diff --git a/dbms/tests/queries/0_stateless/01059_storage_file_brotli.sql b/tests/queries/0_stateless/01059_storage_file_brotli.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01059_storage_file_brotli.sql
rename to tests/queries/0_stateless/01059_storage_file_brotli.sql
diff --git a/dbms/tests/queries/0_stateless/01060_avro.reference b/tests/queries/0_stateless/01060_avro.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_avro.reference
rename to tests/queries/0_stateless/01060_avro.reference
diff --git a/dbms/tests/queries/0_stateless/01060_avro.sh b/tests/queries/0_stateless/01060_avro.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_avro.sh
rename to tests/queries/0_stateless/01060_avro.sh
diff --git a/dbms/tests/queries/0_stateless/01060_defaults_all_columns.reference b/tests/queries/0_stateless/01060_defaults_all_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_defaults_all_columns.reference
rename to tests/queries/0_stateless/01060_defaults_all_columns.reference
diff --git a/dbms/tests/queries/0_stateless/01060_defaults_all_columns.sql b/tests/queries/0_stateless/01060_defaults_all_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_defaults_all_columns.sql
rename to tests/queries/0_stateless/01060_defaults_all_columns.sql
diff --git a/dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.reference b/tests/queries/0_stateless/01060_shutdown_table_after_detach.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.reference
rename to tests/queries/0_stateless/01060_shutdown_table_after_detach.reference
diff --git a/dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.sql b/tests/queries/0_stateless/01060_shutdown_table_after_detach.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.sql
rename to tests/queries/0_stateless/01060_shutdown_table_after_detach.sql
diff --git a/dbms/tests/queries/0_stateless/01060_substring_negative_size.reference b/tests/queries/0_stateless/01060_substring_negative_size.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_substring_negative_size.reference
rename to tests/queries/0_stateless/01060_substring_negative_size.reference
diff --git a/dbms/tests/queries/0_stateless/01060_substring_negative_size.sql b/tests/queries/0_stateless/01060_substring_negative_size.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_substring_negative_size.sql
rename to tests/queries/0_stateless/01060_substring_negative_size.sql
diff --git a/dbms/tests/queries/0_stateless/01061_alter_codec_with_type.reference b/tests/queries/0_stateless/01061_alter_codec_with_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01061_alter_codec_with_type.reference
rename to tests/queries/0_stateless/01061_alter_codec_with_type.reference
diff --git a/dbms/tests/queries/0_stateless/01061_alter_codec_with_type.sql b/tests/queries/0_stateless/01061_alter_codec_with_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01061_alter_codec_with_type.sql
rename to tests/queries/0_stateless/01061_alter_codec_with_type.sql
diff --git a/dbms/tests/queries/0_stateless/01062_alter_on_mutataion.reference b/tests/queries/0_stateless/01062_alter_on_mutataion.reference
similarity index 100%
rename from
dbms/tests/queries/0_stateless/01062_alter_on_mutataion.reference rename to tests/queries/0_stateless/01062_alter_on_mutataion.reference diff --git a/dbms/tests/queries/0_stateless/01062_alter_on_mutataion.sql b/tests/queries/0_stateless/01062_alter_on_mutataion.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01062_alter_on_mutataion.sql rename to tests/queries/0_stateless/01062_alter_on_mutataion.sql diff --git a/dbms/tests/queries/0_stateless/01062_max_parser_depth.reference b/tests/queries/0_stateless/01062_max_parser_depth.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01062_max_parser_depth.reference rename to tests/queries/0_stateless/01062_max_parser_depth.reference diff --git a/dbms/tests/queries/0_stateless/01062_max_parser_depth.sh b/tests/queries/0_stateless/01062_max_parser_depth.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01062_max_parser_depth.sh rename to tests/queries/0_stateless/01062_max_parser_depth.sh diff --git a/dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference b/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference rename to tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference diff --git a/dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql b/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql rename to tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql diff --git a/dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference b/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference rename to tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference diff --git a/dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql b/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql rename to tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql diff --git a/dbms/tests/queries/0_stateless/01069_set_in_group_by.reference b/tests/queries/0_stateless/01063_create_column_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01069_set_in_group_by.reference rename to tests/queries/0_stateless/01063_create_column_set.reference diff --git a/dbms/tests/queries/0_stateless/01063_create_column_set.sql b/tests/queries/0_stateless/01063_create_column_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01063_create_column_set.sql rename to tests/queries/0_stateless/01063_create_column_set.sql diff --git a/dbms/tests/queries/0_stateless/01064_array_auc.reference b/tests/queries/0_stateless/01064_array_auc.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01064_array_auc.reference rename to tests/queries/0_stateless/01064_array_auc.reference diff --git a/dbms/tests/queries/0_stateless/01064_array_auc.sql b/tests/queries/0_stateless/01064_array_auc.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01064_array_auc.sql rename to 
tests/queries/0_stateless/01064_array_auc.sql diff --git a/dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference rename to tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference diff --git a/dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql rename to tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql diff --git a/dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference b/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference rename to tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference diff --git a/dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql b/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql rename to tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql diff --git a/dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.reference b/tests/queries/0_stateless/01065_array_zip_mixed_const.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.reference rename to tests/queries/0_stateless/01065_array_zip_mixed_const.reference diff --git a/dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.sql b/tests/queries/0_stateless/01065_array_zip_mixed_const.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.sql rename to tests/queries/0_stateless/01065_array_zip_mixed_const.sql diff --git a/dbms/tests/queries/0_stateless/01065_if_not_finite.reference b/tests/queries/0_stateless/01065_if_not_finite.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01065_if_not_finite.reference rename to tests/queries/0_stateless/01065_if_not_finite.reference diff --git a/dbms/tests/queries/0_stateless/01065_if_not_finite.sql b/tests/queries/0_stateless/01065_if_not_finite.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01065_if_not_finite.sql rename to tests/queries/0_stateless/01065_if_not_finite.sql diff --git a/dbms/tests/queries/0_stateless/01066_bit_count.reference b/tests/queries/0_stateless/01066_bit_count.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01066_bit_count.reference rename to tests/queries/0_stateless/01066_bit_count.reference diff --git a/dbms/tests/queries/0_stateless/01066_bit_count.sql b/tests/queries/0_stateless/01066_bit_count.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01066_bit_count.sql rename to tests/queries/0_stateless/01066_bit_count.sql diff --git a/dbms/tests/queries/0_stateless/01067_join_null.reference b/tests/queries/0_stateless/01067_join_null.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01067_join_null.reference rename to 
tests/queries/0_stateless/01067_join_null.reference diff --git a/dbms/tests/queries/0_stateless/01067_join_null.sql b/tests/queries/0_stateless/01067_join_null.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01067_join_null.sql rename to tests/queries/0_stateless/01067_join_null.sql diff --git a/dbms/tests/queries/0_stateless/01068_parens.reference b/tests/queries/0_stateless/01068_parens.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01068_parens.reference rename to tests/queries/0_stateless/01068_parens.reference diff --git a/tests/queries/0_stateless/01068_parens.sql b/tests/queries/0_stateless/01068_parens.sql new file mode 100644 index 00000000000..42948760594 --- /dev/null +++ b/tests/queries/0_stateless/01068_parens.sql @@ -0,0 +1,2 @@ +SET max_parser_depth = 10000; +((((((((((((((SELECT((((((((((((((((((((((((((((((((1)))))))))))))))))))))))))))))))))))))))))))))); diff --git a/tests/queries/0_stateless/01069_database_memory.reference b/tests/queries/0_stateless/01069_database_memory.reference new file mode 100644 index 00000000000..e7486d57276 --- /dev/null +++ b/tests/queries/0_stateless/01069_database_memory.reference @@ -0,0 +1,8 @@ +CREATE DATABASE memory_01069\nENGINE = Memory() +1 +2 +3 +4 +3 +4 +CREATE TABLE memory_01069.file\n(\n `n` UInt8\n)\nENGINE = File(\'CSV\') diff --git a/dbms/tests/queries/0_stateless/01069_database_memory.sql b/tests/queries/0_stateless/01069_database_memory.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01069_database_memory.sql rename to tests/queries/0_stateless/01069_database_memory.sql diff --git a/dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference b/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference rename to tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference diff --git a/dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql b/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql rename to tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference b/tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql b/tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference b/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference 
diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql b/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql
rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql
diff --git a/dbms/tests/queries/0_stateless/01083_log_first_column_alias.reference b/tests/queries/0_stateless/01069_set_in_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_first_column_alias.reference
rename to tests/queries/0_stateless/01069_set_in_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/01069_set_in_group_by.sql b/tests/queries/0_stateless/01069_set_in_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_set_in_group_by.sql
rename to tests/queries/0_stateless/01069_set_in_group_by.sql
diff --git a/tests/queries/0_stateless/01070_alter_with_ttl.reference b/tests/queries/0_stateless/01070_alter_with_ttl.reference
new file mode 100644
index 00000000000..de7833472a1
--- /dev/null
+++ b/tests/queries/0_stateless/01070_alter_with_ttl.reference
@@ -0,0 +1,2 @@
+CREATE TABLE default.alter_ttl\n(\n `i` Int32, \n `s` String TTL toDate(\'2020-01-01\')\n)\nENGINE = MergeTree\nORDER BY i\nTTL toDate(\'2020-05-05\')\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_ttl\n(\n `d` Date, \n `s` String TTL d + toIntervalDay(1)\n)\nENGINE = MergeTree\nORDER BY d\nTTL d + toIntervalMonth(1)\nSETTINGS index_granularity = 8192
diff --git a/dbms/tests/queries/0_stateless/01070_alter_with_ttl.sql b/tests/queries/0_stateless/01070_alter_with_ttl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_alter_with_ttl.sql
rename to tests/queries/0_stateless/01070_alter_with_ttl.sql
diff --git a/dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference b/tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference
rename to tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference
diff --git a/dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql b/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql
rename to tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_get_base_cell.reference b/tests/queries/0_stateless/01070_h3_get_base_cell.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_get_base_cell.reference
rename to tests/queries/0_stateless/01070_h3_get_base_cell.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_get_base_cell.sql b/tests/queries/0_stateless/01070_h3_get_base_cell.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_get_base_cell.sql
rename to tests/queries/0_stateless/01070_h3_get_base_cell.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.reference b/tests/queries/0_stateless/01070_h3_hex_area_m2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.reference
rename to tests/queries/0_stateless/01070_h3_hex_area_m2.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.sql b/tests/queries/0_stateless/01070_h3_hex_area_m2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.sql
rename to tests/queries/0_stateless/01070_h3_hex_area_m2.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference b/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference
rename to tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql b/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql
rename to tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_children.reference b/tests/queries/0_stateless/01070_h3_to_children.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_children.reference
rename to tests/queries/0_stateless/01070_h3_to_children.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_children.sql b/tests/queries/0_stateless/01070_h3_to_children.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_children.sql
rename to tests/queries/0_stateless/01070_h3_to_children.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_parent.reference b/tests/queries/0_stateless/01070_h3_to_parent.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_parent.reference
rename to tests/queries/0_stateless/01070_h3_to_parent.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_parent.sql b/tests/queries/0_stateless/01070_h3_to_parent.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_parent.sql
rename to tests/queries/0_stateless/01070_h3_to_parent.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_string.reference b/tests/queries/0_stateless/01070_h3_to_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_string.reference
rename to tests/queries/0_stateless/01070_h3_to_string.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_string.sql b/tests/queries/0_stateless/01070_h3_to_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_string.sql
rename to tests/queries/0_stateless/01070_h3_to_string.sql
diff --git a/dbms/tests/queries/0_stateless/01070_materialize_ttl.reference b/tests/queries/0_stateless/01070_materialize_ttl.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_materialize_ttl.reference
rename to tests/queries/0_stateless/01070_materialize_ttl.reference
diff --git a/dbms/tests/queries/0_stateless/01070_materialize_ttl.sql b/tests/queries/0_stateless/01070_materialize_ttl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_materialize_ttl.sql
rename to tests/queries/0_stateless/01070_materialize_ttl.sql
diff --git a/dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.reference b/tests/queries/0_stateless/01070_mutations_with_dependencies.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.reference
rename to tests/queries/0_stateless/01070_mutations_with_dependencies.reference
diff --git a/dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.sql b/tests/queries/0_stateless/01070_mutations_with_dependencies.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.sql
rename to tests/queries/0_stateless/01070_mutations_with_dependencies.sql
diff --git a/dbms/tests/queries/0_stateless/01070_string_to_h3.reference b/tests/queries/0_stateless/01070_string_to_h3.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_string_to_h3.reference
rename to tests/queries/0_stateless/01070_string_to_h3.reference
diff --git a/dbms/tests/queries/0_stateless/01070_string_to_h3.sql b/tests/queries/0_stateless/01070_string_to_h3.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_string_to_h3.sql
rename to tests/queries/0_stateless/01070_string_to_h3.sql
diff --git a/dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference b/tests/queries/0_stateless/01070_template_empty_file.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference
rename to tests/queries/0_stateless/01070_template_empty_file.reference
diff --git a/dbms/tests/queries/0_stateless/01070_template_empty_file.sql b/tests/queries/0_stateless/01070_template_empty_file.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_template_empty_file.sql
rename to tests/queries/0_stateless/01070_template_empty_file.sql
diff --git a/dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference b/tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference
rename to tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference
diff --git a/dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql b/tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql
rename to tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql
diff --git a/dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference b/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference
rename to tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference
diff --git a/dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql b/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql
rename to tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql
diff --git a/dbms/tests/queries/0_stateless/01071_http_header_exception_code.reference b/tests/queries/0_stateless/01071_http_header_exception_code.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_http_header_exception_code.reference
rename to tests/queries/0_stateless/01071_http_header_exception_code.reference
diff --git a/dbms/tests/queries/0_stateless/01071_http_header_exception_code.sh b/tests/queries/0_stateless/01071_http_header_exception_code.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_http_header_exception_code.sh
rename to tests/queries/0_stateless/01071_http_header_exception_code.sh
diff --git a/dbms/tests/queries/0_stateless/01071_in_array.reference b/tests/queries/0_stateless/01071_in_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_in_array.reference
rename to tests/queries/0_stateless/01071_in_array.reference
diff --git a/dbms/tests/queries/0_stateless/01071_in_array.sql b/tests/queries/0_stateless/01071_in_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_in_array.sql
rename to tests/queries/0_stateless/01071_in_array.sql
diff --git a/dbms/tests/queries/0_stateless/01084_regexp_empty.reference b/tests/queries/0_stateless/01071_live_view_detach_dependency.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_regexp_empty.reference
rename to tests/queries/0_stateless/01071_live_view_detach_dependency.reference
diff --git a/dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.sql b/tests/queries/0_stateless/01071_live_view_detach_dependency.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.sql
rename to tests/queries/0_stateless/01071_live_view_detach_dependency.sql
diff --git a/dbms/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.reference b/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.reference
rename to tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql b/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql
rename to tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.reference b/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.reference
rename to tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference
diff --git a/dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql b/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql
rename to tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql
diff --git a/dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference b/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference
rename to tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference
diff --git a/dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql b/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql
rename to tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql
diff --git a/dbms/tests/queries/0_stateless/01072_nullable_jit.reference b/tests/queries/0_stateless/01072_nullable_jit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_nullable_jit.reference
rename to tests/queries/0_stateless/01072_nullable_jit.reference
diff --git a/dbms/tests/queries/0_stateless/01072_nullable_jit.sql b/tests/queries/0_stateless/01072_nullable_jit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_nullable_jit.sql
rename to tests/queries/0_stateless/01072_nullable_jit.sql
diff --git a/dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference b/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference
rename to tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference
diff --git a/dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql b/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
rename to tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
diff --git a/dbms/tests/queries/0_stateless/01072_select_constant_limit.reference b/tests/queries/0_stateless/01072_select_constant_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_select_constant_limit.reference
rename to tests/queries/0_stateless/01072_select_constant_limit.reference
diff --git a/dbms/tests/queries/0_stateless/01072_select_constant_limit.sql b/tests/queries/0_stateless/01072_select_constant_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_select_constant_limit.sql
rename to tests/queries/0_stateless/01072_select_constant_limit.sql
diff --git a/dbms/tests/queries/0_stateless/01073_attach_if_not_exists.reference b/tests/queries/0_stateless/01073_attach_if_not_exists.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_attach_if_not_exists.reference
rename to tests/queries/0_stateless/01073_attach_if_not_exists.reference
diff --git a/tests/queries/0_stateless/01073_attach_if_not_exists.sql b/tests/queries/0_stateless/01073_attach_if_not_exists.sql
new file mode 100644
index 00000000000..8bb52556ccc
--- /dev/null
+++ b/tests/queries/0_stateless/01073_attach_if_not_exists.sql
@@ -0,0 +1,7 @@
+CREATE TABLE aine (a Int) ENGINE = Log;
+ATTACH TABLE aine; -- { serverError 57 }
+ATTACH TABLE IF NOT EXISTS aine;
+DETACH TABLE aine;
+ATTACH TABLE IF NOT EXISTS aine;
+EXISTS TABLE aine;
+DROP TABLE aine;
diff --git a/dbms/tests/queries/0_stateless/01073_bad_alter_partition.reference b/tests/queries/0_stateless/01073_bad_alter_partition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_bad_alter_partition.reference
rename to tests/queries/0_stateless/01073_bad_alter_partition.reference
diff --git a/dbms/tests/queries/0_stateless/01073_bad_alter_partition.sql b/tests/queries/0_stateless/01073_bad_alter_partition.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_bad_alter_partition.sql
rename to tests/queries/0_stateless/01073_bad_alter_partition.sql
diff --git a/dbms/tests/queries/0_stateless/01073_blockSerializedSize.reference b/tests/queries/0_stateless/01073_blockSerializedSize.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_blockSerializedSize.reference
rename to tests/queries/0_stateless/01073_blockSerializedSize.reference
diff --git a/dbms/tests/queries/0_stateless/01073_blockSerializedSize.sql b/tests/queries/0_stateless/01073_blockSerializedSize.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_blockSerializedSize.sql
rename to tests/queries/0_stateless/01073_blockSerializedSize.sql
diff --git a/dbms/tests/queries/0_stateless/01073_crlf_end_of_line.reference b/tests/queries/0_stateless/01073_crlf_end_of_line.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_crlf_end_of_line.reference
rename to tests/queries/0_stateless/01073_crlf_end_of_line.reference
diff --git a/dbms/tests/queries/0_stateless/01073_crlf_end_of_line.sql b/tests/queries/0_stateless/01073_crlf_end_of_line.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_crlf_end_of_line.sql
rename to tests/queries/0_stateless/01073_crlf_end_of_line.sql
diff --git a/tests/queries/0_stateless/01073_grant_and_revoke.reference b/tests/queries/0_stateless/01073_grant_and_revoke.reference
new file mode 100644
index 00000000000..d7d97fa28fe
--- /dev/null
+++ b/tests/queries/0_stateless/01073_grant_and_revoke.reference
@@ -0,0 +1,11 @@
+CREATE USER test_user_01073
+A
+B
+GRANT ALTER DELETE, INSERT ON *.* TO test_user_01073
+GRANT SELECT ON db1.* TO test_user_01073
+GRANT SELECT ON db2.table TO test_user_01073
+GRANT SELECT(col1) ON db3.table TO test_user_01073
+GRANT SELECT(col1, col2) ON db4.table TO test_user_01073
+C
+GRANT ALTER DELETE ON *.* TO test_user_01073
+GRANT SELECT(col1) ON db4.table TO test_user_01073
diff --git a/dbms/tests/queries/0_stateless/01073_grant_and_revoke.sql b/tests/queries/0_stateless/01073_grant_and_revoke.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_grant_and_revoke.sql
rename to tests/queries/0_stateless/01073_grant_and_revoke.sql
diff --git a/dbms/tests/queries/0_stateless/01073_show_tables_not_like.reference b/tests/queries/0_stateless/01073_show_tables_not_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_show_tables_not_like.reference
rename to tests/queries/0_stateless/01073_show_tables_not_like.reference
diff --git a/dbms/tests/queries/0_stateless/01073_show_tables_not_like.sql b/tests/queries/0_stateless/01073_show_tables_not_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_show_tables_not_like.sql
rename to tests/queries/0_stateless/01073_show_tables_not_like.sql
diff --git a/dbms/tests/queries/0_stateless/01210_drop_view.reference b/tests/queries/0_stateless/01074_h3_range_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01210_drop_view.reference
rename to tests/queries/0_stateless/01074_h3_range_check.reference
diff --git a/dbms/tests/queries/0_stateless/01074_h3_range_check.sql b/tests/queries/0_stateless/01074_h3_range_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_h3_range_check.sql
rename to tests/queries/0_stateless/01074_h3_range_check.sql
diff --git a/dbms/tests/queries/0_stateless/01074_partial_revokes.reference b/tests/queries/0_stateless/01074_partial_revokes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_partial_revokes.reference
rename to tests/queries/0_stateless/01074_partial_revokes.reference
diff --git a/dbms/tests/queries/0_stateless/01074_partial_revokes.sql b/tests/queries/0_stateless/01074_partial_revokes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_partial_revokes.sql
rename to tests/queries/0_stateless/01074_partial_revokes.sql
diff --git a/tests/queries/0_stateless/01075_allowed_client_hosts.reference b/tests/queries/0_stateless/01075_allowed_client_hosts.reference
new file mode 100644
index 00000000000..73f54c6027a
--- /dev/null
+++ b/tests/queries/0_stateless/01075_allowed_client_hosts.reference
@@ -0,0 +1,17 @@
+CREATE USER test_user_01075
+CREATE USER test_user_01075
+CREATE USER test_user_01075 HOST NONE
+CREATE USER test_user_01075 HOST LOCAL
+CREATE USER test_user_01075 HOST IP \'192.168.23.15\'
+CREATE USER test_user_01075 HOST IP \'2001:db8:11a3:9d7:1f34:8a2e:7a0:765d\'
+CREATE USER test_user_01075 HOST LOCAL, IP \'2001:db8:11a3:9d7:1f34:8a2e:7a0:765d\'
+CREATE USER test_user_01075 HOST LOCAL
+CREATE USER test_user_01075 HOST NONE
+CREATE USER test_user_01075 HOST LIKE \'@.somesite.com\'
+CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite.com\'
+CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite.com\', \'.*.anothersite.org\'
+CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite2.com\', \'.*.anothersite2.org\'
+CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite3.com\', \'.*.anothersite3.org\'
+CREATE USER `test_user_01075_x@localhost` HOST LOCAL
+CREATE USER test_user_01075_x
+CREATE USER `test_user_01075_x@192.168.23.15` HOST LIKE \'192.168.23.15\'
diff --git a/tests/queries/0_stateless/01075_allowed_client_hosts.sql b/tests/queries/0_stateless/01075_allowed_client_hosts.sql
new file mode 100644
index 00000000000..2960a93f0f2
--- /dev/null
+++ b/tests/queries/0_stateless/01075_allowed_client_hosts.sql
@@ -0,0 +1,56 @@
+DROP USER IF EXISTS test_user_01075, test_user_01075_x, test_user_01075_x@localhost, test_user_01075_x@'192.168.23.15';
+
+CREATE USER test_user_01075;
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST ANY;
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST NONE;
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST LOCAL;
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST IP '192.168.23.15';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 ADD HOST IP '127.0.0.1';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 DROP HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 DROP HOST NAME 'localhost';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST LIKE '@.somesite.com';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite\.com';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite\.com', '.*\.anothersite\.org';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite2\.com', REGEXP '.*\.anothersite2\.org';
+SHOW CREATE USER test_user_01075;
+
+ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite3\.com' HOST REGEXP '.*\.anothersite3\.org';
+SHOW CREATE USER test_user_01075;
+
+DROP USER test_user_01075;
+
+CREATE USER test_user_01075_x@localhost;
+SHOW CREATE USER test_user_01075_x@localhost;
+
+ALTER USER test_user_01075_x@localhost RENAME TO test_user_01075_x@'%';
+SHOW CREATE USER test_user_01075_x;
+
+ALTER USER test_user_01075_x RENAME TO test_user_01075_x@'192.168.23.15';
+SHOW CREATE USER 'test_user_01075_x@192.168.23.15';
+
+DROP USER 'test_user_01075_x@192.168.23.15';
diff --git a/dbms/tests/queries/0_stateless/01075_in_arrays_enmk.reference b/tests/queries/0_stateless/01075_in_arrays_enmk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01075_in_arrays_enmk.reference
rename to tests/queries/0_stateless/01075_in_arrays_enmk.reference
diff --git a/dbms/tests/queries/0_stateless/01075_in_arrays_enmk.sql b/tests/queries/0_stateless/01075_in_arrays_enmk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01075_in_arrays_enmk.sql
rename to tests/queries/0_stateless/01075_in_arrays_enmk.sql
diff --git a/dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference b/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference
rename to tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference
diff --git a/dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql b/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql
rename to tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql
diff --git a/dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference
rename to tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference
diff --git a/dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
rename to tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
diff --git a/dbms/tests/queries/0_stateless/01076_json_each_row_array.reference b/tests/queries/0_stateless/01076_json_each_row_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_json_each_row_array.reference
rename to tests/queries/0_stateless/01076_json_each_row_array.reference
diff --git a/dbms/tests/queries/0_stateless/01076_json_each_row_array.sh b/tests/queries/0_stateless/01076_json_each_row_array.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_json_each_row_array.sh
rename to tests/queries/0_stateless/01076_json_each_row_array.sh
diff --git a/dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference
rename to tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
rename to tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference
rename to tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference
diff --git a/dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
rename to tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
diff --git a/dbms/tests/queries/0_stateless/01076_range_reader_segfault.reference b/tests/queries/0_stateless/01076_range_reader_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_range_reader_segfault.reference
rename to tests/queries/0_stateless/01076_range_reader_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/01076_range_reader_segfault.sql b/tests/queries/0_stateless/01076_range_reader_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_range_reader_segfault.sql
rename to tests/queries/0_stateless/01076_range_reader_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/01077_mutations_index_consistency.reference b/tests/queries/0_stateless/01077_mutations_index_consistency.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_mutations_index_consistency.reference
rename to tests/queries/0_stateless/01077_mutations_index_consistency.reference
diff --git a/dbms/tests/queries/0_stateless/01077_mutations_index_consistency.sh b/tests/queries/0_stateless/01077_mutations_index_consistency.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_mutations_index_consistency.sh
rename to tests/queries/0_stateless/01077_mutations_index_consistency.sh
diff --git a/dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.reference b/tests/queries/0_stateless/01077_yet_another_prewhere_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.reference
rename to tests/queries/0_stateless/01077_yet_another_prewhere_test.reference
diff --git a/dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.sql b/tests/queries/0_stateless/01077_yet_another_prewhere_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.sql
rename to tests/queries/0_stateless/01077_yet_another_prewhere_test.sql
diff --git a/dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference b/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference
rename to tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference
diff --git a/dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql b/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql
rename to tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql
diff --git a/dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference b/tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference
rename to tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference
diff --git a/dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql b/tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql
rename to tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql
diff --git a/tests/queries/0_stateless/01079_alter_default_zookeeper.reference b/tests/queries/0_stateless/01079_alter_default_zookeeper.reference
new file mode 100644
index 00000000000..62d26bc9b4b
--- /dev/null
+++ b/tests/queries/0_stateless/01079_alter_default_zookeeper.reference
@@ -0,0 +1,11 @@
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` String DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+1000
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+1000
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt16 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+10000
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10, \n `better_column` UInt8 DEFAULT \'1\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10, \n `better_column` UInt8 DEFAULT \'1\', \n `other_date` String DEFAULT 1\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
diff --git a/dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.sql b/tests/queries/0_stateless/01079_alter_default_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.sql
rename to tests/queries/0_stateless/01079_alter_default_zookeeper.sql
diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
new file mode 100644
index 00000000000..ea3fbec34a8
--- /dev/null
+++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
@@ -0,0 +1,8 @@
+Wrong column name.
+CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64, \n `value1` UInt8, \n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64, \n `value1` UInt8, \n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +syntax error at begin of string. +7 +Hello +World +Wrong index name. diff --git a/dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh rename to tests/queries/0_stateless/01079_bad_alters_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference b/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference rename to tests/queries/0_stateless/01079_bit_operations_using_bitset.reference diff --git a/dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql b/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql rename to tests/queries/0_stateless/01079_bit_operations_using_bitset.sql diff --git a/dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.reference b/tests/queries/0_stateless/01079_new_range_reader_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.reference rename to tests/queries/0_stateless/01079_new_range_reader_segfault.reference diff --git a/dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.sql b/tests/queries/0_stateless/01079_new_range_reader_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.sql rename to tests/queries/0_stateless/01079_new_range_reader_segfault.sql diff --git a/dbms/tests/queries/0_stateless/01079_order_by_pk.reference b/tests/queries/0_stateless/01079_order_by_pk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01079_order_by_pk.reference rename to tests/queries/0_stateless/01079_order_by_pk.reference diff --git a/dbms/tests/queries/0_stateless/01079_order_by_pk.sql b/tests/queries/0_stateless/01079_order_by_pk.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01079_order_by_pk.sql rename to tests/queries/0_stateless/01079_order_by_pk.sql diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference rename to tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh rename to tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh diff --git 
diff --git a/dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh
rename to tests/queries/0_stateless/01079_bad_alters_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference b/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference
rename to tests/queries/0_stateless/01079_bit_operations_using_bitset.reference
diff --git a/dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql b/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql
rename to tests/queries/0_stateless/01079_bit_operations_using_bitset.sql
diff --git a/dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.reference b/tests/queries/0_stateless/01079_new_range_reader_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.reference
rename to tests/queries/0_stateless/01079_new_range_reader_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.sql b/tests/queries/0_stateless/01079_new_range_reader_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.sql
rename to tests/queries/0_stateless/01079_new_range_reader_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/01079_order_by_pk.reference b/tests/queries/0_stateless/01079_order_by_pk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_order_by_pk.reference
rename to tests/queries/0_stateless/01079_order_by_pk.reference
diff --git a/dbms/tests/queries/0_stateless/01079_order_by_pk.sql b/tests/queries/0_stateless/01079_order_by_pk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_order_by_pk.sql
rename to tests/queries/0_stateless/01079_order_by_pk.sql
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference
rename to tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
rename to tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference
rename to tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
rename to tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece
rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference
rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference b/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference
rename to tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql b/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql
rename to tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference b/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference
rename to tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference
diff --git a/dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql b/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql
rename to tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql
diff --git a/dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference b/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference
rename to tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference
diff --git a/dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql b/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql
rename to tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql
diff --git a/dbms/tests/queries/0_stateless/01080_join_get_null.reference b/tests/queries/0_stateless/01080_join_get_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_join_get_null.reference
rename to tests/queries/0_stateless/01080_join_get_null.reference
diff --git a/dbms/tests/queries/0_stateless/01080_join_get_null.sql b/tests/queries/0_stateless/01080_join_get_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_join_get_null.sql
rename to tests/queries/0_stateless/01080_join_get_null.sql
diff --git a/dbms/tests/queries/0_stateless/01212_empty_join_and_totals.reference b/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01212_empty_join_and_totals.reference
rename to tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference
diff --git a/dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql b/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql
rename to tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql
diff --git a/dbms/tests/queries/0_stateless/01081_demangle.reference b/tests/queries/0_stateless/01081_demangle.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_demangle.reference
rename to tests/queries/0_stateless/01081_demangle.reference
diff --git a/dbms/tests/queries/0_stateless/01081_demangle.sql b/tests/queries/0_stateless/01081_demangle.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_demangle.sql
rename to tests/queries/0_stateless/01081_demangle.sql
diff --git a/dbms/tests/queries/0_stateless/01081_keywords_formatting.reference b/tests/queries/0_stateless/01081_keywords_formatting.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_keywords_formatting.reference
rename to tests/queries/0_stateless/01081_keywords_formatting.reference
diff --git a/dbms/tests/queries/0_stateless/01081_keywords_formatting.sql b/tests/queries/0_stateless/01081_keywords_formatting.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_keywords_formatting.sql
rename to tests/queries/0_stateless/01081_keywords_formatting.sql
diff --git a/dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.reference b/tests/queries/0_stateless/01082_bit_test_out_of_bound.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.reference
rename to tests/queries/0_stateless/01082_bit_test_out_of_bound.reference
diff --git a/dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.sql b/tests/queries/0_stateless/01082_bit_test_out_of_bound.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.sql
rename to tests/queries/0_stateless/01082_bit_test_out_of_bound.sql
diff --git a/dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference b/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference
rename to tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql b/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql
rename to tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference b/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference
rename to tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql b/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql
rename to tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference b/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference
rename to tests/queries/0_stateless/01083_cross_to_inner_with_like.reference
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql b/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
rename to tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
diff --git a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference
new file mode 100644
index 00000000000..2007eda0f07
--- /dev/null
+++ b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference
@@ -0,0 +1,11 @@
+CREATE TABLE test_01083.file\n(\n `n` Int8\n)\nENGINE = File(\'TSVWithNamesAndTypes\')
+CREATE TABLE test_01083.buffer\n(\n `n` Int8\n)\nENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000)
+CREATE TABLE test_01083.merge\n(\n `n` Int8\n)\nENGINE = Merge(\'test_01083\', \'distributed\')
+CREATE TABLE test_01083.merge_tf AS merge(\'test_01083\', \'.*\')
+CREATE TABLE test_01083.distributed\n(\n `n` Int8\n)\nENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\')
+CREATE TABLE test_01083.distributed_tf AS cluster(\'test_shard_localhost\', \'test_01083\', \'buffer\')
+CREATE TABLE test_01083.url\n(\n `n` UInt64, \n `col` String\n)\nENGINE = URL(\'https://localhost:8443/?query=select+n,+_table+from+test_01083.merge+format+CSV\', \'CSV\')
+CREATE TABLE test_01083.rich_syntax AS remote(\'localhos{x|y|t}\', cluster(\'test_shard_localhost\', remote(\'127.0.0.{1..4}\', \'test_01083\', \'view\')))
+CREATE VIEW test_01083.view\n(\n `n` Int64\n) AS\nSELECT toInt64(n) AS n\nFROM \n(\n SELECT toString(n) AS n\n FROM test_01083.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM test_01083.file
+CREATE DICTIONARY test_01083.dict\n(\n `n` UInt64, \n `col` String DEFAULT \'42\'\n)\nPRIMARY KEY n\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\'))\nLIFETIME(MIN 0 MAX 1)\nLAYOUT(CACHE(SIZE_IN_CELLS 1))
+16
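This new reference is for 01083_expressions_in_engine_arguments (the matching .sql is renamed just below; its body is not shown here). Judging by the normalized SHOW CREATE output above, the test creates tables whose engine arguments are computed from expressions and checks that they are folded to constants. A hypothetical statement in that spirit, not taken from the test itself:

    CREATE TABLE test_01083.file (n Int8)
    ENGINE = File(upper('tsv') || 'WithNamesAndTypes');
    -- SHOW CREATE should render the argument as the folded literal 'TSVWithNamesAndTypes'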
diff --git a/dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql
rename to tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql
diff --git a/dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.reference b/tests/queries/0_stateless/01083_functional_index_in_mergetree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.reference
rename to tests/queries/0_stateless/01083_functional_index_in_mergetree.reference
diff --git a/dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.sql b/tests/queries/0_stateless/01083_functional_index_in_mergetree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.sql
rename to tests/queries/0_stateless/01083_functional_index_in_mergetree.sql
diff --git a/dbms/tests/queries/0_stateless/01083_log_family_disk_memory.reference b/tests/queries/0_stateless/01083_log_family_disk_memory.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_family_disk_memory.reference
rename to tests/queries/0_stateless/01083_log_family_disk_memory.reference
diff --git a/dbms/tests/queries/0_stateless/01083_log_family_disk_memory.sql b/tests/queries/0_stateless/01083_log_family_disk_memory.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_family_disk_memory.sql
rename to tests/queries/0_stateless/01083_log_family_disk_memory.sql
diff --git a/dbms/tests/queries/0_stateless/01096_block_serialized_state.reference b/tests/queries/0_stateless/01083_log_first_column_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01096_block_serialized_state.reference
rename to tests/queries/0_stateless/01083_log_first_column_alias.reference
diff --git a/dbms/tests/queries/0_stateless/01083_log_first_column_alias.sql b/tests/queries/0_stateless/01083_log_first_column_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_first_column_alias.sql
rename to tests/queries/0_stateless/01083_log_first_column_alias.sql
diff --git a/dbms/tests/queries/0_stateless/01083_match_zero_byte.reference b/tests/queries/0_stateless/01083_match_zero_byte.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_match_zero_byte.reference
rename to tests/queries/0_stateless/01083_match_zero_byte.reference
diff --git a/dbms/tests/queries/0_stateless/01083_match_zero_byte.sql b/tests/queries/0_stateless/01083_match_zero_byte.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_match_zero_byte.sql
rename to tests/queries/0_stateless/01083_match_zero_byte.sql
diff --git a/dbms/tests/queries/0_stateless/01084_defaults_on_aliases.reference b/tests/queries/0_stateless/01084_defaults_on_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_defaults_on_aliases.reference
rename to tests/queries/0_stateless/01084_defaults_on_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/01084_defaults_on_aliases.sql b/tests/queries/0_stateless/01084_defaults_on_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_defaults_on_aliases.sql
rename to tests/queries/0_stateless/01084_defaults_on_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference b/tests/queries/0_stateless/01084_regexp_empty.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference
rename to tests/queries/0_stateless/01084_regexp_empty.reference
diff --git a/dbms/tests/queries/0_stateless/01084_regexp_empty.sql b/tests/queries/0_stateless/01084_regexp_empty.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_regexp_empty.sql
rename to tests/queries/0_stateless/01084_regexp_empty.sql
diff --git a/dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference b/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference
rename to tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference
diff --git a/dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql b/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql
rename to tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql
diff --git a/dbms/tests/queries/0_stateless/01085_extract_all_empty.reference b/tests/queries/0_stateless/01085_extract_all_empty.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_extract_all_empty.reference
rename to tests/queries/0_stateless/01085_extract_all_empty.reference
diff --git a/dbms/tests/queries/0_stateless/01085_extract_all_empty.sql b/tests/queries/0_stateless/01085_extract_all_empty.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_extract_all_empty.sql
rename to tests/queries/0_stateless/01085_extract_all_empty.sql
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections.reference b/tests/queries/0_stateless/01085_max_distributed_connections.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections.reference
rename to tests/queries/0_stateless/01085_max_distributed_connections.reference
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections.sh b/tests/queries/0_stateless/01085_max_distributed_connections.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections.sh
rename to tests/queries/0_stateless/01085_max_distributed_connections.sh
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.reference b/tests/queries/0_stateless/01085_max_distributed_connections_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.reference
rename to tests/queries/0_stateless/01085_max_distributed_connections_http.reference
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.sh b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
rename to tests/queries/0_stateless/01085_max_distributed_connections_http.sh
diff --git a/dbms/tests/queries/0_stateless/01085_regexp_input_format.reference b/tests/queries/0_stateless/01085_regexp_input_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_regexp_input_format.reference
rename to tests/queries/0_stateless/01085_regexp_input_format.reference
diff --git a/dbms/tests/queries/0_stateless/01085_regexp_input_format.sh b/tests/queries/0_stateless/01085_regexp_input_format.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_regexp_input_format.sh
rename to tests/queries/0_stateless/01085_regexp_input_format.sh
diff --git a/dbms/tests/queries/0_stateless/01085_simdjson_uint64.reference b/tests/queries/0_stateless/01085_simdjson_uint64.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_simdjson_uint64.reference
rename to tests/queries/0_stateless/01085_simdjson_uint64.reference
diff --git a/dbms/tests/queries/0_stateless/01085_simdjson_uint64.sql b/tests/queries/0_stateless/01085_simdjson_uint64.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_simdjson_uint64.sql
rename to tests/queries/0_stateless/01085_simdjson_uint64.sql
diff --git a/dbms/tests/queries/0_stateless/01086_modulo_or_zero.reference b/tests/queries/0_stateless/01086_modulo_or_zero.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_modulo_or_zero.reference
rename to tests/queries/0_stateless/01086_modulo_or_zero.reference
diff --git a/dbms/tests/queries/0_stateless/01086_modulo_or_zero.sql b/tests/queries/0_stateless/01086_modulo_or_zero.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_modulo_or_zero.sql
rename to tests/queries/0_stateless/01086_modulo_or_zero.sql
diff --git a/dbms/tests/queries/0_stateless/01086_odbc_roundtrip.reference b/tests/queries/0_stateless/01086_odbc_roundtrip.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_odbc_roundtrip.reference
rename to tests/queries/0_stateless/01086_odbc_roundtrip.reference
diff --git a/tests/queries/0_stateless/01086_odbc_roundtrip.sh b/tests/queries/0_stateless/01086_odbc_roundtrip.sh
new file mode 100755
index 00000000000..71ea517f4dd
--- /dev/null
+++ b/tests/queries/0_stateless/01086_odbc_roundtrip.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CUR_DIR/../shell_config.sh
+
+
+for i in $(seq 1 10); do
+    ${CLICKHOUSE_CLIENT} -q "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (ANSI)}','system','tables'))" 2>/dev/null && break
+    sleep 0.1
+done
+
+${CLICKHOUSE_CLIENT} --query "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (Unicode)}','system','tables'))"
+
+${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_01086"
+${CLICKHOUSE_CLIENT} --query "CREATE DATABASE test_01086"
+
+
+${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_01086.t (x UInt8, y Float32, z String) ENGINE = Memory"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO test_01086.t VALUES (1,0.1,'a я'),(2,0.2,'b ą'),(3,0.3,'c d')"
+
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (ANSI)}','test_01086','t') ORDER BY x"
+
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (Unicode)}','test_01086','t') ORDER BY x"
+
+${CLICKHOUSE_CLIENT} --query "DROP DATABASE test_01086;"
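The retry loop at the top of this new script presumably gives the ODBC bridge time to come up before the DSN is queried; after that, the same rows are read back through both the ANSI and Unicode DSNs. For comparison, the expected output of the two odbc() calls is just the table itself; a sketch of the equivalent direct read:

    SELECT * FROM test_01086.t ORDER BY x;
    -- should match the odbc('DSN={ClickHouse DSN (ANSI)}', 'test_01086', 't') result above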
diff --git a/dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference
rename to tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference
diff --git a/dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh
rename to tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh
diff --git a/dbms/tests/queries/0_stateless/01087_index_set_ubsan.reference b/tests/queries/0_stateless/01087_index_set_ubsan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_index_set_ubsan.reference
rename to tests/queries/0_stateless/01087_index_set_ubsan.reference
diff --git a/dbms/tests/queries/0_stateless/01087_index_set_ubsan.sql b/tests/queries/0_stateless/01087_index_set_ubsan.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_index_set_ubsan.sql
rename to tests/queries/0_stateless/01087_index_set_ubsan.sql
diff --git a/dbms/tests/queries/0_stateless/01087_storage_generate.reference b/tests/queries/0_stateless/01087_storage_generate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_storage_generate.reference
rename to tests/queries/0_stateless/01087_storage_generate.reference
diff --git a/dbms/tests/queries/0_stateless/01087_storage_generate.sql b/tests/queries/0_stateless/01087_storage_generate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_storage_generate.sql
rename to tests/queries/0_stateless/01087_storage_generate.sql
diff --git a/dbms/tests/queries/0_stateless/01087_table_function_generate.reference b/tests/queries/0_stateless/01087_table_function_generate.reference
similarity index 78%
rename from dbms/tests/queries/0_stateless/01087_table_function_generate.reference
rename to tests/queries/0_stateless/01087_table_function_generate.reference
index 68238faee48..d7cc6b0a933 100644
--- a/dbms/tests/queries/0_stateless/01087_table_function_generate.reference
+++ b/tests/queries/0_stateless/01087_table_function_generate.reference
@@ -1,14 +1,14 @@
 UInt64 Int64 UInt32 Int32 UInt16 Int16 UInt8 Int8
-2804162938822577320 -2776833771540858 3467776823 1163715250 23903 13655 137 -41
-7885388429666205427 -1363628932535403038 484159052 -308788249 56810 -22227 51 -41
-4357435422797280898 1355609803008819271 4126129912 -852056475 64304 -11401 139 86
-5935810273536892891 -804738887697332962 3109335413 -80126721 258 12889 18 88
-368066018677693974 -4927165984347126295 1015254922 2026080544 44305 21973 16 0
-8124171311239967992 -1179703908046100129 1720727300 -138469036 61343 10573 252 -32
-15657812979985370729 -5733276247123822513 3254757884 -500590428 45913 19153 105 -102
-18371568619324220532 -6793779541583578394 1686821450 -455892108 49050 -28603 248 80
-821735343441964030 3148260644406230976 256251035 -885069056 58858 -29361 58 61
-9558594037060121162 -2907172753635797124 4276198376 1947296644 26801 -13531 204 -66
+2804162938822577320 -2776833771540858 3467776823 1163715250 31161 -2916 220 -117
+7885388429666205427 -1363628932535403038 484159052 -308788249 43346 13638 143 -105
+4357435422797280898 1355609803008819271 4126129912 -852056475 34184 9166 49 33
+5935810273536892891 -804738887697332962 3109335413 -80126721 47877 -31421 186 -77
+368066018677693974 -4927165984347126295 1015254922 2026080544 46037 -29626 240 108
+8124171311239967992 -1179703908046100129 1720727300 -138469036 33028 -12819 138 16
+15657812979985370729 -5733276247123822513 3254757884 -500590428 3829 30527 3 -81
+18371568619324220532 -6793779541583578394 1686821450 -455892108 43475 2284 252 -90
+821735343441964030 3148260644406230976 256251035 -885069056 11643 11455 176 90
+9558594037060121162 -2907172753635797124 4276198376 1947296644 45922 26632 97 43
 -
 Enum8(\'hello\' = 1, \'world\' = 5)
 hello
@@ -47,16 +47,16 @@ h
 o
 -
 Date DateTime DateTime(\'Europe/Moscow\')
-2106-02-07 2050-12-17 02:46:35 2096-02-16 22:18:22
-2106-02-07 2013-10-17 23:35:26 1976-01-24 12:52:48
-2039-08-16 1974-11-17 23:22:46 1980-03-04 21:02:50
-1997-04-11 1972-09-18 23:44:08 2040-07-10 14:46:42
-2103-11-03 2044-11-23 20:57:12 1970-10-09 02:30:14
-2066-11-19 2029-12-10 03:13:55 2106-01-30 21:52:44
-2064-08-14 2016-07-14 11:33:45 2096-12-12 00:40:50
-2046-09-13 2085-07-10 18:51:14 2096-01-15 16:31:33
-2008-03-16 2047-05-16 23:28:36 2103-02-11 16:44:39
-2000-07-07 2105-07-19 19:29:06 1980-01-02 05:18:22
+2077-09-17 1970-10-09 02:30:14 2074-08-12 11:31:27
+2005-11-19 2106-01-30 21:52:44 2097-05-25 07:54:35
+2007-02-24 2096-12-12 00:40:50 1988-08-10 11:16:31
+2019-06-30 2096-01-15 16:31:33 2063-10-20 08:48:17
+2039-01-16 2103-02-11 16:44:39 2036-10-09 04:29:10
+1994-11-03 1980-01-02 05:18:22 2055-12-23 12:33:52
+2083-08-20 2079-06-11 16:29:02 2000-12-05 17:46:24
+2030-06-25 2100-03-01 18:50:22 1993-03-25 01:19:12
+2087-03-16 2034-08-25 19:46:33 2045-12-10 16:47:40
+2006-04-30 2069-09-30 16:07:48 2084-08-26 03:33:12
 -
 DateTime64(3) DateTime64(6) DateTime64(6, \'Europe/Moscow\')
 1978-06-07 23:50:57.320 2013-08-28 10:21:54.010758 1991-08-25 16:23:26.140215
@@ -225,14 +225,14 @@ RL,{Xs\\tw
 [114] -84125.1554 ('2023-06-06 06:55:06.492','bf9ab359-ef9f-ad11-7e6c-160368b1e5ea')
 [124] -114719.5228 ('2010-11-11 22:57:23.722','c1046ffb-3415-cc3a-509a-e0005856d7d7')
 -
-[] 1900051923 { -189530.5846 h -5.6279699579452485e47 ('1984-12-06','2028-08-17 06:05:01','2036-04-02 23:52:28.468','4b3d498c-dd44-95c1-5b75-921504ec5d8d') F743
-[-102,-118] 392272782 Eb -14818.0200 o -2.664492247169164e59 ('2082-12-26','2052-09-09 06:50:50','2088-04-21 05:07:08.245','aeb9c26e-0ee7-2b8e-802b-2a96319b8e60') CBF4
-[-71] 775049089 \N -158115.1178 w 4.1323844687113747e-305 ('2106-02-07','2090-07-31 16:45:26','2076-07-10 09:11:06.385','57c69bc6-dddd-0975-e932-a7b5173a1304') EB1D
-[-28,100] 3675466147 { -146685.1749 h 3.6676044396877755e142 ('2017-10-25','2100-02-28 18:07:18','2055-10-14 06:36:20.056','14949dae-dfa8-a124-af83-887348b2f609') 6D88
-[-23] 2514120753 (`u, -119659.6174 w 1.3231258347475906e34 ('2106-02-07','2074-08-10 06:25:12','1976-12-04 18:31:55.745','86a9b3c1-4593-4d56-7762-3aa1dd22cbbf') AD43
-[11,-36] 3308237300 \N 171205.1896 \N 5.634708707075817e195 ('1974-10-31','1993-12-24 09:38:45','2038-07-15 05:22:51.805','63d999b8-8cca-e237-c4a4-4dd7d0096f65') 609E
-[39] 1614362420 `4A8P 157144.0630 o -1.1843143253872814e-255 ('2106-02-07','2072-09-28 18:27:27','2073-07-10 12:19:58.146','6483f5c0-8733-364c-4fa0-9948d32e8903') A886
-[48,-120] 3848918261 1 $CURDIR/tmp_msgpac_test_all_types.msgpk;
+
+cat $CURDIR/tmp_msgpac_test_all_types.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+
+rm $CURDIR/tmp_msgpac_test_all_types.msgpk
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack";
+
+$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
+
+$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (array1 Array(Array(UInt32)), array2 Array(Array(Array(String)))) ENGINE = Memory";
+
+$CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES ([[1,2,3], [1001, 2002], [3167]], [[['one'], ['two']], [['three']],[['four'], ['five']]])";
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/tmp_msgpack_test_nested_arrays.msgpk;
+
+cat $CURDIR/tmp_msgpack_test_nested_arrays.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+rm $CURDIR/tmp_msgpack_test_nested_arrays.msgpk;
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack";
+
+$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
+
+$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (array Array(UInt8)) ENGINE = Memory";
+
+$CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 254, 255]), ([255, 254, 253, 42, 3, 2, 1, 0])";
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/tmp_msgpack_type_conversion.msgpk;
+
+$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
+$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (array Array(Int64)) ENGINE = Memory";
+
+cat $CURDIR/tmp_msgpack_type_conversion.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+rm $CURDIR/tmp_msgpack_type_conversion.msgpk;
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack";
+
+$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
+$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (date FixedString(10)) ENGINE = Memory";
+
+$CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES ('2020-01-01'), ('2020-01-02'), ('2020-01-02')";
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack";
+
+$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
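The patch jumps here from the middle of the generate reference into the middle of a MsgPack format shell test, so that script's header and first statements are not recoverable from this copy. What survives still shows the round-trip pattern: write a table out with FORMAT MsgPack, feed the bytes back through INSERT ... FORMAT MsgPack, and select to compare. A minimal SQL sketch of the same idea (table name hypothetical; the byte piping happens in the shell in the real test):

    CREATE TABLE msgpack_demo (n UInt32, s String) ENGINE = Memory;
    INSERT INTO msgpack_demo VALUES (1, 'a'), (2, 'b');
    SELECT * FROM msgpack_demo FORMAT MsgPack;
    -- piping this binary output back into `INSERT INTO msgpack_demo FORMAT MsgPack`
    -- should reproduce the rows exactly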
diff --git a/dbms/tests/queries/0_stateless/01098_sum.reference b/tests/queries/0_stateless/01098_sum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01098_sum.reference
rename to tests/queries/0_stateless/01098_sum.reference
diff --git a/dbms/tests/queries/0_stateless/01098_sum.sql b/tests/queries/0_stateless/01098_sum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01098_sum.sql
rename to tests/queries/0_stateless/01098_sum.sql
diff --git a/dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.reference b/tests/queries/0_stateless/01098_temporary_and_external_tables.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.reference
rename to tests/queries/0_stateless/01098_temporary_and_external_tables.reference
diff --git a/tests/queries/0_stateless/01098_temporary_and_external_tables.sh b/tests/queries/0_stateless/01098_temporary_and_external_tables.sh
new file mode 100755
index 00000000000..c984f363c31
--- /dev/null
+++ b/tests/queries/0_stateless/01098_temporary_and_external_tables.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+url="https://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTPS}/?session_id=test_01098"
+
+${CLICKHOUSE_CURL} -m 30 -sSk "$url" --data "CREATE TEMPORARY TABLE tmp_table AS SELECT number AS n FROM numbers(42)" > /dev/null;
+
+name_expr="'\`' || database || '\`.\`' || name || '\`'"
+full_tmp_name=`echo "SELECT $name_expr FROM system.tables WHERE database='_temporary_and_external_tables' AND create_table_query LIKE '%tmp_table%'" | ${CLICKHOUSE_CURL} -m 30 -sSgk $url -d @-`
+
+echo "SELECT * FROM $full_tmp_name" | ${CLICKHOUSE_CURL} -m 60 -sSgk $url -d @- | grep -F "Code: 291" > /dev/null && echo "OK"
+
+echo -ne '0\n1\n' | ${CLICKHOUSE_CURL} -m 30 -sSkF 'file=@-' "$url&file_format=CSV&file_types=UInt64&query=SELECT+sum((number+GLOBAL+IN+(SELECT+number+AS+n+FROM+remote('127.0.0.2',+numbers(5))+WHERE+n+GLOBAL+IN+(SELECT+*+FROM+tmp_table)+AND+n+GLOBAL+NOT+IN+(SELECT+*+FROM+file)+))+AS+res),+sum(number*res)+FROM+remote('127.0.0.2',+numbers(10))";
+
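This new test drives temporary tables over HTTP with a fixed session_id. The temporary table is backed by an entry in the internal `_temporary_and_external_tables` database, and the grep above asserts that querying it through that mangled internal name fails with Code: 291 (database access denied), even from the owning session; the final curl mixes the temporary table with an external `file` table inside GLOBAL IN subqueries sent to a remote replica. Within the session the table behaves normally; a sketch of the expected behavior:

    CREATE TEMPORARY TABLE tmp_table AS SELECT number AS n FROM numbers(42);
    SELECT count() FROM tmp_table; -- 42, but only inside the session that created it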
diff --git a/dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.reference b/tests/queries/0_stateless/01099_operators_date_and_timestamp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.reference
rename to tests/queries/0_stateless/01099_operators_date_and_timestamp.reference
diff --git a/dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.sql b/tests/queries/0_stateless/01099_operators_date_and_timestamp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.sql
rename to tests/queries/0_stateless/01099_operators_date_and_timestamp.sql
diff --git a/dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference b/tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference
rename to tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference
diff --git a/dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql b/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql
rename to tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql
diff --git a/dbms/tests/queries/0_stateless/01100_split_by_string.reference b/tests/queries/0_stateless/01100_split_by_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01100_split_by_string.reference
rename to tests/queries/0_stateless/01100_split_by_string.reference
diff --git a/dbms/tests/queries/0_stateless/01100_split_by_string.sql b/tests/queries/0_stateless/01100_split_by_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01100_split_by_string.sql
rename to tests/queries/0_stateless/01100_split_by_string.sql
diff --git a/dbms/tests/queries/0_stateless/01101_prewhere_after_alter.reference b/tests/queries/0_stateless/01101_prewhere_after_alter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01101_prewhere_after_alter.reference
rename to tests/queries/0_stateless/01101_prewhere_after_alter.reference
diff --git a/dbms/tests/queries/0_stateless/01101_prewhere_after_alter.sql b/tests/queries/0_stateless/01101_prewhere_after_alter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01101_prewhere_after_alter.sql
rename to tests/queries/0_stateless/01101_prewhere_after_alter.sql
diff --git a/dbms/tests/queries/1_stateful/00045_uniq_upto.reference b/tests/queries/0_stateless/01102_distributed_local_in_bug.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00045_uniq_upto.reference
rename to tests/queries/0_stateless/01102_distributed_local_in_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.sql b/tests/queries/0_stateless/01102_distributed_local_in_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.sql
rename to tests/queries/0_stateless/01102_distributed_local_in_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference
rename to tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference
diff --git a/dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh
rename to tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh
diff --git a/dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference b/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference
rename to tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference
diff --git a/dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql b/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql
rename to tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql
diff --git a/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.reference b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.reference
rename to tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh
rename to tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.reference b/tests/queries/0_stateless/01104_distributed_numbers_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01104_distributed_numbers_test.reference
rename to tests/queries/0_stateless/01104_distributed_numbers_test.reference
diff --git a/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql b/tests/queries/0_stateless/01104_distributed_numbers_test.sql
similarity index 85%
rename from dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql
rename to tests/queries/0_stateless/01104_distributed_numbers_test.sql
index b301c0ac00f..7f56a4e08fd 100644
--- a/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql
+++ b/tests/queries/0_stateless/01104_distributed_numbers_test.sql
@@ -1,5 +1,5 @@
 DROP TABLE IF EXISTS d_numbers;
-CREATE TABLE d_numbers (number UInt32) ENGINE = Distributed(test_cluster_two_shards_localhost, system, numbers, rand());
+CREATE TABLE d_numbers (number UInt32) ENGINE = Distributed(test_cluster_two_shards, system, numbers, rand());
 
 SET experimental_use_processors = 1;
 
diff --git a/dbms/tests/queries/0_stateless/01104_distributed_one_test.reference b/tests/queries/0_stateless/01104_distributed_one_test.reference
similarity index 89%
rename from dbms/tests/queries/0_stateless/01104_distributed_one_test.reference
rename to tests/queries/0_stateless/01104_distributed_one_test.reference
index 929dd64ae90..efbf8ed025e 100644
--- a/dbms/tests/queries/0_stateless/01104_distributed_one_test.reference
+++ b/tests/queries/0_stateless/01104_distributed_one_test.reference
@@ -4,3 +4,4 @@ distributed_0 2 1
 local_0 1
 distributed_0 1 1
 distributed_0 2 1
+remote_0 1
diff --git a/dbms/tests/queries/0_stateless/01104_distributed_one_test.sql b/tests/queries/0_stateless/01104_distributed_one_test.sql
similarity index 79%
rename from dbms/tests/queries/0_stateless/01104_distributed_one_test.sql
rename to tests/queries/0_stateless/01104_distributed_one_test.sql
index 92b4a83ebf3..0ae6a180570 100644
--- a/dbms/tests/queries/0_stateless/01104_distributed_one_test.sql
+++ b/tests/queries/0_stateless/01104_distributed_one_test.sql
@@ -1,5 +1,5 @@
 DROP TABLE IF EXISTS d_one;
-CREATE TABLE d_one (dummy UInt8) ENGINE = Distributed(test_cluster_two_shards_localhost, system, one, rand());
+CREATE TABLE d_one (dummy UInt8) ENGINE = Distributed(test_cluster_two_shards, system, one, rand());
 
 SELECT 'local_0', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 0;
 SELECT 'local_1', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 1;
@@ -16,3 +16,6 @@ SELECT 'distributed_0', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o.
 SELECT 'distributed_1', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o.dummy = 1 ORDER BY _shard_num;
 
 DROP TABLE d_one;
+
+SELECT 'remote_0', toUInt8(1) AS dummy FROM remote('127.0.0.2', system, one) AS o WHERE o.dummy = 0;
+SELECT 'remote_1', toUInt8(1) AS dummy FROM remote('127.0.0.2', system, one) AS o WHERE o.dummy = 1;
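The two hunks above move these tests from the test_cluster_two_shards_localhost cluster to test_cluster_two_shards and add remote() variants; only `remote_0 1` lands in the reference because system.one holds a single row with dummy = 0, so the remote_1 query returns nothing. The `_shard_num` virtual column used in the distributed queries can be illustrated like this (a sketch, assuming the d_one table from the hunk above still exists):

    SELECT _shard_num, count() FROM d_one GROUP BY _shard_num ORDER BY _shard_num;
    -- one row per shard of test_cluster_two_shards, each counting system.one's single row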
diff --git a/dbms/tests/queries/0_stateless/01104_fixed_string_like.reference b/tests/queries/0_stateless/01104_fixed_string_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01104_fixed_string_like.reference
rename to tests/queries/0_stateless/01104_fixed_string_like.reference
diff --git a/dbms/tests/queries/0_stateless/01104_fixed_string_like.sql b/tests/queries/0_stateless/01104_fixed_string_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01104_fixed_string_like.sql
rename to tests/queries/0_stateless/01104_fixed_string_like.sql
diff --git a/dbms/tests/queries/0_stateless/01105_string_like.reference b/tests/queries/0_stateless/01105_string_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01105_string_like.reference
rename to tests/queries/0_stateless/01105_string_like.reference
diff --git a/dbms/tests/queries/0_stateless/01105_string_like.sql b/tests/queries/0_stateless/01105_string_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01105_string_like.sql
rename to tests/queries/0_stateless/01105_string_like.sql
diff --git a/dbms/tests/queries/0_stateless/01106_const_fixed_string_like.reference b/tests/queries/0_stateless/01106_const_fixed_string_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01106_const_fixed_string_like.reference
rename to tests/queries/0_stateless/01106_const_fixed_string_like.reference
diff --git a/dbms/tests/queries/0_stateless/01106_const_fixed_string_like.sql b/tests/queries/0_stateless/01106_const_fixed_string_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01106_const_fixed_string_like.sql
rename to tests/queries/0_stateless/01106_const_fixed_string_like.sql
diff --git a/dbms/tests/queries/0_stateless/01107_join_right_table_totals.reference b/tests/queries/0_stateless/01107_join_right_table_totals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01107_join_right_table_totals.reference
rename to tests/queries/0_stateless/01107_join_right_table_totals.reference
diff --git a/dbms/tests/queries/0_stateless/01107_join_right_table_totals.sql b/tests/queries/0_stateless/01107_join_right_table_totals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01107_join_right_table_totals.sql
rename to tests/queries/0_stateless/01107_join_right_table_totals.sql
diff --git a/dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference b/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference
rename to tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference
diff --git a/dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh b/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh
rename to tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh
diff --git a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference
rename to tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh
similarity index 97%
rename from dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh
rename to tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh
index 93327a44388..aac5b637d2b 100755
--- a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh
+++ b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh
@@ -61,8 +61,8 @@ timeout $TIMEOUT bash -c restart_thread_1 2> /dev/null &
 timeout $TIMEOUT bash -c restart_thread_2 2> /dev/null &
 
 wait
+sleep 3
 
-$CLICKHOUSE_CLIENT -q "SYSTEM RESTART REPLICAS"
 $CLICKHOUSE_CLIENT -q "SELECT sum(n), count(n) FROM merge(currentDatabase(), '^replica_01108_') GROUP BY position(_table, 'tmp')"
 
diff --git a/tests/queries/0_stateless/01109_inflating_cross_join.reference b/tests/queries/0_stateless/01109_inflating_cross_join.reference
new file mode 100644
index 00000000000..825319e1c5b
--- /dev/null
+++ b/tests/queries/0_stateless/01109_inflating_cross_join.reference
@@ -0,0 +1 @@
+10000000
diff --git a/tests/queries/0_stateless/01109_inflating_cross_join.sql b/tests/queries/0_stateless/01109_inflating_cross_join.sql
new file mode 100644
index 00000000000..315f5c43c1e
--- /dev/null
+++ b/tests/queries/0_stateless/01109_inflating_cross_join.sql
@@ -0,0 +1,7 @@
+SET max_memory_usage = 16000000;
+
+SET max_joined_block_size_rows = 10000000;
+SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2; -- { serverError 241 }
+
+SET max_joined_block_size_rows = 1000;
+SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2;
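The new 01109_inflating_cross_join test pins down the memory behavior checked above: a 10000 x 1000 CROSS JOIN inflates to 10,000,000 joined rows, and with max_joined_block_size_rows set to 10000000 they are buffered as one huge block, tripping the 16 MB max_memory_usage limit (serverError 241); with 1000-row blocks the same join streams within the limit. Restated from the file itself:

    SET max_memory_usage = 16000000;
    SET max_joined_block_size_rows = 1000; -- emit the cross join in small blocks
    SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2; -- 10000000, per the new reference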
diff --git a/dbms/tests/queries/0_stateless/01213_point_in_Myanmar.reference b/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01213_point_in_Myanmar.reference
rename to tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.reference
diff --git a/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.sql b/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.sql
new file mode 100644
index 00000000000..b7ac6f1641f
--- /dev/null
+++ b/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.sql
@@ -0,0 +1,15 @@
+-- Test that the string hash map works properly with keys containing zero
+-- bytes.
+-- Keys with no central '1' are mostly duplicates. The unique keys
+-- in this group are '', '\0', ...., '\0 x 34', to a total of 35. All other
+-- keys are unique.
+select count(*) = 18 * 18 * 17 + 35
+from (
+    select key
+    from (
+        with 18 as n
+        select repeat('\0', number % n)
+            || repeat('1', intDiv(number, n) % n)
+            || repeat('\0', intDiv(number, n * n) % n) key
+        from numbers(18 * 18 * 18))
+    group by key);
diff --git a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference
new file mode 100644
index 00000000000..852abeea187
--- /dev/null
+++ b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference
@@ -0,0 +1,3 @@
+World
+CREATE DICTIONARY db_for_dict.dict_with_hashed_layout\n(\n `key1` UInt64, \n `value` String\n)\nPRIMARY KEY key1\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' DB \'db_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(HASHED)
+Hello
diff --git a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.sql b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.sql
new file mode 100644
index 00000000000..718e7f295b3
--- /dev/null
+++ b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.sql
@@ -0,0 +1,33 @@
+DROP DATABASE IF EXISTS db_for_dict;
+CREATE DATABASE db_for_dict;
+
+CREATE TABLE db_for_dict.table_for_dict
+(
+    key1 UInt64,
+    value String
+)
+ENGINE = Memory();
+
+INSERT INTO db_for_dict.table_for_dict VALUES (1, 'Hello'), (2, 'World');
+
+CREATE DICTIONARY db_for_dict.dict_with_hashed_layout
+(
+    key1 UInt64,
+    value String
+)
+PRIMARY KEY key1
+LAYOUT(HASHED)
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'db_for_dict'))
+LIFETIME(MIN 1 MAX 10);
+
+SELECT dictGet('db_for_dict.dict_with_hashed_layout', 'value', toUInt64(2));
+
+DETACH DICTIONARY db_for_dict.dict_with_hashed_layout;
+
+ATTACH DICTIONARY db_for_dict.dict_with_hashed_layout;
+
+SHOW CREATE DICTIONARY db_for_dict.dict_with_hashed_layout;
+
+SELECT dictGet('db_for_dict.dict_with_hashed_layout', 'value', toUInt64(1));
+
+DROP DATABASE IF EXISTS db_for_dict;
diff --git a/tests/queries/0_stateless/01112_check_table_with_index.reference b/tests/queries/0_stateless/01112_check_table_with_index.reference
new file mode 100644
index 00000000000..2027ea099a8
--- /dev/null
+++ b/tests/queries/0_stateless/01112_check_table_with_index.reference
@@ -0,0 +1 @@
+all_1_1_0 1
diff --git a/tests/queries/0_stateless/01112_check_table_with_index.sql b/tests/queries/0_stateless/01112_check_table_with_index.sql
new file mode 100644
index 00000000000..e9613df7d1a
--- /dev/null
+++ b/tests/queries/0_stateless/01112_check_table_with_index.sql
@@ -0,0 +1,15 @@
+SET check_query_single_value_result = 'false';
+
+DROP TABLE IF EXISTS check_table_with_indices;
+
+CREATE TABLE check_table_with_indices (
+    id UInt64,
+    data String,
+    INDEX a (id) type minmax GRANULARITY 3
+) ENGINE = MergeTree() ORDER BY id;
+
+INSERT INTO check_table_with_indices VALUES (0, 'test'), (1, 'test2');
+
+CHECK TABLE check_table_with_indices;
+
+DROP TABLE check_table_with_indices;
diff --git a/tests/queries/0_stateless/01113_local_dictionary_type_conversion.reference b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.reference
new file mode 100644
index 00000000000..ac390663059
--- /dev/null
+++ b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.reference
@@ -0,0 +1,2 @@
+First WINDOWS 1
+Second LINUX 2
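The `First WINDOWS 1` / `Second LINUX 2` reference above pairs with the 01113 .sql file that follows: the source table stores OSType as an Enum, while the dictionary declares the attribute as String, so scanning the dictionary must render the enum names rather than their numeric codes. The essential conversion, shown in isolation as a sketch:

    SELECT CAST(CAST(1 AS Enum('UNKNOWN' = 0, 'WINDOWS' = 1)) AS String);
    -- 'WINDOWS': the name, not the code, which is what the dictionary is expected to expose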
diff --git a/tests/queries/0_stateless/01113_local_dictionary_type_conversion.sql b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.sql
new file mode 100644
index 00000000000..df1f405e286
--- /dev/null
+++ b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.sql
@@ -0,0 +1,27 @@
+DROP DATABASE IF EXISTS database_for_dict;
+
+CREATE DATABASE database_for_dict;
+
+CREATE TABLE database_for_dict.table_for_dict (
+    CompanyID String,
+    OSType Enum('UNKNOWN' = 0, 'WINDOWS' = 1, 'LINUX' = 2, 'ANDROID' = 3, 'MAC' = 4),
+    SomeID Int32
+)
+ENGINE = Memory();
+
+INSERT INTO database_for_dict.table_for_dict VALUES ('First', 'WINDOWS', 1), ('Second', 'LINUX', 2);
+
+CREATE DICTIONARY database_for_dict.dict_with_conversion
+(
+    CompanyID String DEFAULT '',
+    OSType String DEFAULT '',
+    SomeID Int32 DEFAULT 0
+)
+PRIMARY KEY CompanyID
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'database_for_dict'))
+LIFETIME(MIN 1 MAX 20)
+LAYOUT(COMPLEX_KEY_HASHED());
+
+SELECT * FROM database_for_dict.dict_with_conversion ORDER BY CompanyID;
+
+DROP DATABASE IF EXISTS database_for_dict;
diff --git a/tests/queries/0_stateless/01114_alter_modify_compact_parts.reference b/tests/queries/0_stateless/01114_alter_modify_compact_parts.reference
new file mode 100644
index 00000000000..4ec38dfb475
--- /dev/null
+++ b/tests/queries/0_stateless/01114_alter_modify_compact_parts.reference
@@ -0,0 +1 @@
+999000
diff --git a/tests/queries/0_stateless/01114_alter_modify_compact_parts.sql b/tests/queries/0_stateless/01114_alter_modify_compact_parts.sql
new file mode 100644
index 00000000000..a5aa12548e7
--- /dev/null
+++ b/tests/queries/0_stateless/01114_alter_modify_compact_parts.sql
@@ -0,0 +1,12 @@
+DROP TABLE IF EXISTS mt_compact;
+
+CREATE TABLE mt_compact (d Date, id UInt32, s String)
+    ENGINE = MergeTree ORDER BY id PARTITION BY d
+    SETTINGS min_bytes_for_wide_part = 10000000, index_granularity = 128;
+
+INSERT INTO mt_compact SELECT toDate('2020-01-05'), number, toString(number) FROM numbers(1000);
+INSERT INTO mt_compact SELECT toDate('2020-01-06'), number, toString(number) FROM numbers(1000);
+ALTER TABLE mt_compact MODIFY COLUMN s UInt64;
+SELECT sum(s) from mt_compact;
+
+DROP TABLE IF EXISTS mt_compact;
diff --git a/tests/queries/0_stateless/01114_clear_column_compact_parts.reference b/tests/queries/0_stateless/01114_clear_column_compact_parts.reference
new file mode 100644
index 00000000000..e8c9b37ff3a
--- /dev/null
+++ b/tests/queries/0_stateless/01114_clear_column_compact_parts.reference
@@ -0,0 +1,4 @@
+1 0
+2 3
+1 0
+2 0
diff --git a/tests/queries/0_stateless/01114_clear_column_compact_parts.sql b/tests/queries/0_stateless/01114_clear_column_compact_parts.sql
new file mode 100644
index 00000000000..bdfed06ea9a
--- /dev/null
+++ b/tests/queries/0_stateless/01114_clear_column_compact_parts.sql
@@ -0,0 +1,11 @@
+DROP TABLE IF EXISTS clear_column;
+
+CREATE TABLE clear_column(x UInt32, y UInt32) ENGINE MergeTree ORDER BY x PARTITION by x;
+INSERT INTO clear_column VALUES (1, 1), (2, 3);
+
+ALTER TABLE clear_column CLEAR COLUMN y IN PARTITION 1;
+SELECT * FROM clear_column ORDER BY x;
+ALTER TABLE clear_column CLEAR COLUMN y IN PARTITION 2;
+SELECT * FROM clear_column ORDER BY x;
+
+DROP TABLE clear_column;
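In 01114_clear_column_compact_parts above, CLEAR COLUMN ... IN PARTITION resets the column to its default value only within the named partition, which is exactly what the four reference lines encode: after clearing partition 1, row (1,1) reads back as `1 0` while (2,3) is untouched; after clearing partition 2 as well, the second row becomes `2 0`. The key statement, annotated:

    ALTER TABLE clear_column CLEAR COLUMN y IN PARTITION 1;
    -- y reverts to its default (0 for UInt32 with no explicit DEFAULT) in partition 1 only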
diff --git a/tests/queries/0_stateless/01114_materialize_clear_index_compact_parts.reference b/tests/queries/0_stateless/01114_materialize_clear_index_compact_parts.reference
new file mode 100644
index 00000000000..083edaac248
--- /dev/null
+++ b/tests/queries/0_stateless/01114_materialize_clear_index_compact_parts.reference
@@ -0,0 +1,3 @@
+2
+2
+2
diff --git a/tests/queries/0_stateless/01114_materialize_clear_index_compact_parts.sql b/tests/queries/0_stateless/01114_materialize_clear_index_compact_parts.sql
new file mode 100644
index 00000000000..404922f36bb
--- /dev/null
+++ b/tests/queries/0_stateless/01114_materialize_clear_index_compact_parts.sql
@@ -0,0 +1,32 @@
+DROP TABLE IF EXISTS minmax_compact;
+
+CREATE TABLE minmax_compact
+(
+    u64 UInt64,
+    i64 Int64,
+    i32 Int32
+) ENGINE = MergeTree()
+PARTITION BY i32
+ORDER BY u64
+SETTINGS index_granularity = 2, min_rows_for_wide_part = 1000000;
+
+INSERT INTO minmax_compact VALUES (0, 2, 1), (1, 1, 1), (2, 1, 1), (3, 1, 1), (4, 1, 1), (5, 2, 1), (6, 1, 2), (7, 1, 2), (8, 1, 2), (9, 1, 2);
+
+SET mutations_sync = 1;
+ALTER TABLE minmax_compact ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1;
+
+ALTER TABLE minmax_compact MATERIALIZE INDEX idx IN PARTITION 1;
+set max_rows_to_read = 8;
+SELECT count() FROM minmax_compact WHERE i64 = 2;
+
+ALTER TABLE minmax_compact MATERIALIZE INDEX idx IN PARTITION 2;
+set max_rows_to_read = 6;
+SELECT count() FROM minmax_compact WHERE i64 = 2;
+
+ALTER TABLE minmax_compact CLEAR INDEX idx IN PARTITION 1;
+ALTER TABLE minmax_compact CLEAR INDEX idx IN PARTITION 2;
+
+SELECT count() FROM minmax_compact WHERE i64 = 2; -- { serverError 158 }
+
+set max_rows_to_read = 10;
+SELECT count() FROM minmax_compact WHERE i64 = 2;
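01114_materialize_clear_index_compact_parts uses max_rows_to_read as a proxy for skip-index effectiveness: once idx is materialized in a partition, the minmax index prunes granules, so the count succeeds under a tight row-read budget; after CLEAR INDEX the same query again scans everything and fails under the budget (serverError 158) until the limit is raised to a full scan's worth. The pattern, restated:

    ALTER TABLE minmax_compact MATERIALIZE INDEX idx IN PARTITION 1;
    SET max_rows_to_read = 8;
    SELECT count() FROM minmax_compact WHERE i64 = 2; -- passes only because the materialized index skips granules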
diff --git a/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference
rename to tests/queries/0_stateless/01114_mysql_database_engine_segfault.reference
diff --git a/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql
new file mode 100644
index 00000000000..af88c5af53a
--- /dev/null
+++ b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql
@@ -0,0 +1 @@
+CREATE DATABASE conv_main ENGINE = MySQL('127.0.0.1:3456', conv_main, 'metrika', 'password'); -- { serverError 501 }
diff --git a/dbms/tests/queries/1_stateful/00086_array_reduce.reference b/tests/queries/0_stateless/01115_prewhere_array_join.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00086_array_reduce.reference
rename to tests/queries/0_stateless/01115_prewhere_array_join.reference
diff --git a/tests/queries/0_stateless/01115_prewhere_array_join.sql b/tests/queries/0_stateless/01115_prewhere_array_join.sql
new file mode 100644
index 00000000000..e614bdf402b
--- /dev/null
+++ b/tests/queries/0_stateless/01115_prewhere_array_join.sql
@@ -0,0 +1,7 @@
+DROP TABLE IF EXISTS prewhere;
+
+CREATE TABLE prewhere (light UInt8, heavy String) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO prewhere SELECT 0, randomPrintableASCII(10000) FROM numbers(10000);
+SELECT arrayJoin([light]) != 0 AS cond, length(heavy) FROM prewhere WHERE light != 0 AND cond != 0;
+
+DROP TABLE prewhere;
diff --git a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference
new file mode 100644
index 00000000000..1055a67ea5b
--- /dev/null
+++ b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference
@@ -0,0 +1,3 @@
+v1 o1 ['s2','s1']
+v1 o2 ['s4']
+v2 o3 ['s5','s3']
diff --git a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql
new file mode 100644
index 00000000000..8a94b6ddd24
--- /dev/null
+++ b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql
@@ -0,0 +1,18 @@
+CREATE TEMPORARY TABLE sessions (date DateTime, visitorId String, sessionId String);
+CREATE TEMPORARY TABLE orders (date DateTime, visitorId String, orderId String);
+
+INSERT INTO sessions VALUES ('2018-01-01 00:00:00', 'v1', 's1'), ('2018-01-02 00:00:00', 'v1', 's2'), ('2018-01-03 00:00:00', 'v2', 's3'), ('2018-01-04 00:00:00', 'v1', 's4'), ('2018-01-05 00:00:00', 'v2', 's5'), ('2018-01-06 00:00:00', 'v3', 's6');
+INSERT INTO orders VALUES ('2018-01-03 00:00:00', 'v1', 'o1'), ('2018-01-05 00:00:00', 'v1', 'o2'), ('2018-01-06 00:00:00', 'v2', 'o3');
+
+SELECT
+    visitorId,
+    orderId,
+    groupUniqArray(sessionId)
+FROM sessions
+ASOF INNER JOIN orders ON (sessions.visitorId = orders.visitorId) AND (sessions.date <= orders.date)
+GROUP BY
+    visitorId,
+    orderId
+ORDER BY
+    visitorId ASC,
+    orderId ASC;
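The ASOF join in 01116_asof_join_dolbyzerr attributes each session to the nearest order of the same visitor at or after it (sessions.date <= orders.date picks the closest qualifying order); grouping by order then collects the sessions, which is how `v1 o1 ['s2','s1']` arises (s1 and s2 both precede o1, while s4 falls to o2). A stripped-down projection over the same temporary tables, as a sketch:

    SELECT sessionId, orderId
    FROM sessions
    ASOF INNER JOIN orders ON (sessions.visitorId = orders.visitorId) AND (sessions.date <= orders.date);
    -- one row per session paired with its nearest subsequent order;
    -- s6 (visitor v3) has no order at all and is dropped by the INNER join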
2; + +SELECT count() FROM +( + SELECT + arrayJoin(arrayMap(i -> (i + 1), range(2))) AS index, + number + FROM numbers_mt(100000) + GROUP BY number + ORDER BY index ASC + SETTINGS max_block_size = 100000, max_threads = 2 +); diff --git a/tests/queries/0_stateless/01117_greatest_least_case.reference b/tests/queries/0_stateless/01117_greatest_least_case.reference new file mode 100644 index 00000000000..4bbcfcf5682 --- /dev/null +++ b/tests/queries/0_stateless/01117_greatest_least_case.reference @@ -0,0 +1,2 @@ +2 +-1 diff --git a/tests/queries/0_stateless/01117_greatest_least_case.sql b/tests/queries/0_stateless/01117_greatest_least_case.sql new file mode 100644 index 00000000000..21bfd240f5a --- /dev/null +++ b/tests/queries/0_stateless/01117_greatest_least_case.sql @@ -0,0 +1,2 @@ +SELECT GREATEST(1, 2); +SELECT LEAST(1, -1); diff --git a/tests/queries/0_stateless/01118_is_constant.reference b/tests/queries/0_stateless/01118_is_constant.reference new file mode 100644 index 00000000000..aba2b912a08 --- /dev/null +++ b/tests/queries/0_stateless/01118_is_constant.reference @@ -0,0 +1,9 @@ +1 +1 +0 +1 +1 +--- +0 +0 +--- diff --git a/tests/queries/0_stateless/01118_is_constant.sql b/tests/queries/0_stateless/01118_is_constant.sql new file mode 100644 index 00000000000..5cbff986dd2 --- /dev/null +++ b/tests/queries/0_stateless/01118_is_constant.sql @@ -0,0 +1,10 @@ +select isConstant(1); +select isConstant([1]); +select isConstant(arrayJoin([1])); +SELECT isConstant((SELECT 1)); +SELECT isConstant(x) FROM (SELECT 1 x); +SELECT '---'; +SELECT isConstant(x) FROM (SELECT 1 x UNION ALL SELECT 2); +SELECT '---'; +select isConstant(); -- { serverError 42 } +select isConstant(1, 2); -- { serverError 42 } diff --git a/tests/queries/0_stateless/01120_join_constants.reference b/tests/queries/0_stateless/01120_join_constants.reference new file mode 100644 index 00000000000..a16427fbdf7 --- /dev/null +++ b/tests/queries/0_stateless/01120_join_constants.reference @@ -0,0 +1,2 @@ +1 hello 1 world world 1 +2 hello 0 world 1 diff --git a/tests/queries/0_stateless/01120_join_constants.sql b/tests/queries/0_stateless/01120_join_constants.sql new file mode 100644 index 00000000000..443559c3ea1 --- /dev/null +++ b/tests/queries/0_stateless/01120_join_constants.sql @@ -0,0 +1,17 @@ +SELECT + t1.*, + t2.*, + 'world', + isConstant('world') +FROM +( + SELECT + arrayJoin([1, 2]) AS k, + 'hello' +) AS t1 +LEFT JOIN +( + SELECT + arrayJoin([1, 3]) AS k, + 'world' +) AS t2 ON t1.k = t2.k; diff --git a/tests/queries/0_stateless/01121_remote_scalar_subquery.reference b/tests/queries/0_stateless/01121_remote_scalar_subquery.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/01121_remote_scalar_subquery.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/dbms/tests/queries/bugs/remote_scalar_subquery.sql b/tests/queries/0_stateless/01121_remote_scalar_subquery.sql similarity index 100% rename from dbms/tests/queries/bugs/remote_scalar_subquery.sql rename to tests/queries/0_stateless/01121_remote_scalar_subquery.sql diff --git a/dbms/tests/queries/1_stateful/00087_where_0.reference b/tests/queries/0_stateless/01122_totals_rollup_having_block_header.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00087_where_0.reference rename to tests/queries/0_stateless/01122_totals_rollup_having_block_header.reference diff --git a/tests/queries/0_stateless/01122_totals_rollup_having_block_header.sql 
b/tests/queries/0_stateless/01122_totals_rollup_having_block_header.sql new file mode 100644 index 00000000000..4f4f3355912 --- /dev/null +++ b/tests/queries/0_stateless/01122_totals_rollup_having_block_header.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test.rollup_having; +CREATE TABLE test.rollup_having ( + a Nullable(String), + b Nullable(String) +) ENGINE = Memory; + +INSERT INTO test.rollup_having VALUES (NULL, NULL); +INSERT INTO test.rollup_having VALUES ('a', NULL); +INSERT INTO test.rollup_having VALUES ('a', 'b'); + +SELECT a, b, count(*) FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL; -- { serverError 48 } +SELECT a, b, count(*) FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL and b IS NOT NULL; -- { serverError 48 } + +DROP TABLE test.rollup_having; diff --git a/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.reference b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.reference new file mode 100644 index 00000000000..558ba34abcd --- /dev/null +++ b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.reference @@ -0,0 +1,2 @@ +2018-08-18 07:22:16 +2018-08-16 07:22:16 diff --git a/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.sql b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.sql new file mode 100644 index 00000000000..a4f6f173402 --- /dev/null +++ b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.sql @@ -0,0 +1,2 @@ +SELECT toTimeZone(parseDateTimeBestEffort('Thu, 18 Aug 2018 07:22:16 GMT'), 'UTC'); +SELECT toTimeZone(parseDateTimeBestEffort('Tue, 16 Aug 2018 07:22:16 GMT'), 'UTC'); diff --git a/tests/queries/0_stateless/01124_view_bad_types.reference b/tests/queries/0_stateless/01124_view_bad_types.reference new file mode 100644 index 00000000000..af98bcd6397 --- /dev/null +++ b/tests/queries/0_stateless/01124_view_bad_types.reference @@ -0,0 +1,10 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 diff --git a/tests/queries/0_stateless/01124_view_bad_types.sql b/tests/queries/0_stateless/01124_view_bad_types.sql new file mode 100644 index 00000000000..81fc53930c1 --- /dev/null +++ b/tests/queries/0_stateless/01124_view_bad_types.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS test.table; +CREATE TABLE test.table (x UInt16) ENGINE = TinyLog; +INSERT INTO test.table SELECT * FROM system.numbers LIMIT 10; + +DROP TABLE IF EXISTS test.view; +CREATE VIEW test.view (x UInt64) AS SELECT * FROM test.table; + +SELECT x, any(x) FROM test.view GROUP BY x ORDER BY x; + +DROP TABLE test.view; +DROP TABLE test.table; diff --git a/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.reference b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.reference new file mode 100644 index 00000000000..1a9e5685a6a --- /dev/null +++ b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.reference @@ -0,0 +1,3 @@ +1 2019-01-05 2020-01-10 1 +date_table +somedict diff --git a/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.sql b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.sql new file mode 100644 index 00000000000..3f87235bdf4 --- /dev/null +++ b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.sql @@ -0,0 +1,34 @@ +DROP DATABASE IF EXISTS database_for_dict; + +CREATE DATABASE database_for_dict; + +use database_for_dict; + +CREATE TABLE date_table +( + id UInt32, + val String, + start Date, + end Date +) Engine = Memory(); + +INSERT INTO date_table 
VALUES(1, '1', toDate('2019-01-05'), toDate('2020-01-10')); + +CREATE DICTIONARY somedict +( + id UInt32, + val String, + start Date, + end Date +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'date_table' DB 'database_for_dict')) +LAYOUT(RANGE_HASHED()) +RANGE (MIN start MAX end) +LIFETIME(MIN 300 MAX 360); + +SELECT * from somedict; + +SHOW TABLES; + +DROP DATABASE IF EXISTS database_for_dict; diff --git a/tests/queries/0_stateless/01125_generate_random_qoega.reference b/tests/queries/0_stateless/01125_generate_random_qoega.reference new file mode 100644 index 00000000000..1cb416a722b --- /dev/null +++ b/tests/queries/0_stateless/01125_generate_random_qoega.reference @@ -0,0 +1 @@ +100 4456446406473339606 diff --git a/tests/queries/0_stateless/01125_generate_random_qoega.sql b/tests/queries/0_stateless/01125_generate_random_qoega.sql new file mode 100644 index 00000000000..7fb586ad2b5 --- /dev/null +++ b/tests/queries/0_stateless/01125_generate_random_qoega.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS mass_table_117; +CREATE TABLE mass_table_117 (`dt` Date, `site_id` Int32, `site_key` String) ENGINE = MergeTree(dt, (site_id, site_key, dt), 8192); +INSERT INTO mass_table_117 SELECT * FROM generateRandom('`dt` Date,`site_id` Int32,`site_key` String', 1, 10, 2) LIMIT 100; +SELECT count(), sum(cityHash64(*)) FROM mass_table_117; +DROP TABLE mass_table_117; diff --git a/dbms/tests/queries/1_stateful/00153_aggregate_arena_race.reference b/tests/queries/0_stateless/01126_month_partitioning_consistent_code.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00153_aggregate_arena_race.reference rename to tests/queries/0_stateless/01126_month_partitioning_consistent_code.reference diff --git a/tests/queries/0_stateless/01126_month_partitioning_consistent_code.sql b/tests/queries/0_stateless/01126_month_partitioning_consistent_code.sql new file mode 100644 index 00000000000..c9bfbbe5111 --- /dev/null +++ b/tests/queries/0_stateless/01126_month_partitioning_consistent_code.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS mt; +CREATE TABLE mt (d Date, x UInt8) ENGINE = MergeTree(d, x, 8192); +INSERT INTO mt VALUES (52392, 1), (62677, 2); +DROP TABLE mt; diff --git a/tests/queries/0_stateless/01127_month_partitioning_consistency_select.reference b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.reference new file mode 100644 index 00000000000..1b08e7f2d6f --- /dev/null +++ b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.reference @@ -0,0 +1,4 @@ +Q1 2106-02-07 Hello +Q2 0000-00-00 World +Q1 2106-02-07 Hello +Q2 0000-00-00 World diff --git a/tests/queries/0_stateless/01127_month_partitioning_consistency_select.sql b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.sql new file mode 100644 index 00000000000..59edd0c37b8 --- /dev/null +++ b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS mt; +CREATE TABLE mt (d Date, x String) ENGINE = MergeTree(d, x, 8192); +INSERT INTO mt VALUES ('2106-02-07', 'Hello'), ('1970-01-01', 'World'); + +SELECT 'Q1', * FROM mt WHERE d = '2106-02-07'; +SELECT 'Q2', * FROM mt WHERE d = '1970-01-01'; + +DETACH TABLE mt; +ATTACH TABLE mt; + +SELECT 'Q1', * FROM mt WHERE d = '2106-02-07'; +SELECT 'Q2', * FROM mt WHERE d = '1970-01-01'; + +DROP TABLE mt; diff --git a/tests/queries/0_stateless/01128_generate_random_nested.reference b/tests/queries/0_stateless/01128_generate_random_nested.reference new 
file mode 100644 index 00000000000..d9d2b251702 --- /dev/null +++ b/tests/queries/0_stateless/01128_generate_random_nested.reference @@ -0,0 +1,2 @@ +100 12366141706519416319 +109 2990700419202507835 diff --git a/tests/queries/0_stateless/01128_generate_random_nested.sql b/tests/queries/0_stateless/01128_generate_random_nested.sql new file mode 100644 index 00000000000..2af52e69893 --- /dev/null +++ b/tests/queries/0_stateless/01128_generate_random_nested.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS mass_table_312; +CREATE TABLE mass_table_312 (d Date DEFAULT '2000-01-01', x UInt64, n Nested(a String, b String)) ENGINE = MergeTree(d, x, 1); +INSERT INTO mass_table_312 SELECT * FROM generateRandom('`d` Date,`x` UInt64,`n.a` Array(String),`n.b` Array(String)', 1, 10, 2) LIMIT 100; + +SELECT count(), sum(cityHash64(*)) FROM mass_table_312; +SELECT count(), sum(cityHash64(*)) FROM mass_table_312 ARRAY JOIN n; + +DROP TABLE mass_table_312; diff --git a/tests/queries/0_stateless/01134_max_rows_to_group_by.reference b/tests/queries/0_stateless/01134_max_rows_to_group_by.reference new file mode 100644 index 00000000000..caaf3394975 --- /dev/null +++ b/tests/queries/0_stateless/01134_max_rows_to_group_by.reference @@ -0,0 +1,33 @@ +test2 0 +test2 1 +test2 2 +test2 3 +test2 4 +test2 5 +test2 6 +test2 7 +test2 8 +test2 9 +test2 10 +test3 0 +test3 1 +test3 2 +test3 3 +test3 4 +test3 5 +test3 6 +test3 7 +test3 8 +test3 9 +test3 10 +test5 0 +test5 1 +test5 2 +test5 3 +test5 4 +test5 5 +test5 6 +test5 7 +test5 8 +test5 9 +test5 10 diff --git a/tests/queries/0_stateless/01134_max_rows_to_group_by.sql b/tests/queries/0_stateless/01134_max_rows_to_group_by.sql new file mode 100644 index 00000000000..bfbc499e1c3 --- /dev/null +++ b/tests/queries/0_stateless/01134_max_rows_to_group_by.sql @@ -0,0 +1,17 @@ +SET max_block_size = 1; +SET max_rows_to_group_by = 10; +SET group_by_overflow_mode = 'throw'; + +SELECT 'test1', number FROM system.numbers GROUP BY number; -- { serverError 158 } + +SET group_by_overflow_mode = 'break'; +SELECT 'test2', number FROM system.numbers GROUP BY number ORDER BY number; + +SET max_rows_to_read = 500; +SELECT 'test3', number FROM system.numbers GROUP BY number ORDER BY number; + +SET group_by_overflow_mode = 'any'; +SELECT 'test4', number FROM numbers(1000) GROUP BY number ORDER BY number; -- { serverError 158 } + +SET max_rows_to_read = 1000; +SELECT 'test5', number FROM numbers(1000) GROUP BY number ORDER BY number; diff --git a/tests/queries/0_stateless/01196_max_parser_depth.reference b/tests/queries/0_stateless/01196_max_parser_depth.reference new file mode 100644 index 00000000000..a72c1b18aa2 --- /dev/null +++ b/tests/queries/0_stateless/01196_max_parser_depth.reference @@ -0,0 +1,3 @@ +Code: 306 +Code: 306 +Code: 306 diff --git a/tests/queries/0_stateless/01196_max_parser_depth.sh b/tests/queries/0_stateless/01196_max_parser_depth.sh new file mode 100755 index 00000000000..471c1c22ecb --- /dev/null +++ b/tests/queries/0_stateless/01196_max_parser_depth.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +{ printf "select "; for x in {1..1000}; do printf "coalesce(null, "; done; printf "1"; for x in {1..1000}; do printf ")"; done; } | $CLICKHOUSE_CLIENT 2>&1 | grep -o -F 'Code: 306' +{ printf "select "; for x in {1..1000}; do printf "coalesce(null, "; done; printf "1"; for x in {1..1000}; do printf ")"; done; } | $CLICKHOUSE_LOCAL 2>&1 | grep -o -F 'Code: 306' +{ printf "select "; for x in {1..1000}; do printf "coalesce(null, "; done; printf "1"; for x in {1..1000}; do printf ")"; done; } | $CLICKHOUSE_CURL --data-binary @- -vsS "$CLICKHOUSE_URL" 2>&1 | grep -o -F 'Code: 306' diff --git a/tests/queries/0_stateless/01197_summing_enum.reference b/tests/queries/0_stateless/01197_summing_enum.reference new file mode 100644 index 00000000000..aa6a25fb0cf --- /dev/null +++ b/tests/queries/0_stateless/01197_summing_enum.reference @@ -0,0 +1 @@ + 2000 hello diff --git a/tests/queries/0_stateless/01197_summing_enum.sql b/tests/queries/0_stateless/01197_summing_enum.sql new file mode 100644 index 00000000000..c76f43aca4e --- /dev/null +++ b/tests/queries/0_stateless/01197_summing_enum.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS summing; + +CREATE TABLE summing (k String, x UInt64, e Enum('hello' = 1, 'world' = 2)) ENGINE = SummingMergeTree ORDER BY k; +INSERT INTO summing SELECT '', 1, e FROM generateRandom('e Enum(\'hello\' = 1, \'world\' = 2)', 1) LIMIT 1000; +INSERT INTO summing SELECT '', 1, e FROM generateRandom('e Enum(\'hello\' = 1, \'world\' = 2)', 1) LIMIT 1000; + +OPTIMIZE TABLE summing; +SELECT k, x, e FROM summing; + +DROP TABLE summing; \ No newline at end of file diff --git a/tests/queries/0_stateless/01198_plus_inf.reference b/tests/queries/0_stateless/01198_plus_inf.reference new file mode 100644 index 00000000000..f726b8429b6 --- /dev/null +++ b/tests/queries/0_stateless/01198_plus_inf.reference @@ -0,0 +1,3 @@ +inf +-inf +inf diff --git a/tests/queries/0_stateless/01198_plus_inf.sql b/tests/queries/0_stateless/01198_plus_inf.sql new file mode 100644 index 00000000000..e06faa2fd21 --- /dev/null +++ b/tests/queries/0_stateless/01198_plus_inf.sql @@ -0,0 +1,3 @@ +SELECT DISTINCT toFloat64(arrayJoin(['+inf', '+Inf', '+INF', '+infinity', '+Infinity'])); +SELECT DISTINCT toFloat64(arrayJoin(['-inf', '-Inf', '-INF', '-infinity', '-Infinity'])); +SELECT DISTINCT toFloat64(arrayJoin(['inf', 'Inf', 'INF', 'infinity', 'Infinity'])); diff --git a/tests/queries/0_stateless/01199_url_functions_path_without_schema_yiurule.reference b/tests/queries/0_stateless/01199_url_functions_path_without_schema_yiurule.reference new file mode 100644 index 00000000000..9d75f9c90df --- /dev/null +++ b/tests/queries/0_stateless/01199_url_functions_path_without_schema_yiurule.reference @@ -0,0 +1,2 @@ +/a/b/c +/?query=hello world+foo+bar diff --git a/tests/queries/0_stateless/01199_url_functions_path_without_schema_yiurule.sql b/tests/queries/0_stateless/01199_url_functions_path_without_schema_yiurule.sql new file mode 100644 index 00000000000..14b0f4fd8d5 --- /dev/null +++ b/tests/queries/0_stateless/01199_url_functions_path_without_schema_yiurule.sql @@ -0,0 +1,2 @@ +SELECT path('www.example.com:443/a/b/c') AS Path; +SELECT decodeURLComponent(materialize(pathFull('www.example.com/?query=hello%20world+foo%2Bbar'))) AS Path; diff --git a/dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.reference b/tests/queries/0_stateless/01200_mutations_memory_consumption.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.reference rename to tests/queries/0_stateless/01200_mutations_memory_consumption.reference diff --git a/dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.sql b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.sql rename to tests/queries/0_stateless/01200_mutations_memory_consumption.sql diff --git a/dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference b/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference rename to tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference diff --git a/dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql b/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql rename to tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql diff --git a/tests/queries/0_stateless/01201_read_single_thread_in_order.reference b/tests/queries/0_stateless/01201_read_single_thread_in_order.reference new file mode 100644 index 00000000000..7660873d103 --- /dev/null +++ b/tests/queries/0_stateless/01201_read_single_thread_in_order.reference @@ -0,0 +1 @@ +[1] diff --git a/tests/queries/0_stateless/01201_read_single_thread_in_order.sql b/tests/queries/0_stateless/01201_read_single_thread_in_order.sql new file mode 100644 index 00000000000..bfe03192891 --- /dev/null +++ b/tests/queries/0_stateless/01201_read_single_thread_in_order.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + number UInt64 +) +ENGINE = MergeTree +ORDER BY number +SETTINGS index_granularity = 128; + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +INSERT INTO t SELECT number FROM numbers(10000000); + +SET max_threads = 1, max_block_size = 12345; +SELECT arrayDistinct(arrayPopFront(arrayDifference(groupArray(number)))) FROM t; + +DROP TABLE t; diff --git a/dbms/tests/queries/0_stateless/01202_array_auc_special.reference b/tests/queries/0_stateless/01202_array_auc_special.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01202_array_auc_special.reference rename to tests/queries/0_stateless/01202_array_auc_special.reference diff --git a/dbms/tests/queries/0_stateless/01202_array_auc_special.sql b/tests/queries/0_stateless/01202_array_auc_special.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01202_array_auc_special.sql rename to tests/queries/0_stateless/01202_array_auc_special.sql diff --git a/dbms/tests/queries/__init__.py b/tests/queries/0_stateless/01210_drop_view.reference similarity index 100% rename from dbms/tests/queries/__init__.py rename to tests/queries/0_stateless/01210_drop_view.reference diff --git a/dbms/tests/queries/0_stateless/01210_drop_view.sql b/tests/queries/0_stateless/01210_drop_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01210_drop_view.sql rename to tests/queries/0_stateless/01210_drop_view.sql diff --git a/docs/tools/mkdocs-material-theme/__init__.py b/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference similarity index 100% rename from docs/tools/mkdocs-material-theme/__init__.py rename to 
tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference diff --git a/dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql b/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql rename to tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql diff --git a/docs/tools/mkdocs-material-theme/assets/javascripts/lunr/lunr.fa.js b/tests/queries/0_stateless/01212_empty_join_and_totals.reference similarity index 100% rename from docs/tools/mkdocs-material-theme/assets/javascripts/lunr/lunr.fa.js rename to tests/queries/0_stateless/01212_empty_join_and_totals.reference diff --git a/dbms/tests/queries/0_stateless/01212_empty_join_and_totals.sql b/tests/queries/0_stateless/01212_empty_join_and_totals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01212_empty_join_and_totals.sql rename to tests/queries/0_stateless/01212_empty_join_and_totals.sql diff --git a/tests/queries/0_stateless/01213_alter_rename_column.reference b/tests/queries/0_stateless/01213_alter_rename_column.reference new file mode 100644 index 00000000000..a5e642f56ad --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_column.reference @@ -0,0 +1,7 @@ +1 +1 +date key renamed_value1 value2 value3 +2019-10-02 1 1 1 1 +7 7 +date key renamed_value1 renamed_value2 renamed_value3 +2019-10-02 7 7 7 7 diff --git a/tests/queries/0_stateless/01213_alter_rename_column.sql b/tests/queries/0_stateless/01213_alter_rename_column.sql new file mode 100644 index 00000000000..59e4191d7d5 --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_column.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS table_for_rename; + +CREATE TABLE table_for_rename +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); + +SELECT value1 FROM table_for_rename WHERE key = 1; + +ALTER TABLE table_for_rename RENAME COLUMN value1 to renamed_value1; + +SELECT renamed_value1 FROM table_for_rename WHERE key = 1; + +SELECT * FROM table_for_rename WHERE key = 1 FORMAT TSVWithNames; + +ALTER TABLE table_for_rename RENAME COLUMN value3 to value2; --{serverError 15} +ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN value3 TO r2; --{serverError 36} +ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError 10} + +ALTER TABLE table_for_rename RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3; + +SELECT renamed_value2, renamed_value3 FROM table_for_rename WHERE key = 7; + +SELECT * FROM table_for_rename WHERE key = 7 FORMAT TSVWithNames; + +ALTER TABLE table_for_rename RENAME COLUMN value100 to renamed_value100; --{serverError 10} +ALTER TABLE table_for_rename RENAME COLUMN IF EXISTS value100 to renamed_value100; + +DROP TABLE IF EXISTS table_for_rename; diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference new file mode 100644 index 00000000000..e2d6007c57f --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference @@ -0,0 +1,8 @@ +1 +CREATE TABLE 
default.table_for_rename_replicated\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String, \n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date, \n `key` UInt64, \n `renamed_value1` String, \n `value2` String, \n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +1 +date key renamed_value1 value2 value3 +2019-10-02 1 1 1 1 +date key renamed_value1 value2 value3 +2019-10-02 1 1 1 1 diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh new file mode 100755 index 00000000000..b7a4738d417 --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated" + +$CLICKHOUSE_CLIENT -n --query " +CREATE TABLE table_for_rename_replicated +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_replicated', '1') +PARTITION BY date +ORDER BY key; +" + + +$CLICKHOUSE_CLIENT --query "INSERT INTO table_for_rename_replicated SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9);" + +$CLICKHOUSE_CLIENT --query "SELECT value1 FROM table_for_rename_replicated WHERE key = 1;" + +$CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES;" + +$CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;" + +$CLICKHOUSE_CLIENT --query "ALTER TABLE table_for_rename_replicated RENAME COLUMN value1 to renamed_value1" --replication_alter_partitions_sync=0 + + +while [[ -z $($CLICKHOUSE_CLIENT --query "SELECT name FROM system.columns WHERE name = 'renamed_value1' and table = 'table_for_rename_replicated'" 2>/dev/null) ]]; do + sleep 0.5 +done + +# RENAME on fly works + +$CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;" + +$CLICKHOUSE_CLIENT --query "SELECT renamed_value1 FROM table_for_rename_replicated WHERE key = 1;" + +$CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;" + +$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES;" + +$CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA table_for_rename_replicated;" + +$CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;" + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated;" diff --git a/tests/queries/0_stateless/01213_alter_rename_compact_part.reference b/tests/queries/0_stateless/01213_alter_rename_compact_part.reference new file mode 100644 index 00000000000..a5e642f56ad --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_compact_part.reference @@ -0,0 +1,7 @@ +1 +1 +date key renamed_value1 value2 value3 +2019-10-02 1 1 1 1 +7 7 +date key renamed_value1 renamed_value2 renamed_value3 +2019-10-02 7 7 7 7 diff --git a/tests/queries/0_stateless/01213_alter_rename_compact_part.sql b/tests/queries/0_stateless/01213_alter_rename_compact_part.sql new file mode 100644 index 00000000000..07188ece519 --- /dev/null +++ 
b/tests/queries/0_stateless/01213_alter_rename_compact_part.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS table_with_compact_parts; + +CREATE TABLE table_with_compact_parts +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key +settings index_granularity = 8, +min_rows_for_wide_part = 10; + +INSERT INTO table_with_compact_parts SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); + +SELECT value1 FROM table_with_compact_parts WHERE key = 1; + +ALTER TABLE table_with_compact_parts RENAME COLUMN value1 to renamed_value1; + +SELECT renamed_value1 FROM table_with_compact_parts WHERE key = 1; + +SELECT * FROM table_with_compact_parts WHERE key = 1 FORMAT TSVWithNames; + +ALTER TABLE table_with_compact_parts RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3; + +SELECT renamed_value2, renamed_value3 FROM table_with_compact_parts WHERE key = 7; + +SELECT * FROM table_with_compact_parts WHERE key = 7 FORMAT TSVWithNames; + +DROP TABLE IF EXISTS table_with_compact_parts; diff --git a/tests/queries/0_stateless/01213_alter_rename_nested.reference b/tests/queries/0_stateless/01213_alter_rename_nested.reference new file mode 100644 index 00000000000..2641df46aeb --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_nested.reference @@ -0,0 +1,10 @@ +[8,9,10] +['a','b','c'] +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.x` Array(UInt32), \n `n.y` Array(String), \n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +7 [8,9,10] +7 ['a','b','c'] +[['7']] +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `renamed_value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +date key n.renamed_x n.renamed_y renamed_value1 +2019-10-01 7 [8,9,10] ['a','b','c'] [['7']] diff --git a/tests/queries/0_stateless/01213_alter_rename_nested.sql b/tests/queries/0_stateless/01213_alter_rename_nested.sql new file mode 100644 index 00000000000..1b00cd19e21 --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_nested.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS table_for_rename_nested; +CREATE TABLE table_for_rename_nested +( + date Date, + key UInt64, + n Nested(x UInt32, y String), + value1 Array(Array(LowCardinality(String))) -- column with several files +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], [[toString(number)]] FROM numbers(10); + +SELECT n.x FROM table_for_rename_nested WHERE key = 7; +SELECT n.y FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x; +ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y; + +SHOW CREATE TABLE 
table_for_rename_nested; + +SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7; +SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36} + +-- Currently not implemented +ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48} + +ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO renamed_value1; + +SELECT renamed_value1 FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +SELECT * FROM table_for_rename_nested WHERE key = 7 FORMAT TSVWithNames; + +DROP TABLE IF EXISTS table_for_rename_nested; + diff --git a/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference new file mode 100644 index 00000000000..9972842f982 --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference @@ -0,0 +1 @@ +1 1 diff --git a/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql new file mode 100644 index 00000000000..e03ef67212e --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql @@ -0,0 +1,55 @@ +DROP TABLE IF EXISTS table_for_rename_pk; + +CREATE TABLE table_for_rename_pk +( + date Date, + key1 UInt64, + key2 UInt64, + key3 UInt64, + value1 String, + value2 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1') +PARTITION BY date +ORDER BY (key1, pow(key2, 2), key3); + +INSERT INTO table_for_rename_pk SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9); + +SELECT key1, value1 FROM table_for_rename_pk WHERE key1 = 1 AND key2 = 1 AND key3 = 1; + +ALTER TABLE table_for_rename_pk RENAME COLUMN key1 TO renamed_key1; --{serverError 44} + +ALTER TABLE table_for_rename_pk RENAME COLUMN key3 TO renamed_key3; --{serverError 44} + +ALTER TABLE table_for_rename_pk RENAME COLUMN key2 TO renamed_key2; --{serverError 44} + +DROP TABLE IF EXISTS table_for_rename_pk; + +DROP TABLE IF EXISTS table_for_rename_with_primary_key; + +CREATE TABLE table_for_rename_with_primary_key +( + date Date, + key1 UInt64, + key2 UInt64, + key3 UInt64, + value1 String, + value2 String, + INDEX idx (value1) TYPE set(1) GRANULARITY 1 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1') +PARTITION BY date +ORDER BY (key1, key2, key3) +PRIMARY KEY (key1, key2); + +INSERT INTO table_for_rename_with_primary_key SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9); + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key1 TO renamed_key1; --{serverError 44} + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key2 TO renamed_key2; --{serverError 44} + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key3 TO renamed_key3; --{serverError 44} + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN value1 TO renamed_value1; --{serverError 44} + +DROP TABLE IF EXISTS table_for_rename_with_primary_key; diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference new file mode 100644 index 00000000000..251e664b522 --- /dev/null +++ 
b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference @@ -0,0 +1,17 @@ +date key value1 value2 +2019-10-02 1 1 Hello 1 +CREATE TABLE default.table_rename_with_default\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String DEFAULT concat(\'Hello \', value1), \n `value3` String ALIAS concat(\'Word \', value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +date key renamed_value1 value2 +2019-10-02 1 1 Hello 1 +CREATE TABLE default.table_rename_with_default\n(\n `date` Date, \n `key` UInt64, \n `renamed_value1` String, \n `value2` String DEFAULT concat(\'Hello \', renamed_value1), \n `value3` String ALIAS concat(\'Word \', renamed_value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +Hello 1 +Word 1 +date1 date2 value1 value2 +2019-10-02 2018-10-02 1 1 +CREATE TABLE default.table_rename_with_ttl\n(\n `date1` Date, \n `date2` Date, \n `value1` String, \n `value2` String TTL date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +renamed_date1 date2 value1 value2 +2019-10-02 2018-10-02 1 1 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date, \n `date2` Date, \n `value1` String, \n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +renamed_date1 renamed_date2 value1 value2 +2019-10-02 2018-10-02 1 1 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date, \n `renamed_date2` Date, \n `value1` String, \n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL renamed_date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql new file mode 100644 index 00000000000..fa4c02aa58c --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS table_rename_with_default; + +CREATE TABLE table_rename_with_default +( + date Date, + key UInt64, + value1 String, + value2 String DEFAULT concat('Hello ', value1), + value3 String ALIAS concat('Word ', value1) +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_rename_with_default (date, key, value1) SELECT toDate('2019-10-01') + number % 3, number, toString(number) from numbers(9); + +SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_default; + +ALTER TABLE table_rename_with_default RENAME COLUMN value1 TO renamed_value1; + +SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_default; + +SELECT value2 FROM table_rename_with_default WHERE key = 1; +SELECT value3 FROM table_rename_with_default WHERE key = 1; + +DROP TABLE IF EXISTS table_rename_with_default; + +DROP TABLE IF EXISTS table_rename_with_ttl; + +CREATE TABLE table_rename_with_ttl +( + date1 Date, + date2 Date, + value1 String, + value2 String TTL date1 + INTERVAL 10000 MONTH +) +ENGINE = 
ReplicatedMergeTree('/clickhouse/test/table_rename_with_ttl', '1') +ORDER BY tuple() +TTL date2 + INTERVAL 10000 MONTH; + +INSERT INTO table_rename_with_ttl SELECT toDate('2019-10-01') + number % 3, toDate('2018-10-01') + number % 3, toString(number), toString(number) from numbers(9); + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +ALTER TABLE table_rename_with_ttl RENAME COLUMN date1 TO renamed_date1; + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +ALTER TABLE table_rename_with_ttl RENAME COLUMN date2 TO renamed_date2; + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +DROP TABLE IF EXISTS table_rename_with_ttl; diff --git a/tests/queries/0_stateless/01213_alter_table_rename_nested.reference b/tests/queries/0_stateless/01213_alter_table_rename_nested.reference new file mode 100644 index 00000000000..8e6d93dbcce --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_table_rename_nested.reference @@ -0,0 +1,6 @@ +[8,9,10] +['a','b','c'] +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.x` Array(UInt32), \n `n.y` Array(String), \n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +7 [8,9,10] +7 ['a','b','c'] diff --git a/tests/queries/0_stateless/01213_alter_table_rename_nested.sql b/tests/queries/0_stateless/01213_alter_table_rename_nested.sql new file mode 100644 index 00000000000..e08e3c0c3b1 --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_table_rename_nested.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS table_for_rename_nested; +CREATE TABLE table_for_rename_nested +( + date Date, + key UInt64, + n Nested(x UInt32, y String), + value1 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], toString(number) FROM numbers(10); + +SELECT n.x FROM table_for_rename_nested WHERE key = 7; +SELECT n.y FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x; +ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y; + +SHOW CREATE TABLE table_for_rename_nested; + +SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7; +SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36} + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO q.renamed_x; --{serverError 36} + +ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO q.renamed_x; --{serverError 36} + +-- Currently not implemented +ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48} + +DROP TABLE IF EXISTS table_for_rename_nested; + diff --git a/dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference b/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference rename to tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference diff --git a/dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql b/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql rename to tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql diff --git a/dbms/tests/queries/1_stateful/00053_replicate_segfault.reference b/tests/queries/0_stateless/01213_point_in_Myanmar.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00053_replicate_segfault.reference rename to tests/queries/0_stateless/01213_point_in_Myanmar.reference diff --git a/dbms/tests/queries/0_stateless/01213_point_in_Myanmar.sql b/tests/queries/0_stateless/01213_point_in_Myanmar.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_point_in_Myanmar.sql rename to tests/queries/0_stateless/01213_point_in_Myanmar.sql diff --git a/dbms/tests/queries/0_stateless/01214_point_in_Mecca.reference b/tests/queries/0_stateless/01214_point_in_Mecca.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01214_point_in_Mecca.reference rename to tests/queries/0_stateless/01214_point_in_Mecca.reference diff --git a/dbms/tests/queries/0_stateless/01214_point_in_Mecca.sql b/tests/queries/0_stateless/01214_point_in_Mecca.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01214_point_in_Mecca.sql rename to tests/queries/0_stateless/01214_point_in_Mecca.sql diff --git a/tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference b/tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql b/tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql rename to tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql diff --git a/dbms/tests/queries/0_stateless/01221_system_settings.reference b/tests/queries/0_stateless/01221_system_settings.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01221_system_settings.reference rename to tests/queries/0_stateless/01221_system_settings.reference diff --git a/dbms/tests/queries/0_stateless/01221_system_settings.sql b/tests/queries/0_stateless/01221_system_settings.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01221_system_settings.sql rename to tests/queries/0_stateless/01221_system_settings.sql diff --git a/dbms/tests/queries/0_stateless/01223_dist_on_dist.reference b/tests/queries/0_stateless/01223_dist_on_dist.reference similarity index 75% rename from dbms/tests/queries/0_stateless/01223_dist_on_dist.reference rename to tests/queries/0_stateless/01223_dist_on_dist.reference index 7ca613f70fc..aca2f070db4 100644 --- a/dbms/tests/queries/0_stateless/01223_dist_on_dist.reference +++ b/tests/queries/0_stateless/01223_dist_on_dist.reference @@ -82,3 +82,22 @@ GROUP BY ORDER BY distributed_aggregation_memory_efficient/group_by_two_level_th 0 1 2 +COUNT +132 +distributed_group_by_no_merge +33 +33 +33 +33 +only one shard in nested +66 +distributed_group_by_no_merge +33 +33 +merge() +66 
+distributed_group_by_no_merge +33 +33 +GLOBAL IN +1 diff --git a/tests/queries/0_stateless/01223_dist_on_dist.sql b/tests/queries/0_stateless/01223_dist_on_dist.sql new file mode 100644 index 00000000000..65a240fd48b --- /dev/null +++ b/tests/queries/0_stateless/01223_dist_on_dist.sql @@ -0,0 +1,92 @@ +drop table if exists merge_dist_01223; +drop table if exists dist_01223; +drop table if exists dist_layer_01223; +drop table if exists data_01223; + +create table data_01223 (key Int) Engine=Memory(); +create table dist_layer_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01223); +create table dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); + +select * from dist_01223; + +insert into data_01223 select * from numbers(3); + +select 'DISTINCT ORDER BY'; +select distinct * from dist_01223 order by key; +select 'GROUP BY ORDER BY'; +select * from dist_01223 group by key order by key; +select 'GROUP BY ORDER BY LIMIT'; +select * from dist_01223 group by key order by key limit 1; +select 'HAVING'; +select * from dist_01223 having key = 1; +select 'GROUP BY HAVING'; +select * from dist_01223 group by key having key = 1; +select 'ORDER BY'; +select * from dist_01223 order by key; +select 'ORDER BY LIMIT'; +select * from dist_01223 order by key limit 1; +select 'ORDER BY LIMIT BY'; +select * from dist_01223 order by key limit 1 by key; +select 'cluster() ORDER BY'; +select * from cluster(test_cluster_two_shards, currentDatabase(), dist_01223) order by key; +select 'cluster() GROUP BY ORDER BY'; +select * from cluster(test_cluster_two_shards, currentDatabase(), dist_01223) group by key order by key; + +select 'LEFT JOIN'; +select toInt32(number) key, b.key from numbers(2) a left join (select distinct * from dist_01223) b using key order by b.key; +select 'RIGHT JOIN'; +select toInt32(number) key, b.key from numbers(2) a right join (select distinct * from dist_01223) b using key order by b.key; + +-- more data for GROUP BY +insert into data_01223 select number%3 from numbers(30); + +-- group_by_two_level_threshold +select 'GROUP BY ORDER BY group_by_two_level_threshold'; +select * from dist_01223 group by key order by key settings +group_by_two_level_threshold=1, +group_by_two_level_threshold_bytes=1; + +-- distributed_aggregation_memory_efficient +select 'GROUP BY ORDER BY distributed_aggregation_memory_efficient'; +select * from dist_01223 group by key order by key settings +distributed_aggregation_memory_efficient=1; + +-- distributed_aggregation_memory_efficient/group_by_two_level_threshold +select 'GROUP BY ORDER BY distributed_aggregation_memory_efficient/group_by_two_level_threshold'; +select * from dist_01223 group by key order by key settings +group_by_two_level_threshold=1, +group_by_two_level_threshold_bytes=1, +distributed_aggregation_memory_efficient=1; + +select 'COUNT'; +select count() from dist_01223; +select 'distributed_group_by_no_merge'; +select count() from dist_01223 settings distributed_group_by_no_merge=1; + +drop table dist_01223; +drop table dist_layer_01223; + +-- only one shard in nested +select 'only one shard in nested'; +create table dist_layer_01223 as data_01223 Engine=Distributed(test_shard_localhost, currentDatabase(), data_01223); +create table dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); +select count() from dist_01223; + +select 'distributed_group_by_no_merge'; +select count() from dist_01223 settings 
distributed_group_by_no_merge=1; + +-- wrap with merge() +select 'merge()'; +create table merge_dist_01223 as dist_01223 engine=Merge(currentDatabase(), 'dist_01223'); +select count() from merge_dist_01223; +select 'distributed_group_by_no_merge'; +select count() from merge_dist_01223 settings distributed_group_by_no_merge=1; + +-- global in +select 'GLOBAL IN'; +select distinct * from dist_01223 where key global in (select toInt32(1)); + +drop table merge_dist_01223; +drop table dist_01223; +drop table dist_layer_01223; +drop table data_01223; diff --git a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference new file mode 100644 index 00000000000..5321624de02 --- /dev/null +++ b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference @@ -0,0 +1,19 @@ +NOT_LOADED +NOT_LOADED +CREATE DICTIONARY dict_db_01224.dict +( + `key` UInt64 DEFAULT 0, + `val` UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01224')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()) +NOT_LOADED +CREATE TABLE dict_db_01224_dictionary.`dict_db_01224.dict` +( + `key` UInt64, + `val` UInt64 +) +ENGINE = Dictionary(`dict_db_01224.dict`) +LOADED diff --git a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql new file mode 100644 index 00000000000..a6eed6f072c --- /dev/null +++ b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql @@ -0,0 +1,32 @@ +DROP DATABASE IF EXISTS dict_db_01224; +DROP DATABASE IF EXISTS dict_db_01224_dictionary; +CREATE DATABASE dict_db_01224; + +CREATE TABLE dict_db_01224.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict_db_01224.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01224')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SELECT * FROM system.tables FORMAT Null; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SHOW CREATE TABLE dict_db_01224.dict FORMAT TSVRaw; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +CREATE DATABASE dict_db_01224_dictionary Engine=Dictionary; +SHOW CREATE TABLE dict_db_01224_dictionary.`dict_db_01224.dict` FORMAT TSVRaw; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +DROP DICTIONARY dict_db_01224.dict; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +DROP DATABASE dict_db_01224; +DROP DATABASE dict_db_01224_dictionary; diff --git a/tests/queries/0_stateless/01225_drop_dictionary_as_table.reference b/tests/queries/0_stateless/01225_drop_dictionary_as_table.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01225_drop_dictionary_as_table.sql b/tests/queries/0_stateless/01225_drop_dictionary_as_table.sql new file mode 100644 index 00000000000..045775aec2b --- /dev/null +++ b/tests/queries/0_stateless/01225_drop_dictionary_as_table.sql @@ -0,0 +1,20 @@ +DROP DATABASE IF EXISTS dict_db_01225; +CREATE DATABASE dict_db_01225; + +CREATE TABLE dict_db_01225.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY 
dict_db_01225.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01225')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SYSTEM RELOAD DICTIONARY dict_db_01225.dict; +DROP TABLE dict_db_01225.dict; -- { serverError 60; } +-- Regression: +-- Code: 1000. DB::Exception: Received from localhost:9000. DB::Exception: File not found: ./metadata/dict_db_01225/dict.sql. +DROP DICTIONARY dict_db_01225.dict; +DROP DATABASE dict_db_01225; diff --git a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference new file mode 100644 index 00000000000..14ddc093143 --- /dev/null +++ b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference @@ -0,0 +1,6 @@ +CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict` +( + `key` UInt64, + `val` UInt64 +) +ENGINE = Dictionary(`dict_db_01225.dict`) diff --git a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql new file mode 100644 index 00000000000..7550d5292d0 --- /dev/null +++ b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql @@ -0,0 +1,21 @@ +DROP DATABASE IF EXISTS dict_db_01225; +DROP DATABASE IF EXISTS dict_db_01225_dictionary; +CREATE DATABASE dict_db_01225; +CREATE DATABASE dict_db_01225_dictionary Engine=Dictionary; + +CREATE TABLE dict_db_01225.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict_db_01225.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01225')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict` FORMAT TSVRaw; +SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.no_such_dict`; -- { serverError 36; } + +DROP DATABASE dict_db_01225; +DROP DATABASE dict_db_01225_dictionary; diff --git a/tests/queries/0_stateless/01226_dist_on_dist_global_in.reference b/tests/queries/0_stateless/01226_dist_on_dist_global_in.reference new file mode 100644 index 00000000000..3d8d7fb770d --- /dev/null +++ b/tests/queries/0_stateless/01226_dist_on_dist_global_in.reference @@ -0,0 +1,6 @@ +GLOBAL IN +0 +0 +0 +0 +GLOBAL NOT IN diff --git a/tests/queries/0_stateless/01226_dist_on_dist_global_in.sql b/tests/queries/0_stateless/01226_dist_on_dist_global_in.sql new file mode 100644 index 00000000000..588ea9c1048 --- /dev/null +++ b/tests/queries/0_stateless/01226_dist_on_dist_global_in.sql @@ -0,0 +1,10 @@ +SELECT 'GLOBAL IN'; +select * from remote('localhost', system.one) where dummy global in (0); +select * from remote('localhost', system.one) where toUInt64(dummy) global in numbers(1); +select * from remote('localhost', system.one) where dummy global in system.one; +select * from remote('localhost', system.one) where dummy global in (select 0); +SELECT 'GLOBAL NOT IN'; +select * from remote('localhost', system.one) where dummy global not in (0); +select * from remote('localhost', system.one) where toUInt64(dummy) global not in numbers(1); +select * from remote('localhost', system.one) where dummy global not in system.one; +select * from remote('localhost', system.one) where dummy global not in (select 0); diff --git a/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.reference 
b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.reference new file mode 100644 index 00000000000..083edaac248 --- /dev/null +++ b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.reference @@ -0,0 +1,3 @@ +2 +2 +2 diff --git a/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.sql b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.sql new file mode 100644 index 00000000000..a063e417e3a --- /dev/null +++ b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.sql @@ -0,0 +1,6 @@ +-- Test from the issue https://github.com/ClickHouse/ClickHouse/issues/2610 +drop table if exists data_01227; +create table data_01227 (key Int) Engine=MergeTree() order by key; +insert into data_01227 select * from numbers(10); +select * from remote('127.1', currentDatabase(), data_01227) prewhere key global in (select key from data_01227 prewhere key = 2); +select * from cluster('test_cluster_two_shards', currentDatabase(), data_01227) prewhere key global in (select key from data_01227 prewhere key = 2); diff --git a/dbms/tests/queries/0_stateless/01230_join_get_truncate.reference b/tests/queries/0_stateless/01230_join_get_truncate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01230_join_get_truncate.reference rename to tests/queries/0_stateless/01230_join_get_truncate.reference diff --git a/dbms/tests/queries/0_stateless/01230_join_get_truncate.sql b/tests/queries/0_stateless/01230_join_get_truncate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01230_join_get_truncate.sql rename to tests/queries/0_stateless/01230_join_get_truncate.sql diff --git a/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.reference b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.reference new file mode 100644 index 00000000000..ac13b3f193e --- /dev/null +++ b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.reference @@ -0,0 +1,10 @@ +0 2 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 diff --git a/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql new file mode 100644 index 00000000000..31f09b35bf3 --- /dev/null +++ b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql @@ -0,0 +1,25 @@ +set send_logs_level = 'error'; + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists ma_dist; + +create table shard_0.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x; +create table shard_1.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x; + +insert into shard_0.shard_01231_distributed_aggregation_memory_efficient select * from numbers(1); +insert into shard_1.shard_01231_distributed_aggregation_memory_efficient select * from numbers(10); + +create table ma_dist (x UInt64) ENGINE = Distributed(test_cluster_two_shards_different_databases, '', 'shard_01231_distributed_aggregation_memory_efficient'); + +set distributed_aggregation_memory_efficient = 1; +set group_by_two_level_threshold = 2; +set max_bytes_before_external_group_by = 16; + +select x, count() from 
ma_dist group by x order by x;
+
+drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient;
+drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient;
diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.reference b/tests/queries/0_stateless/01231_log_queries_min_type.reference
new file mode 100644
index 00000000000..a358d022033
--- /dev/null
+++ b/tests/queries/0_stateless/01231_log_queries_min_type.reference
@@ -0,0 +1,5 @@
+01231_log_queries_min_type/QUERY_START
+2
+01231_log_queries_min_type/EXCEPTION_BEFORE_START
+2
+3
diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.sql b/tests/queries/0_stateless/01231_log_queries_min_type.sql
new file mode 100644
index 00000000000..f2229c94a8a
--- /dev/null
+++ b/tests/queries/0_stateless/01231_log_queries_min_type.sql
@@ -0,0 +1,15 @@
+set log_queries=1;
+
+select '01231_log_queries_min_type/QUERY_START';
+system flush logs;
+select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute;
+
+set log_queries_min_type='EXCEPTION_BEFORE_START';
+select '01231_log_queries_min_type/EXCEPTION_BEFORE_START';
+system flush logs;
+select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute;
+
+set log_queries_min_type='EXCEPTION_WHILE_PROCESSING';
+select '01231_log_queries_min_type/', max(number) from system.numbers limit 1e6 settings max_rows_to_read='100K'; -- { serverError 158; }
+system flush logs;
+select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute;
diff --git a/tests/queries/0_stateless/01231_operator_null_in.reference b/tests/queries/0_stateless/01231_operator_null_in.reference
new file mode 100644
index 00000000000..b76f42e9af4
--- /dev/null
+++ b/tests/queries/0_stateless/01231_operator_null_in.reference
@@ -0,0 +1,80 @@
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
diff --git a/tests/queries/0_stateless/01231_operator_null_in.sql b/tests/queries/0_stateless/01231_operator_null_in.sql
new file mode 100644
index 00000000000..ddebaf23900
--- /dev/null
+++ b/tests/queries/0_stateless/01231_operator_null_in.sql
@@ -0,0 +1,152 @@
+DROP TABLE IF EXISTS null_in;
+CREATE TABLE null_in (dt DateTime, idx int, i Nullable(int), s Nullable(String)) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx;
+
+INSERT INTO null_in VALUES (1, 1, 1, '1') (2, 2, NULL, NULL) (3, 3, 3, '3') (4, 4, NULL, NULL) (5, 5, 5, '5');
+
+SELECT count() == 2 FROM null_in WHERE i in (1, 3, NULL);
+SELECT count() == 2 FROM null_in WHERE i in range(4);
+SELECT count() == 2 FROM null_in WHERE s in ('1', '3', NULL);
+SELECT count() == 2 FROM null_in WHERE i global in (1, 3, NULL);
+SELECT count() == 2 FROM null_in WHERE i global in range(4);
+SELECT count() == 2 FROM null_in WHERE s global in ('1', '3', NULL);
+
+SELECT count() == 1 FROM null_in WHERE i not in (1, 3, NULL);
+SELECT count() == 1 FROM null_in WHERE i not in range(4);
+SELECT count() == 1 FROM null_in WHERE s not in ('1', '3', NULL);
+SELECT count() == 1
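FROM null_in WHERE i global not in (1, 3, NULL);
+SELECT count() == 1 FROM null_in WHERE i global not in range(4);
+SELECT count() == 1 FROM null_in WHERE s global not in ('1', '3', NULL);
+
+-- Note: of the 5 rows above, IN (1, 3, NULL) matches 2 and NOT IN matches only 1 under
+-- the default transform_null_in = 0, because the two rows with NULL values satisfy
+-- neither predicate; the SET below enables semantics where NULL also matches NULL.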
+SET transform_null_in = 1;
+
+SELECT count() == 4 FROM null_in WHERE i in (1, 3, NULL);
+SELECT count() == 2 FROM null_in WHERE i in range(4);
+SELECT count() == 4 FROM null_in WHERE s in ('1', '3', NULL);
+SELECT count() == 4 FROM null_in WHERE i global in (1, 3, NULL);
+SELECT count() == 2 FROM null_in WHERE i global in range(4);
+SELECT count() == 4 FROM null_in WHERE s global in ('1', '3', NULL);
+
+SELECT count() == 1 FROM null_in WHERE i not in (1, 3, NULL);
+SELECT count() == 3 FROM null_in WHERE i not in range(4);
+SELECT count() == 1 FROM null_in WHERE s not in ('1', '3', NULL);
+SELECT count() == 1 FROM null_in WHERE i global not in (1, 3, NULL);
+SELECT count() == 3 FROM null_in WHERE i global not in range(4);
+SELECT count() == 1 FROM null_in WHERE s global not in ('1', '3', NULL);
+
+SELECT count() == 3 FROM null_in WHERE i not in (1, 3);
+SELECT count() == 3 FROM null_in WHERE i not in range(4);
+SELECT count() == 3 FROM null_in WHERE s not in ('1', '3');
+SELECT count() == 3 FROM null_in WHERE i global not in (1, 3);
+SELECT count() == 3 FROM null_in WHERE i global not in range(4);
+SELECT count() == 3 FROM null_in WHERE s global not in ('1', '3');
+
+DROP TABLE IF EXISTS test_set;
+CREATE TABLE test_set (i Nullable(int)) ENGINE = Set();
+INSERT INTO test_set VALUES (1), (NULL);
+
+SET transform_null_in = 0;
+
+SELECT count() == 1 FROM null_in WHERE i in test_set;
+SELECT count() == 2 FROM null_in WHERE i not in test_set;
+SELECT count() == 1 FROM null_in WHERE i global in test_set;
+SELECT count() == 2 FROM null_in WHERE i global not in test_set;
+
+SET transform_null_in = 1;
+
+SELECT count() == 3 FROM null_in WHERE i in test_set;
+SELECT count() == 2 FROM null_in WHERE i not in test_set;
+SELECT count() == 3 FROM null_in WHERE i global in test_set;
+SELECT count() == 2 FROM null_in WHERE i global not in test_set;
+
+-- Create with transform_null_in
+CREATE TABLE test_set2 (i Nullable(int)) ENGINE = Set();
+INSERT INTO test_set2 VALUES (1), (NULL);
+
+SET transform_null_in = 0;
+
+SELECT count() == 1 FROM null_in WHERE i in test_set2;
+SELECT count() == 2 FROM null_in WHERE i not in test_set2;
+SELECT count() == 1 FROM null_in WHERE i global in test_set2;
+SELECT count() == 2 FROM null_in WHERE i global not in test_set2;
+
+SET transform_null_in = 1;
+
+SELECT count() == 3 FROM null_in WHERE i in test_set2;
+SELECT count() == 2 FROM null_in WHERE i not in test_set2;
+SELECT count() == 3 FROM null_in WHERE i global in test_set2;
+SELECT count() == 2 FROM null_in WHERE i global not in test_set2;
+
+DROP TABLE IF EXISTS test_set;
+DROP TABLE IF EXISTS null_in;
+
+
+DROP TABLE IF EXISTS null_in_subquery;
+CREATE TABLE null_in_subquery (dt DateTime, idx int, i Nullable(UInt64)) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx;
+INSERT INTO null_in_subquery SELECT number % 3, number, number FROM system.numbers LIMIT 99999;
+
+SELECT count() == 33333 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66666 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 0);
+
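+-- Note: dt = number % 3 splits the 99999 rows into three partitions of exactly 33333
+-- rows, so IN against the dt = 0 subquery matches 33333 rows and NOT IN against the
+-- dt = 1 subquery matches the remaining 66666; no NULL values are involved yet.
+-- For index column
+SELECT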
count() == 33333 FROM null_in_subquery WHERE idx in (SELECT idx FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE idx not in (SELECT idx FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE idx global in (SELECT idx FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66666 FROM null_in_subquery WHERE idx global not in (SELECT idx FROM null_in_subquery WHERE dt = 0);
+
+INSERT INTO null_in_subquery VALUES (0, 123456780, NULL);
+INSERT INTO null_in_subquery VALUES (1, 123456781, NULL);
+
+SELECT count() == 33335 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66668 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 33335 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66668 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 2);
+
+DROP TABLE IF EXISTS null_in_subquery;
+
+
+DROP TABLE IF EXISTS null_in_tuple;
+CREATE TABLE null_in_tuple (dt DateTime, idx int, t Tuple(Nullable(UInt64), Nullable(String))) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx;
+INSERT INTO null_in_tuple VALUES (1, 1, (1, '1')) (2, 2, (2, NULL)) (3, 3, (NULL, '3')) (4, 4, (NULL, NULL));
+
+SET transform_null_in = 0;
+
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t global in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t global not in ((1, '1'), (NULL, NULL));
+
+SET transform_null_in = 1;
+
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, NULL)] FROM null_in_tuple WHERE t global in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FROM null_in_tuple WHERE t global not in ((1, '1'), (NULL, NULL));
+
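+-- Note: with transform_null_in = 1 tuples are matched element-wise and NULL compares
+-- equal to NULL, so a set element such as (1, NULL) matches only the exact tuple
+-- (1, NULL), not every tuple whose first component is 1.
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, '1'));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (2, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, '1'), (2, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, '3')] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL), (NULL, '3'));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) ==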
[(1, '1'), (2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL), (2, NULL), (NULL, '3'), (NULL, NULL)); + +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, '1')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, '1'), (2, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL), (NULL, '3')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL), (2, NULL), (NULL, '3'), (NULL, NULL)); + +DROP TABLE IF EXISTS null_in_tuple; diff --git a/tests/queries/0_stateless/01232_extremes.reference b/tests/queries/0_stateless/01232_extremes.reference new file mode 100644 index 00000000000..d5b66dcbd4b --- /dev/null +++ b/tests/queries/0_stateless/01232_extremes.reference @@ -0,0 +1,110 @@ +0 +1 + +0 +1 +- + +- +- +0 +1 +0 +1 + +0 +1 +- + +- +- +0 +1 +0 +1 + +0 +1 +- + +- +- +0 +1 +0 +1 + +0 +1 +- + +- +- +0 +1 + +0 +1 +------ + +------ +------ +0 +0 +0 +1 +1 +2 + +0 +2 +- + +- +- +0 +0 +0 +1 +1 +2 + +0 +2 +- + +- +- +0 +0 +0 +1 +1 +2 + +0 +2 +------ + +------ +------ +0 +0 +1 +1 +2 + +0 +2 +- + +- +- +0 +0 +1 +1 +2 + +0 +2 diff --git a/tests/queries/0_stateless/01232_extremes.sql b/tests/queries/0_stateless/01232_extremes.sql new file mode 100644 index 00000000000..9379dc1cd38 --- /dev/null +++ b/tests/queries/0_stateless/01232_extremes.sql @@ -0,0 +1,55 @@ +set send_logs_level = 'error'; +set extremes = 1; +-- set experimental_use_processors=0; + +select * from remote('127.0.0.1', numbers(2)); +select '-'; +select * from remote('127.0.0.{1,1}', numbers(2)); +select '-'; +select * from remote('127.0.0.{1,2}', numbers(2)); +select '-'; +select * from remote('127.0.0.{2,2}', numbers(2)); +select '-'; +select * from remote('127.0.0.2', numbers(2)); +select '------'; + +select * from (select * from numbers(2) union all select * from numbers(3) union all select * from numbers(1)) order by number; +select '-'; +select * from (select * from numbers(1) union all select * from numbers(2) union all select * from numbers(3)) order by number; +select '-'; +select * from (select * from numbers(3) union all select * from numbers(1) union all select * from numbers(2)) order by number; +select '------'; + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.num_01232; +drop table if exists shard_0.num2_01232; +drop table if exists shard_1.num_01232; +drop table if exists shard_1.num2_01232; +drop table if exists distr; +drop table if exists distr2; + +create table shard_0.num_01232 (number UInt64) engine = MergeTree order by number; +create table shard_1.num_01232 (number UInt64) engine = MergeTree order by number; +insert into shard_0.num_01232 select number from numbers(2); +insert into shard_1.num_01232 select number from numbers(3); +create table distr (number UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', num_01232); + +create table shard_0.num2_01232 (number UInt64) engine = MergeTree order by number; +create table shard_1.num2_01232 (number UInt64) engine = MergeTree order by number; +insert into shard_0.num2_01232 select 
number from numbers(3); +insert into shard_1.num2_01232 select number from numbers(2); +create table distr2 (number UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', num2_01232); + +select * from distr order by number; +select '-'; +select * from distr2 order by number; + +drop table if exists shard_0.num_01232; +drop table if exists shard_0.num2_01232; +drop table if exists shard_1.num_01232; +drop table if exists shard_1.num2_01232; +drop table if exists distr; +drop table if exists distr2; + diff --git a/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.reference b/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.reference rename to tests/queries/0_stateless/01232_preparing_sets_race_condition.reference diff --git a/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh new file mode 100755 index 00000000000..25a8cdb12ea --- /dev/null +++ b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +set -o errexit +set -o pipefail + + +echo " + DROP TABLE if exists tableA; + DROP TABLE if exists tableB; + + create table tableA (id UInt64, col1 UInt64, colDate Date) engine = ReplacingMergeTree(colDate, id, 8192); + create table tableB (id UInt64, Aid UInt64, colDate Date) engine = ReplacingMergeTree(colDate, id, 8192); + + insert into tableA select number, number % 10, addDays(toDate('2020-01-01'), - number % 1000) from numbers(100000); + insert into tableB select number, number % 100000, addDays(toDate('2020-01-01'), number % 90) from numbers(50000000); +" | $CLICKHOUSE_CLIENT -n + +for i in {1..1}; do echo " +SELECT tableName +FROM + ( + SELECT + col1, + 'T1_notJoin1' AS tableName, + count(*) AS c + FROM tableA + GROUP BY col1 + UNION ALL + SELECT + a.col1, + 'T2_filteredAfterJoin1' AS tableName, + count(*) AS c + FROM tableB AS b + INNER JOIN tableA AS a ON a.id = b.Aid + WHERE b.colDate = '2020-01-01' + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T3_filteredAfterJoin2' AS tableName, + count(*) AS c + FROM tableB AS b + INNER JOIN + tableA AS a + ON a.id = b.Aid + WHERE b.colDate = '2020-01-02' + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T4_filteredBeforeJoin1' AS tableName, + count(*) AS c + FROM tableA AS a + INNER JOIN + ( + SELECT + Aid + FROM tableB + WHERE colDate = '2020-01-01' + ) AS b ON a.id = b.Aid + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T5_filteredBeforeJoin2' AS tableName, + count(*) AS c + FROM tableA AS a + INNER JOIN + ( + SELECT + Aid + FROM tableB + WHERE colDate = '2020-01-02' + ) AS b ON a.id = b.Aid + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T6_filteredAfterJoin3' AS tableName, + count(*) AS c + FROM tableB AS b + INNER JOIN tableA AS a ON a.id = b.Aid + WHERE b.colDate = '2020-01-03' + GROUP BY a.col1 + UNION ALL + SELECT + col1, + 'T7_notJoin2' AS tableName, + count(*) AS c + FROM tableA + GROUP BY col1 + UNION ALL + SELECT + a.col1, + 'T8_filteredBeforeJoin3' AS tableName, + count(*) AS c + FROM tableA AS a + INNER JOIN + ( + SELECT + Aid + FROM tableB + WHERE colDate = '2020-01-03' + ) AS b ON a.id = b.Aid + GROUP BY a.col1 + ) AS a +GROUP BY tableName +ORDER BY tableName ASC; +" | $CLICKHOUSE_CLIENT -n | wc -l ; done; + +echo " + DROP TABLE tableA; + DROP TABLE tableB; +" 
| $CLICKHOUSE_CLIENT -n
diff --git a/tests/queries/0_stateless/01234_to_string_monotonic.reference b/tests/queries/0_stateless/01234_to_string_monotonic.reference
new file mode 100644
index 00000000000..75404a347a4
--- /dev/null
+++ b/tests/queries/0_stateless/01234_to_string_monotonic.reference
@@ -0,0 +1,2 @@
+1234
+1234
diff --git a/tests/queries/0_stateless/01234_to_string_monotonic.sql b/tests/queries/0_stateless/01234_to_string_monotonic.sql
new file mode 100644
index 00000000000..87324fdda27
--- /dev/null
+++ b/tests/queries/0_stateless/01234_to_string_monotonic.sql
@@ -0,0 +1,14 @@
+DROP TABLE IF EXISTS test1;
+DROP TABLE IF EXISTS test2;
+
+CREATE TABLE test1 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1;
+CREATE TABLE test2 (s LowCardinality(String)) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1;
+
+INSERT INTO test1 SELECT toString(number) FROM numbers(10000);
+INSERT INTO test2 SELECT toString(number) FROM numbers(10000);
+
+SELECT s FROM test1 WHERE toString(s) = '1234' SETTINGS max_rows_to_read = 2;
+SELECT s FROM test2 WHERE toString(s) = '1234' SETTINGS max_rows_to_read = 2;
+
+DROP TABLE test1;
+DROP TABLE test2;
diff --git a/tests/queries/0_stateless/01235_live_view_over_distributed.reference b/tests/queries/0_stateless/01235_live_view_over_distributed.reference
new file mode 100644
index 00000000000..00fc99d96ba
--- /dev/null
+++ b/tests/queries/0_stateless/01235_live_view_over_distributed.reference
@@ -0,0 +1,4 @@
+2020-01-01
+2020-01-01
+2020-01-02
+2020-01-02
diff --git a/tests/queries/0_stateless/01235_live_view_over_distributed.sql b/tests/queries/0_stateless/01235_live_view_over_distributed.sql
new file mode 100644
index 00000000000..f3950c16002
--- /dev/null
+++ b/tests/queries/0_stateless/01235_live_view_over_distributed.sql
@@ -0,0 +1,19 @@
+SET allow_experimental_live_view = 1;
+
+DROP TABLE IF EXISTS lv;
+DROP TABLE IF EXISTS visits;
+DROP TABLE IF EXISTS visits_layer;
+
+CREATE TABLE visits(StartDate Date) ENGINE MergeTree ORDER BY(StartDate);
+CREATE TABLE visits_layer(StartDate Date) ENGINE Distributed(test_cluster_two_shards_localhost, currentDatabase(), 'visits', rand());
+
+CREATE LIVE VIEW lv AS SELECT * FROM visits_layer ORDER BY StartDate;
+
+INSERT INTO visits_layer (StartDate) VALUES ('2020-01-01');
+INSERT INTO visits_layer (StartDate) VALUES ('2020-01-02');
+
+SELECT * FROM lv;
+
+DROP TABLE visits;
+DROP TABLE visits_layer;
+
diff --git a/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.reference b/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.reference
new file mode 100644
index 00000000000..dfb4d0552f5
--- /dev/null
+++ b/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.reference
@@ -0,0 +1,8 @@
+2020-01-01
+2020-01-01
+2020-01-02
+2020-01-02
+2020-01-01
+2020-01-01
+2020-01-02
+2020-01-02
diff --git a/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.sql b/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.sql
new file mode 100644
index 00000000000..4408880ec5f
--- /dev/null
+++ b/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.sql
@@ -0,0 +1,24 @@
+SET allow_experimental_live_view = 1;
+
+DROP TABLE IF EXISTS lv;
+DROP TABLE IF EXISTS visits;
+DROP TABLE IF EXISTS visits_layer;
+
+CREATE TABLE visits(StartDate Date) ENGINE MergeTree ORDER BY(StartDate);
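+-- Note: both shards of test_cluster_two_shards_localhost point at the same server, so
+-- each Distributed layer reads the same rows twice; the live view over visits_layer sees
+-- 4 rows, and the Distributed table over the live view returns 8, matching the reference.
+CREATE TABLE visits_layer(StartDate Date) ENGINE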
Distributed(test_cluster_two_shards_localhost, currentDatabase(), 'visits', rand()); + +CREATE LIVE VIEW lv AS SELECT * FROM visits_layer ORDER BY StartDate; + +CREATE TABLE visits_layer_lv(StartDate Date) ENGINE Distributed(test_cluster_two_shards_localhost, currentDatabase(), 'lv', rand()); + +INSERT INTO visits_layer (StartDate) VALUES ('2020-01-01'); +INSERT INTO visits_layer (StartDate) VALUES ('2020-01-02'); + +SELECT * FROM visits_layer_lv; + +DROP TABLE visits; +DROP TABLE visits_layer; + diff --git a/tests/queries/0_stateless/01236_graphite_mt.reference b/tests/queries/0_stateless/01236_graphite_mt.reference new file mode 100644 index 00000000000..a30d2495265 --- /dev/null +++ b/tests/queries/0_stateless/01236_graphite_mt.reference @@ -0,0 +1,344 @@ +1 max_1 9 1 0 +1 max_1 19 1 10 +1 max_1 29 1 20 +1 max_1 39 1 30 +1 max_1 49 1 40 +1 max_1 59 1 50 +1 max_1 69 1 60 +1 max_1 79 1 70 +1 max_1 89 1 80 +1 max_1 99 1 90 +1 max_1 109 1 100 +1 max_1 119 1 110 +1 max_1 129 1 120 +1 max_1 139 1 130 +1 max_1 149 1 140 +1 max_1 159 1 150 +1 max_1 169 1 160 +1 max_1 179 1 170 +1 max_1 189 1 180 +1 max_1 199 1 190 +1 max_1 209 1 200 +1 max_1 219 1 210 +1 max_1 229 1 220 +1 max_1 239 1 230 +1 max_1 249 1 240 +1 max_1 259 1 250 +1 max_1 269 1 260 +1 max_1 279 1 270 +1 max_1 289 1 280 +1 max_1 299 1 290 +1 max_1 39 1 0 +1 max_1 139 1 40 +1 max_1 239 1 140 +1 max_1 339 1 240 +1 max_1 439 1 340 +1 max_1 539 1 440 +1 max_1 639 1 540 +1 max_1 739 1 640 +1 max_1 839 1 740 +1 max_1 939 1 840 +1 max_1 1039 1 940 +1 max_1 1139 1 1040 +1 max_1 1199 1 1140 +1 max_2 9 1 0 +1 max_2 19 1 10 +1 max_2 29 1 20 +1 max_2 39 1 30 +1 max_2 49 1 40 +1 max_2 59 1 50 +1 max_2 69 1 60 +1 max_2 79 1 70 +1 max_2 89 1 80 +1 max_2 99 1 90 +1 max_2 109 1 100 +1 max_2 119 1 110 +1 max_2 129 1 120 +1 max_2 139 1 130 +1 max_2 149 1 140 +1 max_2 159 1 150 +1 max_2 169 1 160 +1 max_2 179 1 170 +1 max_2 189 1 180 +1 max_2 199 1 190 +1 max_2 209 1 200 +1 max_2 219 1 210 +1 max_2 229 1 220 +1 max_2 239 1 230 +1 max_2 249 1 240 +1 max_2 259 1 250 +1 max_2 269 1 260 +1 max_2 279 1 270 +1 max_2 289 1 280 +1 max_2 299 1 290 +1 max_2 39 1 0 +1 max_2 139 1 40 +1 max_2 239 1 140 +1 max_2 339 1 240 +1 max_2 439 1 340 +1 max_2 539 1 440 +1 max_2 639 1 540 +1 max_2 739 1 640 +1 max_2 839 1 740 +1 max_2 939 1 840 +1 max_2 1039 1 940 +1 max_2 1139 1 1040 +1 max_2 1199 1 1140 +1 sum_1 45 1 0 +1 sum_1 145 1 10 +1 sum_1 245 1 20 +1 sum_1 345 1 30 +1 sum_1 445 1 40 +1 sum_1 545 1 50 +1 sum_1 645 1 60 +1 sum_1 745 1 70 +1 sum_1 845 1 80 +1 sum_1 945 1 90 +1 sum_1 1045 1 100 +1 sum_1 1145 1 110 +1 sum_1 1245 1 120 +1 sum_1 1345 1 130 +1 sum_1 1445 1 140 +1 sum_1 1545 1 150 +1 sum_1 1645 1 160 +1 sum_1 1745 1 170 +1 sum_1 1845 1 180 +1 sum_1 1945 1 190 +1 sum_1 2045 1 200 +1 sum_1 2145 1 210 +1 sum_1 2245 1 220 +1 sum_1 2345 1 230 +1 sum_1 2445 1 240 +1 sum_1 2545 1 250 +1 sum_1 2645 1 260 +1 sum_1 2745 1 270 +1 sum_1 2845 1 280 +1 sum_1 2945 1 290 +1 sum_1 780 1 0 +1 sum_1 8950 1 40 +1 sum_1 18950 1 140 +1 sum_1 28950 1 240 +1 sum_1 38950 1 340 +1 sum_1 48950 1 440 +1 sum_1 58950 1 540 +1 sum_1 68950 1 640 +1 sum_1 78950 1 740 +1 sum_1 88950 1 840 +1 sum_1 98950 1 940 +1 sum_1 108950 1 1040 +1 sum_1 70170 1 1140 +1 sum_2 45 1 0 +1 sum_2 145 1 10 +1 sum_2 245 1 20 +1 sum_2 345 1 30 +1 sum_2 445 1 40 +1 sum_2 545 1 50 +1 sum_2 645 1 60 +1 sum_2 745 1 70 +1 sum_2 845 1 80 +1 sum_2 945 1 90 +1 sum_2 1045 1 100 +1 sum_2 1145 1 110 +1 sum_2 1245 1 120 +1 sum_2 1345 1 130 +1 sum_2 1445 1 140 +1 sum_2 1545 1 150 +1 sum_2 1645 1 160 +1 sum_2 1745 1 170 +1 
sum_2 1845 1 180 +1 sum_2 1945 1 190 +1 sum_2 2045 1 200 +1 sum_2 2145 1 210 +1 sum_2 2245 1 220 +1 sum_2 2345 1 230 +1 sum_2 2445 1 240 +1 sum_2 2545 1 250 +1 sum_2 2645 1 260 +1 sum_2 2745 1 270 +1 sum_2 2845 1 280 +1 sum_2 2945 1 290 +1 sum_2 780 1 0 +1 sum_2 8950 1 40 +1 sum_2 18950 1 140 +1 sum_2 28950 1 240 +1 sum_2 38950 1 340 +1 sum_2 48950 1 440 +1 sum_2 58950 1 540 +1 sum_2 68950 1 640 +1 sum_2 78950 1 740 +1 sum_2 88950 1 840 +1 sum_2 98950 1 940 +1 sum_2 108950 1 1040 +1 sum_2 70170 1 1140 +2 max_1 9 1 0 +2 max_1 19 1 10 +2 max_1 29 1 20 +2 max_1 39 1 30 +2 max_1 49 1 40 +2 max_1 59 1 50 +2 max_1 69 1 60 +2 max_1 79 1 70 +2 max_1 89 1 80 +2 max_1 99 1 90 +2 max_1 109 1 100 +2 max_1 119 1 110 +2 max_1 129 1 120 +2 max_1 139 1 130 +2 max_1 149 1 140 +2 max_1 159 1 150 +2 max_1 169 1 160 +2 max_1 179 1 170 +2 max_1 189 1 180 +2 max_1 199 1 190 +2 max_1 209 1 200 +2 max_1 219 1 210 +2 max_1 229 1 220 +2 max_1 239 1 230 +2 max_1 249 1 240 +2 max_1 259 1 250 +2 max_1 269 1 260 +2 max_1 279 1 270 +2 max_1 289 1 280 +2 max_1 299 1 290 +2 max_1 39 1 0 +2 max_1 139 1 40 +2 max_1 239 1 140 +2 max_1 339 1 240 +2 max_1 439 1 340 +2 max_1 539 1 440 +2 max_1 639 1 540 +2 max_1 739 1 640 +2 max_1 839 1 740 +2 max_1 939 1 840 +2 max_1 1039 1 940 +2 max_1 1139 1 1040 +2 max_1 1199 1 1140 +2 max_2 9 1 0 +2 max_2 19 1 10 +2 max_2 29 1 20 +2 max_2 39 1 30 +2 max_2 49 1 40 +2 max_2 59 1 50 +2 max_2 69 1 60 +2 max_2 79 1 70 +2 max_2 89 1 80 +2 max_2 99 1 90 +2 max_2 109 1 100 +2 max_2 119 1 110 +2 max_2 129 1 120 +2 max_2 139 1 130 +2 max_2 149 1 140 +2 max_2 159 1 150 +2 max_2 169 1 160 +2 max_2 179 1 170 +2 max_2 189 1 180 +2 max_2 199 1 190 +2 max_2 209 1 200 +2 max_2 219 1 210 +2 max_2 229 1 220 +2 max_2 239 1 230 +2 max_2 249 1 240 +2 max_2 259 1 250 +2 max_2 269 1 260 +2 max_2 279 1 270 +2 max_2 289 1 280 +2 max_2 299 1 290 +2 max_2 39 1 0 +2 max_2 139 1 40 +2 max_2 239 1 140 +2 max_2 339 1 240 +2 max_2 439 1 340 +2 max_2 539 1 440 +2 max_2 639 1 540 +2 max_2 739 1 640 +2 max_2 839 1 740 +2 max_2 939 1 840 +2 max_2 1039 1 940 +2 max_2 1139 1 1040 +2 max_2 1199 1 1140 +2 sum_1 45 1 0 +2 sum_1 145 1 10 +2 sum_1 245 1 20 +2 sum_1 345 1 30 +2 sum_1 445 1 40 +2 sum_1 545 1 50 +2 sum_1 645 1 60 +2 sum_1 745 1 70 +2 sum_1 845 1 80 +2 sum_1 945 1 90 +2 sum_1 1045 1 100 +2 sum_1 1145 1 110 +2 sum_1 1245 1 120 +2 sum_1 1345 1 130 +2 sum_1 1445 1 140 +2 sum_1 1545 1 150 +2 sum_1 1645 1 160 +2 sum_1 1745 1 170 +2 sum_1 1845 1 180 +2 sum_1 1945 1 190 +2 sum_1 2045 1 200 +2 sum_1 2145 1 210 +2 sum_1 2245 1 220 +2 sum_1 2345 1 230 +2 sum_1 2445 1 240 +2 sum_1 2545 1 250 +2 sum_1 2645 1 260 +2 sum_1 2745 1 270 +2 sum_1 2845 1 280 +2 sum_1 2945 1 290 +2 sum_1 780 1 0 +2 sum_1 8950 1 40 +2 sum_1 18950 1 140 +2 sum_1 28950 1 240 +2 sum_1 38950 1 340 +2 sum_1 48950 1 440 +2 sum_1 58950 1 540 +2 sum_1 68950 1 640 +2 sum_1 78950 1 740 +2 sum_1 88950 1 840 +2 sum_1 98950 1 940 +2 sum_1 108950 1 1040 +2 sum_1 70170 1 1140 +2 sum_2 45 1 0 +2 sum_2 145 1 10 +2 sum_2 245 1 20 +2 sum_2 345 1 30 +2 sum_2 445 1 40 +2 sum_2 545 1 50 +2 sum_2 645 1 60 +2 sum_2 745 1 70 +2 sum_2 845 1 80 +2 sum_2 945 1 90 +2 sum_2 1045 1 100 +2 sum_2 1145 1 110 +2 sum_2 1245 1 120 +2 sum_2 1345 1 130 +2 sum_2 1445 1 140 +2 sum_2 1545 1 150 +2 sum_2 1645 1 160 +2 sum_2 1745 1 170 +2 sum_2 1845 1 180 +2 sum_2 1945 1 190 +2 sum_2 2045 1 200 +2 sum_2 2145 1 210 +2 sum_2 2245 1 220 +2 sum_2 2345 1 230 +2 sum_2 2445 1 240 +2 sum_2 2545 1 250 +2 sum_2 2645 1 260 +2 sum_2 2745 1 270 +2 sum_2 2845 1 280 +2 sum_2 2945 1 290 +2 sum_2 780 1 0 +2 sum_2 
8950 1 40
+2 sum_2 18950 1 140
+2 sum_2 28950 1 240
+2 sum_2 38950 1 340
+2 sum_2 48950 1 440
+2 sum_2 58950 1 540
+2 sum_2 68950 1 640
+2 sum_2 78950 1 740
+2 sum_2 88950 1 840
+2 sum_2 98950 1 940
+2 sum_2 108950 1 1040
+2 sum_2 70170 1 1140
diff --git a/tests/queries/0_stateless/01236_graphite_mt.sql b/tests/queries/0_stateless/01236_graphite_mt.sql
new file mode 100644
index 00000000000..cee9b8c9fde
--- /dev/null
+++ b/tests/queries/0_stateless/01236_graphite_mt.sql
@@ -0,0 +1,26 @@
+drop table if exists test_graphite;
+create table test_graphite (key UInt32, Path String, Time DateTime, Value Float64, Version UInt32, col UInt64) engine = GraphiteMergeTree('graphite_rollup') order by key settings index_granularity=10;
+
+insert into test_graphite
+select 1, 'sum_1', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300) union all
+select 2, 'sum_1', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300) union all
+select 1, 'sum_2', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300) union all
+select 2, 'sum_2', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300) union all
+select 1, 'max_1', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300) union all
+select 2, 'max_1', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300) union all
+select 1, 'max_2', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300) union all
+select 2, 'max_2', toDateTime(today()) - number * 60 - 30, number, 1, number from numbers(300);
+
+insert into test_graphite
+select 1, 'sum_1', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200) union all
+select 2, 'sum_1', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200) union all
+select 1, 'sum_2', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200) union all
+select 2, 'sum_2', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200) union all
+select 1, 'max_1', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200) union all
+select 2, 'max_1', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200) union all
+select 1, 'max_2', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200) union all
+select 2, 'max_2', toDateTime(today() - 3) - number * 60 - 30, number, 1, number from numbers(1200);
+
+optimize table test_graphite;
+
+select key, Path, Value, Version, col from test_graphite order by key, Path, Time desc;
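The 01236_graphite_mt test above exercises GraphiteMergeTree rollup: on OPTIMIZE, rows sharing a Path are collapsed into one row per time bucket, aggregated with the function the matching graphite_rollup pattern prescribes (presumably sum for the sum_* paths and max for the max_* paths here; the exact bucket widths and age thresholds come from the graphite_rollup section of the test server's config, which is not part of this diff). The reference data bears this out; for sum_1, for example, the 300 per-minute points of the recent window collapse into ten-point buckets:

-- 0+1+...+9 = 45, 10+11+...+19 = 145, 20+...+29 = 245, ... matching the reference rows
-- '1 sum_1 45 1 0', '1 sum_1 145 1 10', '1 sum_1 245 1 20', and so on.

diff --git a/tests/queries/0_stateless/01237_live_view_over_distributed_with_subquery_select_table_alias.reference b/tests/queries/0_stateless/01237_live_view_over_distributed_with_subquery_select_table_alias.reference
new file mode 100644
index 00000000000..00fc99d96ba
--- /dev/null
+++ b/tests/queries/0_stateless/01237_live_view_over_distributed_with_subquery_select_table_alias.reference
@@ -0,0 +1,4 @@
+2020-01-01
+2020-01-01
+2020-01-02
+2020-01-02
diff --git a/tests/queries/0_stateless/01237_live_view_over_distributed_with_subquery_select_table_alias.sql b/tests/queries/0_stateless/01237_live_view_over_distributed_with_subquery_select_table_alias.sql
new file mode 100644
index 00000000000..dc57e001122
--- /dev/null
+++ b/tests/queries/0_stateless/01237_live_view_over_distributed_with_subquery_select_table_alias.sql
@@ -0,0 +1,19 @@
+SET allow_experimental_live_view = 1;
+
+DROP TABLE IF EXISTS lv;
+DROP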
TABLE IF EXISTS visits; +DROP TABLE IF EXISTS visits_layer; + +CREATE TABLE visits(StartDate Date) ENGINE MergeTree ORDER BY(StartDate); +CREATE TABLE visits_layer(StartDate Date) ENGINE Distributed(test_cluster_two_shards_localhost, currentDatabase(), 'visits', rand()); + +CREATE LIVE VIEW lv AS SELECT foo.x FROM (SELECT StartDate AS x FROM visits_layer) AS foo ORDER BY foo.x; + +INSERT INTO visits_layer (StartDate) VALUES ('2020-01-01'); +INSERT INTO visits_layer (StartDate) VALUES ('2020-01-02'); + +SELECT * FROM lv; + +DROP TABLE visits; +DROP TABLE visits_layer; + diff --git a/tests/queries/0_stateless/01240_join_get_or_null.reference b/tests/queries/0_stateless/01240_join_get_or_null.reference new file mode 100644 index 00000000000..96e34d5a44c --- /dev/null +++ b/tests/queries/0_stateless/01240_join_get_or_null.reference @@ -0,0 +1,2 @@ +\N +\N diff --git a/tests/queries/0_stateless/01240_join_get_or_null.sql b/tests/queries/0_stateless/01240_join_get_or_null.sql new file mode 100644 index 00000000000..48fd8228b55 --- /dev/null +++ b/tests/queries/0_stateless/01240_join_get_or_null.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS join_test; + +CREATE TABLE join_test (id UInt16, num UInt16) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); +DROP TABLE join_test; + +CREATE TABLE join_test (id UInt16, num Nullable(UInt16)) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); +DROP TABLE join_test; + +CREATE TABLE join_test (id UInt16, num Array(UInt16)) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); -- { serverError 43 } +DROP TABLE join_test; diff --git a/tests/queries/0_stateless/01245_limit_infinite_sources.reference b/tests/queries/0_stateless/01245_limit_infinite_sources.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01245_limit_infinite_sources.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01245_limit_infinite_sources.sql b/tests/queries/0_stateless/01245_limit_infinite_sources.sql new file mode 100644 index 00000000000..803a2d14c39 --- /dev/null +++ b/tests/queries/0_stateless/01245_limit_infinite_sources.sql @@ -0,0 +1,11 @@ +SELECT number +FROM +( + SELECT zero AS number + FROM remote('127.0.0.2', system.zeros) + UNION ALL + SELECT number + sleep(0.5) + FROM system.numbers +) +WHERE number = 1 +LIMIT 1 diff --git a/dbms/tests/queries/0_stateless/data_avro/complex.avro b/tests/queries/0_stateless/data_avro/complex.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/complex.avro rename to tests/queries/0_stateless/data_avro/complex.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/complex.avsc b/tests/queries/0_stateless/data_avro/complex.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/complex.avsc rename to tests/queries/0_stateless/data_avro/complex.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/complex.json b/tests/queries/0_stateless/data_avro/complex.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/complex.json rename to tests/queries/0_stateless/data_avro/complex.json diff --git a/dbms/tests/queries/0_stateless/data_avro/empty.avro b/tests/queries/0_stateless/data_avro/empty.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/empty.avro rename to tests/queries/0_stateless/data_avro/empty.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/empty.avsc 
b/tests/queries/0_stateless/data_avro/empty.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/empty.avsc rename to tests/queries/0_stateless/data_avro/empty.avsc diff --git a/tests/queries/0_stateless/data_avro/empty.json b/tests/queries/0_stateless/data_avro/empty.json new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/data_avro/generate_avro.sh b/tests/queries/0_stateless/data_avro/generate_avro.sh similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/generate_avro.sh rename to tests/queries/0_stateless/data_avro/generate_avro.sh diff --git a/dbms/tests/queries/0_stateless/data_avro/logical_types.avro b/tests/queries/0_stateless/data_avro/logical_types.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/logical_types.avro rename to tests/queries/0_stateless/data_avro/logical_types.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/logical_types.avsc b/tests/queries/0_stateless/data_avro/logical_types.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/logical_types.avsc rename to tests/queries/0_stateless/data_avro/logical_types.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/logical_types.json b/tests/queries/0_stateless/data_avro/logical_types.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/logical_types.json rename to tests/queries/0_stateless/data_avro/logical_types.json diff --git a/dbms/tests/queries/0_stateless/data_avro/primitive.avro b/tests/queries/0_stateless/data_avro/primitive.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/primitive.avro rename to tests/queries/0_stateless/data_avro/primitive.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/primitive.avsc b/tests/queries/0_stateless/data_avro/primitive.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/primitive.avsc rename to tests/queries/0_stateless/data_avro/primitive.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/primitive.json b/tests/queries/0_stateless/data_avro/primitive.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/primitive.json rename to tests/queries/0_stateless/data_avro/primitive.json diff --git a/dbms/tests/queries/0_stateless/data_avro/references.avro b/tests/queries/0_stateless/data_avro/references.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/references.avro rename to tests/queries/0_stateless/data_avro/references.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/references.avsc b/tests/queries/0_stateless/data_avro/references.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/references.avsc rename to tests/queries/0_stateless/data_avro/references.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/references.json b/tests/queries/0_stateless/data_avro/references.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/references.json rename to tests/queries/0_stateless/data_avro/references.json diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.avsc b/tests/queries/0_stateless/data_avro/simple.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.avsc rename to tests/queries/0_stateless/data_avro/simple.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.deflate.avro b/tests/queries/0_stateless/data_avro/simple.deflate.avro similarity 
index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.deflate.avro rename to tests/queries/0_stateless/data_avro/simple.deflate.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.json b/tests/queries/0_stateless/data_avro/simple.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.json rename to tests/queries/0_stateless/data_avro/simple.json diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.null.avro b/tests/queries/0_stateless/data_avro/simple.null.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.null.avro rename to tests/queries/0_stateless/data_avro/simple.null.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.snappy.avro b/tests/queries/0_stateless/data_avro/simple.snappy.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.snappy.avro rename to tests/queries/0_stateless/data_avro/simple.snappy.avro diff --git a/dbms/tests/queries/0_stateless/data_orc/test.orc b/tests/queries/0_stateless/data_orc/test.orc similarity index 100% rename from dbms/tests/queries/0_stateless/data_orc/test.orc rename to tests/queries/0_stateless/data_orc/test.orc diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet b/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet rename to tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns b/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns rename to tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet b/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet rename to tests/queries/0_stateless/data_parquet/alltypes_plain.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns b/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns rename to tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet b/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet rename to tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet b/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet similarity index 100% rename from 
dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet rename to tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns b/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns rename to tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet b/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet rename to tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet b/tests/queries/0_stateless/data_parquet/int32_decimal.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet rename to tests/queries/0_stateless/data_parquet/int32_decimal.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns b/tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns rename to tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet b/tests/queries/0_stateless/data_parquet/int64_decimal.parquet similarity index 100% rename from 
dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet rename to tests/queries/0_stateless/data_parquet/int64_decimal.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns b/tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns rename to tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet b/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet rename to tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns b/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns rename to tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet b/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet rename to tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet b/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet rename to tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet b/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet rename to tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns b/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns rename to tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet b/tests/queries/0_stateless/data_parquet/nullable.impala.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet 
rename to tests/queries/0_stateless/data_parquet/nullable.impala.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns b/tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns rename to tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet b/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet rename to tests/queries/0_stateless/data_parquet/nulls.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet b/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet rename to tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns b/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns rename to tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet b/tests/queries/0_stateless/data_parquet/userdata1.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet rename to tests/queries/0_stateless/data_parquet/userdata1.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata1.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata1.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet b/tests/queries/0_stateless/data_parquet/userdata2.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet rename to tests/queries/0_stateless/data_parquet/userdata2.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata2.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata2.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet b/tests/queries/0_stateless/data_parquet/userdata3.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet rename to tests/queries/0_stateless/data_parquet/userdata3.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata3.parquet.columns similarity index 100% rename from 
dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata3.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet b/tests/queries/0_stateless/data_parquet/userdata4.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet rename to tests/queries/0_stateless/data_parquet/userdata4.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata4.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata4.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet b/tests/queries/0_stateless/data_parquet/userdata5.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet rename to tests/queries/0_stateless/data_parquet/userdata5.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata5.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata5.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns rename to tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns rename to tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns rename to tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns diff --git 
a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns rename to tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns diff --git a/dbms/tests/queries/0_stateless/helpers/client.py b/tests/queries/0_stateless/helpers/client.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/client.py rename to tests/queries/0_stateless/helpers/client.py diff --git a/dbms/tests/queries/0_stateless/helpers/httpclient.py b/tests/queries/0_stateless/helpers/httpclient.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/httpclient.py rename to tests/queries/0_stateless/helpers/httpclient.py diff --git a/dbms/tests/queries/0_stateless/helpers/httpexpect.py b/tests/queries/0_stateless/helpers/httpexpect.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/httpexpect.py rename to tests/queries/0_stateless/helpers/httpexpect.py diff --git a/tests/queries/0_stateless/helpers/uexpect.py b/tests/queries/0_stateless/helpers/uexpect.py new file mode 100644 index 00000000000..f71b32a53e1 --- /dev/null +++ b/tests/queries/0_stateless/helpers/uexpect.py @@ -0,0 +1,206 @@ +# Copyright (c) 2019 Vitaliy Zakaznikov +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
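+#
+# uexpect is, as far as one can tell from this file alone, a small pexpect-style
+# helper for the test suite: spawn() starts a command on a pseudo-terminal, a
+# daemon reader thread pumps its output into a queue, and IO.expect() matches
+# regular expressions against the buffered output under a timeout.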
+import os
+import pty
+import time
+import sys
+import re
+
+from threading import Thread, Event
+from subprocess import Popen
+from Queue import Queue, Empty
+
+class TimeoutError(Exception):
+    def __init__(self, timeout):
+        self.timeout = timeout
+
+    def __str__(self):
+        return 'Timeout %.3fs' % float(self.timeout)
+
+class ExpectTimeoutError(Exception):
+    def __init__(self, pattern, timeout, buffer):
+        self.pattern = pattern
+        self.timeout = timeout
+        self.buffer = buffer
+
+    def __str__(self):
+        s = 'Timeout %.3fs ' % float(self.timeout)
+        if self.pattern:
+            s += 'for %s ' % repr(self.pattern.pattern)
+        if self.buffer:
+            s += 'buffer %s ' % repr(self.buffer[:])
+            s += 'or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]])
+        return s
+
+class IO(object):
+    class EOF(object):
+        pass
+
+    class Timeout(object):
+        pass
+
+    EOF = EOF
+    TIMEOUT = Timeout
+
+    class Logger(object):
+        def __init__(self, logger, prefix=''):
+            self._logger = logger
+            self._prefix = prefix
+
+        def write(self, data):
+            self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
+
+        def flush(self):
+            self._logger.flush()
+
+    def __init__(self, process, master, queue, reader):
+        self.process = process
+        self.master = master
+        self.queue = queue
+        self.buffer = None
+        self.before = None
+        self.after = None
+        self.match = None
+        self.pattern = None
+        self.reader = reader
+        self._timeout = None
+        self._logger = None
+        self._eol = ''
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def logger(self, logger=None, prefix=''):
+        if logger:
+            self._logger = self.Logger(logger, prefix=prefix)
+        return self._logger
+
+    def timeout(self, timeout=None):
+        if timeout:
+            self._timeout = timeout
+        return self._timeout
+
+    def eol(self, eol=None):
+        if eol:
+            self._eol = eol
+        return self._eol
+
+    def close(self, force=True):
+        self.reader['kill_event'].set()
+        os.system('pkill -TERM -P %d' % self.process.pid)
+        if force:
+            self.process.kill()
+        else:
+            self.process.terminate()
+        os.close(self.master)
+        if self._logger:
+            self._logger.write('\n')
+            self._logger.flush()
+
+    def send(self, data, eol=None):
+        if eol is None:
+            eol = self._eol
+        return self.write(data + eol)
+
+    def write(self, data):
+        return os.write(self.master, data)
+
+    def expect(self, pattern, timeout=None, escape=False):
+        self.match = None
+        self.before = None
+        self.after = None
+        if escape:
+            pattern = re.escape(pattern)
+        pattern = re.compile(pattern)
+        if timeout is None:
+            timeout = self._timeout
+        timeleft = timeout
+        while True:
+            start_time = time.time()
+            if self.buffer is not None:
+                self.match = pattern.search(self.buffer, 0)
+                if self.match is not None:
+                    self.after = self.buffer[self.match.start():self.match.end()]
+                    self.before = self.buffer[:self.match.start()]
+                    self.buffer = self.buffer[self.match.end():]
+                    break
+            if timeleft < 0:
+                break
+            try:
+                data = self.read(timeout=timeleft, raise_exception=True)
+            except TimeoutError:
+                if self._logger:
+                    self._logger.write((self.buffer or '') + '\n')
+                    self._logger.flush()
+                exception = ExpectTimeoutError(pattern, timeout, self.buffer)
+                self.buffer = None
+                raise exception
+            timeleft -= (time.time() - start_time)
+            if data:
+                self.buffer = (self.buffer + data) if self.buffer else data
+        if self._logger:
+            self._logger.write((self.before or '') + (self.after or ''))
+            self._logger.flush()
+        if self.match is None:
+            exception = ExpectTimeoutError(pattern, timeout, self.buffer)
+            self.buffer = None
+            raise exception
+        return self.match
+
+    def read(self, timeout=0, raise_exception=False):
+        data = ''
+        timeleft = timeout
+        try:
+            while timeleft >= 0:
+                start_time = time.time()
+                data += self.queue.get(timeout=timeleft)
+                if data:
+                    break
+                timeleft -= (time.time() - start_time)
+        except Empty:
+            if data:
+                return data
+            if raise_exception:
+                raise TimeoutError(timeout)
+            pass
+        if not data and raise_exception:
+            raise TimeoutError(timeout)
+
+        return data
+
+def spawn(command):
+    master, slave = pty.openpty()
+    process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
+    os.close(slave)
+
+    queue = Queue()
+    reader_kill_event = Event()
+    thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
+    thread.daemon = True
+    thread.start()
+
+    return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
+
+def reader(process, out, queue, kill_event):
+    while True:
+        try:
+            data = os.read(out, 65536)
+            queue.put(data)
+        except:
+            if kill_event.is_set():
+                break
+            raise
diff --git a/dbms/tests/queries/0_stateless/mergetree_mutations.lib b/tests/queries/0_stateless/mergetree_mutations.lib
similarity index 100%
rename from dbms/tests/queries/0_stateless/mergetree_mutations.lib
rename to tests/queries/0_stateless/mergetree_mutations.lib
diff --git a/dbms/tests/queries/1_stateful/00001_count_hits.reference b/tests/queries/1_stateful/00001_count_hits.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00001_count_hits.reference
rename to tests/queries/1_stateful/00001_count_hits.reference
diff --git a/dbms/tests/queries/1_stateful/00001_count_hits.sql b/tests/queries/1_stateful/00001_count_hits.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00001_count_hits.sql
rename to tests/queries/1_stateful/00001_count_hits.sql
diff --git a/dbms/tests/queries/1_stateful/00002_count_visits.reference b/tests/queries/1_stateful/00002_count_visits.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00002_count_visits.reference
rename to tests/queries/1_stateful/00002_count_visits.reference
diff --git a/dbms/tests/queries/1_stateful/00002_count_visits.sql b/tests/queries/1_stateful/00002_count_visits.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00002_count_visits.sql
rename to tests/queries/1_stateful/00002_count_visits.sql
diff --git a/dbms/tests/queries/1_stateful/00004_top_counters.reference b/tests/queries/1_stateful/00004_top_counters.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00004_top_counters.reference
rename to tests/queries/1_stateful/00004_top_counters.reference
diff --git a/dbms/tests/queries/1_stateful/00004_top_counters.sql b/tests/queries/1_stateful/00004_top_counters.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00004_top_counters.sql
rename to tests/queries/1_stateful/00004_top_counters.sql
diff --git a/dbms/tests/queries/1_stateful/00005_filtering.reference b/tests/queries/1_stateful/00005_filtering.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00005_filtering.reference
rename to tests/queries/1_stateful/00005_filtering.reference
diff --git a/dbms/tests/queries/1_stateful/00005_filtering.sql b/tests/queries/1_stateful/00005_filtering.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00005_filtering.sql
rename to tests/queries/1_stateful/00005_filtering.sql
diff --git a/dbms/tests/queries/1_stateful/00006_agregates.reference
b/tests/queries/1_stateful/00006_agregates.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00006_agregates.reference rename to tests/queries/1_stateful/00006_agregates.reference diff --git a/dbms/tests/queries/1_stateful/00006_agregates.sql b/tests/queries/1_stateful/00006_agregates.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00006_agregates.sql rename to tests/queries/1_stateful/00006_agregates.sql diff --git a/dbms/tests/queries/1_stateful/00007_uniq.reference b/tests/queries/1_stateful/00007_uniq.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00007_uniq.reference rename to tests/queries/1_stateful/00007_uniq.reference diff --git a/dbms/tests/queries/1_stateful/00007_uniq.sql b/tests/queries/1_stateful/00007_uniq.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00007_uniq.sql rename to tests/queries/1_stateful/00007_uniq.sql diff --git a/dbms/tests/queries/1_stateful/00008_uniq.reference b/tests/queries/1_stateful/00008_uniq.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00008_uniq.reference rename to tests/queries/1_stateful/00008_uniq.reference diff --git a/dbms/tests/queries/1_stateful/00008_uniq.sql b/tests/queries/1_stateful/00008_uniq.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00008_uniq.sql rename to tests/queries/1_stateful/00008_uniq.sql diff --git a/dbms/tests/queries/1_stateful/00009_uniq_distributed.reference b/tests/queries/1_stateful/00009_uniq_distributed.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00009_uniq_distributed.reference rename to tests/queries/1_stateful/00009_uniq_distributed.reference diff --git a/dbms/tests/queries/1_stateful/00009_uniq_distributed.sql b/tests/queries/1_stateful/00009_uniq_distributed.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00009_uniq_distributed.sql rename to tests/queries/1_stateful/00009_uniq_distributed.sql diff --git a/dbms/tests/queries/1_stateful/00010_quantiles_segfault.reference b/tests/queries/1_stateful/00010_quantiles_segfault.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00010_quantiles_segfault.reference rename to tests/queries/1_stateful/00010_quantiles_segfault.reference diff --git a/dbms/tests/queries/1_stateful/00010_quantiles_segfault.sql b/tests/queries/1_stateful/00010_quantiles_segfault.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00010_quantiles_segfault.sql rename to tests/queries/1_stateful/00010_quantiles_segfault.sql diff --git a/dbms/tests/queries/1_stateful/00011_sorting.reference b/tests/queries/1_stateful/00011_sorting.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00011_sorting.reference rename to tests/queries/1_stateful/00011_sorting.reference diff --git a/dbms/tests/queries/1_stateful/00011_sorting.sql b/tests/queries/1_stateful/00011_sorting.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00011_sorting.sql rename to tests/queries/1_stateful/00011_sorting.sql diff --git a/dbms/tests/queries/1_stateful/00012_sorting_distributed.reference b/tests/queries/1_stateful/00012_sorting_distributed.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00012_sorting_distributed.reference rename to tests/queries/1_stateful/00012_sorting_distributed.reference diff --git a/dbms/tests/queries/1_stateful/00012_sorting_distributed.sql b/tests/queries/1_stateful/00012_sorting_distributed.sql similarity index 
100% rename from dbms/tests/queries/1_stateful/00012_sorting_distributed.sql rename to tests/queries/1_stateful/00012_sorting_distributed.sql diff --git a/dbms/tests/queries/1_stateful/00013_sorting_of_nested.reference b/tests/queries/1_stateful/00013_sorting_of_nested.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00013_sorting_of_nested.reference rename to tests/queries/1_stateful/00013_sorting_of_nested.reference diff --git a/dbms/tests/queries/1_stateful/00013_sorting_of_nested.sql b/tests/queries/1_stateful/00013_sorting_of_nested.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00013_sorting_of_nested.sql rename to tests/queries/1_stateful/00013_sorting_of_nested.sql diff --git a/dbms/tests/queries/1_stateful/00014_filtering_arrays.reference b/tests/queries/1_stateful/00014_filtering_arrays.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00014_filtering_arrays.reference rename to tests/queries/1_stateful/00014_filtering_arrays.reference diff --git a/dbms/tests/queries/1_stateful/00014_filtering_arrays.sql b/tests/queries/1_stateful/00014_filtering_arrays.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00014_filtering_arrays.sql rename to tests/queries/1_stateful/00014_filtering_arrays.sql diff --git a/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference b/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference rename to tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference diff --git a/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql b/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql rename to tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql diff --git a/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference b/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference rename to tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference diff --git a/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql b/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql rename to tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql diff --git a/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference b/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference rename to tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference diff --git a/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql b/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql rename to tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql diff --git a/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference 
b/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference rename to tests/queries/1_stateful/00020_distinct_order_by_distributed.reference diff --git a/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql b/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql rename to tests/queries/1_stateful/00020_distinct_order_by_distributed.sql diff --git a/dbms/tests/queries/1_stateful/00021_1_select_with_in.reference b/tests/queries/1_stateful/00021_1_select_with_in.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00021_1_select_with_in.reference rename to tests/queries/1_stateful/00021_1_select_with_in.reference diff --git a/dbms/tests/queries/1_stateful/00021_1_select_with_in.sql b/tests/queries/1_stateful/00021_1_select_with_in.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00021_1_select_with_in.sql rename to tests/queries/1_stateful/00021_1_select_with_in.sql diff --git a/dbms/tests/queries/1_stateful/00021_2_select_with_in.reference b/tests/queries/1_stateful/00021_2_select_with_in.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00021_2_select_with_in.reference rename to tests/queries/1_stateful/00021_2_select_with_in.reference diff --git a/dbms/tests/queries/1_stateful/00021_2_select_with_in.sql b/tests/queries/1_stateful/00021_2_select_with_in.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00021_2_select_with_in.sql rename to tests/queries/1_stateful/00021_2_select_with_in.sql diff --git a/dbms/tests/queries/1_stateful/00021_3_select_with_in.reference b/tests/queries/1_stateful/00021_3_select_with_in.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00021_3_select_with_in.reference rename to tests/queries/1_stateful/00021_3_select_with_in.reference diff --git a/dbms/tests/queries/1_stateful/00021_3_select_with_in.sql b/tests/queries/1_stateful/00021_3_select_with_in.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00021_3_select_with_in.sql rename to tests/queries/1_stateful/00021_3_select_with_in.sql diff --git a/dbms/tests/queries/1_stateful/00022_merge_prewhere.reference b/tests/queries/1_stateful/00022_merge_prewhere.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00022_merge_prewhere.reference rename to tests/queries/1_stateful/00022_merge_prewhere.reference diff --git a/dbms/tests/queries/1_stateful/00022_merge_prewhere.sql b/tests/queries/1_stateful/00022_merge_prewhere.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00022_merge_prewhere.sql rename to tests/queries/1_stateful/00022_merge_prewhere.sql diff --git a/dbms/tests/queries/1_stateful/00023_totals_limit.reference b/tests/queries/1_stateful/00023_totals_limit.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00023_totals_limit.reference rename to tests/queries/1_stateful/00023_totals_limit.reference diff --git a/dbms/tests/queries/1_stateful/00023_totals_limit.sql b/tests/queries/1_stateful/00023_totals_limit.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00023_totals_limit.sql rename to tests/queries/1_stateful/00023_totals_limit.sql diff --git a/dbms/tests/queries/1_stateful/00024_random_counters.reference 
b/tests/queries/1_stateful/00024_random_counters.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00024_random_counters.reference rename to tests/queries/1_stateful/00024_random_counters.reference diff --git a/dbms/tests/queries/1_stateful/00024_random_counters.sql b/tests/queries/1_stateful/00024_random_counters.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00024_random_counters.sql rename to tests/queries/1_stateful/00024_random_counters.sql diff --git a/tests/queries/1_stateful/00030_array_enumerate_uniq.reference b/tests/queries/1_stateful/00030_array_enumerate_uniq.reference new file mode 100644 index 00000000000..45a4fb75db8 --- /dev/null +++ b/tests/queries/1_stateful/00030_array_enumerate_uniq.reference @@ -0,0 +1 @@ +8 diff --git a/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.sql b/tests/queries/1_stateful/00030_array_enumerate_uniq.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.sql rename to tests/queries/1_stateful/00030_array_enumerate_uniq.sql diff --git a/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.reference b/tests/queries/1_stateful/00031_array_enumerate_uniq.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.reference rename to tests/queries/1_stateful/00031_array_enumerate_uniq.reference diff --git a/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.sql b/tests/queries/1_stateful/00031_array_enumerate_uniq.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.sql rename to tests/queries/1_stateful/00031_array_enumerate_uniq.sql diff --git a/dbms/tests/queries/1_stateful/00032_aggregate_key64.reference b/tests/queries/1_stateful/00032_aggregate_key64.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00032_aggregate_key64.reference rename to tests/queries/1_stateful/00032_aggregate_key64.reference diff --git a/dbms/tests/queries/1_stateful/00032_aggregate_key64.sql b/tests/queries/1_stateful/00032_aggregate_key64.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00032_aggregate_key64.sql rename to tests/queries/1_stateful/00032_aggregate_key64.sql diff --git a/dbms/tests/queries/1_stateful/00033_aggregate_key_string.reference b/tests/queries/1_stateful/00033_aggregate_key_string.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00033_aggregate_key_string.reference rename to tests/queries/1_stateful/00033_aggregate_key_string.reference diff --git a/dbms/tests/queries/1_stateful/00033_aggregate_key_string.sql b/tests/queries/1_stateful/00033_aggregate_key_string.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00033_aggregate_key_string.sql rename to tests/queries/1_stateful/00033_aggregate_key_string.sql diff --git a/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference b/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference rename to tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference diff --git a/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql b/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql rename to tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql diff --git 
a/dbms/tests/queries/1_stateful/00035_aggregate_keys128.reference b/tests/queries/1_stateful/00035_aggregate_keys128.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00035_aggregate_keys128.reference rename to tests/queries/1_stateful/00035_aggregate_keys128.reference diff --git a/dbms/tests/queries/1_stateful/00035_aggregate_keys128.sql b/tests/queries/1_stateful/00035_aggregate_keys128.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00035_aggregate_keys128.sql rename to tests/queries/1_stateful/00035_aggregate_keys128.sql diff --git a/dbms/tests/queries/1_stateful/00036_aggregate_hashed.reference b/tests/queries/1_stateful/00036_aggregate_hashed.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00036_aggregate_hashed.reference rename to tests/queries/1_stateful/00036_aggregate_hashed.reference diff --git a/dbms/tests/queries/1_stateful/00036_aggregate_hashed.sql b/tests/queries/1_stateful/00036_aggregate_hashed.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00036_aggregate_hashed.sql rename to tests/queries/1_stateful/00036_aggregate_hashed.sql diff --git a/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.reference b/tests/queries/1_stateful/00037_uniq_state_merge1.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00037_uniq_state_merge1.reference rename to tests/queries/1_stateful/00037_uniq_state_merge1.reference diff --git a/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.sql b/tests/queries/1_stateful/00037_uniq_state_merge1.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00037_uniq_state_merge1.sql rename to tests/queries/1_stateful/00037_uniq_state_merge1.sql diff --git a/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.reference b/tests/queries/1_stateful/00038_uniq_state_merge2.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00038_uniq_state_merge2.reference rename to tests/queries/1_stateful/00038_uniq_state_merge2.reference diff --git a/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.sql b/tests/queries/1_stateful/00038_uniq_state_merge2.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00038_uniq_state_merge2.sql rename to tests/queries/1_stateful/00038_uniq_state_merge2.sql diff --git a/dbms/tests/queries/1_stateful/00039_primary_key.reference b/tests/queries/1_stateful/00039_primary_key.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00039_primary_key.reference rename to tests/queries/1_stateful/00039_primary_key.reference diff --git a/dbms/tests/queries/1_stateful/00039_primary_key.sql b/tests/queries/1_stateful/00039_primary_key.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00039_primary_key.sql rename to tests/queries/1_stateful/00039_primary_key.sql diff --git a/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.reference b/tests/queries/1_stateful/00040_aggregating_materialized_view.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.reference rename to tests/queries/1_stateful/00040_aggregating_materialized_view.reference diff --git a/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.sql b/tests/queries/1_stateful/00040_aggregating_materialized_view.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.sql rename to tests/queries/1_stateful/00040_aggregating_materialized_view.sql diff 
--git a/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.reference b/tests/queries/1_stateful/00041_aggregating_materialized_view.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.reference rename to tests/queries/1_stateful/00041_aggregating_materialized_view.reference diff --git a/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.sql b/tests/queries/1_stateful/00041_aggregating_materialized_view.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.sql rename to tests/queries/1_stateful/00041_aggregating_materialized_view.sql diff --git a/dbms/tests/queries/1_stateful/00042_any_left_join.reference b/tests/queries/1_stateful/00042_any_left_join.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00042_any_left_join.reference rename to tests/queries/1_stateful/00042_any_left_join.reference diff --git a/dbms/tests/queries/1_stateful/00042_any_left_join.sql b/tests/queries/1_stateful/00042_any_left_join.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00042_any_left_join.sql rename to tests/queries/1_stateful/00042_any_left_join.sql diff --git a/dbms/tests/queries/1_stateful/00043_any_left_join.reference b/tests/queries/1_stateful/00043_any_left_join.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00043_any_left_join.reference rename to tests/queries/1_stateful/00043_any_left_join.reference diff --git a/dbms/tests/queries/1_stateful/00043_any_left_join.sql b/tests/queries/1_stateful/00043_any_left_join.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00043_any_left_join.sql rename to tests/queries/1_stateful/00043_any_left_join.sql diff --git a/dbms/tests/queries/1_stateful/00044_any_left_join_string.reference b/tests/queries/1_stateful/00044_any_left_join_string.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00044_any_left_join_string.reference rename to tests/queries/1_stateful/00044_any_left_join_string.reference diff --git a/dbms/tests/queries/1_stateful/00044_any_left_join_string.sql b/tests/queries/1_stateful/00044_any_left_join_string.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00044_any_left_join_string.sql rename to tests/queries/1_stateful/00044_any_left_join_string.sql diff --git a/tests/queries/1_stateful/00045_uniq_upto.reference b/tests/queries/1_stateful/00045_uniq_upto.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00045_uniq_upto.sql b/tests/queries/1_stateful/00045_uniq_upto.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00045_uniq_upto.sql rename to tests/queries/1_stateful/00045_uniq_upto.sql diff --git a/tests/queries/1_stateful/00046_uniq_upto_distributed.reference b/tests/queries/1_stateful/00046_uniq_upto_distributed.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.sql b/tests/queries/1_stateful/00046_uniq_upto_distributed.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.sql rename to tests/queries/1_stateful/00046_uniq_upto_distributed.sql diff --git a/dbms/tests/queries/1_stateful/00047_bar.reference b/tests/queries/1_stateful/00047_bar.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00047_bar.reference rename to tests/queries/1_stateful/00047_bar.reference 
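The uexpect.py helper introduced earlier in this patch implements a minimal expect(1)-style driver: spawn() launches a command under a pseudo-terminal, a daemon reader thread pumps the pty output into a Queue, and IO.expect() matches regular expressions against the accumulated buffer under a timeout. A minimal usage sketch follows (not part of this patch; it assumes Python 2, since the module imports from Queue, and that uexpect.py is importable):

    import sys
    import uexpect

    # spawn() takes the same argv list as subprocess.Popen and wires the
    # child's stdin/stdout/stderr to a single pty.
    io = uexpect.spawn(['/bin/bash', '--noediting'])
    io.timeout(10)         # default timeout in seconds used by expect()
    io.eol('\r')           # appended to every send(), like pressing Enter
    io.logger(sys.stdout)  # mirror the captured session to stdout
    try:
        io.send('echo $((6 * 7))')
        io.expect('42')    # a regex, matched against the buffered pty output
    finally:
        io.close()         # stops the reader thread and kills the shell

Keeping the blocking os.read() on a separate thread is what lets expect() honor its timeout: the calling thread only ever blocks on Queue.get(timeout=...).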
diff --git a/dbms/tests/queries/1_stateful/00047_bar.sql b/tests/queries/1_stateful/00047_bar.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00047_bar.sql rename to tests/queries/1_stateful/00047_bar.sql diff --git a/dbms/tests/queries/1_stateful/00048_min_max.reference b/tests/queries/1_stateful/00048_min_max.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00048_min_max.reference rename to tests/queries/1_stateful/00048_min_max.reference diff --git a/dbms/tests/queries/1_stateful/00048_min_max.sql b/tests/queries/1_stateful/00048_min_max.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00048_min_max.sql rename to tests/queries/1_stateful/00048_min_max.sql diff --git a/dbms/tests/queries/1_stateful/00049_max_string_if.reference b/tests/queries/1_stateful/00049_max_string_if.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00049_max_string_if.reference rename to tests/queries/1_stateful/00049_max_string_if.reference diff --git a/dbms/tests/queries/1_stateful/00049_max_string_if.sql b/tests/queries/1_stateful/00049_max_string_if.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00049_max_string_if.sql rename to tests/queries/1_stateful/00049_max_string_if.sql diff --git a/dbms/tests/queries/1_stateful/00050_min_max.reference b/tests/queries/1_stateful/00050_min_max.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00050_min_max.reference rename to tests/queries/1_stateful/00050_min_max.reference diff --git a/dbms/tests/queries/1_stateful/00050_min_max.sql b/tests/queries/1_stateful/00050_min_max.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00050_min_max.sql rename to tests/queries/1_stateful/00050_min_max.sql diff --git a/dbms/tests/queries/1_stateful/00051_min_max_array.reference b/tests/queries/1_stateful/00051_min_max_array.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00051_min_max_array.reference rename to tests/queries/1_stateful/00051_min_max_array.reference diff --git a/dbms/tests/queries/1_stateful/00051_min_max_array.sql b/tests/queries/1_stateful/00051_min_max_array.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00051_min_max_array.sql rename to tests/queries/1_stateful/00051_min_max_array.sql diff --git a/dbms/tests/queries/1_stateful/00052_group_by_in.reference b/tests/queries/1_stateful/00052_group_by_in.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00052_group_by_in.reference rename to tests/queries/1_stateful/00052_group_by_in.reference diff --git a/dbms/tests/queries/1_stateful/00052_group_by_in.sql b/tests/queries/1_stateful/00052_group_by_in.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00052_group_by_in.sql rename to tests/queries/1_stateful/00052_group_by_in.sql diff --git a/tests/queries/1_stateful/00053_replicate_segfault.reference b/tests/queries/1_stateful/00053_replicate_segfault.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/1_stateful/00053_replicate_segfault.reference @@ -0,0 +1 @@ +1 diff --git a/dbms/tests/queries/1_stateful/00053_replicate_segfault.sql b/tests/queries/1_stateful/00053_replicate_segfault.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00053_replicate_segfault.sql rename to tests/queries/1_stateful/00053_replicate_segfault.sql diff --git a/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.reference 
b/tests/queries/1_stateful/00054_merge_tree_partitions.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00054_merge_tree_partitions.reference rename to tests/queries/1_stateful/00054_merge_tree_partitions.reference diff --git a/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.sql b/tests/queries/1_stateful/00054_merge_tree_partitions.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00054_merge_tree_partitions.sql rename to tests/queries/1_stateful/00054_merge_tree_partitions.sql diff --git a/dbms/tests/queries/1_stateful/00055_index_and_not.reference b/tests/queries/1_stateful/00055_index_and_not.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00055_index_and_not.reference rename to tests/queries/1_stateful/00055_index_and_not.reference diff --git a/dbms/tests/queries/1_stateful/00055_index_and_not.sql b/tests/queries/1_stateful/00055_index_and_not.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00055_index_and_not.sql rename to tests/queries/1_stateful/00055_index_and_not.sql diff --git a/dbms/tests/queries/1_stateful/00056_view.reference b/tests/queries/1_stateful/00056_view.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00056_view.reference rename to tests/queries/1_stateful/00056_view.reference diff --git a/dbms/tests/queries/1_stateful/00056_view.sql b/tests/queries/1_stateful/00056_view.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00056_view.sql rename to tests/queries/1_stateful/00056_view.sql diff --git a/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference b/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql b/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql rename to tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql diff --git a/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference b/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference rename to tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference diff --git a/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql b/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql rename to tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql diff --git a/dbms/tests/queries/1_stateful/00061_storage_buffer.reference b/tests/queries/1_stateful/00061_storage_buffer.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00061_storage_buffer.reference rename to tests/queries/1_stateful/00061_storage_buffer.reference diff --git a/dbms/tests/queries/1_stateful/00061_storage_buffer.sql b/tests/queries/1_stateful/00061_storage_buffer.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00061_storage_buffer.sql rename to tests/queries/1_stateful/00061_storage_buffer.sql diff --git a/dbms/tests/queries/1_stateful/00062_loyalty.reference b/tests/queries/1_stateful/00062_loyalty.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00062_loyalty.reference rename 
to tests/queries/1_stateful/00062_loyalty.reference diff --git a/dbms/tests/queries/1_stateful/00062_loyalty.sql b/tests/queries/1_stateful/00062_loyalty.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00062_loyalty.sql rename to tests/queries/1_stateful/00062_loyalty.sql diff --git a/dbms/tests/queries/1_stateful/00063_loyalty_joins.reference b/tests/queries/1_stateful/00063_loyalty_joins.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00063_loyalty_joins.reference rename to tests/queries/1_stateful/00063_loyalty_joins.reference diff --git a/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql b/tests/queries/1_stateful/00063_loyalty_joins.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00063_loyalty_joins.sql rename to tests/queries/1_stateful/00063_loyalty_joins.sql diff --git a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference b/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference rename to tests/queries/1_stateful/00065_loyalty_with_storage_join.reference diff --git a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql b/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql rename to tests/queries/1_stateful/00065_loyalty_with_storage_join.sql diff --git a/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference rename to tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference diff --git a/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql rename to tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql diff --git a/dbms/tests/queries/1_stateful/00067_union_all.reference b/tests/queries/1_stateful/00067_union_all.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00067_union_all.reference rename to tests/queries/1_stateful/00067_union_all.reference diff --git a/dbms/tests/queries/1_stateful/00067_union_all.sql b/tests/queries/1_stateful/00067_union_all.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00067_union_all.sql rename to tests/queries/1_stateful/00067_union_all.sql diff --git a/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.reference b/tests/queries/1_stateful/00068_subquery_in_prewhere.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.reference rename to tests/queries/1_stateful/00068_subquery_in_prewhere.reference diff --git a/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.sql b/tests/queries/1_stateful/00068_subquery_in_prewhere.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.sql rename to tests/queries/1_stateful/00068_subquery_in_prewhere.sql diff --git a/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference b/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference similarity index 100% rename from 
dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference rename to tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference diff --git a/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql b/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql rename to tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql diff --git a/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference b/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference rename to tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference diff --git a/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql b/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql rename to tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql diff --git a/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.reference b/tests/queries/1_stateful/00072_compare_date_and_string_index.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.reference rename to tests/queries/1_stateful/00072_compare_date_and_string_index.reference diff --git a/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.sql b/tests/queries/1_stateful/00072_compare_date_and_string_index.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.sql rename to tests/queries/1_stateful/00072_compare_date_and_string_index.sql diff --git a/dbms/tests/queries/1_stateful/00073_uniq_array.reference b/tests/queries/1_stateful/00073_uniq_array.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00073_uniq_array.reference rename to tests/queries/1_stateful/00073_uniq_array.reference diff --git a/dbms/tests/queries/1_stateful/00073_uniq_array.sql b/tests/queries/1_stateful/00073_uniq_array.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00073_uniq_array.sql rename to tests/queries/1_stateful/00073_uniq_array.sql diff --git a/dbms/tests/queries/1_stateful/00074_full_join.reference b/tests/queries/1_stateful/00074_full_join.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00074_full_join.reference rename to tests/queries/1_stateful/00074_full_join.reference diff --git a/dbms/tests/queries/1_stateful/00074_full_join.sql b/tests/queries/1_stateful/00074_full_join.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00074_full_join.sql rename to tests/queries/1_stateful/00074_full_join.sql diff --git a/dbms/tests/queries/1_stateful/00075_left_array_join.reference b/tests/queries/1_stateful/00075_left_array_join.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00075_left_array_join.reference rename to tests/queries/1_stateful/00075_left_array_join.reference diff --git a/dbms/tests/queries/1_stateful/00075_left_array_join.sql b/tests/queries/1_stateful/00075_left_array_join.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00075_left_array_join.sql rename to tests/queries/1_stateful/00075_left_array_join.sql diff --git a/dbms/tests/queries/1_stateful/00076_system_columns_bytes.reference b/tests/queries/1_stateful/00076_system_columns_bytes.reference 
similarity index 100% rename from dbms/tests/queries/1_stateful/00076_system_columns_bytes.reference rename to tests/queries/1_stateful/00076_system_columns_bytes.reference diff --git a/dbms/tests/queries/1_stateful/00076_system_columns_bytes.sql b/tests/queries/1_stateful/00076_system_columns_bytes.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00076_system_columns_bytes.sql rename to tests/queries/1_stateful/00076_system_columns_bytes.sql diff --git a/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference b/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference rename to tests/queries/1_stateful/00077_log_tinylog_stripelog.reference diff --git a/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql b/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql rename to tests/queries/1_stateful/00077_log_tinylog_stripelog.sql diff --git a/dbms/tests/queries/1_stateful/00078_group_by_arrays.reference b/tests/queries/1_stateful/00078_group_by_arrays.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00078_group_by_arrays.reference rename to tests/queries/1_stateful/00078_group_by_arrays.reference diff --git a/dbms/tests/queries/1_stateful/00078_group_by_arrays.sql b/tests/queries/1_stateful/00078_group_by_arrays.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00078_group_by_arrays.sql rename to tests/queries/1_stateful/00078_group_by_arrays.sql diff --git a/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference rename to tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference diff --git a/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql rename to tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql diff --git a/dbms/tests/queries/1_stateful/00080_array_join_and_union.reference b/tests/queries/1_stateful/00080_array_join_and_union.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00080_array_join_and_union.reference rename to tests/queries/1_stateful/00080_array_join_and_union.reference diff --git a/dbms/tests/queries/1_stateful/00080_array_join_and_union.sql b/tests/queries/1_stateful/00080_array_join_and_union.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00080_array_join_and_union.sql rename to tests/queries/1_stateful/00080_array_join_and_union.sql diff --git a/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference b/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference rename to tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference diff --git a/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql b/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql similarity index 100% rename from 
dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql rename to tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql diff --git a/dbms/tests/queries/1_stateful/00082_quantiles.reference b/tests/queries/1_stateful/00082_quantiles.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00082_quantiles.reference rename to tests/queries/1_stateful/00082_quantiles.reference diff --git a/dbms/tests/queries/1_stateful/00082_quantiles.sql b/tests/queries/1_stateful/00082_quantiles.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00082_quantiles.sql rename to tests/queries/1_stateful/00082_quantiles.sql diff --git a/dbms/tests/queries/1_stateful/00083_array_filter.reference b/tests/queries/1_stateful/00083_array_filter.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00083_array_filter.reference rename to tests/queries/1_stateful/00083_array_filter.reference diff --git a/dbms/tests/queries/1_stateful/00083_array_filter.sql b/tests/queries/1_stateful/00083_array_filter.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00083_array_filter.sql rename to tests/queries/1_stateful/00083_array_filter.sql diff --git a/dbms/tests/queries/1_stateful/00084_external_aggregation.reference b/tests/queries/1_stateful/00084_external_aggregation.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00084_external_aggregation.reference rename to tests/queries/1_stateful/00084_external_aggregation.reference diff --git a/dbms/tests/queries/1_stateful/00084_external_aggregation.sql b/tests/queries/1_stateful/00084_external_aggregation.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00084_external_aggregation.sql rename to tests/queries/1_stateful/00084_external_aggregation.sql diff --git a/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference b/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference @@ -0,0 +1 @@ +0 diff --git a/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql b/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql rename to tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql diff --git a/tests/queries/1_stateful/00086_array_reduce.reference b/tests/queries/1_stateful/00086_array_reduce.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00086_array_reduce.sql b/tests/queries/1_stateful/00086_array_reduce.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00086_array_reduce.sql rename to tests/queries/1_stateful/00086_array_reduce.sql diff --git a/tests/queries/1_stateful/00087_where_0.reference b/tests/queries/1_stateful/00087_where_0.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00087_where_0.sql b/tests/queries/1_stateful/00087_where_0.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00087_where_0.sql rename to tests/queries/1_stateful/00087_where_0.sql diff --git a/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference similarity index 100% rename from 
dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference rename to tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference diff --git a/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql rename to tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql diff --git a/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference b/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference rename to tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference diff --git a/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql b/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql rename to tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql diff --git a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.reference b/tests/queries/1_stateful/00090_thread_pool_deadlock.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.reference rename to tests/queries/1_stateful/00090_thread_pool_deadlock.reference diff --git a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh b/tests/queries/1_stateful/00090_thread_pool_deadlock.sh similarity index 100% rename from dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh rename to tests/queries/1_stateful/00090_thread_pool_deadlock.sh diff --git a/dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.reference b/tests/queries/1_stateful/00091_prewhere_two_conditions.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.reference rename to tests/queries/1_stateful/00091_prewhere_two_conditions.reference diff --git a/dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.sql b/tests/queries/1_stateful/00091_prewhere_two_conditions.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.sql rename to tests/queries/1_stateful/00091_prewhere_two_conditions.sql diff --git a/dbms/tests/queries/1_stateful/00092_obfuscator.reference b/tests/queries/1_stateful/00092_obfuscator.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00092_obfuscator.reference rename to tests/queries/1_stateful/00092_obfuscator.reference diff --git a/dbms/tests/queries/1_stateful/00092_obfuscator.sh b/tests/queries/1_stateful/00092_obfuscator.sh similarity index 100% rename from dbms/tests/queries/1_stateful/00092_obfuscator.sh rename to tests/queries/1_stateful/00092_obfuscator.sh diff --git a/tests/queries/1_stateful/00093_prewhere_array_join.reference b/tests/queries/1_stateful/00093_prewhere_array_join.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/1_stateful/00093_prewhere_array_join.sql b/tests/queries/1_stateful/00093_prewhere_array_join.sql new file mode 100644 index 00000000000..a1263144bb1 --- /dev/null +++ b/tests/queries/1_stateful/00093_prewhere_array_join.sql @@ -0,0 +1,9 @@ 
+SELECT arrayJoin([SearchEngineID]) AS search_engine, URL FROM test.hits WHERE SearchEngineID != 0 AND search_engine != 0 FORMAT Null;
+
+SELECT
+    arrayJoin([0]) AS browser,
+    arrayJoin([SearchEngineID]) AS search_engine,
+    URL
+FROM test.hits
+WHERE 1 AND (SearchEngineID != 0) AND (browser != 0) AND (search_engine != 0)
+FORMAT Null;
diff --git a/dbms/tests/queries/1_stateful/00139_like.reference b/tests/queries/1_stateful/00139_like.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00139_like.reference
rename to tests/queries/1_stateful/00139_like.reference
diff --git a/dbms/tests/queries/1_stateful/00139_like.sql b/tests/queries/1_stateful/00139_like.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00139_like.sql
rename to tests/queries/1_stateful/00139_like.sql
diff --git a/dbms/tests/queries/1_stateful/00140_rename.reference b/tests/queries/1_stateful/00140_rename.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00140_rename.reference
rename to tests/queries/1_stateful/00140_rename.reference
diff --git a/dbms/tests/queries/1_stateful/00140_rename.sql b/tests/queries/1_stateful/00140_rename.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00140_rename.sql
rename to tests/queries/1_stateful/00140_rename.sql
diff --git a/dbms/tests/queries/1_stateful/00141_transform.reference b/tests/queries/1_stateful/00141_transform.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00141_transform.reference
rename to tests/queries/1_stateful/00141_transform.reference
diff --git a/dbms/tests/queries/1_stateful/00141_transform.sql b/tests/queries/1_stateful/00141_transform.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00141_transform.sql
rename to tests/queries/1_stateful/00141_transform.sql
diff --git a/dbms/tests/queries/1_stateful/00142_system_columns.reference b/tests/queries/1_stateful/00142_system_columns.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00142_system_columns.reference
rename to tests/queries/1_stateful/00142_system_columns.reference
diff --git a/dbms/tests/queries/1_stateful/00142_system_columns.sql b/tests/queries/1_stateful/00142_system_columns.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00142_system_columns.sql
rename to tests/queries/1_stateful/00142_system_columns.sql
diff --git a/dbms/tests/queries/1_stateful/00143_transform_non_const_default.reference b/tests/queries/1_stateful/00143_transform_non_const_default.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00143_transform_non_const_default.reference
rename to tests/queries/1_stateful/00143_transform_non_const_default.reference
diff --git a/dbms/tests/queries/1_stateful/00143_transform_non_const_default.sql b/tests/queries/1_stateful/00143_transform_non_const_default.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00143_transform_non_const_default.sql
rename to tests/queries/1_stateful/00143_transform_non_const_default.sql
diff --git a/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference b/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference
rename to tests/queries/1_stateful/00144_functions_of_aggregation_states.reference
diff --git a/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql
b/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql rename to tests/queries/1_stateful/00144_functions_of_aggregation_states.sql diff --git a/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference b/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference rename to tests/queries/1_stateful/00145_aggregate_functions_statistics.reference diff --git a/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql b/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql rename to tests/queries/1_stateful/00145_aggregate_functions_statistics.sql diff --git a/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.reference b/tests/queries/1_stateful/00146_aggregate_function_uniq.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.reference rename to tests/queries/1_stateful/00146_aggregate_function_uniq.reference diff --git a/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.sql b/tests/queries/1_stateful/00146_aggregate_function_uniq.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.sql rename to tests/queries/1_stateful/00146_aggregate_function_uniq.sql diff --git a/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.reference b/tests/queries/1_stateful/00147_global_in_aggregate_function.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.reference rename to tests/queries/1_stateful/00147_global_in_aggregate_function.reference diff --git a/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.sql b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.sql rename to tests/queries/1_stateful/00147_global_in_aggregate_function.sql diff --git a/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference b/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference rename to tests/queries/1_stateful/00148_monotonic_functions_and_index.reference diff --git a/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql b/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql rename to tests/queries/1_stateful/00148_monotonic_functions_and_index.sql diff --git a/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference b/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference rename to tests/queries/1_stateful/00149_quantiles_timing_distributed.reference diff --git a/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql rename to 
tests/queries/1_stateful/00149_quantiles_timing_distributed.sql diff --git a/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.reference b/tests/queries/1_stateful/00150_quantiles_timing_precision.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.reference rename to tests/queries/1_stateful/00150_quantiles_timing_precision.reference diff --git a/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.sql b/tests/queries/1_stateful/00150_quantiles_timing_precision.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.sql rename to tests/queries/1_stateful/00150_quantiles_timing_precision.sql diff --git a/dbms/tests/queries/1_stateful/00151_order_by_read_in_order.reference b/tests/queries/1_stateful/00151_order_by_read_in_order.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00151_order_by_read_in_order.reference rename to tests/queries/1_stateful/00151_order_by_read_in_order.reference diff --git a/dbms/tests/queries/1_stateful/00151_order_by_read_in_order.sql b/tests/queries/1_stateful/00151_order_by_read_in_order.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00151_order_by_read_in_order.sql rename to tests/queries/1_stateful/00151_order_by_read_in_order.sql diff --git a/dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference b/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference rename to tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference diff --git a/dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql b/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql rename to tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql diff --git a/dbms/tests/queries/1_stateful/00152_insert_different_granularity.reference b/tests/queries/1_stateful/00152_insert_different_granularity.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00152_insert_different_granularity.reference rename to tests/queries/1_stateful/00152_insert_different_granularity.reference diff --git a/dbms/tests/queries/1_stateful/00152_insert_different_granularity.sql b/tests/queries/1_stateful/00152_insert_different_granularity.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00152_insert_different_granularity.sql rename to tests/queries/1_stateful/00152_insert_different_granularity.sql diff --git a/tests/queries/1_stateful/00153_aggregate_arena_race.reference b/tests/queries/1_stateful/00153_aggregate_arena_race.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00153_aggregate_arena_race.sql b/tests/queries/1_stateful/00153_aggregate_arena_race.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00153_aggregate_arena_race.sql rename to tests/queries/1_stateful/00153_aggregate_arena_race.sql diff --git a/dbms/tests/queries/1_stateful/00154_avro.reference b/tests/queries/1_stateful/00154_avro.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00154_avro.reference rename to 
tests/queries/1_stateful/00154_avro.reference diff --git a/dbms/tests/queries/1_stateful/00154_avro.sql b/tests/queries/1_stateful/00154_avro.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00154_avro.sql rename to tests/queries/1_stateful/00154_avro.sql diff --git a/tests/queries/__init__.py b/tests/queries/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/bugs/01060_defaults_all_columns.reference b/tests/queries/bugs/01060_defaults_all_columns.reference similarity index 100% rename from dbms/tests/queries/bugs/01060_defaults_all_columns.reference rename to tests/queries/bugs/01060_defaults_all_columns.reference diff --git a/dbms/tests/queries/bugs/default_prewhere.sql b/tests/queries/bugs/default_prewhere.sql similarity index 100% rename from dbms/tests/queries/bugs/default_prewhere.sql rename to tests/queries/bugs/default_prewhere.sql diff --git a/tests/queries/bugs/join_constants_on.sql b/tests/queries/bugs/join_constants_on.sql new file mode 100644 index 00000000000..ae967e07adb --- /dev/null +++ b/tests/queries/bugs/join_constants_on.sql @@ -0,0 +1,2 @@ +select cast(1, 'UInt8') from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8'); +select isConstant('UInt8'), toFixedString('hello', toUInt8(substring('UInt8', 5, 1))) from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8'); diff --git a/dbms/tests/queries/bugs/low_cardinality_remove.sql b/tests/queries/bugs/low_cardinality_remove.sql similarity index 100% rename from dbms/tests/queries/bugs/low_cardinality_remove.sql rename to tests/queries/bugs/low_cardinality_remove.sql diff --git a/dbms/tests/queries/bugs/missing_scalar_subquery_removal.sql b/tests/queries/bugs/missing_scalar_subquery_removal.sql similarity index 100% rename from dbms/tests/queries/bugs/missing_scalar_subquery_removal.sql rename to tests/queries/bugs/missing_scalar_subquery_removal.sql diff --git a/dbms/tests/queries/bugs/position_case_insensitive_utf8.sql b/tests/queries/bugs/position_case_insensitive_utf8.sql similarity index 100% rename from dbms/tests/queries/bugs/position_case_insensitive_utf8.sql rename to tests/queries/bugs/position_case_insensitive_utf8.sql diff --git a/dbms/tests/queries/conftest.py b/tests/queries/conftest.py similarity index 100% rename from dbms/tests/queries/conftest.py rename to tests/queries/conftest.py diff --git a/dbms/tests/queries/query_test.py b/tests/queries/query_test.py similarity index 100% rename from dbms/tests/queries/query_test.py rename to tests/queries/query_test.py diff --git a/dbms/tests/queries/server.py b/tests/queries/server.py similarity index 98% rename from dbms/tests/queries/server.py rename to tests/queries/server.py index 185b694619e..d1ffe9099b5 100644 --- a/dbms/tests/queries/server.py +++ b/tests/queries/server.py @@ -118,6 +118,7 @@ ServerThread.DEFAULT_SERVER_CONFIG = \ {tmp_dir}/data/ {tmp_dir}/tmp/ + {tmp_dir}/data/access/ users.xml 5368709120 @@ -193,6 +194,8 @@ ServerThread.DEFAULT_USERS_CONFIG = \ default default + + 1 diff --git a/dbms/tests/queries/shell_config.sh b/tests/queries/shell_config.sh similarity index 100% rename from dbms/tests/queries/shell_config.sh rename to tests/queries/shell_config.sh diff --git a/dbms/tests/server-test.xml b/tests/server-test.xml similarity index 97% rename from dbms/tests/server-test.xml rename to tests/server-test.xml index d9e547b4d55..7f792479065 100644 --- a/dbms/tests/server-test.xml +++ b/tests/server-test.xml @@ 
-31,7 +31,7 @@ true - + true true sslv2,sslv3 @@ -47,6 +47,7 @@ /tmp/clickhouse/data/ /tmp/clickhouse/tmp/ users.xml + /tmp/clickhouse/data/access/ 5368709120 default default diff --git a/dbms/tests/stress b/tests/stress similarity index 100% rename from dbms/tests/stress rename to tests/stress diff --git a/tests/strings_dictionary.xml b/tests/strings_dictionary.xml new file mode 120000 index 00000000000..be66c1da224 --- /dev/null +++ b/tests/strings_dictionary.xml @@ -0,0 +1 @@ +config/strings_dictionary.xml \ No newline at end of file diff --git a/dbms/tests/tsan_suppressions.txt b/tests/tsan_suppressions.txt similarity index 100% rename from dbms/tests/tsan_suppressions.txt rename to tests/tsan_suppressions.txt diff --git a/tests/users.d/access_management.xml b/tests/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/users.d/access_management.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/dbms/tests/users.d/readonly.xml b/tests/users.d/readonly.xml similarity index 100% rename from dbms/tests/users.d/readonly.xml rename to tests/users.d/readonly.xml diff --git a/dbms/tests/users.xml b/tests/users.xml similarity index 100% rename from dbms/tests/users.xml rename to tests/users.xml diff --git a/utils/build/build_debian.sh b/utils/build/build_debian.sh index 0767dc53450..0c194fe53db 100755 --- a/utils/build/build_debian.sh +++ b/utils/build/build_debian.sh @@ -29,7 +29,7 @@ cmake --build . cd .. # Run server: -# build/dbms/programs/clickhouse-server --config-file=ClickHouse/dbms/programs/server/config.xml & +# build/programs/clickhouse-server --config-file=ClickHouse/programs/server/config.xml & # Run client: -# build/dbms/programs/clickhouse-client +# build/programs/clickhouse-client diff --git a/utils/build/build_freebsd.sh b/utils/build/build_freebsd.sh index 5f2b9735501..b2bf7243f12 100755 --- a/utils/build/build_freebsd.sh +++ b/utils/build/build_freebsd.sh @@ -44,7 +44,7 @@ cmake --build . cd .. # Run server: -# build/dbms/programs/clickhouse-server --config-file=ClickHouse/dbms/programs/server/config.xml & +# build/programs/clickhouse-server --config-file=ClickHouse/programs/server/config.xml & # Run client: -# build/dbms/programs/clickhouse-client +# build/programs/clickhouse-client diff --git a/utils/build/build_macos.sh b/utils/build/build_macos.sh index 0e9bed37aa2..d27c8cf4367 100755 --- a/utils/build/build_macos.sh +++ b/utils/build/build_macos.sh @@ -43,10 +43,10 @@ cmake --build . cd .. 
# Run server:
-# build/dbms/programs/clickhouse-server --config-file=ClickHouse/dbms/programs/server/config.xml &
+# build/programs/clickhouse-server --config-file=ClickHouse/programs/server/config.xml &

# Run client:
-# build/dbms/programs/clickhouse-client
+# build/programs/clickhouse-client

## Caveats
diff --git a/utils/check-style/check-duplicate-includes.sh b/utils/check-style/check-duplicate-includes.sh
index ecef0c76bad..df843ead623 100755
--- a/utils/check-style/check-duplicate-includes.sh
+++ b/utils/check-style/check-duplicate-includes.sh
@@ -3,4 +3,4 @@
 ROOT_PATH=$(git rev-parse --show-toplevel)

 # Find duplicate include directives
-find $ROOT_PATH/dbms -name '*.h' -or -name '*.cpp' | while read file; do grep -P '^#include ' $file | sort | uniq -c | grep -v -P '^\s+1\s' && echo $file; done
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | while read file; do grep -P '^#include ' $file | sort | uniq -c | grep -v -P '^\s+1\s' && echo $file; done
diff --git a/utils/check-style/check-include b/utils/check-style/check-include
index 3ced75d19d5..211172979bd 100755
--- a/utils/check-style/check-include
+++ b/utils/check-style/check-include
@@ -59,8 +59,8 @@ inc="-I. \
 -I./contrib/lz4/lib \
 -I./contrib/hyperscan/src \
 -I./contrib/simdjson/include \
--I./dbms/src \
--I${BUILD_DIR}/dbms/src"
+-I./src \
+-I${BUILD_DIR}/src"

 if [ -z $1 ]; then
     cd ${ROOT_DIR=${CUR_DIR}../..}
diff --git a/utils/check-style/check-style b/utils/check-style/check-style
index 471488287ab..2a2e9dab42d 100755
--- a/utils/check-style/check-style
+++ b/utils/check-style/check-style
@@ -15,15 +15,20 @@ ROOT_PATH=$(git rev-parse --show-toplevel)
 EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|consistent-hashing'

-find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null |
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
     grep -vP $EXCLUDE_DIRS |
-    xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|\t|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' |
-# a curly brace not in a new line, but not for the case of C++11 init or agg. initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | a tab character | missing whitespace after for/if/while... before opening brace | whitespaces inside braces
+    xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' |
+# a curly brace not in a new line, but not for the case of C++11 init or agg. initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | missing whitespace after for/if/while... before opening brace | whitespaces inside braces
     grep -v -P '(//|:\s+\*|\$\(\()| \)"' # single-line comment | continuation of a multiline comment | a typical piece of embedded shell code | something like ending of raw string literal

+# Tabs
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
+    grep -vP $EXCLUDE_DIRS |
+    xargs grep $@ -F $'\t'
+
 # // namespace comments are unneeded
-find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null |
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
     grep -vP $EXCLUDE_DIRS |
     xargs grep $@ -P '}\s*//+\s*namespace\s*'
@@ -31,23 +36,26 @@ find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null |
 find -L $ROOT_PATH -type l 2>/dev/null | grep -v contrib && echo "^ Broken symlinks found"

 # Double whitespaces
-find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null | while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done

 # Unused ErrorCodes
 # NOTE: to fix automatically, replace echo with:
 # sed -i "/extern const int $code/d" $file
-find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'extern const int [_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sed -r -e 's/^.*?extern const int ([_A-Z]+);.*?$/\1/' | while read code; do grep -q "ErrorCodes::$code" $file || echo "ErrorCode $code is defined but not used in file $file"; done; done
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'extern const int [_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sed -r -e 's/^.*?extern const int ([_A-Z]+);.*?$/\1/' | while read code; do grep -q "ErrorCodes::$code" $file || echo "ErrorCode $code is defined but not used in file $file"; done; done

 # Undefined ErrorCodes
 # NOTE: to fix automatically, replace echo with:
 # ( grep -q -F 'namespace ErrorCodes' $file && sed -i -r "0,/(\s*)extern const int [_A-Z]+/s//\1extern const int $code;\n&/" $file || awk '{ print; if (ns == 1) { ns = 2 }; if (ns == 2) { ns = 0; print "namespace ErrorCodes\n{\n    extern const int '$code';\n}" } }; /namespace DB/ { ns = 1; };' < $file > ${file}.tmp && mv ${file}.tmp $file )
-find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'ErrorCodes::[_A-Z]+' $file | sed -r -e 's/^.*?ErrorCodes::([_A-Z]+).*?$/\1/' | while read code; do grep -q "extern const int $code" $file || echo "ErrorCode $code is used in file $file but not defined"; done; done
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'ErrorCodes::[_A-Z]+' $file | sed -r -e 's/^.*?ErrorCodes::([_A-Z]+).*?$/\1/' | while read code; do grep -q "extern const int $code" $file || echo "ErrorCode $code is used in file $file but not defined"; done; done

 # Duplicate ErrorCodes
-find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate ErrorCode in file $file"; done
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate ErrorCode in file $file"; done

 # Three or more consecutive empty lines
-find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | while read file; do awk '/^$/ { ++i; if (i > 2) { print "More than two consecutive empty lines in file '$file'" } } /./ { i = 0 }' $file; done
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | while read file; do awk '/^$/ { ++i; if (i > 2) { print "More than two consecutive empty lines in file '$file'" } } /./ { i = 0 }' $file; done

 # Broken XML files (requires libxml2-utils)
-find $ROOT_PATH/{dbms,base} -name '*.xml' | xargs xmllint --noout --nonet
+find $ROOT_PATH/{src,base,programs,utils} -name '*.xml' | xargs xmllint --noout --nonet
+
+# Machine translation to Russian is strictly prohibited
+find $ROOT_PATH/docs/ru -name '*.md' | xargs grep -l -F 'machine_translated: true'
diff --git a/utils/check-style/check-ungrouped-includes.sh b/utils/check-style/check-ungrouped-includes.sh
index c394c8a04c3..f1f124fc13e 100755
--- a/utils/check-style/check-ungrouped-includes.sh
+++ b/utils/check-style/check-ungrouped-includes.sh
@@ -3,6 +3,6 @@
 ROOT_PATH=$(git rev-parse --show-toplevel)

 # Find files with includes not grouped together by first component of path
-find $ROOT_PATH/dbms -name '*.h' -or -name '*.cpp' | while read file; do
+find $ROOT_PATH/src -name '*.h' -or -name '*.cpp' | while read file; do
     [[ $(grep -oP '^#include <\w+' $file | uniq -c | wc -l) > $(grep -oP '^#include <\w+' $file | sort | uniq -c | wc -l) ]] && echo $file && grep -oP '^#include <\w+' $file | uniq -c;
 done
diff --git a/utils/ci/run-clickhouse-from-binaries.sh b/utils/ci/run-clickhouse-from-binaries.sh
index f16d840316a..5e9dc35869a 100755
--- a/utils/ci/run-clickhouse-from-binaries.sh
+++ b/utils/ci/run-clickhouse-from-binaries.sh
@@ -6,8 +6,8 @@ set -e -x

 source default-config

-SERVER_BIN="${WORKSPACE}/build/dbms/src/Server/clickhouse"
-SERVER_CONF="${WORKSPACE}/sources/dbms/src/Server/config.xml"
+SERVER_BIN="${WORKSPACE}/build/src/Server/clickhouse"
+SERVER_CONF="${WORKSPACE}/sources/src/Server/config.xml"
 SERVER_DATADIR="${WORKSPACE}/clickhouse"

 [[ -x "$SERVER_BIN" ]] || die "Run build-normal.sh first"
diff --git a/utils/clickhouse-docker b/utils/clickhouse-docker
new file mode 100755
index 00000000000..6f2d1197c0a
--- /dev/null
+++ b/utils/clickhouse-docker
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+if [ $# -lt 1 ]
+then
+cat << HELP
+
+clickhouse-docker -- open clickhouse-client of the desired version in a docker container (automatically removed after you exit the bash shell).
+
+EXAMPLE:
+  - start latest version:
+    clickhouse-docker latest
+
+  - start version 20.1:
+    clickhouse-docker 20.1
+
+  - list available versions:
+    clickhouse-docker list
+HELP
+exit
+fi
+
+param="$1"
+
+if [ "${param}" = "list" ]
+then
+    # https://stackoverflow.com/a/39454426/1555175
+    wget -q https://registry.hub.docker.com/v1/repositories/yandex/clickhouse-server/tags -O - | sed -e 's/[][]//g' -e 's/"//g' -e 's/ //g' | tr '}' '\n' | awk -F: '{print $3}'
+else
+    docker pull yandex/clickhouse-server:${param}
+    tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) # older versions require the /nonexistent folder to exist to run clickhouse-client :D
+    chmod 777 ${tmp_dir}
+    set -e
+    containerid=`docker run -v${tmp_dir}:/nonexistent -d yandex/clickhouse-server:${param}`
+    set +e
+    while :
+    do
+        # The init-file trick starts clickhouse-client inside a bash shell (handy if you need to exit to bash, check something, and get back to clickhouse-client).
+        docker exec -it ${containerid} bash -c 'bash --init-file <(echo "clickhouse client -m")'
+
+        printf "\n\nYou exited the session. What next?\n"
+        echo "  [Q]uit and remove container."
+        echo "  [R]estart clickhouse and run clickhouse-client in shell again."
+        echo "You can also hit Ctrl+C to exit and keep the container running."
+
+        while :
+        do
+            read -p "Quit or restart [Q/R]? " choice
+            case "$choice" in
+                q|Q|exit ) break 2;;
+                r|R|restart ) echo "Restarting container ..."; docker restart ${containerid} > /dev/null; break 1;;
+                * ) echo "I don't understand. Please type Q or R" ;;
+            esac
+        done
+    done
+    docker rm -f ${containerid} > /dev/null
+    rm -rf ${tmp_dir}
+fi
diff --git a/utils/compressor/CMakeLists.txt b/utils/compressor/CMakeLists.txt
index 3498640acd1..df32330a137 100644
--- a/utils/compressor/CMakeLists.txt
+++ b/utils/compressor/CMakeLists.txt
@@ -1,5 +1,2 @@
-add_executable (mutator mutator.cpp)
-target_link_libraries(mutator PRIVATE clickhouse_common_io)
-
 add_executable (decompress_perf decompress_perf.cpp)
 target_link_libraries(decompress_perf PRIVATE dbms ${LZ4_LIBRARY})
diff --git a/utils/compressor/mutator.cpp b/utils/compressor/mutator.cpp
deleted file mode 100644
index 13c80c292e2..00000000000
--- a/utils/compressor/mutator.cpp
+++ /dev/null
@@ -1,406 +0,0 @@
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-/** Quick and dirty implementation of a data scrambler.
-  *
-  * The task is to replace the data with pseudorandom values.
-  * But with keeping some probability distributions
-  * and with maintaining the same compression ratio.
-  *
-  * The solution is to operate directly on the compressed LZ4 stream.
-  * The stream consists of independent compressed blocks.
-  * Each block is a stream of "literals" and "matches".
-  * A literal is an instruction to literally put some following bytes,
-  * and a match is an instruction to copy some bytes that were already seen before.
-  *
-  * We take the literals and apply some scramble operation to them.
-  * But we keep literal lengths and matches without changes.
-  *
-  * That's how we get pseudorandom data while keeping
-  * all repetitive patterns and maintaining the same compression ratio.
-  *
-  * Actually, if you decompress scrambled data and compress it again, the compression
-  * ratio becomes slightly worse, because LZ4 uses a simple match finder based on the
-  * value of a hash function, and it can find different matches due to collisions in
-  * the hash function.
-  *
-  * The scramble operation replaces literals with pseudorandom bytes,
-  * but with some heuristics to keep some sort of data structure.
-  *
-  * It is an open question whether this scrambles the data enough and whether it is
-  * safe to publish scrambled data. In general, you should assume that it is not safe.
-  */
[~370 further deleted lines omitted: the #define constants, the mutate() byte-scrambling heuristics, an LZ4 block rewriter (LZ4_decompress_mutate), a MutatingCompressedReadBuffer wrapper, and a small main(); their markup did not survive formatting.]
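For orientation, here is a minimal Python sketch of the byte-class-preserving replacement that the comment above describes. The function name and driver are illustrative, and it covers only the character classes visible in the recovered text, not the full deleted C++ implementation:

```python
import random

def scramble_literals(data: bytes, seed: int = 0) -> bytes:
    """Replace bytes with random bytes of the same class, keeping lengths intact."""
    rng = random.Random(seed)
    out = bytearray(data)
    for i, b in enumerate(out):
        if ord('1') <= b <= ord('9'):      # digits '1'..'9' -> random digit
            out[i] = rng.randint(ord('1'), ord('9'))
        elif ord('a') <= b <= ord('z'):    # lowercase -> random lowercase
            out[i] = rng.randint(ord('a'), ord('z'))
        elif ord('A') <= b <= ord('Z'):    # uppercase -> random uppercase
            out[i] = rng.randint(ord('A'), ord('Z'))
        elif 0x80 <= b <= 0xBF:            # UTF-8 continuation byte: keep the high nibble
            out[i] = rng.randint(b & 0xF0, b | 0x0F)
    return bytes(out)

# Lengths and byte classes survive, so compressibility stays roughly the same.
print(scramble_literals(b"user42 from Berlin"))
```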
diff --git a/utils/github/__main__.py b/utils/github/__main__.py
index e05d27c03d6..401908298eb 100644
--- a/utils/github/__main__.py
+++ b/utils/github/__main__.py
@@ -129,6 +129,7 @@ if bad_commits and not args.login:
 # TODO: check backports.
 if need_backporting:
     re_vlabel = re.compile(r'^v\d+\.\d+$')
+    re_vlabel_backported = re.compile(r'^v\d+\.\d+-backported$')
     re_vlabel_conflicts = re.compile(r'^v\d+\.\d+-conflicts$')

     print('\nPull-requests need to be backported:')
@@ -146,8 +147,8 @@ if need_backporting:
             # FIXME: compatibility logic - check for a manually set label, that indicates status 'backported'.
             # FIXME: O(n²) - no need to iterate all labels for every `stable`
             for label in github.get_labels(pull_request):
-                if re_vlabel.match(label['name']):
-                    if f'v{stable[0]}' == label['name']:
+                if re_vlabel.match(label['name']) or re_vlabel_backported.match(label['name']):
+                    if f'v{stable[0]}' == label['name'] or f'v{stable[0]}-backported' == label['name']:
                         backport_labeled.add(stable[0])
                 if re_vlabel_conflicts.match(label['name']):
                     if f'v{stable[0]}-conflicts' == label['name']:
diff --git a/utils/iotest/iotest.cpp b/utils/iotest/iotest.cpp
index ea7cd439838..47264bdfc38 100644
--- a/utils/iotest/iotest.cpp
+++ b/utils/iotest/iotest.cpp
@@ -113,7 +113,7 @@ int mainImpl(int argc, char ** argv)
     for (int i = 0; argv[2][i]; ++i)
     {
         char c = argv[2][i];
-        switch(c)
+        switch (c)
         {
             case 'r':
                 mode |= MODE_READ;
diff --git a/utils/iotest/iotest_aio.cpp b/utils/iotest/iotest_aio.cpp
index 800e605d62e..82c2d12a0b7 100644
--- a/utils/iotest/iotest_aio.cpp
+++ b/utils/iotest/iotest_aio.cpp
@@ -15,9 +15,6 @@ int main(int, char **) { return 0; }
 #include
 #include
 #include
-#include
-#include
-#include
 #include
 #include
 #include
diff --git a/utils/junit_to_html/junit-noframes.xsl b/utils/junit_to_html/junit-noframes.xsl
new file mode 100644
index 00000000000..01f09ced557
--- /dev/null
+++ b/utils/junit_to_html/junit-noframes.xsl
@@ -0,0 +1,390 @@
[The 390 added lines are an XSLT stylesheet that renders a JUnit XML report as a single HTML page titled "Test Results": a summary table (Tests / Failures / Errors / Success rate / Time), per-package and per-test tables (Name / Tests / Errors / Failures / Time(s) / Time Stamp / Host and Name / Status / Type / Time(s)), failure and error details with row coloring, and the note "failures are anticipated and checked for with assertions while errors are unanticipated". The XML markup did not survive formatting and is omitted here.]
diff --git a/utils/junit_to_html/junit_to_html b/utils/junit_to_html/junit_to_html
new file mode 100755
index 00000000000..d6bebccbf9f
--- /dev/null
+++ b/utils/junit_to_html/junit_to_html
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import os
+import sys
+import lxml.etree as etree
+
+def _convert_junit_to_html(junit_path, html_path):
+    with open(os.path.join(os.path.dirname(__file__), "junit-noframes.xsl")) as xslt_file:
+        junit_to_html_xslt = etree.parse(xslt_file)
+    with open(junit_path) as junit_file:
+        junit_xml = etree.parse(junit_file)
+    transform = etree.XSLT(junit_to_html_xslt)
+    html = etree.tostring(transform(junit_xml), encoding="utf-8")
+    html_dir = os.path.dirname(html_path)
+    if not os.path.exists(html_dir):
+        os.makedirs(html_dir)
+    with open(html_path, "w") as html_file:
+        html_file.write(html)
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        raise Exception("Insufficient arguments: junit.xml result.html")
+    junit_path, html_path = sys.argv[1], sys.argv[2]
+    _convert_junit_to_html(junit_path, html_path)
diff --git a/utils/package/arch/CMakeLists.txt b/utils/package/arch/CMakeLists.txt
index 07489cf9b19..e77819f6d98 100644
--- a/utils/package/arch/CMakeLists.txt
+++ b/utils/package/arch/CMakeLists.txt
@@ -1,2 +1,2 @@
-include (${ClickHouse_SOURCE_DIR}/dbms/cmake/version.cmake)
+include (${ClickHouse_SOURCE_DIR}/cmake/version.cmake)
 configure_file (PKGBUILD.in PKGBUILD)
diff --git a/utils/package/arch/PKGBUILD.in b/utils/package/arch/PKGBUILD.in
index 20de555f8a7..b5c63a3edea 100644
--- a/utils/package/arch/PKGBUILD.in
+++ b/utils/package/arch/PKGBUILD.in
@@ -20,11 +20,11 @@ package() {
     mkdir -p $pkgdir/usr/bin/
     mkdir -p $pkgdir/usr/lib/systemd/system
     ln -s clickhouse-client $pkgdir/usr/bin/clickhouse-server
-    cp $SRC/dbms/programs/server/config.xml $SRC/dbms/programs/server/users.xml $pkgdir/etc/clickhouse-server/
-    cp $BIN/dbms/programs/clickhouse $pkgdir/usr/bin/clickhouse-client
+    cp $SRC/programs/server/config.xml $SRC/programs/server/users.xml $pkgdir/etc/clickhouse-server/
+    cp $BIN/programs/clickhouse $pkgdir/usr/bin/clickhouse-client
     patchelf --remove-rpath $pkgdir/usr/bin/clickhouse-client
     patchelf --replace-needed libz.so.1 libz-ng.so.1 $pkgdir/usr/bin/clickhouse-client
-    cp $SRC/dbms/programs/client/clickhouse-client.xml $pkgdir/etc/clickhouse-client/config.xml
+    cp $SRC/programs/client/clickhouse-client.xml $pkgdir/etc/clickhouse-client/config.xml
     compiler="libclickhouse-compiler.so"
     if ! pacman -Q clang | grep '^clang 7'; then
         compiler=""
diff --git a/utils/release/push_packages b/utils/release/push_packages
index 4a0548bb4ca..68d72bb39fe 100755
--- a/utils/release/push_packages
+++ b/utils/release/push_packages
@@ -152,7 +152,7 @@ def transfer_packages_dupload(ssh_key, path, repo_user, repo_url, incoming_direc
     }
     with DebRelease(config, repo_user, ssh_key):
         logging.info("Duploading")
-        subprocess.check_call("dupload --nomail --to {repo} {path}".format(repo=repo_short_name, path=path), shell=True)
+        subprocess.check_call("dupload -f --nomail --to {repo} {path}".format(repo=repo_short_name, path=path), shell=True)
     logging.info("Dupload finished")
diff --git a/utils/release/release_lib.sh b/utils/release/release_lib.sh
index ab395c9ad37..148fe2c05df 100644
--- a/utils/release/release_lib.sh
+++ b/utils/release/release_lib.sh
@@ -12,10 +12,10 @@ function gen_version_string {
 function get_version {
     if [ -z "$VERSION_MAJOR" ] && [ -z "$VERSION_MINOR" ] && [ -z "$VERSION_PATCH" ]; then
         BASEDIR=$(dirname "${BASH_SOURCE[0]}")/../../
-        VERSION_REVISION=`grep "set(VERSION_REVISION" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_REVISION \(.*\)$/\1/' | sed 's/[) ].*//'`
-        VERSION_MAJOR=`grep "set(VERSION_MAJOR" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_MAJOR \(.*\)/\1/' | sed 's/[) ].*//'`
-        VERSION_MINOR=`grep "set(VERSION_MINOR" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_MINOR \(.*\)/\1/' | sed 's/[) ].*//'`
-        VERSION_PATCH=`grep "set(VERSION_PATCH" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_PATCH \(.*\)/\1/' | sed 's/[) ].*//'`
+        VERSION_REVISION=`grep "set(VERSION_REVISION" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_REVISION \(.*\)$/\1/' | sed 's/[) ].*//'`
+        VERSION_MAJOR=`grep "set(VERSION_MAJOR" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_MAJOR \(.*\)/\1/' | sed 's/[) ].*//'`
+        VERSION_MINOR=`grep "set(VERSION_MINOR" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_MINOR \(.*\)/\1/' | sed 's/[) ].*//'`
+        VERSION_PATCH=`grep "set(VERSION_PATCH" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_PATCH \(.*\)/\1/' | sed 's/[) ].*//'`
     fi
     VERSION_PREFIX="${VERSION_PREFIX:-v}"
     VERSION_POSTFIX_TAG="${VERSION_POSTFIX:--testing}"
@@ -97,12 +97,12 @@ function gen_revision_author {
             -e "s/set(VERSION_MINOR [^) ]*/set(VERSION_MINOR $VERSION_MINOR/g;" \
             -e "s/set(VERSION_PATCH [^) ]*/set(VERSION_PATCH $VERSION_PATCH/g;" \
             -e "s/set(VERSION_STRING [^) ]*/set(VERSION_STRING $VERSION_STRING/g;" \
-            dbms/cmake/version.cmake
+            cmake/version.cmake

         gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
         gen_dockerfiles "$VERSION_STRING"
-        dbms/src/Storages/System/StorageSystemContributors.sh ||:
-        git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" dbms/cmake/version.cmake debian/changelog docker/*/Dockerfile dbms/src/Storages/System/StorageSystemContributors.generated.cpp
+        src/Storages/System/StorageSystemContributors.sh ||:
+        git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" cmake/version.cmake debian/changelog docker/*/Dockerfile src/Storages/System/StorageSystemContributors.generated.cpp
         if [ -z $NO_PUSH ]; then
             git push
         fi
diff --git a/utils/simple-backport/.gitignore b/utils/simple-backport/.gitignore
new file mode 100644
index 00000000000..72e8ffc0db8
--- /dev/null
+++ b/utils/simple-backport/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/utils/simple-backport/README.md b/utils/simple-backport/README.md
new file mode 100644
index 00000000000..c5a625ca0d1
--- /dev/null
+++ b/utils/simple-backport/README.md
@@ -0,0 +1,107 @@
+# Simplified backport script
+
+This is a simplified script for backporting. It determines which pull requests
+have not yet been backported from master to the given branch. Run the script
+from the directory it lives in, passing the branch name. It assumes that your
+upstream remote is called origin.
+```
+cd my-clickhouse-repo/utils/simple-backport
+git fetch origin
+time GITHUB_TOKEN=<token> ./backport.sh 20.1
+```
+
+The script prints a primitive report:
+```
+$ time GITHUB_TOKEN=<token> ~/backport.sh 20.3
+144 PRs differ between 20.3 and master.
+backport https://github.com/ClickHouse/ClickHouse/pull/10135
+backport https://github.com/ClickHouse/ClickHouse/pull/10121
+...
+backport https://github.com/ClickHouse/ClickHouse/pull/9808
+backport https://github.com/ClickHouse/ClickHouse/pull/9410
+
+real    0m1.213s
+user    0m1.065s
+sys     0m0.311s
+```
+
+It also generates a report `<your-branch>-report.tsv` in the working directory:
+
+```
+$ cat 20.3-report.tsv
+skip         10153  https://github.com/ClickHouse/ClickHouse/pull/10153  pr10153.json
+skip         10147  https://github.com/ClickHouse/ClickHouse/pull/10147  pr10147.json
+no-backport  10138  https://github.com/ClickHouse/ClickHouse/pull/10138  pr10138.json
+backport     10135  https://github.com/ClickHouse/ClickHouse/pull/10135  pr10135.json
+skip         10134  https://github.com/ClickHouse/ClickHouse/pull/10134  pr10134.json
+...
+```
+
+You can click the links right from the console, or do it even more simply:
+
+```
+$ cat <branch>-report.tsv | grep ^backport | cut -f3
+$ cat <branch>-report.tsv | grep ^backport | cut -f3 | xargs -n1 xdg-open
+```
+
+The latter command opens all pull requests that need to be backported in your
+browser. There are other statuses as well; check which ones occur:
+
+```
+$ cat 20.1-report.tsv | cut -f1 | sort | uniq -c | sort -rn
+    446 skip
+     38 done
+     25 conflict
+     18 backport
+     10 no-backport
+```
+
+### How to label a pull request?
+By default, every pull request whose description specifies the changelog
+category Bug fix is backported. If that is not enough, use the tags:
+* v20.1-no-backport -- does not need to be backported to branch 20.1.
+* pr-no-backport -- does not need to be backported to any branch.
+* v20.1-conflicts -- the backport to 20.1 hit a conflict. The script skips such
+  pull requests; you can return to them later.
+* pr-must-backport -- must be backported to all supported branches.
+* v20.1-must-backport -- must be backported to 20.1.
+
+### I backported it; why doesn't the script see that?
+* The commit message must contain the text backport/cherry-pick #12345, or
+  have the form of a standard GitHub merge commit for PR #12345.
+* The commit must be reachable via `git log --first-parent my-branch`. Perhaps
+  someone ran pull with merge on the branch, which makes some of its commits
+  unreachable with `--first-parent`.
+
+As a workaround, add an empty commit to the branch with a message like
+"backport #12345 -- real backport commit is <sha>".
+
+### I fixed the pull request; why doesn't the script see that?
+While running, the script caches pull request data in the current directory to
+save GitHub quota. Delete the cached files, for example for all requests that
+are not marked as skipped:
+```
+$ cat <your-branch>-report.tsv | grep -v "^skip" | cut -f4
+$ cat <your-branch>-report.tsv | grep -v "^skip" | cut -f4 | xargs rm
+```
+
+## How to generate a changelog
+Run this in the same directory:
+```
+$ time GITHUB_TOKEN=... ./changelog.sh v20.3.4.10-stable v20.3.5.21-stable
+9 PRs added between v20.3.4.10-stable and v20.3.5.21-stable.
+### ClickHouse release v20.3.5.21-stable FIXME as compared to v20.3.4.10-stable
+
+#### Bug Fix
+
+* Fix 'Different expressions with the same alias' error when query has PREWHERE
+  and WHERE on distributed table and `SET distributed_product_mode = 'local'`.
+[#9871](https://github.com/ClickHouse/ClickHouse/pull/9871) ([Artem
+Zuikov](https://github.com/4ertus2)).
+...
+```
+
+The script prints the changelog to the screen and also saves it to
+`./changelog.md`. Copy that text into the big changelog, check and correct the
+release version and date, and proofread the entries. If the messages are wrong,
+be sure to fix them on GitHub: this helps when later generating changelogs for
+other versions that contain the same pull request. To make the script pick up
+the changes from GitHub, delete the corresponding `./pr12345.json` files. If
+you often see badly written pull requests, it is worth thinking about improving
+the Description check in CI.
diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh
new file mode 100755
index 00000000000..7d5b12d6f7f
--- /dev/null
+++ b/utils/simple-backport/backport.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+set -e
+
+branch="$1"
+merge_base=$(git merge-base origin/master "origin/$branch")
+master_git_cmd=(git log "$merge_base..origin/master" --first-parent)
+branch_git_cmd=(git log "$merge_base..origin/$branch" --first-parent)
+
+# Make lists of PRs that were merged into each branch. Use first parent here, or else
+# we'll get weird things like seeing older master that was merged into a PR branch
+# that was then merged into master.
+"${master_git_cmd[@]}" > master-log.txt
+"${branch_git_cmd[@]}" > "$branch-log.txt"
+
+# Check for diamond merges.
+"${master_git_cmd[@]}" --oneline --grep "Merge branch '" | grep ''
+diamonds_in_master=$?
+
+"${branch_git_cmd[@]}" --oneline --grep "Merge branch '" | grep ''
+diamonds_in_branch=$?
+
+if [ "$diamonds_in_master" -eq 0 ] || [ "$diamonds_in_branch" -eq 0 ]
+then
+    # DO NOT ADD automated handling of diamond merges to this script.
+    # It is an unsustainable way to work with git, and it MUST be visible.
+    echo Warning: suspected diamond merges above.
+    echo Some commits will be missed, review these manually.
+fi
+
+# NOTE keep in sync with ./changelog.sh.
+# Search for PR numbers in commit messages. First variant is normal merge, and second
+# variant is squashed. Next are some backport message variants.
+find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p;
+    s/^.*(#\([[:digit:]]\+\))$/\1/p;
+    s/^.*back[- ]*port[ed of]*#\([[:digit:]]\+\).*$/\1/Ip;
+    s/^.*cherry[- ]*pick[ed of]*#\([[:digit:]]\+\).*$/\1/Ip")
+
+"${find_prs[@]}" master-log.txt | sort -rn > master-prs.txt
+"${find_prs[@]}" "$branch-log.txt" | sort -rn > "$branch-prs.txt"
+
+# Find all master PRs that are not in branch by calculating differences of two PR lists.
+grep -f "$branch-prs.txt" -F -x -v master-prs.txt > "$branch-diff-prs.txt"
+
+echo "$(wc -l < "$branch-diff-prs".txt) PRs differ between $branch and master."
+
+function github_download()
+{
+    local url=${1}
+    local file=${2}
+    if ! [ -f "$file" ]
+    then
+        if ! curl -H "Authorization: token $GITHUB_TOKEN" \
+                -sSf "$url" \
+                > "$file"
+        then
+            >&2 echo "Failed to download '$url' to '$file'. Contents: '$(cat "$file")'."
+            rm "$file"
+            return 1
+        fi
+        sleep 0.1
+    fi
+}
+
+rm "$branch-report.tsv" &> /dev/null ||:
+for pr in $(cat "$branch-diff-prs.txt")
+do
+    # Download PR info from github.
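For illustration, here is a rough Python equivalent of the commit-subject patterns with which backport.sh and changelog.sh (below) extract PR numbers. The sed expressions in the scripts remain the authoritative versions; this helper exists only in this sketch:

```python
import re

# One regex per sed pattern in find_prs: merge commit, squash commit,
# and the manual "backport" / "cherry-pick" message variants.
PR_PATTERNS = [
    re.compile(r'^.*Merge pull request #(\d+)'),
    re.compile(r'^.*\(#(\d+)\)$'),
    re.compile(r'^.*back[- ]*port[ed of]*#(\d+)', re.IGNORECASE),
    re.compile(r'^.*cherry[- ]*pick[ed of]*#(\d+)', re.IGNORECASE),
]

def find_pr_number(subject: str):
    """Return the PR number mentioned in a commit subject, or None."""
    for pattern in PR_PATTERNS:
        match = pattern.match(subject)
        if match:
            return int(match.group(1))
    return None

assert find_pr_number("Merge pull request #9871 from ClickHouse/some-branch") == 9871
assert find_pr_number("Fix aliases in PREWHERE (#9871)") == 9871
assert find_pr_number("backport #9871 -- real backport commit is deadbeef") == 9871
```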
+    file="pr$pr.json"
+    github_download "https://api.github.com/repos/ClickHouse/ClickHouse/pulls/$pr" "$file" || continue
+
+    if ! [ "$pr" == "$(jq -r .number "$file")" ]
+    then
+        >&2 echo "Got wrong data for PR #$pr (please check and remove '$file')."
+        continue
+    fi
+
+    action="skip"
+
+    # First, check the changelog category. We port all bugfixes.
+    if jq -r .body "$file" | grep -i "^- bug[ -]*fix" > /dev/null
+    then
+        action="backport"
+    fi
+
+    # Next, check the tags. They might override the decision. Checks are ordered by priority.
+    labels="$(jq -r .labels[].name "$file")"
+    if echo "$labels" | grep -x "pr-must-backport\|v$branch-must-backport" > /dev/null; then action="backport"; fi
+    if echo "$labels" | grep -x "v$branch-conflicts" > /dev/null; then action="conflict"; fi
+    if echo "$labels" | grep -x "pr-no-backport\|v$branch-no-backport" > /dev/null; then action="no-backport"; fi
+    # FIXME Ignore "backported" labels for now. If we can't find the backport commit,
+    # this means that the changelog script also won't be able to. An alternative
+    # way to mark PR as backported is to add an empty commit with text like
+    # "backported #12345", so that it can be found between tags and put in proper
+    # place in changelog.
+    #if echo "$labels" | grep -x "v$branch\|v$branch-backported" > /dev/null; then action="done"; fi
+
+    # Find merge commit SHA for convenience
+    merge_sha="$(jq -r .merge_commit_sha "$file")"
+
+    url="https://github.com/ClickHouse/ClickHouse/pull/$pr"
+    printf "%s\t%s\t%s\t%s\t%s\n" "$action" "$pr" "$url" "$file" "$merge_sha" >> "$branch-report.tsv"
+    if [ "$action" == "backport" ]
+    then
+        printf "%s\t%s\t%s\n" "$action" "$url" "$merge_sha"
+    fi
+done
+
+echo "Done."
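As a companion to backport.sh above, here is a small Python sketch of its per-PR decision: the Bug fix changelog category opts a PR in, and labels override the result in the same priority order. The function and its signature are hypothetical, not part of the scripts:

```python
import re

def backport_action(body: str, labels: list, branch: str) -> str:
    """Mirror backport.sh: the changelog category selects bugfixes, labels override."""
    action = "skip"
    # The changelog category line in the PR description, e.g. "- Bug Fix".
    if body and re.search(r'(?im)^- bug[ -]*fix', body):
        action = "backport"
    # Label checks, in increasing priority, as in the script.
    if "pr-must-backport" in labels or f"v{branch}-must-backport" in labels:
        action = "backport"
    if f"v{branch}-conflicts" in labels:
        action = "conflict"
    if "pr-no-backport" in labels or f"v{branch}-no-backport" in labels:
        action = "no-backport"
    return action

print(backport_action("- Bug Fix\n* Fix crash in ...", ["v20.1-conflicts"], "20.1"))  # conflict
```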
diff --git a/utils/simple-backport/changelog.sh b/utils/simple-backport/changelog.sh
new file mode 100755
index 00000000000..75a54a50b92
--- /dev/null
+++ b/utils/simple-backport/changelog.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+set -e
+
+from="$1"
+to="$2"
+log_command=(git log "$from..$to" --first-parent)
+
+"${log_command[@]}" > "changelog-log.txt"
+
+# Check for diamond merges.
+if "${log_command[@]}" --oneline --grep "Merge branch '" | grep ''
+then
+    # DO NOT ADD automated handling of diamond merges to this script.
+    # It is an unsustainable way to work with git, and it MUST be visible.
+    echo Warning: suspected diamond merges above.
+    echo Some commits will be missed, review these manually.
+fi
+
+# NOTE keep in sync with ./backport.sh.
+# Search for PR numbers in commit messages. First variant is normal merge, and second
+# variant is squashed. Next are some backport message variants.
+find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p;
+    s/^.*(#\([[:digit:]]\+\))$/\1/p;
+    s/^.*back[- ]*port[ed of]*#\([[:digit:]]\+\).*$/\1/Ip;
+    s/^.*cherry[- ]*pick[ed of]*#\([[:digit:]]\+\).*$/\1/Ip")
+
+"${find_prs[@]}" "changelog-log.txt" | sort -rn | uniq > "changelog-prs.txt"
+
+echo "$(wc -l < "changelog-prs.txt") PRs added between $from and $to."
+
+function github_download()
+{
+    local url=${1}
+    local file=${2}
+    if ! [ -f "$file" ]
+    then
+        if ! curl -H "Authorization: token $GITHUB_TOKEN" \
+                -sSf "$url" \
+                > "$file"
+        then
+            >&2 echo "Failed to download '$url' to '$file'. Contents: '$(cat "$file")'."
+            rm "$file"
+            return 1
+        fi
+        sleep 0.1
+    fi
+}
+
+rm changelog-prs-filtered.txt &> /dev/null ||:
+for pr in $(cat "changelog-prs.txt")
+do
+    # Download PR info from github.
+    file="pr$pr.json"
+    github_download "https://api.github.com/repos/ClickHouse/ClickHouse/pulls/$pr" "$file" || continue
+
+    if ! [ "$pr" == "$(jq -r .number "$file")" ]
+    then
+        >&2 echo "Got wrong data for PR #$pr (please check and remove '$file')."
+        continue
+    fi
+
+    # Filter out PRs by bots.
+    user_login=$(jq -r .user.login "$file")
+    if echo "$user_login" | grep "\[bot\]$" > /dev/null
+    then
+        continue
+    fi
+
+    # Download author info from github.
+    user_id=$(jq -r .user.id "$file")
+    user_file="user$user_id.json"
+    github_download "$(jq -r .user.url "$file")" "$user_file" || continue
+
+    if ! [ "$user_id" == "$(jq -r .id "$user_file")" ]
+    then
+        >&2 echo "Got wrong data for user #$user_id (please check and remove '$user_file')."
+        continue
+    fi
+
+    echo "$pr" >> changelog-prs-filtered.txt
+done
+
+echo "### ClickHouse release $to FIXME as compared to $from
+" > changelog.md
+./format-changelog.py changelog-prs-filtered.txt >> changelog.md
+cat changelog.md
diff --git a/utils/simple-backport/format-changelog.py b/utils/simple-backport/format-changelog.py
new file mode 100755
index 00000000000..356ed48b6fd
--- /dev/null
+++ b/utils/simple-backport/format-changelog.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python3
+
+import os
+import sys
+import itertools
+import argparse
+import json
+import collections
+import re
+
+parser = argparse.ArgumentParser(description='Format changelog for given PRs.')
+parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs='?', default=sys.stdin, help='File with PR numbers, one per line.')
+args = parser.parse_args()
+
+# This function mirrors the PR description checks in ClickhousePullRequestTrigger.
+# Returns False if the PR should not be mentioned in the changelog.
+def parse_one_pull_request(item):
+    description = item['body']
+    # Don't skip empty lines because they delimit parts of description
+    lines = [line for line in map(lambda x: x.strip(), description.split('\n') if description else [])]
+    lines = [re.sub(r'\s+', ' ', l) for l in lines]
+
+    category = ''
+    entry = ''
+
+    if lines:
+        i = 0
+        while i < len(lines):
+            if re.match(r'(?i).*category.*:$', lines[i]):
+                i += 1
+                if i >= len(lines):
+                    break
+                # Can have one empty line between header and the category itself. Filter it out.
+                if not lines[i]:
+                    i += 1
+                    if i >= len(lines):
+                        break
+                category = re.sub(r'^[-*\s]*', '', lines[i])
+                i += 1
+            elif re.match(r'(?i)^\**\s*(Short description|Change\s*log entry)', lines[i]):
+                i += 1
+                # Can have one empty line between header and the entry itself. Filter it out.
+                if i < len(lines) and not lines[i]:
+                    i += 1
+                # All following lines until empty one are the changelog entry.
+                entry_lines = []
+                while i < len(lines) and lines[i]:
+                    entry_lines.append(lines[i])
+                    i += 1
+                entry = ' '.join(entry_lines)
+            else:
+                i += 1
+
+    if not category:
+        # Shouldn't happen, because description check in CI should catch such PRs.
+        # Fall through, so that it shows up in output and the user can fix it.
+        category = "NO CL CATEGORY"
+
+    # Filter out the PR categories that are not for changelog.
+    if re.match(r'(?i)doc|((non|in|not|un)[-\s]*significant)', category):
+        return False
+
+    if not entry:
+        # Shouldn't happen, because description check in CI should catch such PRs.
+        category = "NO CL ENTRY"
+        entry = "NO CL ENTRY: '" + item['title'] + "'"
+
+    entry = entry.strip()
+    if entry[-1] != '.':
+        entry += '.'
+
+    item['entry'] = entry
+    item['category'] = category
+
+    return True
+
+
+category_to_pr = collections.defaultdict(lambda: [])
+users = {}
+for line in args.file:
+    pr = json.loads(open(f'pr{line.strip()}.json').read())
+    assert(pr['number'])
+    if not parse_one_pull_request(pr):
+        continue
+
+    assert(pr['category'])
+    category_to_pr[pr['category']].append(pr)
+    user_id = pr['user']['id']
+    users[user_id] = json.loads(open(f'user{user_id}.json').read())
+
+def print_category(category):
+    print("#### " + category)
+    print()
+    for pr in category_to_pr[category]:
+        user = users[pr["user"]["id"]]
+        user_name = user["name"] if user["name"] else user["login"]
+
+        # Substitute issue links
+        pr["entry"] = re.sub(r'([^[])#([0-9]{4,})', r'\1[#\2](https://github.com/ClickHouse/ClickHouse/issues/\2)', pr["entry"])
+
+        print(f'* {pr["entry"]} [#{pr["number"]}]({pr["html_url"]}) ([{user_name}]({user["html_url"]})).')
+
+    print()
+
+# Print categories in preferred order
+categories_preferred_order = ['Backward Incompatible Change', 'New Feature', 'Bug Fix', 'Improvement', 'Performance Improvement', 'Build/Testing/Packaging Improvement', 'Other']
+for category in categories_preferred_order:
+    if category in category_to_pr:
+        print_category(category)
+        category_to_pr.pop(category)
+
+# Print the rest of the categories
+for category in category_to_pr:
+    print_category(category)
diff --git a/utils/test-data-generator/CMakeLists.txt b/utils/test-data-generator/CMakeLists.txt
index 2e11b537873..c46853229d7 100644
--- a/utils/test-data-generator/CMakeLists.txt
+++ b/utils/test-data-generator/CMakeLists.txt
@@ -1,9 +1,9 @@
 if(USE_PROTOBUF)
-    protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs ProtobufDelimitedMessagesSerializer_Hdrs ${CMAKE_CURRENT_SOURCE_DIR}/../../dbms/tests/queries/0_stateless/00825_protobuf_format.proto)
-    protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs2 ProtobufDelimitedMessagesSerializer_Hdrs2 ${CMAKE_CURRENT_SOURCE_DIR}/../../dbms/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto)
+    protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs ProtobufDelimitedMessagesSerializer_Hdrs ${CMAKE_CURRENT_SOURCE_DIR}/../../tests/queries/0_stateless/00825_protobuf_format.proto)
+    protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs2 ProtobufDelimitedMessagesSerializer_Hdrs2 ${CMAKE_CURRENT_SOURCE_DIR}/../../tests/queries/0_stateless/00825_protobuf_format_syntax2.proto)
     add_executable (ProtobufDelimitedMessagesSerializer ProtobufDelimitedMessagesSerializer.cpp ${ProtobufDelimitedMessagesSerializer_Srcs} ${ProtobufDelimitedMessagesSerializer_Hdrs} ${ProtobufDelimitedMessagesSerializer_Srcs2} ${ProtobufDelimitedMessagesSerializer_Hdrs2})
     target_include_directories (ProtobufDelimitedMessagesSerializer SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
     target_link_libraries (ProtobufDelimitedMessagesSerializer PRIVATE ${Protobuf_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY})
-    get_filename_component(ProtobufDelimitedMessagesSerializer_OutputDir "${CMAKE_CURRENT_LIST_DIR}/../../dbms/tests/queries/0_stateless" REALPATH)
+    get_filename_component(ProtobufDelimitedMessagesSerializer_OutputDir "${CMAKE_CURRENT_LIST_DIR}/../../tests/queries/0_stateless" REALPATH)
     target_compile_definitions(ProtobufDelimitedMessagesSerializer PRIVATE OUTPUT_DIR="${ProtobufDelimitedMessagesSerializer_OutputDir}")
 endif()
diff --git a/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp
b/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp index c956dea8712..d16df83d12f 100644 --- a/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp +++ b/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp @@ -1,5 +1,5 @@ // Generator of protobuf delimited messages used in the protobuf IO tests -// dbms/tests/queries/0_stateless/00825_protobuf_format* +// tests/queries/0_stateless/00825_protobuf_format* #include #include diff --git a/website/Dockerfile b/website/Dockerfile deleted file mode 100644 index 64eb0ce5e33..00000000000 --- a/website/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM nginx:mainline -COPY . /usr/share/nginx/html -COPY nginx/nginx.conf /etc/nginx/nginx.conf -COPY nginx/default.conf /etc/nginx/conf.d/default.conf diff --git a/website/benchmark_hardware.html b/website/benchmark_hardware.html index ac320cd5415..b45ccb4875c 100644 --- a/website/benchmark_hardware.html +++ b/website/benchmark_hardware.html @@ -2376,6 +2376,159 @@ var results = [0.032, 0.009, 0.007] ] }, + + { + "system": "MacBook Pro 2018, 2.7 GHz Quad-Core Intel Core i7, 16 GiB RAM, 1TB SSD", + "time": "2020-04-04 00:00:00", + "result": + [ +[0.002, 0.002, 0.002], +[0.028, 0.031, 0.025], +[0.060, 0.058, 0.047], +[0.125, 0.101, 0.070], +[0.164, 0.185, 0.168], +[0.672, 0.568, 0.557], +[0.072, 0.038, 0.037], +[0.031, 0.021, 0.021], +[0.849, 0.836, 0.820], +[0.941, 0.938, 0.942], +[0.423, 0.444, 0.457], +[0.617, 0.556, 0.555], +[1.761, 1.694, 1.641], +[2.190, 2.277, 2.226], +[1.964, 1.895, 1.934], +[1.956, 1.978, 1.884], +[6.029, 5.977, 5.975], +[3.372, 3.436, 3.439], +[12.883, 12.778, 12.572], +[0.116, 0.080, 0.076], +[1.874, 1.372, 1.467], +[2.321, 2.356, 2.238], +[5.304, 4.955, 4.912], +[2.474, 1.993, 2.033], +[0.744, 0.708, 0.719], +[0.562, 0.568, 0.602], +[0.737, 0.742, 0.719], +[1.547, 1.580, 1.583], +[3.074, 2.665, 2.697], +[5.466, 5.560, 5.693], +[1.658, 1.562, 1.543], +[2.935, 2.802, 2.743], +[19.141, 19.674, 19.212], +[8.738, 8.334, 8.302], +[8.268, 8.276, 8.364], +[3.311, 3.288, 3.243], +[0.182, 0.169, 0.169], +[0.075, 0.066, 0.066], +[0.066, 0.057, 0.053], +[0.353, 0.324, 0.327], +[0.030, 0.018, 0.018], +[0.018, 0.015, 0.015], +[0.011, 0.007, 0.007] + ] + }, + + { + "system": "AMD EPYC 7702, 256 cores, 512 GiB, NVMe SSD", + "time": "2020-04-09 00:00:00", + "result": + [ +[0.006, 0.002, 0.002], +[0.252, 0.072, 0.057], +[0.113, 0.066, 0.057], +[0.197, 0.055, 0.065], +[0.311, 0.199, 0.217], +[0.360, 0.200, 0.183], +[0.119, 0.050, 0.045], +[0.066, 0.061, 0.057], +[0.320, 0.150, 0.144], +[0.346, 0.170, 0.162], +[0.226, 0.117, 0.115], +[0.265, 0.112, 0.118], +[0.402, 0.249, 0.250], +[0.561, 0.327, 0.332], +[0.397, 0.267, 0.257], +[0.323, 0.221, 0.233], +[0.710, 0.527, 0.517], +[0.667, 0.437, 0.443], +[1.269, 0.936, 0.957], +[0.189, 0.043, 0.043], +[1.673, 0.206, 0.169], +[1.937, 0.214, 0.184], +[3.527, 0.755, 0.737], +[3.197, 0.551, 0.523], +[0.519, 0.076, 0.086], +[0.268, 0.060, 0.080], +[0.522, 0.075, 0.079], +[1.693, 0.345, 0.351], +[1.466, 0.330, 0.318], +[1.078, 0.974, 1.019], +[0.501, 0.196, 0.200], +[1.032, 0.266, 0.271], +[1.621, 1.156, 1.169], +[2.089, 0.998, 0.972], +[2.106, 0.974, 0.959], +[0.366, 0.305, 0.305], +[0.190, 0.187, 0.183], +[0.071, 0.066, 0.075], +[0.072, 0.068, 0.062], +[0.415, 0.353, 0.457], +[0.034, 0.032, 0.028], +[0.031, 0.027, 0.032], +[0.024, 0.007, 0.007] + ] + }, + + { + "system": "Intel NUC, 4 cores (Intel i7-6770HQ), 32 GiB RAM, 1 TB NVMe SSD", + "time": "2020-04-15 00:00:00", + "result": + [ +[0.003, 0.002, 0.001], +[0.025, 
0.016, 0.018], +[0.084, 0.058, 0.057], +[0.158, 0.092, 0.085], +[0.273, 0.211, 0.190], +[0.671, 0.555, 0.539], +[0.031, 0.033, 0.033], +[0.026, 0.019, 0.017], +[1.183, 1.110, 1.090], +[1.330, 1.246, 1.254], +[0.352, 0.297, 0.296], +[0.441, 0.375, 0.352], +[1.611, 1.491, 1.439], +[2.130, 2.022, 1.976], +[1.903, 1.795, 1.819], +[1.927, 1.851, 1.861], +[5.282, 5.155, 5.172], +[3.246, 3.313, 3.189], +[12.059, 11.378, 10.562], +[0.146, 0.092, 0.090], +[2.103, 1.496, 1.477], +[2.447, 1.777, 1.734], +[5.123, 3.999, 3.955], +[3.733, 1.808, 1.775], +[0.685, 0.530, 0.523], +[0.525, 0.446, 0.438], +[0.755, 0.545, 0.547], +[2.052, 1.416, 1.403], +[2.976, 2.441, 2.423], +[2.197, 2.189, 2.164], +[1.748, 1.596, 1.607], +[2.773, 2.481, 2.466], +[18.903, 19.166, 16.563], +[7.457, 7.116, 6.943], +[7.311, 6.957, 6.958], +[3.036, 3.005, 2.991], +[0.247, 0.186, 0.162], +[0.100, 0.063, 0.065], +[0.098, 0.061, 0.056], +[0.434, 0.344, 0.331], +[0.040, 0.025, 0.025], +[0.049, 0.026, 0.026], +[0.022, 0.008, 0.006] + ] + }, ]; @@ -2810,6 +2963,9 @@ Results for AMD EPYC 7502P are from Kostiantyn Velychkovskyi.
    Results for Pinebook Pro are from Aleksey R. @kITerE.
    Results for AMD Ryzen are from Alexey Milovidov. Firefox was running in background.
    Results for Azure E32s are from Piotr Maśko.
+Results for MacBook Pro are from Denis Glazachev. macOS Catalina Version 10.15.4 (19E266). For "drop caches", the "Free Up RAM" feature in CleanMyMac is used.
+Results for AMD EPYC 7702 are from Peng Gao at sina.com.
    +Results for Intel NUC are from Alexander Zaitsev, Altinity.
    Xeon Gold 6230 server is using 4 x SAMSUNG datacenter class SSD in RAID-10.
    Results for Yandex Managed ClickHouse for "cold cache" are biased and should not be compared, because cache was not flushed for every next query.
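Each "result" entry above stores three timings per benchmark query. A minimal offline-comparison sketch in Python, assuming the usual convention of taking the best of the three runs per query and aggregating per-query ratios with a geometric mean; the two sample arrays are a tiny illustrative subset of the data above, and the helper itself is not part of the page:

    # Hypothetical comparison helper for two "result" arrays; illustrative only.
    import math

    def relative_speed(base, other):
        """Geometric mean of per-query speed ratios, best of three runs each."""
        ratios = [min(b) / min(o) for b, o in zip(base, other)
                  if min(b) > 0 and min(o) > 0]
        return math.exp(sum(map(math.log, ratios)) / len(ratios))

    macbook = [[0.028, 0.031, 0.025], [0.060, 0.058, 0.047]]
    epyc    = [[0.252, 0.072, 0.057], [0.113, 0.066, 0.057]]

    # Prints ~0.60: on these two short queries the EPYC box shows no advantage.
    print(f"speed of EPYC relative to MacBook: {relative_speed(macbook, epyc):.2f}x")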
    diff --git a/website/css/docs.css b/website/css/docs.css index f4c4857bc7e..7e4e1040848 100644 --- a/website/css/docs.css +++ b/website/css/docs.css @@ -1,3 +1,14 @@ +details { + background: #444451; + padding: 1rem; + margin-bottom: 1rem; + margin-top: 1rem; +} + +summary { + font-weight: bold; +} + #sidebar { position: fixed; z-index: 50; @@ -109,6 +120,15 @@ border-color: #333; } +.algolia-autocomplete .algolia-docsearch-suggestion--content { + background-color: #333; +} + +.algolia-autocomplete .algolia-docsearch-suggestion--content:hover, +.algolia-autocomplete .ds-dropdown-menu .ds-suggestion.ds-cursor .algolia-docsearch-suggestion:not(.suggestion-layout-simple) .algolia-docsearch-suggestion--content { + background-color: #444451 !important; +} + .algolia-autocomplete .algolia-docsearch-suggestion--category-header, .algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column, .algolia-autocomplete .algolia-docsearch-suggestion--title, @@ -184,4 +204,3 @@ color: #bbb; } } - diff --git a/website/images/flags/es.svg b/website/images/flags/es.svg index 04f609b6c1d..d859aa650b2 100644 --- a/website/images/flags/es.svg +++ b/website/images/flags/es.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/website/images/flags/tr.svg b/website/images/flags/tr.svg new file mode 100644 index 00000000000..30524de46d8 --- /dev/null +++ b/website/images/flags/tr.svg @@ -0,0 +1,8 @@ + + + + + + + diff --git a/website/images/index/linearly-scalable.svg b/website/images/index/linearly-scalable.svg new file mode 100644 index 00000000000..b2cd41338ec --- /dev/null +++ b/website/images/index/linearly-scalable.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/website/images/index/shield.svg b/website/images/index/shield.svg new file mode 100644 index 00000000000..e48b824909f --- /dev/null +++ b/website/images/index/shield.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/website/index.html b/website/index.html index 8770c8a97a9..e2ac6e31441 100644 --- a/website/index.html +++ b/website/index.html @@ -1,3 +1,7 @@ +{% set prefetch_items = [ + ('/docs/en/', 'document') +] %} + {% extends "templates/base.html" %} {% block content %} @@ -11,7 +15,7 @@ {% include "templates/index/efficient.html" %} {% include "templates/index/rich.html" %} {% include "templates/index/features.html" %} -{% include "templates/index/reliable.html" %} +{% include "templates/index/scalable.html" %} {% include "templates/index/use.html" %} {% include "templates/index/quickstart.html" %} {% include "templates/index/community.html" %} diff --git a/website/js/base.js b/website/js/base.js index 8dfd426d6ed..4e43a44d63a 100644 --- a/website/js/base.js +++ b/website/js/base.js @@ -38,15 +38,32 @@ window.history.replaceState('', document.title, dst); } }); + + var top_nav = $('#top-nav.sticky-top'); + if (window.location.hash.length > 1 && top_nav.length) { + var offset = $(window.location.hash).offset().top - top_nav.height() * 1.5; + $('html, body').animate({ + scrollTop: offset + }, 70); + } + + $('img').each(function() { + var src = $(this).attr('data-src'); + if (src) { + $(this).attr('src', src); + } + }); + (function (d, w, c) { (w[c] = w[c] || []).push(function() { + var is_single_page = $('html').attr('data-single-page') === 'true'; try { w.yaCounter18343495 = new Ya.Metrika2({ - id:18343495, - clickmap:true, - trackLinks:true, - accurateTrackBounce:true, - webvisor:true + id: 18343495, + clickmap: !is_single_page, + trackLinks: !is_single_page, + 
accurateTrackBounce: !is_single_page, + webvisor: !is_single_page }); } catch(e) { } }); @@ -62,12 +79,14 @@ d.addEventListener("DOMContentLoaded", f, false); } else { f(); } })(document, window, "yandex_metrika_callbacks2"); + var beforePrint = function() { var details = document.getElementsByTagName("details"); for (var i = 0; i < details.length; ++i) { details[i].open = 1; } }; + if (window.matchMedia) { window.matchMedia('print').addListener(function(q) { if (q.matches) { diff --git a/website/js/docs.js b/website/js/docs.js index d54644cd040..364531f0521 100644 --- a/website/js/docs.js +++ b/website/js/docs.js @@ -1,8 +1,9 @@ function onResize() { var window_height = $(window).height(); - $('#sidebar, #toc.toc-right').css({ - 'height': (window_height - $('#top-nav').height()) + 'px' - }); + var window_width = $(window).width(); + var is_wide = window_width >= 768; + var docs_top_nav = $('#top-nav.bg-dark-alt'); + $('body').attr('data-offset', window_height.toString()); var sidebar = $('#sidebar'); var languages = $('#languages-dropdown') @@ -12,17 +13,33 @@ function onResize() { } else { single_page_switch.removeClass('float-right'); } - if ($(window).width() >= 768) { + if (is_wide) { sidebar.removeClass('collapse'); languages.detach().appendTo($('#languages-wrapper')); - } else { sidebar.addClass('collapse'); languages.detach().insertBefore(single_page_switch); languages.addClass('float-right'); single_page_switch.removeClass('float-right'); } + if (window_height < 800 && is_wide) { + docs_top_nav.removeClass('sticky-top'); + $('#sidebar, #toc.toc-right').css({ + 'height': window_height, + 'position': 'sticky', + 'top': 0 + }); + } else { + var top_nav_height = docs_top_nav.height(); + docs_top_nav.addClass('sticky-top'); + $('#sidebar, #toc.toc-right').css({ + 'height': (window_height - top_nav_height) + 'px', + 'position': 'fixed', + 'top': top_nav_height + 16 + }); + } } + $(document).ready(function () { onResize(); $('#sidebar .nav-link.active').parents('.collapse').each(function() { @@ -49,6 +66,15 @@ $(document).ready(function () { } }); }); + $('#sidebar').on('shown.bs.collapse', function () { + onResize(); + $('body').on('touchmove', function (e) { + e.preventDefault(); + }); + }); + $('#sidebar').on('hidden.bs.collapse', function () { + $('body').on('touchmove', function () {}); + }); var headers = $('#content h1, #content h2, #content h3, #content h4, #content h5, #content h6'); headers.mouseenter(function() { @@ -85,9 +111,12 @@ $(document).ready(function () { advancedSyntax: true, clickAnalytics: true, hitsPerPage: 25, - 'facetFilters': ["lang:" + $('html').attr('lang')] + 'facetFilters': [ + 'lang:' + $('html').attr('lang'), + 'version:' + $('html').attr('data-version') + ] }, - debug: true + debug: false }); } }); diff --git a/website/locale/en/LC_MESSAGES/messages.po b/website/locale/en/LC_MESSAGES/messages.po index c272d156de6..ef9726c101c 100644 --- a/website/locale/en/LC_MESSAGES/messages.po +++ b/website/locale/en/LC_MESSAGES/messages.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: en\n" @@ -80,11 +80,15 @@ msgstr "" "codebase and waits for fellow native speakers to make it more human-" "readable." +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." 
+msgstr "You can also use the original English version as a reference." + #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "Help ClickHouse documentation by editing this page" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "Built from" @@ -184,15 +188,15 @@ msgstr "Email discussions" msgid "Like ClickHouse?" msgstr "Like ClickHouse?" -#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "Help to spread the word about it via" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "and" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "Hosting ClickHouse Meetups" diff --git a/website/locale/es/LC_MESSAGES/messages.mo b/website/locale/es/LC_MESSAGES/messages.mo index e7a8e7bd13c..6f29d0fd3df 100644 Binary files a/website/locale/es/LC_MESSAGES/messages.mo and b/website/locale/es/LC_MESSAGES/messages.mo differ diff --git a/website/locale/es/LC_MESSAGES/messages.po b/website/locale/es/LC_MESSAGES/messages.po index 1a8706e60fd..96632f2b7f3 100644 --- a/website/locale/es/LC_MESSAGES/messages.po +++ b/website/locale/es/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: es\n" @@ -79,11 +79,15 @@ msgstr "" "la base de código principal de ClickHouse y espera a que otros hablantes " "nativos lo hagan más legible por humanos." +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "También puede usar la versión original en inglés como referencia." + #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "Ayuda a la documentación de ClickHouse editando esta página" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "Construido a partir de" @@ -187,15 +191,15 @@ msgstr "Discusiones por correo electrónico" msgid "Like ClickHouse?" msgstr "¿Te gusta ClickHouse?" 
-#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "Ayuda a correr la voz al respecto a través de" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "y" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "Grupos de Meetup de Hosting ClickHouse" diff --git a/website/locale/fa/LC_MESSAGES/messages.mo b/website/locale/fa/LC_MESSAGES/messages.mo index 6d2a415b7fc..d474a31e03a 100644 Binary files a/website/locale/fa/LC_MESSAGES/messages.mo and b/website/locale/fa/LC_MESSAGES/messages.mo differ diff --git a/website/locale/fa/LC_MESSAGES/messages.po b/website/locale/fa/LC_MESSAGES/messages.po index 5e4205412d1..03a7d9baf3f 100644 --- a/website/locale/fa/LC_MESSAGES/messages.po +++ b/website/locale/fa/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: fa\n" @@ -78,11 +78,15 @@ msgstr "" " کنار پایگاه داده اصلی خانه کلیک زندگی می کند و منتظر سخنرانان مادری " "همکار به انسان بیشتر قابل خواندن است." +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "شما همچنین می توانید نسخه اصلی انگلیسی به عنوان یک مرجع استفاده کنید." + #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "راهنما مستندات تاتر با ویرایش این صفحه" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "ساخته شده از" @@ -182,15 +186,15 @@ msgstr "بحث های ایمیل" msgid "Like ClickHouse?" msgstr "مانند خانه کلیک?" -#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "کمک به گسترش این کلمه از طریق" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "و" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "میزبانی اکسسوری تاتر" diff --git a/website/locale/fr/LC_MESSAGES/messages.mo b/website/locale/fr/LC_MESSAGES/messages.mo index 7d67a68c6e5..c5f2ca8793f 100644 Binary files a/website/locale/fr/LC_MESSAGES/messages.mo and b/website/locale/fr/LC_MESSAGES/messages.mo differ diff --git a/website/locale/fr/LC_MESSAGES/messages.po b/website/locale/fr/LC_MESSAGES/messages.po index 38cd1031b49..0797104a1a5 100644 --- a/website/locale/fr/LC_MESSAGES/messages.po +++ b/website/locale/fr/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: 2020-03-30 15:12+0300\n" "Last-Translator: FULL NAME \n" "Language: fr\n" @@ -79,11 +79,17 @@ msgstr "" "code ClickHouse principale et attend que d'autres locuteurs natifs le " "rendent plus lisible par l'homme." +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "" +"Vous pouvez également utiliser la version originale anglaise comme " +"référence." 
+ #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "Aide clickhouse documentation en éditant cette page" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "Construit à partir de" @@ -185,15 +191,15 @@ msgstr "Discussions par courriel" msgid "Like ClickHouse?" msgstr "Comme ClickHouse?" -#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "Aider à passer le mot à ce sujet via" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "et" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "Accueil Clickhouse Meetups" diff --git a/website/locale/ja/LC_MESSAGES/messages.mo b/website/locale/ja/LC_MESSAGES/messages.mo index ec416b79c0a..96cc1b834bd 100644 Binary files a/website/locale/ja/LC_MESSAGES/messages.mo and b/website/locale/ja/LC_MESSAGES/messages.mo differ diff --git a/website/locale/ja/LC_MESSAGES/messages.po b/website/locale/ja/LC_MESSAGES/messages.po index e0c901ea399..9ef50c6d5b9 100644 --- a/website/locale/ja/LC_MESSAGES/messages.po +++ b/website/locale/ja/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: ja\n" @@ -71,11 +71,15 @@ msgstr "" "このドキュメントページの以下の内容は機械翻訳されています。 異な他のウェブサイトで行われませんができます。 " "この翻訳テキスト生活をGitHubリポジトリとClickHouseコードベース、待ち員ネイティブスピーカーで人間が読む." +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "また、参照として、元の英語版を使用することができます。" + #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "このページを編集してClickHouseの文書を編集する" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "から構築" @@ -175,15 +179,15 @@ msgstr "メールでの議論" msgid "Like ClickHouse?" msgstr "ClickHouseのような?" -#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "それについての言葉を広めるのに役立ちます" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "と" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "開催ClickHouse Meetups" diff --git a/website/locale/messages.pot b/website/locale/messages.pot index a09273f2907..8465be2bf7c 100644 --- a/website/locale/messages.pot +++ b/website/locale/messages.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -64,11 +64,15 @@ msgid "" "readable." msgstr "" +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "" + #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "" @@ -168,15 +172,15 @@ msgstr "" msgid "Like ClickHouse?" 
msgstr "" -#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "" diff --git a/website/locale/ru/LC_MESSAGES/messages.mo b/website/locale/ru/LC_MESSAGES/messages.mo index d6e0e5eaffe..8aecc12706d 100644 Binary files a/website/locale/ru/LC_MESSAGES/messages.mo and b/website/locale/ru/LC_MESSAGES/messages.mo differ diff --git a/website/locale/ru/LC_MESSAGES/messages.po b/website/locale/ru/LC_MESSAGES/messages.po index 32241543cbd..27a36517edd 100644 --- a/website/locale/ru/LC_MESSAGES/messages.po +++ b/website/locale/ru/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: ru\n" @@ -64,7 +64,7 @@ msgstr "ООО «Яндекс»" #: templates/docs/content.html:6 msgid "Help wanted!" -msgstr "Помощь нужна!" +msgstr "Нужна помощь!" #: templates/docs/content.html:7 msgid "" @@ -74,17 +74,23 @@ msgid "" "codebase and waits for fellow native speakers to make it more human-" "readable." msgstr "" -"Следующее содержание этой страницы документации было переведено на " -"машинный язык. Но в отличие от других сайтов, это делается не на лету. " +"Нижеследующее содержание этой страницы документации было создано через " +"машинный перевод. Но в отличие от других сайтов, это делается не на лету. " "Этот переведенный текст живет в репозитории GitHub вместе с основной " "кодовой базой ClickHouse и ждет, когда другие носители языка сделают его " -"более удобочитаемым для человека." +"более удобочитаемым." + +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "" +"Вы также можете использовать оригинальную английскую версию в качестве " +"образца." #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "Помогите документации ClickHouse, отредактировав эту страницу" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "Собрано из" @@ -184,15 +190,15 @@ msgstr "Обсуждения по электронной почте" msgid "Like ClickHouse?" msgstr "Нравится ClickHouse?" -#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "Помогите распространить информацию о нём через" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "и" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "Таких Встреч ClickHouse " diff --git a/website/locale/tr/LC_MESSAGES/messages.mo b/website/locale/tr/LC_MESSAGES/messages.mo new file mode 100644 index 00000000000..c8aa99e505d Binary files /dev/null and b/website/locale/tr/LC_MESSAGES/messages.mo differ diff --git a/website/locale/tr/LC_MESSAGES/messages.po b/website/locale/tr/LC_MESSAGES/messages.po new file mode 100644 index 00000000000..562fde71a59 --- /dev/null +++ b/website/locale/tr/LC_MESSAGES/messages.po @@ -0,0 +1,271 @@ +# Turkish translations for PROJECT. 
+# Copyright (C) 2020 ORGANIZATION +# This file is distributed under the same license as the PROJECT project. +# FIRST AUTHOR , 2020. +# +msgid "" +msgstr "" +"Project-Id-Version: PROJECT VERSION\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" +"PO-Revision-Date: 2020-04-15 13:17+0000\n" +"Last-Translator: FULL NAME \n" +"Language: tr\n" +"Language-Team: tr \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.8.0\n" + +#: templates/common_meta.html:5 templates/common_meta.html:9 +#: templates/common_meta.html:16 +msgid "ClickHouse DBMS" +msgstr "ClickHouse DBMS" + +#: templates/common_meta.html:11 +msgid "" +"ClickHouse is an open source column-oriented database management system " +"that allows generating analytical data reports in real time using SQL " +"queries." +msgstr "" +"ClickHouse SQL sorguları kullanarak gerçek zamanlı olarak analitik veri " +"raporları üreten sağlayan bir açık kaynak sütun odaklı veritabanı yönetim" +" sistemidir." + +#: templates/common_meta.html:19 +msgid "" +"ClickHouse is an open source distributed column-oriented database " +"management system that allows generating analytical data reports in real " +"time using SQL queries. Сreated by Yandex ClickHouse manages extremely " +"large volumes of data in a stable and sustainable manner." +msgstr "" +"ClickHouse is an open source distributed column-oriented database " +"management system that allows generating analytical data reports in real " +"time using SQL queries. Сreated by Yandex ClickHouse manages extremely " +"large volumes of data in a stable and sustainable manner." + +#: templates/footer.html:8 +msgid "ClickHouse source code is published under the Apache 2.0 License." +msgstr "ClickHouse kaynak kodu Apache 2.0 Lisansı altında yayınlandı." + +#: templates/footer.html:8 +msgid "" +"Software is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR " +"CONDITIONS OF ANY KIND, either express or implied." +msgstr "" +"Yazılım \"OLDUĞU GİBİ\" BAZINDA dağıtılır, GARANTİ VEYA HERHANGİ bir şart" +" OLMADAN, Açık veya zımni." + +#: templates/footer.html:11 +msgid "Yandex LLC" +msgstr "Yandex LLC" + +#: templates/docs/content.html:6 +msgid "Help wanted!" +msgstr "Yardım istedi!" + +#: templates/docs/content.html:7 +msgid "" +"The following content of this documentation page has been machine-" +"translated. But unlike other websites, it is not done on the fly. This " +"translated text lives on GitHub repository alongside main ClickHouse " +"codebase and waits for fellow native speakers to make it more human-" +"readable." +msgstr "" +"Bu Dokümantasyon sayfasının aşağıdaki içeriği makine tarafından " +"çevrilmiştir. Ancak diğer web sitelerinin aksine, anında yapılmaz. Bu " +"çevrilmiş metin, ana ClickHouse kod tabanının yanında GitHub deposunda " +"yaşar ve diğer anadili konuşanların daha insan tarafından okunabilir " +"olmasını bekler." + +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "Orijinal İngilizce sürümünü referans olarak da kullanabilirsiniz." 
+ +#: templates/docs/content.html:10 +msgid "Help ClickHouse documentation by editing this page" +msgstr "Bu sayfayı düzenleyerek ClickHouse belgelerine yardım edin" + +#: templates/docs/content.html:27 +msgid "Built from" +msgstr "Dahili" + +#: templates/docs/sidebar.html:3 +msgid "Multi-page or single-page" +msgstr "Çok sayfalı veya tek sayfalı" + +#: templates/docs/sidebar.html:5 +msgid "Multi-page version" +msgstr "Çok sayfalı sürüm" + +#: templates/docs/sidebar.html:8 +msgid "Single-page version" +msgstr "Tek sayfalık sürüm" + +#: templates/docs/sidebar.html:13 +msgid "Version" +msgstr "Sürüm" + +#: templates/docs/sidebar.html:13 templates/docs/sidebar.html:19 +msgid "latest" +msgstr "son" + +#: templates/docs/sidebar.html:36 +msgid "PDF version" +msgstr "PDF versiyonu" + +#: templates/docs/toc.html:8 +msgid "Table of Contents" +msgstr "İçindekiler tablosu" + +#: templates/index/community.html:4 +msgid "ClickHouse community" +msgstr "ClickHouse topluluğu" + +#: templates/index/community.html:13 templates/index/community.html:14 +msgid "ClickHouse YouTube Channel" +msgstr "ClickHouse YouTube Kanalı" + +#: templates/index/community.html:25 templates/index/community.html:26 +msgid "ClickHouse Official Twitter Account" +msgstr "ClickHouse Resmi Twitter Hesabı" + +#: templates/index/community.html:36 templates/index/community.html:37 +msgid "ClickHouse at Telegram" +msgstr "Telegram'da ClickHouse" + +#: templates/index/community.html:41 +msgid "Chat with real users in " +msgstr "Gerçek kullanıcılar ile sohbet " + +#: templates/index/community.html:44 templates/index/community.html:116 +msgid "English" +msgstr "İngilizce" + +#: templates/index/community.html:45 +msgid "or in" +msgstr "veya içinde" + +#: templates/index/community.html:47 templates/index/community.html:117 +msgid "Russian" +msgstr "Rusça" + +#: templates/index/community.html:65 +msgid "Open GitHub issue to ask for help or to file a feature request" +msgstr "Yardım istemek veya bir özellik isteği göndermek için GitHub sorununu açın" + +#: templates/index/community.html:76 templates/index/community.html:77 +msgid "ClickHouse Slack Workspace" +msgstr "ClickHouse Slack Çalışma Alanı" + +#: templates/index/community.html:82 +msgid "Multipurpose public hangout" +msgstr "Çok amaçlı kamu hangout" + +#: templates/index/community.html:101 +msgid "Ask any questions" +msgstr "Herhangi bir soru sorun" + +#: templates/index/community.html:115 +msgid "ClickHouse Blog" +msgstr "ClickHouse Blog" + +#: templates/index/community.html:116 +msgid "in" +msgstr "içinde" + +#: templates/index/community.html:128 templates/index/community.html:129 +msgid "ClickHouse at Google Groups" +msgstr "Google Grupları'nda ClickHouse" + +#: templates/index/community.html:133 +msgid "Email discussions" +msgstr "E-posta tartış discussionsmaları" + +#: templates/index/community.html:142 +msgid "Like ClickHouse?" +msgstr "ClickHouse Gibi Mi?" + +#: templates/index/community.html:143 +msgid "Help to spread the word about it via" +msgstr "Aracılığıyla bu konuda kelime yaymak için yardım" + +#: templates/index/community.html:144 +msgid "and" +msgstr "ve" + +#: templates/index/community.html:153 +msgid "Hosting ClickHouse Meetups" +msgstr "ClickHouse Buluşmaları Barındırma" + +#: templates/index/community.html:157 +msgid "" +"ClickHouse meetups are essential for strengthening community worldwide, " +"but they couldn't be possible without the help of local organizers. 
" +"Please, feel this form if you want to become one or want to meet " +"ClickHouse core team for any other reason." +msgstr "" +"ClickHouse buluşmalar dünya çapında toplumu güçlendirmek için gereklidir," +" ancak yerel organizatörlerin yardımı olmadan mümkün olamazdı. Eğer biri " +"olmak istiyorsanız veya başka bir nedenle ClickHouse çekirdek ekibi " +"karşılamak istiyorsanız, bu formu hissedin." + +#: templates/index/community.html:159 +msgid "ClickHouse Meetup" +msgstr "ClickHouse Meetup" + +#: templates/index/community.html:165 +msgid "Name" +msgstr "Ad" + +#: templates/index/community.html:168 +msgid "Email" +msgstr "Posta" + +#: templates/index/community.html:171 +msgid "Company" +msgstr "Şirket" + +#: templates/index/community.html:174 +msgid "City" +msgstr "Şehir" + +#: templates/index/community.html:179 +msgid "We'd like to host a public ClickHouse Meetup" +msgstr "Halka açık bir ClickHouse buluşmasına ev sahipliği yapmak istiyoruz" + +#: templates/index/community.html:185 +msgid "We'd like to invite Yandex ClickHouse team to our office" +msgstr "Yandex ClickHouse ekibini ofisimize davet etmek istiyoruz" + +#: templates/index/community.html:191 +msgid "We'd like to invite Yandex ClickHouse team to another event we organize" +msgstr "" +"Yandex ClickHouse ekibini organize ettiğimiz başka bir etkinliğe davet " +"etmek istiyoruz" + +#: templates/index/community.html:197 +msgid "We're interested in commercial consulting, support or managed service" +msgstr "Ticari danışmanlık, destek veya yönetilen hizmetle ilgileniyoruz" + +#: templates/index/community.html:201 +msgid "Additional comments" +msgstr "Ek yorumlar" + +#: templates/index/community.html:203 +msgid "Send" +msgstr "Göndermek" + +#: templates/index/community.html:212 +msgid "" +"If you have any more thoughts or questions, feel free to contact Yandex " +"ClickHouse team directly at" +msgstr "" +"Daha fazla düşünceniz veya sorunuz varsa, Yandex ClickHouse ekibiyle " +"doğrudan iletişime geçmekten çekinmeyin" + +#: templates/index/community.html:213 +msgid "turn on JavaScript to see email address" +msgstr "e-posta adresini görmek için JavaScript'i açın" + diff --git a/website/locale/zh/LC_MESSAGES/messages.mo b/website/locale/zh/LC_MESSAGES/messages.mo index 3a2aa7f01c6..896fedc0658 100644 Binary files a/website/locale/zh/LC_MESSAGES/messages.mo and b/website/locale/zh/LC_MESSAGES/messages.mo differ diff --git a/website/locale/zh/LC_MESSAGES/messages.po b/website/locale/zh/LC_MESSAGES/messages.po index f86d756797b..21ec6c77696 100644 --- a/website/locale/zh/LC_MESSAGES/messages.po +++ b/website/locale/zh/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-03-30 15:12+0300\n" +"POT-Creation-Date: 2020-04-15 13:19+0000\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: zh\n" @@ -71,11 +71,15 @@ msgstr "" "本文档页面的以下内容已经过机器翻译。 但与其他网站不同,它不是在飞行中完成。 " "这个翻译的文本生活在github存储库旁边的主ClickHouse代码库,并等待同胞母语,使其更具人类可读性。" +#: templates/docs/content.html:7 +msgid "You can also use the original English version as a reference." +msgstr "您也可以使用原始的英文版本作为参考。" + #: templates/docs/content.html:10 msgid "Help ClickHouse documentation by editing this page" msgstr "通过编辑此页面帮助ClickHouse文档" -#: templates/docs/content.html:31 +#: templates/docs/content.html:27 msgid "Built from" msgstr "建于" @@ -175,15 +179,15 @@ msgstr "电子邮件讨论" msgid "Like ClickHouse?" msgstr "像克里克豪斯?" 
-#: templates/index/community.html:142 +#: templates/index/community.html:143 msgid "Help to spread the word about it via" msgstr "帮助通过传播这个词" -#: templates/index/community.html:143 +#: templates/index/community.html:144 msgid "and" msgstr "和" -#: templates/index/community.html:155 +#: templates/index/community.html:153 msgid "Hosting ClickHouse Meetups" msgstr "碌莽禄Hosting拢Hosting0755-88888888" diff --git a/website/robots.txt b/website/robots.txt index 2af539491b1..f9970836f18 100644 --- a/website/robots.txt +++ b/website/robots.txt @@ -1,18 +1,15 @@ User-agent: * Disallow: /docs/en/single/ -Disallow: /docs/ru/single/ Disallow: /docs/zh/single/ +Disallow: /docs/es/single/ +Disallow: /docs/fr/single/ +Disallow: /docs/ru/single/ Disallow: /docs/ja/single/ Disallow: /docs/fa/single/ Disallow: /docs/v1* Disallow: /docs/v2* Disallow: /docs/v3* -Disallow: /docs/en/search.html -Disallow: /docs/ru/search.html -Disallow: /docs/ja/search.html -Disallow: /docs/zh/search.html -Disallow: /docs/fa/search.html Disallow: /cdn-cgi/ Allow: / Host: https://clickhouse.tech -Sitemap: https://clickhouse.tech/docs/sitemap.xml +Sitemap: https://clickhouse.tech/sitemap.xml diff --git a/website/sitemap.xml b/website/sitemap.xml index 9305d9d0454..a147404ec6f 100644 --- a/website/sitemap.xml +++ b/website/sitemap.xml @@ -3,14 +3,17 @@ https://clickhouse.tech/docs/en/sitemap.xml + + https://clickhouse.tech/docs/zh/sitemap.xml + https://clickhouse.tech/docs/es/sitemap.xml - https://clickhouse.tech/docs/ru/sitemap.xml + https://clickhouse.tech/docs/fr/sitemap.xml - https://clickhouse.tech/docs/zh/sitemap.xml + https://clickhouse.tech/docs/ru/sitemap.xml https://clickhouse.tech/docs/ja/sitemap.xml @@ -19,6 +22,6 @@ https://clickhouse.tech/docs/fa/sitemap.xml - https://clickhouse.tech/docs/sitemap_static.xml + https://clickhouse.tech/sitemap_static.xml diff --git a/website/sitemap_static.xml b/website/sitemap_static.xml index 7a08e066874..33d258674f6 100644 --- a/website/sitemap_static.xml +++ b/website/sitemap_static.xml @@ -6,10 +6,14 @@
    https://clickhouse.tech/benchmark.html - daily + weekly https://clickhouse.tech/benchmark_hardware.html + weekly + + + https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html daily diff --git a/website/templates/common_js.html b/website/templates/common_js.html index 52f0e8dae32..b2bed146503 100644 --- a/website/templates/common_js.html +++ b/website/templates/common_js.html @@ -1,6 +1,5 @@ - diff --git a/website/templates/common_meta.html b/website/templates/common_meta.html index 7ed5a8409ec..2aca17f93a2 100644 --- a/website/templates/common_meta.html +++ b/website/templates/common_meta.html @@ -19,3 +19,7 @@ content="{% if description %}{{ description }}{% else %}{{ _('ClickHouse is an open source distributed column-oriented database management system that allows generating analytical data reports in real time using SQL queries. Сreated by Yandex ClickHouse manages extremely large volumes of data in a stable and sustainable manner.') }}{% endif %}"/> + +{% for prefetch_item in prefetch_items %} + +{% endfor %} diff --git a/website/templates/docs/content.html b/website/templates/docs/content.html index 320f1a2b53f..2ad6855a684 100644 --- a/website/templates/docs/content.html +++ b/website/templates/docs/content.html @@ -4,7 +4,7 @@ {% if page.meta.machine_translated %} @@ -32,3 +28,12 @@ {% endif %} +{% if single_page and page.content %} + +(function() { + {% for chunk in page.content|chunks %} + document.write({{ chunk|tojson|safe }}); + {% endfor %} +})(); + +{% endif %} diff --git a/website/templates/docs/nav.html b/website/templates/docs/nav.html index 42808819ef3..adc7231658f 100644 --- a/website/templates/docs/nav.html +++ b/website/templates/docs/nav.html @@ -6,7 +6,11 @@ diff --git a/website/templates/docs/sidebar.html b/website/templates/docs/sidebar.html index 96715fbb6c4..a4ff0e780d8 100644 --- a/website/templates/docs/sidebar.html +++ b/website/templates/docs/sidebar.html @@ -21,7 +21,7 @@ {% for release in config.extra.stable_releases %} {{ release.0 }} + href="/docs/{{ release.0 }}/{{ language }}/">{{ release.0 }}{% if release.1.2 %} LTS{% endif %} {% endfor %} @@ -32,7 +32,7 @@ {% set level = 1 %} {% include "templates/docs/sidebar-item.html" %} {% endfor %} - + {{ _('PDF version') }}
    {{ _('PDF version') }}
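The single-page branch added to docs/content.html above streams page.content into document.write() through a custom chunks Jinja filter, so each call receives a JSON-escaped string of bounded size. The filter itself is defined in the docs build tooling rather than in this diff; the sketch below is an assumed implementation (only the filter name comes from the template, the chunk size is invented) to show the mechanics:

    # Assumed re-implementation of the `chunks` filter used by the template above.
    from jinja2 import Environment

    def chunks(value, size=10000):  # chunk size is an invented default
        return [value[i:i + size] for i in range(0, len(value), size)]

    env = Environment()
    env.filters['chunks'] = chunks
    template = env.from_string(
        '{% for chunk in content|chunks %}'
        'document.write({{ chunk|tojson|safe }});\n'
        '{% endfor %}')
    print(template.render(content='<h1>Single-page docs</h1>' * 2))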
    diff --git a/website/templates/footer.html b/website/templates/footer.html index 42f1e4263be..765ea63d528 100644 --- a/website/templates/footer.html +++ b/website/templates/footer.html @@ -8,7 +8,7 @@ {{ _('ClickHouse source code is published under the Apache 2.0 License.') }} {{ _('Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.') }}
    - © 2016–2020 {{ _('Yandex LLC') }}} + © 2016–2020 {{ _('Yandex LLC') }}
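index.html above now declares prefetch_items and common_meta.html loops over them; the actual link markup is stripped from this diff, so the template string in the sketch below is an assumption that just demonstrates how the loop could render:

    # Assumed rendering of the prefetch_items loop; the <link rel="prefetch">
    # markup is a guess, since the tag bodies are stripped from this diff.
    from jinja2 import Environment

    env = Environment()
    template = env.from_string(
        '{% for href, as_type in prefetch_items %}'
        '<link rel="prefetch" href="{{ href }}" as="{{ as_type }}"/>\n'
        '{% endfor %}')
    print(template.render(prefetch_items=[('/docs/en/', 'document')]))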
    diff --git a/website/templates/index/community.html b/website/templates/index/community.html index 206dce5d34a..e230cac8da9 100644 --- a/website/templates/index/community.html +++ b/website/templates/index/community.html @@ -1,7 +1,7 @@
    -

    {{ _('ClickHouse community') }}

    +

    {{ _('ClickHouse community') }}

    @@ -9,7 +9,7 @@
    - {{ _('ClickHouse YouTube Channel') }} @@ -21,7 +21,7 @@
    - {{ _('ClickHouse Official Twitter Account') }} @@ -32,7 +32,7 @@
    - {{ _('ClickHouse at Telegram') }} @@ -56,7 +56,7 @@
    - ClickHouse GitHub @@ -69,10 +69,10 @@
    -
    - {{ _('ClickHouse Slack Workspace') }}
    - ClickHouse at Stack Overflow @@ -107,7 +107,7 @@
    - ClickHouse Blog @@ -124,7 +124,7 @@
    - {{ _('ClickHouse at Google Groups') }} @@ -139,9 +139,9 @@
    -

    {{ _('Like ClickHouse?') }} {{ _('Help to spread the word about it via') }} Facebook, - Twitter{{ _('and') }} - LinkedIn!

    +

    {{ _('Like ClickHouse?') }}

    +

    {{ _('Help to spread the word about it via') }} Facebook, + Twitter {{ _('and') }} LinkedIn!

    @@ -150,18 +150,18 @@
    -
    -
    -

    {{ _('Hosting ClickHouse Meetups') }}

    +

    {{ _('Hosting ClickHouse Meetups') }}

    +
    +

{{ _('ClickHouse meetups are essential for strengthening the community worldwide, but they wouldn\'t be possible without the help of local organizers. Please fill in this form if you want to become one or want to meet the ClickHouse core team for any other reason.') }}

    - {{ _('ClickHouse Meetup') }} + {{ _('ClickHouse Meetup') }}
    -
    +
    @@ -206,7 +206,7 @@
    -
    +

    {{ _('If you have any more thoughts or questions, feel free to contact Yandex ClickHouse team directly at') }} diff --git a/website/templates/index/efficient.html b/website/templates/index/efficient.html index ae0a7f2c17b..97d3e0c20ca 100644 --- a/website/templates/index/efficient.html +++ b/website/templates/index/efficient.html @@ -2,14 +2,14 @@

    -

    Hardware efficient

    +

    Hardware efficient

    ClickHouse processes typical analytical queries two to three orders of magnitude faster than traditional row-oriented systems with the same available I/O throughput and CPU capacity. Columnar storage format allows fitting more hot data in RAM, which leads to shorter typical response times.

    Total cost of ownership could be further lowered by using commodity hardware with rotating disk drives instead of enterprise grade NVMe or SSD without significant sacrifices in latency for most kinds of queries.

    - ClickHouse is hardware efficient + ClickHouse is hardware efficient
    @@ -37,6 +37,6 @@
    - ClickHouse не тормозит + ClickHouse не тормозит
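The hardware-efficiency text above can be made concrete with rough arithmetic: a column store scans only the queried columns, and compression shrinks them further. All numbers in the sketch below are invented for illustration, not measurements:

    # Toy back-of-envelope estimate of scan volume: row store vs column store.
    total_columns = 100
    queried_columns = 3
    row_bytes = 500                  # assumed average uncompressed row size
    rows = 1_000_000_000
    compression_ratio = 5            # assumed typical ratio for sorted columnar data

    row_store_scan = rows * row_bytes
    column_store_scan = row_store_scan * queried_columns / total_columns / compression_ratio

    print(f"row store: {row_store_scan / 1e9:.0f} GB, "
          f"column store: {column_store_scan / 1e9:.1f} GB, "
          f"ratio: {row_store_scan / column_store_scan:.0f}x")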
    diff --git a/website/templates/index/features.html b/website/templates/index/features.html index 30d682843b0..c659e0d9301 100644 --- a/website/templates/index/features.html +++ b/website/templates/index/features.html @@ -2,7 +2,7 @@
    -

    ClickHouse. Just makes you think faster!

    +

    ClickHouse. Just makes you think faster!

    diff --git a/website/templates/index/performance.html b/website/templates/index/performance.html index 9f31ad7a3ad..de54b05dd2d 100644 --- a/website/templates/index/performance.html +++ b/website/templates/index/performance.html @@ -38,12 +38,11 @@ rel="external nofollow noreferrer" target="_blank" class="text-reset">Geospatial processing with Clickhouse
    Carto
  • ClickHouse and Vertica comparison
    zhtsh (machine translation from Chinese)
  • -
  • ClickHouse and InfiniDB comparison
    RamboLau (machine translation from Chinese)
  • +
  • MySQL vs ColumnStore vs ClickHouse
    Mafiree (machine translation from Chinese)
  • - - + {## TODO: ##}
    diff --git a/website/templates/index/quickstart.html b/website/templates/index/quickstart.html index 32d3b21bcc5..0afa40e6030 100644 --- a/website/templates/index/quickstart.html +++ b/website/templates/index/quickstart.html @@ -1,6 +1,6 @@
    -

    Quick start

    +

    Quick start

    System requirements for pre-built packages: Linux, x86_64 with SSE 4.2.

    diff --git a/website/templates/index/reliable.html b/website/templates/index/reliable.html deleted file mode 100644 index 05ba1b00027..00000000000 --- a/website/templates/index/reliable.html +++ /dev/null @@ -1,27 +0,0 @@ -
    -
    - -

    Highly reliable

    - -

    ClickHouse has been managing petabytes of data serving a number of highload mass audience services of - Yandex, Russia's - leading search provider and one of the largest European IT companies. - Since 2012, ClickHouse has been providing robust database management for the company's web analytics service, comparison - e-commerce platform, public email service, online advertising platform, business intelligence tools - and infrastructure monitoring.

    - -

    ClickHouse can be configured as a purely distributed system located on independent nodes, - without any single points of failure.

    - -

    Software and hardware failures or misconfigurations do not result in loss of data. Instead of deleting "broken" - data, ClickHouse saves it or asks you what to do before a startup. All data is checksummed before every - read or write to disk or network. It is virtually impossible to delete data by accident as there are safeguards - even for human errors.

    - -

    ClickHouse offers flexible limits on query complexity and resource usage, which can be fine-tuned with settings. - It is possible to simultaneously serve both a number of high priority low-latency requests and some - long-running queries with a background priority.

    -
    -
    diff --git a/website/templates/index/rich.html b/website/templates/index/rich.html index 14b2f86e75f..1f2b4957306 100644 --- a/website/templates/index/rich.html +++ b/website/templates/index/rich.html @@ -3,7 +3,7 @@
    -

    Feature-rich

    +

    Feature-rich

    diff --git a/website/templates/index/scalable.html b/website/templates/index/scalable.html new file mode 100644 index 00000000000..672a02f202b --- /dev/null +++ b/website/templates/index/scalable.html @@ -0,0 +1,16 @@ +
    +
    +
    +
    +

    Linearly scalable

    +

ClickHouse scales well both vertically and horizontally. It is easily adapted to run on a cluster with hundreds or thousands of nodes, on a single server, or even on a tiny virtual machine. Currently, there are installations with multiple trillion rows or hundreds of terabytes of data per single node.

    +

+ There are many ClickHouse clusters consisting of several hundred nodes, including a few clusters of Yandex Metrica, while the largest known ClickHouse cluster is well over a thousand nodes. +

    +
    +
+ Linearly scalable +
    +
    +
    +
    diff --git a/website/templates/index/success.html b/website/templates/index/success.html index 2d34d808e3d..961dc859535 100644 --- a/website/templates/index/success.html +++ b/website/templates/index/success.html @@ -1,6 +1,6 @@
    -

    Success stories

    +

    Success stories

    diff --git a/website/templates/index/use.html b/website/templates/index/use.html index edf4a28cf67..1f345186d71 100644 --- a/website/templates/index/use.html +++ b/website/templates/index/use.html @@ -2,7 +2,7 @@
    -

    When to use ClickHouse

    +

    When to use ClickHouse

    For analytics over a stream of clean, well structured and immutable events or logs. It is recommended to put each such stream into a single wide fact table with pre-joined dimensions.

    @@ -27,7 +27,7 @@
    -

    When NOT to use ClickHouse

    +

    When NOT to use ClickHouse

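The "single wide fact table with pre-joined dimensions" recommendation above amounts to denormalizing at ingestion time, roughly as in this small sketch (all field names are invented for illustration):

    # Pre-join dimension attributes into each event, producing one wide row.
    dimensions = {42: {'device_brand': 'Acme', 'device_model': 'X1'}}

    def widen(event):
        row = dict(event)
        row.update(dimensions.get(event['device_id'], {}))
        return row

    print(widen({'timestamp': '2020-04-15 00:00:00', 'device_id': 42, 'clicks': 3}))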
      diff --git a/website/templates/index/why.html b/website/templates/index/why.html index 131b6757793..a2917258923 100644 --- a/website/templates/index/why.html +++ b/website/templates/index/why.html @@ -2,40 +2,40 @@
      -

      Why ClickHouse might be the right choice for you?

      +

Why might ClickHouse be the right choice?

      - Blazing fast + Blazing fast
      -

      Blazing fast

      +

      Blazing fast

      ClickHouse uses all available hardware to its full potential to process each query as fast as possible. Peak processing performance for a single query stands at more than 2 terabytes per second (after decompression, only used columns). In distributed setup reads are automatically balanced among healthy replicas to avoid increasing latency.

      - Fault tolerant + Fault tolerant
      -

      Fault-tolerant

      +

      Fault-tolerant

      ClickHouse supports multi-master asynchronous replication and can be deployed across multiple datacenters. All nodes are equal, which allows avoiding having single points of failure. Downtime of a single node or the whole datacenter won't affect the system's availability for both reads and writes.

      - Linearly scalable + Easy to use
      -

      Linearly scalable

      -

      ClickHouse scales well both vertically and horizontally. ClickHouse is easily adaptable to perform either on a cluster with hundreds or thousands of nodes or on a single server or even on a tiny virtual machine. Currently, there are installations with more multiple trillion rows or hundreds of terabytes of data per single node.

      +

      Easy to use

      +

      ClickHouse is simple and works out-of-the-box. It streamlines all your data processing: ingest all your structured data into the system and it becomes instantly available for building reports. SQL dialect allows expressing the desired result without involving any custom non-standard API that could be found in some DBMS.

      - Easy to use + Highly reliable
      -

      Easy to use

      -

      ClickHouse is simple and works out-of-the-box. It streamlines all your data processing: ingest all your structured data into the system and it becomes instantly available for building reports. SQL dialect allows expressing the desired result without involving any custom non-standard API that could be found in some DBMS.

      +

      Highly reliable

      +

      ClickHouse can be configured as a purely distributed system located on independent nodes, without any single points of failure. It also includes a lot of enterprise-grade security features and fail-safe mechanisms against human errors.

      diff --git a/website/workers/events.js b/website/workers/events.js deleted file mode 100644 index 653139af9f9..00000000000 --- a/website/workers/events.js +++ /dev/null @@ -1,34 +0,0 @@ -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - let raw = await fetch('https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/README.md'); - let text = await raw.text(); - let lines = text.split('\n'); - let skip = true; - let events = []; - for (let idx in lines) { - let line = lines[idx]; - if (skip) { - if (line.includes('Upcoming Events')) { - skip = false; - } - } else { - if (!line) { continue; }; - line = line.split(']('); - var tail = line[1].split(') '); - events.push({ - 'signup_link': tail[0], - 'event_name': line[0].replace('* [', ''), - 'event_date': tail[1].slice(0, -1).replace('on ', '') - }); - } - } - - let response = new Response(JSON.stringify({ - 'events': events - })); - response.headers.set('Content-Type', 'application/json'); - return response; -} diff --git a/website/workers/meet-form.js b/website/workers/meet-form.js deleted file mode 100644 index 6506d59522e..00000000000 --- a/website/workers/meet-form.js +++ /dev/null @@ -1,75 +0,0 @@ - -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - if (request.method != 'POST') { - return new Response('Bad request', { - status: 400, - statusText: 'Bad request' - }); - } - let url = new URL('https://api.sendgrid.com/v3/mail/send'); - let newHdrs = new Headers(); - newHdrs.set('Authorization', 'Bearer ' + SENDGRID_TOKEN); - newHdrs.set('Content-Type', 'application/json'); - let args = await request.json(); - let subject = args['name'] + ' wants to meet'; - let content = ''; - let argsKeys = Object.keys(args); - if (['name', 'email', 'city', 'company'].filter(n=>!argsKeys.includes(n)).length) { - return new Response('Bad request', { - status: 400, - statusText: 'Bad request' - }); - } - for (let key in args) { - content += key.charAt(0).toUpperCase() + key.slice(1); - content += ':\r\n' + args[key] + '\r\n\r\n'; - } - let body = { - "personalizations": [ - { - "to": [ - { - "email": "clickhouse-feedback@yandex-team.ru", - "name": "ClickHouse Core Team" - } - ], - "subject": subject - } - ], "content": [ - { - "type": "text/plain", - "value": content - } - ], "from": { - "email": "no-reply@clickhouse.tech", - "name": "ClickHouse Website" - }, "reply_to": - { - "email": "no-reply@clickhouse.tech", - "name": "ClickHouse Website" - } - }; - const init = { - body: JSON.stringify(body), - headers: newHdrs, - method: 'POST' - } - - let response = await fetch(url, init); - let status = 200; - if (response.status != 202) { - status = 200; - } - - return new Response('{}', { - status: status, - statusText: response.statusText.replace('Accepted', 'OK'), - headers: new Headers({ - 'Content-Type': 'application/json' - }) - }) -} diff --git a/website/workers/play-api.js b/website/workers/play-api.js deleted file mode 100644 index 62792d37a4d..00000000000 --- a/website/workers/play-api.js +++ /dev/null @@ -1,24 +0,0 @@ -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - let url = new URL(request.url); - url.hostname = 'play-api.clickhouse.tech'; - url.port = 8443; - url.pathname = url.pathname.replace('/api/', '/'); - let newHdrs = new Headers() - - const init = { - body: request.body, - 
headers: request.headers, - method: request.method - } - - let response = await fetch(url, init); - - return new Response(response.body, { - status: response.status, - statusText: response.statusText - }) -} diff --git a/website/workers/repo.js b/website/workers/repo.js deleted file mode 100644 index 470391cf225..00000000000 --- a/website/workers/repo.js +++ /dev/null @@ -1,10 +0,0 @@ -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - let url = new URL(request.url); - url.hostname = 'repo.yandex.ru'; - url.pathname = '/clickhouse' + url.pathname; - return fetch(url) -}