diff --git a/.clang-tidy b/.clang-tidy index 359d717d73d..7241c372319 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -162,56 +162,28 @@ Checks: '*, WarningsAsErrors: '*' -# TODO: use dictionary syntax for CheckOptions when minimum clang-tidy level rose to 15 -# some-check.SomeOption: 'some value' -# instead of -# - key: some-check.SomeOption -# value: 'some value' CheckOptions: - - key: readability-identifier-naming.ClassCase - value: CamelCase - - key: readability-identifier-naming.EnumCase - value: CamelCase - - key: readability-identifier-naming.LocalVariableCase - value: lower_case - - key: readability-identifier-naming.StaticConstantCase - value: aNy_CasE - - key: readability-identifier-naming.MemberCase - value: lower_case - - key: readability-identifier-naming.PrivateMemberPrefix - value: '' - - key: readability-identifier-naming.ProtectedMemberPrefix - value: '' - - key: readability-identifier-naming.PublicMemberCase - value: lower_case - - key: readability-identifier-naming.MethodCase - value: camelBack - - key: readability-identifier-naming.PrivateMethodPrefix - value: '' - - key: readability-identifier-naming.ProtectedMethodPrefix - value: '' - - key: readability-identifier-naming.ParameterPackCase - value: lower_case - - key: readability-identifier-naming.StructCase - value: CamelCase - - key: readability-identifier-naming.TemplateTemplateParameterCase - value: CamelCase - - key: readability-identifier-naming.TemplateUsingCase - value: lower_case - - key: readability-identifier-naming.TypeTemplateParameterCase - value: CamelCase - - key: readability-identifier-naming.TypedefCase - value: CamelCase - - key: readability-identifier-naming.UnionCase - value: CamelCase - - key: readability-identifier-naming.UsingCase - value: CamelCase - - key: modernize-loop-convert.UseCxx20ReverseRanges - value: false - - key: performance-move-const-arg.CheckTriviallyCopyableMove - value: false - # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097 - - key: readability-identifier-naming.TypeTemplateParameterIgnoredRegexp - value: expr-type - - key: cppcoreguidelines-avoid-do-while.IgnoreMacros - value: true + readability-identifier-naming.ClassCase: CamelCase + readability-identifier-naming.EnumCase: CamelCase + readability-identifier-naming.LocalVariableCase: lower_case + readability-identifier-naming.StaticConstantCase: aNy_CasE + readability-identifier-naming.MemberCase: lower_case + readability-identifier-naming.PrivateMemberPrefix: '' + readability-identifier-naming.ProtectedMemberPrefix: '' + readability-identifier-naming.PublicMemberCase: lower_case + readability-identifier-naming.MethodCase: camelBack + readability-identifier-naming.PrivateMethodPrefix: '' + readability-identifier-naming.ProtectedMethodPrefix: '' + readability-identifier-naming.ParameterPackCase: lower_case + readability-identifier-naming.StructCase: CamelCase + readability-identifier-naming.TemplateTemplateParameterCase: CamelCase + readability-identifier-naming.TemplateUsingCase: lower_case + readability-identifier-naming.TypeTemplateParameterCase: CamelCase + readability-identifier-naming.TypedefCase: CamelCase + readability-identifier-naming.UnionCase: CamelCase + readability-identifier-naming.UsingCase: CamelCase + modernize-loop-convert.UseCxx20ReverseRanges: false + performance-move-const-arg.CheckTriviallyCopyableMove: false + # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097 + readability-identifier-naming.TypeTemplateParameterIgnoredRegexp: expr-type + 
cppcoreguidelines-avoid-do-while.IgnoreMacros: true diff --git a/CMakeLists.txt b/CMakeLists.txt index 296fb3de7a8..74e1c0046e6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -121,6 +121,7 @@ if (ENABLE_COLORED_BUILD AND CMAKE_GENERATOR STREQUAL "Ninja") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-color=always") # ... such manually setting of flags can be removed once CMake supports a variable to # activate colors in *all* build systems: https://gitlab.kitware.com/cmake/cmake/-/issues/15502 + # --> available since CMake 3.24: https://stackoverflow.com/a/73349744 endif () include (cmake/check_flags.cmake) @@ -134,24 +135,15 @@ if (COMPILER_CLANG) set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges") endif () - if (HAS_USE_CTOR_HOMING) - # For more info see https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/ - if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing") - endif() + # See https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/ + if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing") endif() no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16 endif () -# If compiler has support for -Wreserved-identifier. It is difficult to detect by clang version, -# because there are two different branches of clang: clang and AppleClang. -# (AppleClang is not supported by ClickHouse, but some developers have misfortune to use it). -if (HAS_RESERVED_IDENTIFIER) - add_compile_definitions (HAS_RESERVED_IDENTIFIER) -endif () - option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON) option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF) option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF) @@ -184,26 +176,12 @@ if (OS_DARWIN) set (ENABLE_CURL_BUILD OFF) endif () -# Ignored if `lld` is used -option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.") - if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE") # Can be lld or ld-lld or lld-13 or /path/to/lld. 
if (LINKER_NAME MATCHES "lld" AND OS_LINUX) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index") message (STATUS "Adding .gdb-index via --gdb-index linker option.") - # we use another tool for gdb-index, because gold linker removes section .debug_aranges, which used inside clickhouse stacktraces - # http://sourceware-org.1504.n7.nabble.com/gold-No-debug-aranges-section-when-linking-with-gdb-index-td540965.html#a556932 - elseif (LINKER_NAME MATCHES "gold$" AND ADD_GDB_INDEX_FOR_GOLD) - find_program (GDB_ADD_INDEX_EXE NAMES "gdb-add-index" DOC "Path to gdb-add-index executable") - if (NOT GDB_ADD_INDEX_EXE) - set (USE_GDB_ADD_INDEX 0) - message (WARNING "Cannot add gdb index to binaries, because gold linker is used, but gdb-add-index executable not found.") - else() - set (USE_GDB_ADD_INDEX 1) - message (STATUS "gdb-add-index found: ${GDB_ADD_INDEX_EXE}") - endif() endif () endif() @@ -302,15 +280,16 @@ if (ENABLE_BUILD_PROFILING) endif () set (CMAKE_CXX_STANDARD 23) -set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html +set (CMAKE_CXX_EXTENSIONS OFF) set (CMAKE_CXX_STANDARD_REQUIRED ON) set (CMAKE_C_STANDARD 11) -set (CMAKE_C_EXTENSIONS ON) +set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C set (CMAKE_C_STANDARD_REQUIRED ON) if (COMPILER_GCC OR COMPILER_CLANG) # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure. + # See https://reviews.llvm.org/D112921 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation") endif () @@ -329,11 +308,7 @@ if (ARCH_AMD64) set(BRANCHES_WITHIN_32B_BOUNDARIES "-Wa,${BRANCHES_WITHIN_32B_BOUNDARIES}") endif() - include(CheckCXXCompilerFlag) - check_cxx_compiler_flag("${BRANCHES_WITHIN_32B_BOUNDARIES}" HAS_BRANCHES_WITHIN_32B_BOUNDARIES) - if (HAS_BRANCHES_WITHIN_32B_BOUNDARIES) - set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}") - endif() + set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}") endif() if (COMPILER_GCC) @@ -445,6 +420,7 @@ option(WERROR "Enable -Werror compiler option" ON) if (WERROR) # Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks. # Instead, adopt modern cmake usage requirement. 
+ # TODO: Set CMAKE_COMPILE_WARNING_AS_ERROR (cmake 3.24) target_compile_options(global-group INTERFACE "-Werror") endif () diff --git a/base/base/phdr_cache.cpp b/base/base/phdr_cache.cpp index c3d7fed2d3f..7d37f01b560 100644 --- a/base/base/phdr_cache.cpp +++ b/base/base/phdr_cache.cpp @@ -1,6 +1,4 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif /// This code was based on the code by Fedor Korotkiy https://www.linkedin.com/in/fedor-korotkiy-659a1838/ diff --git a/base/base/unit.h b/base/base/unit.h index 1fb530be1f0..0fc314af479 100644 --- a/base/base/unit.h +++ b/base/base/unit.h @@ -5,10 +5,8 @@ constexpr size_t KiB = 1024; constexpr size_t MiB = 1024 * KiB; constexpr size_t GiB = 1024 * MiB; -#ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wreserved-identifier" -#endif +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wreserved-identifier" // NOLINTBEGIN(google-runtime-int) constexpr size_t operator"" _KiB(unsigned long long val) { return val * KiB; } @@ -16,6 +14,4 @@ constexpr size_t operator"" _MiB(unsigned long long val) { return val * MiB; } constexpr size_t operator"" _GiB(unsigned long long val) { return val * GiB; } // NOLINTEND(google-runtime-int) -#ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic pop -#endif +#pragma clang diagnostic pop diff --git a/base/readpassphrase/readpassphrase.c b/base/readpassphrase/readpassphrase.c index a84ec43767c..fbd582ffe79 100644 --- a/base/readpassphrase/readpassphrase.c +++ b/base/readpassphrase/readpassphrase.c @@ -27,9 +27,7 @@ #define _PATH_TTY "/dev/tty" #endif -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #include #include diff --git a/cmake/check_flags.cmake b/cmake/check_flags.cmake index 518f9ecf8de..294f135e8ee 100644 --- a/cmake/check_flags.cmake +++ b/cmake/check_flags.cmake @@ -1,7 +1,5 @@ include (CheckCXXCompilerFlag) include (CheckCCompilerFlag) -check_cxx_compiler_flag("-Wreserved-identifier" HAS_RESERVED_IDENTIFIER) -check_cxx_compiler_flag("-Wsuggest-destructor-override" HAS_SUGGEST_DESTRUCTOR_OVERRIDE) -check_cxx_compiler_flag("-Wsuggest-override" HAS_SUGGEST_OVERRIDE) -check_cxx_compiler_flag("-Xclang -fuse-ctor-homing" HAS_USE_CTOR_HOMING) +# Set/unset variable based on existence of compiler flags. Example: +# check_cxx_compiler_flag("-Wreserved-identifier" HAS_RESERVED_IDENTIFIER) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 4d4d741cc3a..4e1954f27f7 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -50,15 +50,18 @@ endif () string (REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION}) list (GET COMPILER_VERSION_LIST 0 COMPILER_VERSION_MAJOR) -# Example values: `lld-10`, `gold`. +# Example values: `lld-10` option (LINKER_NAME "Linker name or full path") +if (LINKER_NAME MATCHES "gold") + message (FATAL_ERROR "Linking with gold is unsupported. Please use lld.") +endif () + # s390x doesnt support lld if (NOT ARCH_S390X) if (NOT LINKER_NAME) if (COMPILER_GCC) find_program (LLD_PATH NAMES "ld.lld") - find_program (GOLD_PATH NAMES "ld.gold") elseif (COMPILER_CLANG) # llvm lld is a generic driver. 
# Invoke ld.lld (Unix), ld64.lld (macOS), lld-link (Windows), wasm-ld (WebAssembly) instead @@ -67,13 +70,11 @@ if (NOT ARCH_S390X) elseif (OS_DARWIN) find_program (LLD_PATH NAMES "ld64.lld-${COMPILER_VERSION_MAJOR}" "ld64.lld") endif () - find_program (GOLD_PATH NAMES "ld.gold" "gold") endif () endif() endif() if ((OS_LINUX OR OS_DARWIN) AND NOT LINKER_NAME) - # prefer lld linker over gold or ld on linux and macos if (LLD_PATH) if (COMPILER_GCC) # GCC driver requires one of supported linker names like "lld". @@ -83,17 +84,6 @@ if ((OS_LINUX OR OS_DARWIN) AND NOT LINKER_NAME) set (LINKER_NAME ${LLD_PATH}) endif () endif () - - if (NOT LINKER_NAME) - if (GOLD_PATH) - message (FATAL_ERROR "Linking with gold is unsupported. Please use lld.") - if (COMPILER_GCC) - set (LINKER_NAME "gold") - else () - set (LINKER_NAME ${GOLD_PATH}) - endif () - endif () - endif () endif () # TODO: allow different linker on != OS_LINUX diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index eb424ee7cbe..98dbe5f8d57 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -71,7 +71,7 @@ SELECT 1 | `global` | Same as `shard`. Prefer `shard` || | `zookeeper` | Test requires Zookeeper or ClickHouse Keeper to run | Test uses `ReplicatedMergeTree` | | `replica` | Same as `zookeeper`. Prefer `zookeeper` || -| `no-fasttest`| Test is not run under [Fast test](continuous-integration#fast-test) | Test uses `MySQL` table engine which is disabled in Fast test| +| `no-fasttest`| Test is not run under [Fast test](continuous-integration.md#fast-test) | Test uses `MySQL` table engine which is disabled in Fast test| | `no-[asan, tsan, msan, ubsan]` | Disables tests in build with [sanitizers](#sanitizers) | Test is run under QEMU which doesn't work with sanitizers | | `no-replicated-database` ||| | `no-ordinary-database` ||| diff --git a/docs/en/engines/_category_.yml b/docs/en/engines/_category_.yml index 8c6ba12c6f1..2aa5df72955 100644 --- a/docs/en/engines/_category_.yml +++ b/docs/en/engines/_category_.yml @@ -4,5 +4,4 @@ collapsible: true collapsed: true link: type: generated-index - title: Database & Table Engines slug: /en/engines diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index e00347c3163..4b285ee80a5 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -180,4 +180,4 @@ Default value: `300`. 
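To make the engine documented in this section concrete, here is a minimal sketch of a MySQL-backed table; the host, database, table and credentials are placeholder values and not part of this documentation change:

```sql
-- Placeholder connection details; point these at a real MySQL server.
CREATE TABLE mysql_orders
(
    id UInt64,
    status String
)
ENGINE = MySQL('mysql-host:3306', 'shop', 'orders', 'app_user', 'secret');

-- Reads and inserts are forwarded to the remote MySQL table.
SELECT count() FROM mysql_orders;
```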
## See Also {#see-also} - [The mysql table function](../../../sql-reference/table-functions/mysql.md) -- [Using MySQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql) +- [Using MySQL as a dictionary source](../../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-mysql) diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md index aabc37442f9..8aac9dc3af0 100644 --- a/docs/en/engines/table-engines/integrations/odbc.md +++ b/docs/en/engines/table-engines/integrations/odbc.md @@ -126,5 +126,5 @@ SELECT * FROM odbc_t ## See Also {#see-also} -- [ODBC dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC dictionaries](../../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-odbc) - [ODBC table function](../../../sql-reference/table-functions/odbc.md) diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index b73d28c8508..18e884f3bcc 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -174,7 +174,7 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) **See Also** - [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md) -- [Using PostgreSQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) +- [Using PostgreSQL as a dictionary source](../../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-postgresql) ## Related content - Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres) diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 723425429a5..dd843945e10 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -150,6 +150,7 @@ The following settings can be specified in configuration file for given endpoint - `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) metadata for given endpoint. Optional, default value is `false`. - `region` — Specifies S3 region name. Optional. - `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`. +- `expiration_window_seconds` — Grace period for checking if expiration-based credentials have expired. Optional, default value is `120`. - `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times. - `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional. - `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional. 
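As a rough illustration of how the per-endpoint settings above come into play, here is a sketch of an S3-backed table whose URL would be matched by such an endpoint entry; the bucket and path are hypothetical:

```sql
-- Hypothetical bucket/path. If the URL prefix matches a configured <endpoint>,
-- the endpoint settings above (e.g. expiration_window_seconds) apply to this table.
CREATE TABLE s3_engine_table
(
    name String,
    value UInt32
)
ENGINE = S3('https://my-test-bucket.s3.amazonaws.com/data/*.tsv', 'TSV');

SELECT * FROM s3_engine_table LIMIT 5;
```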
@@ -166,6 +167,7 @@ The following settings can be specified in configuration file for given endpoint + diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index fc8060077b0..acfc5382ab1 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -901,7 +901,7 @@ User can assign new big parts to different disks of a [JBOD](https://en.wikipedi ## Using S3 for Data Storage {#table_engine-mergetree-s3} :::note -Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/data-ingestion/s3/gcs-merge-tree.md). +Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/gcs). ::: `MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`. @@ -960,6 +960,7 @@ Optional parameters: - `support_batch_delete` — This controls the check to see if batch deletes are supported. Set this to `false` when using Google Cloud Storage (GCS) as GCS does not support batch deletes and preventing the checks will prevent error messages in the logs. - `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`. - `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`. +- `expiration_window_seconds` — Grace period for checking if expiration-based credentials have expired. Optional, default value is `120`. - `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL. - `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`. - `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`. diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index 37ab8ac9fd3..c50433f2aeb 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -39,7 +39,7 @@ Compressed data for `INSERT` and `ALTER` queries is replicated (for more informa - The `DROP TABLE` query deletes the replica located on the server where the query is run. - The `RENAME` query renames the table on one of the replicas. In other words, replicated tables can have different names on different replicas. -ClickHouse uses [ClickHouse Keeper](/docs/en/guides/sre/keeper/clickhouse-keeper.md) for storing replicas meta information. It is possible to use ZooKeeper version 3.4.5 or newer, but ClickHouse Keeper is recommended. +ClickHouse uses [ClickHouse Keeper](/docs/en/guides/sre/keeper/index.md) for storing replicas meta information. It is possible to use ZooKeeper version 3.4.5 or newer, but ClickHouse Keeper is recommended. To use replication, set parameters in the [zookeeper](/docs/en/operations/server-configuration-parameters/settings.md/#server-settings_zookeeper) server configuration section. @@ -144,7 +144,7 @@ ENGINE = ReplicatedReplacingMergeTree The `Replicated` prefix is added to the table engine name. For example:`ReplicatedMergeTree`. 
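For orientation, a minimal sketch of such a table; the Keeper path and the `{shard}`/`{replica}` macros are illustrative and assume the macros are defined in the server configuration as described above:

```sql
-- Illustrative path and macros; {shard} and {replica} must be defined in the
-- <macros> section of the server configuration, and the path must be unique per table.
CREATE TABLE table_name
(
    EventDate DateTime,
    CounterID UInt32,
    UserID UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate);
```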
:::tip -Adding `Replicated` is optional in ClickHouse Cloud, as all of the tables are replicated. +Adding `Replicated` is optional in ClickHouse Cloud, as all of the tables are replicated. ::: ### Replicated\*MergeTree parameters diff --git a/docs/en/engines/table-engines/special/dictionary.md b/docs/en/engines/table-engines/special/dictionary.md index e487ca2002f..05d07d94e56 100644 --- a/docs/en/engines/table-engines/special/dictionary.md +++ b/docs/en/engines/table-engines/special/dictionary.md @@ -6,7 +6,7 @@ sidebar_label: Dictionary # Dictionary Table Engine -The `Dictionary` engine displays the [dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. +The `Dictionary` engine displays the [dictionary](../../../sql-reference/dictionaries/index.md) data as a ClickHouse table. ## Example {#example} diff --git a/docs/en/engines/table-engines/special/distributed.md b/docs/en/engines/table-engines/special/distributed.md index f4f541843d3..52d82483a46 100644 --- a/docs/en/engines/table-engines/special/distributed.md +++ b/docs/en/engines/table-engines/special/distributed.md @@ -184,7 +184,7 @@ The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `com - `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server does not start. If you change the DNS record, restart the server. - `port` – The TCP port for messenger activity (`tcp_port` in the config, usually set to 9000). Not to be confused with `http_port`. -- `user` – Name of the user for connecting to a remote server. Default value is the `default` user. This user must have access to connect to the specified server. Access is configured in the `users.xml` file. For more information, see the section [Access rights](../../../operations/access-rights.md). +- `user` – Name of the user for connecting to a remote server. Default value is the `default` user. This user must have access to connect to the specified server. Access is configured in the `users.xml` file. For more information, see the section [Access rights](../../../guides/sre/user-management/index.md). - `password` – The password for connecting to a remote server (not masked). Default value: empty string. - `secure` - Whether to use a secure SSL/TLS connection. Usually also requires specifying the port (the default secure port is `9440`). The server should listen on `9440` and be configured with correct certificates. - `compression` - Use data compression. Default value: `true`. 
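The cluster parameters above live in the `remote_servers` section of the server configuration; the table definition only references the cluster by name. A sketch with hypothetical cluster and table names:

```sql
-- 'logs_cluster' is a hypothetical cluster defined under <remote_servers>;
-- host, port, user, password, secure and compression come from that definition.
CREATE TABLE hits_all AS default.hits_local
ENGINE = Distributed(logs_cluster, default, hits_local, rand());

-- Queries fan out to every shard of the cluster and are merged on the initiator.
SELECT count() FROM hits_all;
```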
diff --git a/docs/en/getting-started/example-datasets/cell-towers.md b/docs/en/getting-started/example-datasets/cell-towers.md index b19d09c777a..d88ce5159d4 100644 --- a/docs/en/getting-started/example-datasets/cell-towers.md +++ b/docs/en/getting-started/example-datasets/cell-towers.md @@ -1,9 +1,10 @@ --- slug: /en/getting-started/example-datasets/cell-towers -sidebar_label: Cell Towers +sidebar_label: Geo Data sidebar_position: 3 -title: "Cell Towers" +title: "Geo Data using the Cell Tower Dataset" --- + import ConnectionDetails from '@site/docs/en/_snippets/_gather_your_details_http.mdx'; import Tabs from '@theme/Tabs'; @@ -163,7 +164,7 @@ SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10 Based on the above query and the [MCC list](https://en.wikipedia.org/wiki/Mobile_country_code), the countries with the most cell towers are: the USA, Germany, and Russia. -You may want to create a [Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values. +You may want to create a [Dictionary](../../sql-reference/dictionaries/index.md) in ClickHouse to decode these values. ## Use case: Incorporate geo data {#use-case} diff --git a/docs/en/getting-started/example-datasets/criteo.md b/docs/en/getting-started/example-datasets/criteo.md index 3bd0230d4cc..a2e0fda0cb0 100644 --- a/docs/en/getting-started/example-datasets/criteo.md +++ b/docs/en/getting-started/example-datasets/criteo.md @@ -3,14 +3,56 @@ slug: /en/getting-started/example-datasets/criteo sidebar_label: Terabyte Click Logs from Criteo --- -# Terabyte of Click Logs from Criteo +# Terabyte of Click Logs from Criteo Download the data from http://labs.criteo.com/downloads/download-terabyte-click-logs/ Create a table to import the log to: ``` sql -CREATE TABLE criteo_log (date Date, clicked UInt8, int1 Int32, int2 Int32, int3 Int32, int4 Int32, int5 Int32, int6 Int32, int7 Int32, int8 Int32, int9 Int32, int10 Int32, int11 Int32, int12 Int32, int13 Int32, cat1 String, cat2 String, cat3 String, cat4 String, cat5 String, cat6 String, cat7 String, cat8 String, cat9 String, cat10 String, cat11 String, cat12 String, cat13 String, cat14 String, cat15 String, cat16 String, cat17 String, cat18 String, cat19 String, cat20 String, cat21 String, cat22 String, cat23 String, cat24 String, cat25 String, cat26 String) ENGINE = Log +CREATE TABLE criteo_log ( + date Date, + clicked UInt8, + int1 Int32, + int2 Int32, + int3 Int32, + int4 Int32, + int5 Int32, + int6 Int32, + int7 Int32, + int8 Int32, + int9 Int32, + int10 Int32, + int11 Int32, + int12 Int32, + int13 Int32, + cat1 String, + cat2 String, + cat3 String, + cat4 String, + cat5 String, + cat6 String, + cat7 String, + cat8 String, + cat9 String, + cat10 String, + cat11 String, + cat12 String, + cat13 String, + cat14 String, + cat15 String, + cat16 String, + cat17 String, + cat18 String, + cat19 String, + cat20 String, + cat21 String, + cat22 String, + cat23 String, + cat24 String, + cat25 String, + cat26 String +) ENGINE = Log; ``` Download the data: @@ -73,7 +115,52 @@ ORDER BY (date, icat1) Transform data from the raw log and put it in the second table: ``` sql -INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, reinterpretAsUInt32(unhex(cat1)) AS icat1, reinterpretAsUInt32(unhex(cat2)) AS icat2, reinterpretAsUInt32(unhex(cat3)) AS icat3, reinterpretAsUInt32(unhex(cat4)) AS icat4, reinterpretAsUInt32(unhex(cat5)) AS icat5, 
reinterpretAsUInt32(unhex(cat6)) AS icat6, reinterpretAsUInt32(unhex(cat7)) AS icat7, reinterpretAsUInt32(unhex(cat8)) AS icat8, reinterpretAsUInt32(unhex(cat9)) AS icat9, reinterpretAsUInt32(unhex(cat10)) AS icat10, reinterpretAsUInt32(unhex(cat11)) AS icat11, reinterpretAsUInt32(unhex(cat12)) AS icat12, reinterpretAsUInt32(unhex(cat13)) AS icat13, reinterpretAsUInt32(unhex(cat14)) AS icat14, reinterpretAsUInt32(unhex(cat15)) AS icat15, reinterpretAsUInt32(unhex(cat16)) AS icat16, reinterpretAsUInt32(unhex(cat17)) AS icat17, reinterpretAsUInt32(unhex(cat18)) AS icat18, reinterpretAsUInt32(unhex(cat19)) AS icat19, reinterpretAsUInt32(unhex(cat20)) AS icat20, reinterpretAsUInt32(unhex(cat21)) AS icat21, reinterpretAsUInt32(unhex(cat22)) AS icat22, reinterpretAsUInt32(unhex(cat23)) AS icat23, reinterpretAsUInt32(unhex(cat24)) AS icat24, reinterpretAsUInt32(unhex(cat25)) AS icat25, reinterpretAsUInt32(unhex(cat26)) AS icat26 FROM criteo_log; +INSERT INTO + criteo +SELECT + date, + clicked, + int1, + int2, + int3, + int4, + int5, + int6, + int7, + int8, + int9, + int10, + int11, + int12, + int13, + reinterpretAsUInt32(unhex(cat1)) AS icat1, + reinterpretAsUInt32(unhex(cat2)) AS icat2, + reinterpretAsUInt32(unhex(cat3)) AS icat3, + reinterpretAsUInt32(unhex(cat4)) AS icat4, + reinterpretAsUInt32(unhex(cat5)) AS icat5, + reinterpretAsUInt32(unhex(cat6)) AS icat6, + reinterpretAsUInt32(unhex(cat7)) AS icat7, + reinterpretAsUInt32(unhex(cat8)) AS icat8, + reinterpretAsUInt32(unhex(cat9)) AS icat9, + reinterpretAsUInt32(unhex(cat10)) AS icat10, + reinterpretAsUInt32(unhex(cat11)) AS icat11, + reinterpretAsUInt32(unhex(cat12)) AS icat12, + reinterpretAsUInt32(unhex(cat13)) AS icat13, + reinterpretAsUInt32(unhex(cat14)) AS icat14, + reinterpretAsUInt32(unhex(cat15)) AS icat15, + reinterpretAsUInt32(unhex(cat16)) AS icat16, + reinterpretAsUInt32(unhex(cat17)) AS icat17, + reinterpretAsUInt32(unhex(cat18)) AS icat18, + reinterpretAsUInt32(unhex(cat19)) AS icat19, + reinterpretAsUInt32(unhex(cat20)) AS icat20, + reinterpretAsUInt32(unhex(cat21)) AS icat21, + reinterpretAsUInt32(unhex(cat22)) AS icat22, + reinterpretAsUInt32(unhex(cat23)) AS icat23, + reinterpretAsUInt32(unhex(cat24)) AS icat24, + reinterpretAsUInt32(unhex(cat25)) AS icat25, + reinterpretAsUInt32(unhex(cat26)) AS icat26 +FROM + criteo_log; DROP TABLE criteo_log; ``` diff --git a/docs/en/getting-started/example-datasets/github.md b/docs/en/getting-started/example-datasets/github.md index 239637a34e9..e18c7dec1a6 100644 --- a/docs/en/getting-started/example-datasets/github.md +++ b/docs/en/getting-started/example-datasets/github.md @@ -1,12 +1,13 @@ --- slug: /en/getting-started/example-datasets/github -sidebar_label: GitHub Repo Analysis +sidebar_label: Github Repo +sidebar_position: 1 description: Analyze the ClickHouse GitHub repo or any repository of your choosing --- -# ClickHouse GitHub data +# Writing Queries in ClickHouse using GitHub Data -This dataset contains all of the commits and changes for the ClickHouse repository. It can be generated using the native `git-import` tool distributed with ClickHouse. +This dataset contains all of the commits and changes for the ClickHouse repository. It can be generated using the native `git-import` tool distributed with ClickHouse. 
The generated data provides a `tsv` file for each of the following tables: @@ -323,7 +324,7 @@ Note a more complex variant of this query exists where we find the [line-by-line ## Find the current active files -This is important for later analysis when we only want to consider the current files in the repository. We estimate this set as the files which haven't been renamed or deleted (and then re-added/re-named). +This is important for later analysis when we only want to consider the current files in the repository. We estimate this set as the files which haven't been renamed or deleted (and then re-added/re-named). **Note there appears to have been a broken commit history in relation to files under the `dbms`, `libs`, `tests/testflows/` directories during their renames. We also thus exclude these.** @@ -417,7 +418,7 @@ git ls-files | grep -v -E 'generated\.cpp|^(contrib|docs?|website|libs/(libcityh The difference here is caused by a few factors: -- A rename can occur alongside other modifications to the file. These are listed as separate events in file_changes but with the same time. The `argMax` function has no way of distinguishing these - it picks the first value. The natural ordering of the inserts (the only means of knowing the correct order) is not maintained across the union so modified events can be selected. For example, below the `src/Functions/geometryFromColumn.h` file has several modifications before being renamed to `src/Functions/geometryConverters.h`. Our current solution may pick a Modify event as the latest change causing `src/Functions/geometryFromColumn.h` to be retained. +- A rename can occur alongside other modifications to the file. These are listed as separate events in file_changes but with the same time. The `argMax` function has no way of distinguishing these - it picks the first value. The natural ordering of the inserts (the only means of knowing the correct order) is not maintained across the union so modified events can be selected. For example, below the `src/Functions/geometryFromColumn.h` file has several modifications before being renamed to `src/Functions/geometryConverters.h`. Our current solution may pick a Modify event as the latest change causing `src/Functions/geometryFromColumn.h` to be retained. [play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICAgIGNoYW5nZV90eXBlLAogICAgICBwYXRoLAogICAgICBvbGRfcGF0aCwKICAgICAgdGltZSwKICAgICAgY29tbWl0X2hhc2gKICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogIFdIRVJFIChwYXRoID0gJ3NyYy9GdW5jdGlvbnMvZ2VvbWV0cnlGcm9tQ29sdW1uLmgnKSBPUiAob2xkX3BhdGggPSAnc3JjL0Z1bmN0aW9ucy9nZW9tZXRyeUZyb21Db2x1bW4uaCcpCg==) @@ -1386,7 +1387,7 @@ LIMIT 1 BY day_of_week 7 rows in set. Elapsed: 0.004 sec. Processed 21.82 thousand rows, 140.02 KB (4.88 million rows/s., 31.29 MB/s.) ``` -This is still a little simple and doesn't reflect people's work. +This is still a little simple and doesn't reflect people's work. A better metric might be who is the top contributor each day as a fraction of the total work performed in the last year. Note that we treat the deletion and adding code equally. @@ -1952,7 +1953,7 @@ SELECT Most contributors write more code than tests, as you'd expect. -What about who adds the most comments when contributing code? +What about who adds the most comments when contributing code? 
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhdXRob3IsCiAgICBhdmcocmF0aW9fY29tbWVudHMpIEFTIGF2Z19yYXRpb19jb21tZW50cywKICAgIHN1bShjb2RlKSBBUyBjb2RlCkZST00KKAogICAgU0VMRUNUCiAgICAgICAgYXV0aG9yLAogICAgICAgIGNvbW1pdF9oYXNoLAogICAgICAgIGNvdW50SWYobGluZV90eXBlID0gJ0NvbW1lbnQnKSBBUyBjb21tZW50cywKICAgICAgICBjb3VudElmKGxpbmVfdHlwZSA9ICdDb2RlJykgQVMgY29kZSwKICAgICAgICBpZihjb21tZW50cyA+IDAsIGNvbW1lbnRzIC8gKGNvbW1lbnRzICsgY29kZSksIDApIEFTIHJhdGlvX2NvbW1lbnRzCiAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwogICAgR1JPVVAgQlkKICAgICAgICBhdXRob3IsCiAgICAgICAgY29tbWl0X2hhc2gKKQpHUk9VUCBCWSBhdXRob3IKT1JERVIgQlkgY29kZSBERVNDCkxJTUlUIDEwCg==) @@ -2393,7 +2394,7 @@ WHERE (path = 'src/Storages/StorageReplicatedMergeTree.cpp') AND (change_type = This makes viewing the full history of a file challenging since we don't have a single value connecting all line or file changes. -To address this, we can use User Defined Functions (UDFs). These cannot, currently, be recursive, so to identify the history of a file we must define a series of UDFs which call each other explicitly. +To address this, we can use User Defined Functions (UDFs). These cannot, currently, be recursive, so to identify the history of a file we must define a series of UDFs which call each other explicitly. This means we can only track renames to a maximum depth - the below example is 5 deep. It is unlikely a file will be renamed more times than this, so for now, this is sufficient. diff --git a/docs/en/getting-started/example-datasets/metrica.md b/docs/en/getting-started/example-datasets/metrica.md index e966f6c20d6..e21237f39bb 100644 --- a/docs/en/getting-started/example-datasets/metrica.md +++ b/docs/en/getting-started/example-datasets/metrica.md @@ -84,7 +84,7 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" 1680609 ``` -## An example JOIN +## An example JOIN The hits and visits dataset is used in the ClickHouse test routines, this is one of the queries from the test suite. The rest @@ -131,10 +131,10 @@ FORMAT PrettyCompact" ## Next Steps -[A Practical Introduction to Sparse Primary Indexes in ClickHouse](../../guides/improving-query-performance/sparse-primary-indexes/sparse-primary-indexes-intro.md) uses the hits dataset to discuss the differences in ClickHouse indexing compared to traditional relational databases, how ClickHouse builds and uses a sparse primary index, and indexing best practices. +[A Practical Introduction to Sparse Primary Indexes in ClickHouse](/docs/en/guides/best-practices/sparse-primary-indexes.md) uses the hits dataset to discuss the differences in ClickHouse indexing compared to traditional relational databases, how ClickHouse builds and uses a sparse primary index, and indexing best practices. Additional examples of queries to these tables can be found among the ClickHouse [stateful tests](https://github.com/ClickHouse/ClickHouse/blob/d7129855757f38ceec3e4ecc6dafacdabe9b178f/tests/queries/1_stateful/00172_parallel_join.sql). :::note -The test suite uses a database name `test`, and the tables are named `hits` and `visits`. You can rename your database and tables, or edit the SQL from the test file. +The test suite uses a database name `test`, and the tables are named `hits` and `visits`. You can rename your database and tables, or edit the SQL from the test file. 
::: diff --git a/docs/en/getting-started/example-datasets/nypd_complaint_data.md b/docs/en/getting-started/example-datasets/nypd_complaint_data.md index 8b02ac23cf9..154cfa78e53 100644 --- a/docs/en/getting-started/example-datasets/nypd_complaint_data.md +++ b/docs/en/getting-started/example-datasets/nypd_complaint_data.md @@ -16,7 +16,7 @@ While working through this guide you will: The dataset used in this guide comes from the NYC Open Data team, and contains data about "all valid felony, misdemeanor, and violation crimes reported to the New York City Police Department (NYPD)". At the time of writing, the data file is 166MB, but it is updated regularly. -**Source**: [data.cityofnewyork.us](https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Current-Year-To-Date-/5uac-w243) +**Source**: [data.cityofnewyork.us](https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Current-Year-To-Date-/5uac-w243) **Terms of use**: https://www1.nyc.gov/home/terms-of-use.page ## Prerequisites @@ -35,7 +35,7 @@ The examples in this guide assume that you have saved the TSV file to `${HOME}/N ## Familiarize yourself with the TSV file -Before starting to work with the ClickHouse database familiarize yourself with the data. +Before starting to work with the ClickHouse database familiarize yourself with the data. ### Look at the fields in the source TSV file @@ -47,15 +47,15 @@ clickhouse-local --query \ Sample response ```response -CMPLNT_NUM Nullable(Float64) -ADDR_PCT_CD Nullable(Float64) -BORO_NM Nullable(String) -CMPLNT_FR_DT Nullable(String) -CMPLNT_FR_TM Nullable(String) +CMPLNT_NUM Nullable(Float64) +ADDR_PCT_CD Nullable(Float64) +BORO_NM Nullable(String) +CMPLNT_FR_DT Nullable(String) +CMPLNT_FR_TM Nullable(String) ``` :::tip -Most of the time the above command will let you know which fields in the input data are numeric, and which are strings, and which are tuples. This is not always the case. Because ClickHouse is routineley used with datasets containing billions of records there is a default number (100) of rows examined to [infer the schema](../../guides/developer/working-with-json/json-semi-structured.md/#relying-on-schema-inference) in order to avoid parsing billions of rows to infer the schema. The response below may not match what you see, as the dataset is updated several times each year. Looking at the Data Dictionary you can see that CMPLNT_NUM is specified as text, and not numeric. By overriding the default of 100 rows for inference with the setting `SETTINGS input_format_max_rows_to_read_for_schema_inference=2000` +Most of the time the above command will let you know which fields in the input data are numeric, and which are strings, and which are tuples. This is not always the case. Because ClickHouse is routineley used with datasets containing billions of records there is a default number (100) of rows examined to [infer the schema](/docs/en/integrations/data-ingestion/data-formats/json.md#relying-on-schema-inference) in order to avoid parsing billions of rows to infer the schema. The response below may not match what you see, as the dataset is updated several times each year. Looking at the Data Dictionary you can see that CMPLNT_NUM is specified as text, and not numeric. By overriding the default of 100 rows for inference with the setting `SETTINGS input_format_max_rows_to_read_for_schema_inference=2000` you can get a better idea of the content. 
Note: as of version 22.5 the default is now 25,000 rows for inferring the schema, so only change the setting if you are on an older version or if you need more than 25,000 rows to be sampled. @@ -65,46 +65,46 @@ Run this command at your command prompt. You will be using `clickhouse-local` t ```sh clickhouse-local --input_format_max_rows_to_read_for_schema_inference=2000 \ --query \ -"describe file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames')" +"describe file('${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv', 'TSVWithNames')" ``` Result: ```response -CMPLNT_NUM Nullable(String) -ADDR_PCT_CD Nullable(Float64) -BORO_NM Nullable(String) -CMPLNT_FR_DT Nullable(String) -CMPLNT_FR_TM Nullable(String) -CMPLNT_TO_DT Nullable(String) -CMPLNT_TO_TM Nullable(String) -CRM_ATPT_CPTD_CD Nullable(String) -HADEVELOPT Nullable(String) -HOUSING_PSA Nullable(Float64) -JURISDICTION_CODE Nullable(Float64) -JURIS_DESC Nullable(String) -KY_CD Nullable(Float64) -LAW_CAT_CD Nullable(String) -LOC_OF_OCCUR_DESC Nullable(String) -OFNS_DESC Nullable(String) -PARKS_NM Nullable(String) -PATROL_BORO Nullable(String) -PD_CD Nullable(Float64) -PD_DESC Nullable(String) -PREM_TYP_DESC Nullable(String) -RPT_DT Nullable(String) -STATION_NAME Nullable(String) -SUSP_AGE_GROUP Nullable(String) -SUSP_RACE Nullable(String) -SUSP_SEX Nullable(String) -TRANSIT_DISTRICT Nullable(Float64) -VIC_AGE_GROUP Nullable(String) -VIC_RACE Nullable(String) -VIC_SEX Nullable(String) -X_COORD_CD Nullable(Float64) -Y_COORD_CD Nullable(Float64) -Latitude Nullable(Float64) -Longitude Nullable(Float64) -Lat_Lon Tuple(Nullable(Float64), Nullable(Float64)) +CMPLNT_NUM Nullable(String) +ADDR_PCT_CD Nullable(Float64) +BORO_NM Nullable(String) +CMPLNT_FR_DT Nullable(String) +CMPLNT_FR_TM Nullable(String) +CMPLNT_TO_DT Nullable(String) +CMPLNT_TO_TM Nullable(String) +CRM_ATPT_CPTD_CD Nullable(String) +HADEVELOPT Nullable(String) +HOUSING_PSA Nullable(Float64) +JURISDICTION_CODE Nullable(Float64) +JURIS_DESC Nullable(String) +KY_CD Nullable(Float64) +LAW_CAT_CD Nullable(String) +LOC_OF_OCCUR_DESC Nullable(String) +OFNS_DESC Nullable(String) +PARKS_NM Nullable(String) +PATROL_BORO Nullable(String) +PD_CD Nullable(Float64) +PD_DESC Nullable(String) +PREM_TYP_DESC Nullable(String) +RPT_DT Nullable(String) +STATION_NAME Nullable(String) +SUSP_AGE_GROUP Nullable(String) +SUSP_RACE Nullable(String) +SUSP_SEX Nullable(String) +TRANSIT_DISTRICT Nullable(Float64) +VIC_AGE_GROUP Nullable(String) +VIC_RACE Nullable(String) +VIC_SEX Nullable(String) +X_COORD_CD Nullable(Float64) +Y_COORD_CD Nullable(Float64) +Latitude Nullable(Float64) +Longitude Nullable(Float64) +Lat_Lon Tuple(Nullable(Float64), Nullable(Float64)) New Georeferenced Column Nullable(String) ``` @@ -362,7 +362,7 @@ The dates shown as `1925` above are from errors in the data. There are several The decisions made above on the data types used for the columns are reflected in the table schema below. We also need to decide on the `ORDER BY` and `PRIMARY KEY` used for the table. At least one -of `ORDER BY` or `PRIMARY KEY` must be specified. Here are some guidelines on deciding on the +of `ORDER BY` or `PRIMARY KEY` must be specified. Here are some guidelines on deciding on the columns to includes in `ORDER BY`, and more information is in the *Next Steps* section at the end of this document. 
@@ -420,7 +420,7 @@ ORDER BY ( borough, offense_description, date_reported ) Putting together the changes to data types and the `ORDER BY` tuple gives this table structure: ```sql -CREATE TABLE NYPD_Complaint ( +CREATE TABLE NYPD_Complaint ( complaint_number String, precinct UInt8, borough LowCardinality(String), @@ -429,7 +429,7 @@ CREATE TABLE NYPD_Complaint ( was_crime_completed String, housing_authority String, housing_level_code UInt32, - jurisdiction_code UInt8, + jurisdiction_code UInt8, jurisdiction LowCardinality(String), offense_code UInt8, offense_level LowCardinality(String), @@ -478,7 +478,7 @@ Query id: 6a5b10bf-9333-4090-b36e-c7f08b1d9e01 Row 1: ────── -partition_key: +partition_key: sorting_key: borough, offense_description, date_reported primary_key: borough, offense_description, date_reported table: NYPD_Complaint @@ -495,7 +495,7 @@ We will use `clickhouse-local` tool for data preprocessing and `clickhouse-clien :::tip `table='input'` appears in the arguments to clickhouse-local below. clickhouse-local takes the provided input (`cat ${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv`) and inserts the input into a table. By default the table is named `table`. In this guide the name of the table is set to `input` to make the data flow clearer. The final argument to clickhouse-local is a query that selects from the table (`FROM input`) which is then piped to `clickhouse-client` to populate the table `NYPD_Complaint`. ::: - + ```sql cat ${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv \ | clickhouse-local --table='input' --input-format='TSVWithNames' \ @@ -512,12 +512,12 @@ cat ${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv \ CRM_ATPT_CPTD_CD AS was_crime_completed, HADEVELOPT AS housing_authority_development, HOUSING_PSA AS housing_level_code, - JURISDICTION_CODE AS jurisdiction_code, + JURISDICTION_CODE AS jurisdiction_code, JURIS_DESC AS jurisdiction, KY_CD AS offense_code, LAW_CAT_CD AS offense_level, LOC_OF_OCCUR_DESC AS location_descriptor, - OFNS_DESC AS offense_description, + OFNS_DESC AS offense_description, PARKS_NM AS park_name, PATROL_BORO AS patrol_borough, PD_CD, @@ -529,7 +529,7 @@ cat ${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv \ SUSP_RACE AS suspect_race, SUSP_SEX AS suspect_sex, TRANSIT_DISTRICT AS transit_district, - VIC_AGE_GROUP AS victim_age_group, + VIC_AGE_GROUP AS victim_age_group, VIC_RACE AS victim_race, VIC_SEX AS victim_sex, X_COORD_CD AS NY_x_coordinate, @@ -538,7 +538,7 @@ cat ${HOME}/NYPD_Complaint_Data_Current__Year_To_Date_.tsv \ Longitude FROM input" \ | clickhouse-client --query='INSERT INTO NYPD_Complaint FORMAT TSV' -``` +``` ## Validate the Data {#validate-data} @@ -560,7 +560,7 @@ Result: │ 208993 │ └─────────┘ -1 row in set. Elapsed: 0.001 sec. +1 row in set. Elapsed: 0.001 sec. ``` The size of the dataset in ClickHouse is just 12% of the original TSV file, compare the size of the original TSV file with the size of the table: @@ -651,4 +651,4 @@ Query id: 8cdcdfd4-908f-4be0-99e3-265722a2ab8d ## Next Steps -[A Practical Introduction to Sparse Primary Indexes in ClickHouse](../../guides/improving-query-performance/sparse-primary-indexes/sparse-primary-indexes-intro.md) discusses the differences in ClickHouse indexing compared to traditional relational databases, how ClickHouse builds and uses a sparse primary index, and indexing best practices. 
+[A Practical Introduction to Sparse Primary Indexes in ClickHouse](/docs/en/guides/best-practices/sparse-primary-indexes.md) discusses the differences in ClickHouse indexing compared to traditional relational databases, how ClickHouse builds and uses a sparse primary index, and indexing best practices. diff --git a/docs/en/getting-started/example-datasets/recipes.md b/docs/en/getting-started/example-datasets/recipes.md index 4cc94c3ce5b..729d3d17015 100644 --- a/docs/en/getting-started/example-datasets/recipes.md +++ b/docs/en/getting-started/example-datasets/recipes.md @@ -80,7 +80,7 @@ Result: ### Top Components by the Number of Recipes: -In this example we learn how to use [arrayJoin](../../sql-reference/functions/array-join/) function to expand an array into a set of rows. +In this example we learn how to use [arrayJoin](../../sql-reference/functions/array-join.md) function to expand an array into a set of rows. Query: @@ -185,7 +185,7 @@ Result: 10 rows in set. Elapsed: 0.215 sec. Processed 2.23 million rows, 1.48 GB (10.35 million rows/s., 6.86 GB/s.) ``` -In this example, we involve [has](../../sql-reference/functions/array-functions/#hasarr-elem) function to filter by array elements and sort by the number of directions. +In this example, we involve [has](../../sql-reference/functions/array-functions.md#hasarr-elem) function to filter by array elements and sort by the number of directions. There is a wedding cake that requires the whole 126 steps to produce! Show that directions: diff --git a/docs/en/getting-started/example-datasets/uk-price-paid.md b/docs/en/getting-started/example-datasets/uk-price-paid.md index 2a89bfda2e7..8ed79c3986f 100644 --- a/docs/en/getting-started/example-datasets/uk-price-paid.md +++ b/docs/en/getting-started/example-datasets/uk-price-paid.md @@ -1,17 +1,17 @@ --- slug: /en/getting-started/example-datasets/uk-price-paid -sidebar_label: UK Property Price Paid +sidebar_label: UK Property Prices sidebar_position: 1 -title: "UK Property Price Paid" --- -The dataset contains data about prices paid for real-estate property in England and Wales. The data is available since year 1995. -The size of the dataset in uncompressed form is about 4 GiB and it will take about 278 MiB in ClickHouse. +# The UK property prices dataset -Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads -Description of the fields: https://www.gov.uk/guidance/about-the-price-paid-data +Projections are a great way to improve the performance of queries that you run frequently. We will demonstrate the power of projections +using the UK property dataset, which contains data about prices paid for real-estate property in England and Wales. The data is available since 1995, and the size of the dataset in uncompressed form is about 4 GiB (which will only take about 278 MiB in ClickHouse). -Contains HM Land Registry data © Crown copyright and database right 2021. This data is licensed under the Open Government Licence v3.0. +- Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads +- Description of the fields: https://www.gov.uk/guidance/about-the-price-paid-data +- Contains HM Land Registry data © Crown copyright and database right 2021. This data is licensed under the Open Government Licence v3.0. 
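As a preview of the projection workflow this guide builds toward, the sketch below shows the general shape of adding and materializing a projection; the `uk_price_paid` table is created in the next section, and the aggregation chosen here is only illustrative:

```sql
-- Illustrative projection: pre-aggregates the average price per town so that
-- queries grouping by town scan far less data.
ALTER TABLE uk_price_paid
    ADD PROJECTION prj_town_avg_price
    (
        SELECT town, avg(price)
        GROUP BY town
    );

-- Build the projection for rows that were inserted before it was added.
ALTER TABLE uk_price_paid MATERIALIZE PROJECTION prj_town_avg_price;
```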
## Create the Table {#create-table} diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 0867f3a0795..ef4b79dcf63 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -14,75 +14,35 @@ import CodeBlock from '@theme/CodeBlock'; You have three options for getting up and running with ClickHouse: - **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse -- **[Self-managed ClickHouse](#self-managed-install):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture -- **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** Read the guide with the official image in Docker Hub +- **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse +- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture +- **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub ## ClickHouse Cloud The quickest and easiest way to get up and running with ClickHouse is to create a new service in [ClickHouse Cloud](https://clickhouse.cloud/). -## Self-Managed Install +## Quick Install :::tip For production installs of a specific release version see the [installation options](#available-installation-options) down below. ::: - - +On Linux and macOS: -1. The simplest way to download ClickHouse locally is to run the following command. If your operating system is supported, an appropriate ClickHouse binary will be downloaded and made runnable: +1. If you are just getting started and want to see what ClickHouse can do, the simplest way to download ClickHouse locally is to run the following command. It downloads a single binary for your operating system that can be used to run the ClickHouse server, clickhouse-client, clickhouse-local, +ClickHouse Keeper, and other tools: ```bash curl https://clickhouse.com/ | sh ``` -1. Run the `install` command, which defines a collection of useful symlinks along with the files and folders used by ClickHouse - all of which you can see in the output of the install script: - - ```bash - sudo ./clickhouse install - ``` - -1. At the end of the install script, you are prompted for a password for the `default` user. Feel free to enter a password, or you can optionally leave it blank: - - ```response - Creating log directory /var/log/clickhouse-server. - Creating data directory /var/lib/clickhouse. - Creating pid directory /var/run/clickhouse-server. - chown -R clickhouse:clickhouse '/var/log/clickhouse-server' - chown -R clickhouse:clickhouse '/var/run/clickhouse-server' - chown clickhouse:clickhouse '/var/lib/clickhouse' - Enter password for default user: - ``` - You should see the following output: - - ```response - ClickHouse has been successfully installed. - - Start clickhouse-server with: - sudo clickhouse start - - Start clickhouse-client with: - clickhouse-client - ``` - 1. Run the following command to start the ClickHouse server: ```bash - sudo clickhouse start + ./clickhouse server ``` - - - -1. The simplest way to download ClickHouse locally is to run the following command. 
If your operating system is supported, an appropriate ClickHouse binary will be downloaded and made runnable: - ```bash - curl https://clickhouse.com/ | sh - ``` - -1. Run the ClickHouse server: - - ```bash - ./clickhouse server - ``` + The first time you run this script, the necessary files and folders are created in the current directory, then the server starts. 1. Open a new terminal and use the **clickhouse-client** to connect to your service: @@ -101,15 +61,14 @@ For production installs of a specific release version see the [installation opti You are ready to start sending DDL and SQL commands to ClickHouse! - - - :::tip -The [Quick Start](/docs/en/quick-start.mdx/#step-1-get-clickhouse) walks through the steps to download and run ClickHouse, connect to it, and insert data. +The [Quick Start](/docs/en/quick-start.mdx) walks through the steps for creating tables and inserting data. ::: -## Available Installation Options {#available-installation-options} +## Production Deployments {#available-installation-options} + +For production deployments of ClickHouse, choose from one of the following install options. ### From DEB Packages {#install-from-deb-packages} @@ -174,7 +133,7 @@ clickhouse-client # or "clickhouse-client --password" if you set up a password. -You can replace `stable` with `lts` to use different [release kinds](/docs/en/faq/operations/production.md) based on your needs. +You can replace `stable` with `lts` to use different [release kinds](/knowledgebase/production) based on your needs. You can also download and install packages manually from [here](https://packages.clickhouse.com/deb/pool/main/c/). @@ -272,7 +231,7 @@ clickhouse-client # or "clickhouse-client --password" if you set up a password. -You can replace `stable` with `lts` to use different [release kinds](/docs/en/faq/operations/production.md) based on your needs. +You can replace `stable` with `lts` to use different [release kinds](/knowledgebase/production) based on your needs. Then run these commands to install packages: diff --git a/docs/en/getting-started/playground.md b/docs/en/getting-started/playground.md index e995ea6ef8b..dbb8d46a2fc 100644 --- a/docs/en/getting-started/playground.md +++ b/docs/en/getting-started/playground.md @@ -1,5 +1,5 @@ --- -sidebar_label: Playground +sidebar_label: ClickHouse Playground sidebar_position: 2 keywords: [clickhouse, playground, getting, started, docs] description: The ClickHouse Playground allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster. @@ -11,7 +11,7 @@ slug: /en/getting-started/playground [ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster. Several example datasets are available in Playground. -You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces). +You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. 
More information about software products that support ClickHouse is available [here](../integrations/index.mdx). ## Credentials {#credentials} diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index db2e773a685..788b82dfa30 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1,7 +1,7 @@ --- slug: /en/interfaces/formats sidebar_position: 21 -sidebar_label: Input and Output Formats +sidebar_label: View all formats... title: Formats for Input and Output Data --- @@ -684,7 +684,7 @@ Example: ## JSONColumns {#jsoncolumns} :::tip -The output of the JSONColumns* formats provides the ClickHouse field name and then the content of each row of the table for that field; +The output of the JSONColumns* formats provides the ClickHouse field name and then the content of each row of the table for that field; visually, the data is rotated 90 degrees to the left. ::: diff --git a/docs/en/interfaces/postgresql.md b/docs/en/interfaces/postgresql.md index 9ff83559787..f7a619ca620 100644 --- a/docs/en/interfaces/postgresql.md +++ b/docs/en/interfaces/postgresql.md @@ -8,7 +8,7 @@ sidebar_label: PostgreSQL Interface ClickHouse supports the PostgreSQL wire protocol, which allows you to use Postgres clients to connect to ClickHouse. In a sense, ClickHouse can pretend to be a PostgreSQL instance - allowing you to connect a PostgreSQL client application to ClickHouse that is not already directly supported by ClickHouse (for example, Amazon Redshift). -To enable the PostgreSQL wire protocol, add the [postgresql_port](../operations/server-configuration-parameters/settings#server_configuration_parameters-postgresql_port) setting to your server's configuration file. For example, you could define the port in a new XML file in your `config.d` folder: +To enable the PostgreSQL wire protocol, add the [postgresql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-postgresql_port) setting to your server's configuration file. For example, you could define the port in a new XML file in your `config.d` folder: ```xml diff --git a/docs/en/operations/_category_.yml b/docs/en/operations/_category_.yml index 08849e7489d..352809f663b 100644 --- a/docs/en/operations/_category_.yml +++ b/docs/en/operations/_category_.yml @@ -2,7 +2,3 @@ position: 70 label: 'Operations' collapsible: true collapsed: true -link: - type: generated-index - title: Operations - slug: /en/operations diff --git a/docs/en/operations/access-rights.md b/docs/en/operations/access-rights.md deleted file mode 100644 index 4c4a06dbe1e..00000000000 --- a/docs/en/operations/access-rights.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -slug: /en/operations/access-rights -sidebar_position: 48 -sidebar_label: Access Control and Account Management -title: Access Control and Account Management ---- - -ClickHouse supports access control management based on [RBAC](https://en.wikipedia.org/wiki/Role-based_access_control) approach. - -ClickHouse access entities: -- [User account](#user-account-management) -- [Role](#role-management) -- [Row Policy](#row-policy-management) -- [Settings Profile](#settings-profiles-management) -- [Quota](#quotas-management) - -You can configure access entities using: - -- SQL-driven workflow. - - You need to [enable](#enabling-access-control) this functionality. - -- Server [configuration files](../operations/configuration-files.md) `users.xml` and `config.xml`. - -We recommend using SQL-driven workflow. 
Both of the configuration methods work simultaneously, so if you use the server configuration files for managing accounts and access rights, you can smoothly switch to SQL-driven workflow. - -:::warning -You can’t manage the same access entity by both configuration methods simultaneously. -::: - -To see all users, roles, profiles, etc. and all their grants use [SHOW ACCESS](../sql-reference/statements/show.md#show-access-statement) statement. - -## Usage {#access-control-usage} - -By default, the ClickHouse server provides the `default` user account which is not allowed using SQL-driven access control and account management but has all the rights and permissions. The `default` user account is used in any cases when the username is not defined, for example, at login from client or in distributed queries. In distributed query processing a default user account is used, if the configuration of the server or cluster does not specify the [user and password](../engines/table-engines/special/distributed.md) properties. - -If you just started using ClickHouse, consider the following scenario: - -1. [Enable](#enabling-access-control) SQL-driven access control and account management for the `default` user. -2. Log in to the `default` user account and create all the required users. Don’t forget to create an administrator account (`GRANT ALL ON *.* TO admin_user_account WITH GRANT OPTION`). -3. [Restrict permissions](../operations/settings/permissions-for-queries.md#permissions_for_queries) for the `default` user and disable SQL-driven access control and account management for it. - -### Properties of Current Solution {#access-control-properties} - -- You can grant permissions for databases and tables even if they do not exist. -- If a table was deleted, all the privileges that correspond to this table are not revoked. This means that even if you create a new table with the same name later, all the privileges remain valid. To revoke privileges corresponding to the deleted table, you need to execute, for example, the `REVOKE ALL PRIVILEGES ON db.table FROM ALL` query. -- There are no lifetime settings for privileges. - -## User Account {#user-account-management} - -A user account is an access entity that allows to authorize someone in ClickHouse. A user account contains: - -- Identification information. -- [Privileges](../sql-reference/statements/grant.md#grant-privileges) that define a scope of queries the user can execute. -- Hosts allowed to connect to the ClickHouse server. -- Assigned and default roles. -- Settings with their constraints applied by default at user login. -- Assigned settings profiles. - -Privileges can be granted to a user account by the [GRANT](../sql-reference/statements/grant.md) query or by assigning [roles](#role-management). To revoke privileges from a user, ClickHouse provides the [REVOKE](../sql-reference/statements/revoke.md) query. To list privileges for a user, use the [SHOW GRANTS](../sql-reference/statements/show.md#show-grants-statement) statement. 
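For example, a minimal grant/inspect/revoke round trip from the shell could look like the sketch below. The `jane` account, the `analytics` database, and the assumption that you already have an account with the `GRANT OPTION` are purely illustrative:

```bash
# Grant SELECT on one database to an existing (hypothetical) user account.
clickhouse-client --query "GRANT SELECT ON analytics.* TO jane"

# List everything currently granted to that account.
clickhouse-client --query "SHOW GRANTS FOR jane"

# Take the privilege back again.
clickhouse-client --query "REVOKE SELECT ON analytics.* FROM jane"
```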
- -Management queries: - -- [CREATE USER](../sql-reference/statements/create/user.md) -- [ALTER USER](../sql-reference/statements/alter/user.md#alter-user-statement) -- [DROP USER](../sql-reference/statements/drop.md) -- [SHOW CREATE USER](../sql-reference/statements/show.md#show-create-user-statement) -- [SHOW USERS](../sql-reference/statements/show.md#show-users-statement) - -### Settings Applying {#access-control-settings-applying} - -Settings can be configured differently: for a user account, in its granted roles and in settings profiles. At user login, if a setting is configured for different access entities, the value and constraints of this setting are applied as follows (from higher to lower priority): - -1. User account settings. -2. The settings of default roles of the user account. If a setting is configured in some roles, then order of the setting application is undefined. -3. The settings from settings profiles assigned to a user or to its default roles. If a setting is configured in some profiles, then order of setting application is undefined. -4. Settings applied to all the server by default or from the [default profile](../operations/server-configuration-parameters/settings.md#default-profile). - -## Role {#role-management} - -Role is a container for access entities that can be granted to a user account. - -Role contains: - -- [Privileges](../sql-reference/statements/grant.md#grant-privileges) -- Settings and constraints -- List of assigned roles - -Management queries: - -- [CREATE ROLE](../sql-reference/statements/create/role.md) -- [ALTER ROLE](../sql-reference/statements/alter/role.md#alter-role-statement) -- [DROP ROLE](../sql-reference/statements/drop.md) -- [SET ROLE](../sql-reference/statements/set-role.md) -- [SET DEFAULT ROLE](../sql-reference/statements/set-role.md#set-default-role-statement) -- [SHOW CREATE ROLE](../sql-reference/statements/show.md#show-create-role-statement) -- [SHOW ROLES](../sql-reference/statements/show.md#show-roles-statement) - -Privileges can be granted to a role by the [GRANT](../sql-reference/statements/grant.md) query. To revoke privileges from a role ClickHouse provides the [REVOKE](../sql-reference/statements/revoke.md) query. - -## Row Policy {#row-policy-management} - -Row policy is a filter that defines which of the rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy. - -:::warning -Row policies makes sense only for users with readonly access. If user can modify table or copy partitions between tables, it defeats the restrictions of row policies. -::: - -Management queries: - -- [CREATE ROW POLICY](../sql-reference/statements/create/row-policy.md) -- [ALTER ROW POLICY](../sql-reference/statements/alter/row-policy.md#alter-row-policy-statement) -- [DROP ROW POLICY](../sql-reference/statements/drop.md#drop-row-policy-statement) -- [SHOW CREATE ROW POLICY](../sql-reference/statements/show.md#show-create-row-policy-statement) -- [SHOW POLICIES](../sql-reference/statements/show.md#show-policies-statement) - -## Settings Profile {#settings-profiles-management} - -Settings profile is a collection of [settings](../operations/settings/index.md). Settings profile contains settings and constraints, as well as a list of roles and/or users to which this profile is applied. 
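As a rough sketch of the SQL-driven workflow (the profile name and the `analyst_role` role below are made up, and the role is assumed to exist already), a profile with a constrained setting can be created and then inspected like this:

```bash
# Create a profile that caps per-query memory usage, constrain how far the
# setting may be changed, and attach it to a (hypothetical) role.
clickhouse-client --query "
    CREATE SETTINGS PROFILE restricted_memory
    SETTINGS max_memory_usage = 10000000000 MIN 1000000 MAX 20000000000
    TO analyst_role"

# Check how the profile was stored.
clickhouse-client --query "SHOW CREATE SETTINGS PROFILE restricted_memory"
```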
- -Management queries: - -- [CREATE SETTINGS PROFILE](../sql-reference/statements/create/settings-profile.md#create-settings-profile-statement) -- [ALTER SETTINGS PROFILE](../sql-reference/statements/alter/settings-profile.md#alter-settings-profile-statement) -- [DROP SETTINGS PROFILE](../sql-reference/statements/drop.md#drop-settings-profile-statement) -- [SHOW CREATE SETTINGS PROFILE](../sql-reference/statements/show.md#show-create-settings-profile-statement) -- [SHOW PROFILES](../sql-reference/statements/show.md#show-profiles-statement) - -## Quota {#quotas-management} - -Quota limits resource usage. See [Quotas](../operations/quotas.md). - -Quota contains a set of limits for some durations, as well as a list of roles and/or users which should use this quota. - -Management queries: - -- [CREATE QUOTA](../sql-reference/statements/create/quota.md) -- [ALTER QUOTA](../sql-reference/statements/alter/quota.md#alter-quota-statement) -- [DROP QUOTA](../sql-reference/statements/drop.md#drop-quota-statement) -- [SHOW CREATE QUOTA](../sql-reference/statements/show.md#show-create-quota-statement) -- [SHOW QUOTA](../sql-reference/statements/show.md#show-quota-statement) -- [SHOW QUOTAS](../sql-reference/statements/show.md#show-quotas-statement) - -## Enabling SQL-driven Access Control and Account Management {#enabling-access-control} - -- Setup a directory for configurations storage. - - ClickHouse stores access entity configurations in the folder set in the [access_control_path](../operations/server-configuration-parameters/settings.md#access_control_path) server configuration parameter. - -- Enable SQL-driven access control and account management for at least one user account. - - By default, SQL-driven access control and account management is disabled for all users. You need to configure at least one user in the `users.xml` configuration file and set the value of the [access_management](../operations/settings/settings-users.md#access_management-user-setting) setting to 1. diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index f1a5649cd4c..69eb782868a 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -1,5 +1,6 @@ --- slug: /en/operations/backup +description: In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data. --- # Backup and Restore @@ -213,7 +214,7 @@ To write backups to an S3 bucket you need three pieces of information: for example `Abc+123` :::note -Creating an S3 bucket is covered in [Use S3 Object Storage as a ClickHouse disk](/docs/en/integrations/data-ingestion/s3/configuring-s3-for-clickhouse-use.md), just come back to this doc after saving the policy, there is no need to configure ClickHouse to use the S3 bucket. +Creating an S3 bucket is covered in [Use S3 Object Storage as a ClickHouse disk](/docs/en/integrations/data-ingestion/s3/index.md#configuring-s3-for-clickhouse-use), just come back to this doc after saving the policy, there is no need to configure ClickHouse to use the S3 bucket. ::: The destination for a backup will be specified like this: diff --git a/docs/en/operations/caches.md b/docs/en/operations/caches.md index 0f9156048c4..86bf8065d94 100644 --- a/docs/en/operations/caches.md +++ b/docs/en/operations/caches.md @@ -3,6 +3,7 @@ slug: /en/operations/caches sidebar_position: 65 sidebar_label: Caches title: "Cache Types" +description: When performing queries, ClickHouse uses different caches. 
--- When performing queries, ClickHouse uses different caches. diff --git a/docs/en/operations/clickhouse-keeper.md b/docs/en/operations/clickhouse-keeper.md deleted file mode 100644 index 10bad586a54..00000000000 --- a/docs/en/operations/clickhouse-keeper.md +++ /dev/null @@ -1,378 +0,0 @@ ---- -slug: /en/operations/clickhouse-keeper -sidebar_position: 66 -sidebar_label: ClickHouse Keeper ---- - -# ClickHouse Keeper -import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_automated.md'; - - - -ClickHouse Keeper provides the coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) queries execution. ClickHouse Keeper is compatible with ZooKeeper. - -## Implementation details {#implementation-details} - -ZooKeeper is one of the first well-known open-source coordination systems. It's implemented in Java, and has quite a simple and powerful data model. ZooKeeper's coordination algorithm, ZooKeeper Atomic Broadcast (ZAB), doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper ClickHouse Keeper is written in C++ and uses the [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm allows linearizability for reads and writes, and has several open-source implementations in different languages. - -By default, ClickHouse Keeper provides the same guarantees as ZooKeeper (linearizable writes, non-linearizable reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with ClickHouse Keeper. Snapshots and logs have an incompatible format with ZooKeeper, but the `clickhouse-keeper-converter` tool enables the conversion of ZooKeeper data to ClickHouse Keeper snapshots. The interserver protocol in ClickHouse Keeper is also incompatible with ZooKeeper so a mixed ZooKeeper / ClickHouse Keeper cluster is impossible. - -ClickHouse Keeper supports Access Control Lists (ACLs) the same way as [ZooKeeper](https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) does. ClickHouse Keeper supports the same set of permissions and has the identical built-in schemes: `world`, `auth` and `digest`. The digest authentication scheme uses the pair `username:password`, the password is encoded in Base64. - -:::note -External integrations are not supported. -::: - -## Configuration {#configuration} - -ClickHouse Keeper can be used as a standalone replacement for ZooKeeper or as an internal part of the ClickHouse server. In both cases the configuration is almost the same `.xml` file. The main ClickHouse Keeper configuration tag is ``. Keeper configuration has the following parameters: - -- `tcp_port` — Port for a client to connect (default for ZooKeeper is `2181`). -- `tcp_port_secure` — Secure port for an SSL connection between client and keeper-server. -- `server_id` — Unique server id, each participant of the ClickHouse Keeper cluster must have a unique number (1, 2, 3, and so on). -- `log_storage_path` — Path to coordination logs, just like ZooKeeper it is best to store logs on non-busy nodes. -- `snapshot_storage_path` — Path to coordination snapshots. - -Other common parameters are inherited from the ClickHouse server config (`listen_host`, `logger`, and so on). 
- -Internal coordination settings are located in the `.` section: - -- `operation_timeout_ms` — Timeout for a single client operation (ms) (default: 10000). -- `min_session_timeout_ms` — Min timeout for client session (ms) (default: 10000). -- `session_timeout_ms` — Max timeout for client session (ms) (default: 100000). -- `dead_session_check_period_ms` — How often ClickHouse Keeper checks for dead sessions and removes them (ms) (default: 500). -- `heart_beat_interval_ms` — How often a ClickHouse Keeper leader will send heartbeats to followers (ms) (default: 500). -- `election_timeout_lower_bound_ms` — If the follower does not receive a heartbeat from the leader in this interval, then it can initiate leader election (default: 1000). Must be less than or equal to `election_timeout_upper_bound_ms`. Ideally they shouldn't be equal. -- `election_timeout_upper_bound_ms` — If the follower does not receive a heartbeat from the leader in this interval, then it must initiate leader election (default: 2000). -- `rotate_log_storage_interval` — How many log records to store in a single file (default: 100000). -- `reserved_log_items` — How many coordination log records to store before compaction (default: 100000). -- `snapshot_distance` — How often ClickHouse Keeper will create new snapshots (in the number of records in logs) (default: 100000). -- `snapshots_to_keep` — How many snapshots to keep (default: 3). -- `stale_log_gap` — Threshold when leader considers follower as stale and sends the snapshot to it instead of logs (default: 10000). -- `fresh_log_gap` — When node became fresh (default: 200). -- `max_requests_batch_size` - Max size of batch in requests count before it will be sent to RAFT (default: 100). -- `force_sync` — Call `fsync` on each write to coordination log (default: true). -- `quorum_reads` — Execute read requests as writes through whole RAFT consensus with similar speed (default: false). -- `raft_logs_level` — Text logging level about coordination (trace, debug, and so on) (default: system default). -- `auto_forwarding` — Allow to forward write requests from followers to the leader (default: true). -- `shutdown_timeout` — Wait to finish internal connections and shutdown (ms) (default: 5000). -- `startup_timeout` — If the server doesn't connect to other quorum participants in the specified timeout it will terminate (ms) (default: 30000). -- `four_letter_word_white_list` — White list of 4lw commands (default: `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld`). - -Quorum configuration is located in the `.` section and contain servers description. - -The only parameter for the whole quorum is `secure`, which enables encrypted connection for communication between quorum participants. The parameter can be set `true` if SSL connection is required for internal communication between nodes, or left unspecified otherwise. - -The main parameters for each `` are: - -- `id` — Server identifier in a quorum. -- `hostname` — Hostname where this server is placed. -- `port` — Port where this server listens for connections. 
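Once a node is up, one way to double-check which of these values it actually picked up is the `conf` four-letter command described later on this page (a sketch that assumes the client port is 9181 and that `nc` is available):

```bash
# Print the serving configuration of a running Keeper node,
# including ports, storage paths, and coordination settings.
echo conf | nc localhost 9181
```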
- -:::note -In the case of a change in the topology of your ClickHouse Keeper cluster (e.g., replacing a server), please make sure to keep the mapping of `server_id` to `hostname` consistent and avoid shuffling or reusing an existing `server_id` for different servers (e.g., it can happen if your rely on automation scripts to deploy ClickHouse Keeper) -::: - -Examples of configuration for quorum with three nodes can be found in [integration tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration) with `test_keeper_` prefix. Example configuration for server #1: - -```xml - - 2181 - 1 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - - 10000 - 30000 - trace - - - - - 1 - zoo1 - 9444 - - - 2 - zoo2 - 9444 - - - 3 - zoo3 - 9444 - - - -``` - -## How to run {#how-to-run} - -ClickHouse Keeper is bundled into the ClickHouse server package, just add configuration of `` and start ClickHouse server as always. If you want to run standalone ClickHouse Keeper you can start it in a similar way with: - -```bash -clickhouse-keeper --config /etc/your_path_to_config/config.xml -``` - -If you don't have the symlink (`clickhouse-keeper`) you can create it or specify `keeper` as an argument to `clickhouse`: - -```bash -clickhouse keeper --config /etc/your_path_to_config/config.xml -``` - -## Four Letter Word Commands {#four-letter-word-commands} - -ClickHouse Keeper also provides 4lw commands which are almost the same with Zookeeper. Each command is composed of four letters such as `mntr`, `stat` etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on server and connections respectively. - -The 4lw commands has a white list configuration `four_letter_word_white_list` which has default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld`. - -You can issue the commands to ClickHouse Keeper via telnet or nc, at the client port. - -``` -echo mntr | nc localhost 9181 -``` - -Bellow is the detailed 4lw commands: - -- `ruok`: Tests if server is running in a non-error state. The server will respond with `imok` if it is running. Otherwise it will not respond at all. A response of `imok` does not necessarily indicate that the server has joined the quorum, just that the server process is active and bound to the specified client port. Use "stat" for details on state wrt quorum and client connection information. - -``` -imok -``` - -- `mntr`: Outputs a list of variables that could be used for monitoring the health of the cluster. - -``` -zk_version v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -zk_avg_latency 0 -zk_max_latency 0 -zk_min_latency 0 -zk_packets_received 68 -zk_packets_sent 68 -zk_num_alive_connections 1 -zk_outstanding_requests 0 -zk_server_state leader -zk_znode_count 4 -zk_watch_count 1 -zk_ephemerals_count 0 -zk_approximate_data_size 723 -zk_open_file_descriptor_count 310 -zk_max_file_descriptor_count 10240 -zk_followers 0 -zk_synced_followers 0 -``` - -- `srvr`: Lists full details for the server. - -``` -ClickHouse Keeper version: v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -Latency min/avg/max: 0/0/0 -Received: 2 -Sent : 2 -Connections: 1 -Outstanding: 0 -Zxid: 34 -Mode: leader -Node count: 4 -``` - -- `stat`: Lists brief details for the server and connected clients. 
- -``` -ClickHouse Keeper version: v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -Clients: - 192.168.1.1:52852(recved=0,sent=0) - 192.168.1.1:52042(recved=24,sent=48) -Latency min/avg/max: 0/0/0 -Received: 4 -Sent : 4 -Connections: 1 -Outstanding: 0 -Zxid: 36 -Mode: leader -Node count: 4 -``` - -- `srst`: Reset server statistics. The command will affect the result of `srvr`, `mntr` and `stat`. - -``` -Server stats reset. -``` - -- `conf`: Print details about serving configuration. - -``` -server_id=1 -tcp_port=2181 -four_letter_word_white_list=* -log_storage_path=./coordination/logs -snapshot_storage_path=./coordination/snapshots -max_requests_batch_size=100 -session_timeout_ms=30000 -operation_timeout_ms=10000 -dead_session_check_period_ms=500 -heart_beat_interval_ms=500 -election_timeout_lower_bound_ms=1000 -election_timeout_upper_bound_ms=2000 -reserved_log_items=1000000000000000 -snapshot_distance=10000 -auto_forwarding=true -shutdown_timeout=5000 -startup_timeout=240000 -raft_logs_level=information -snapshots_to_keep=3 -rotate_log_storage_interval=100000 -stale_log_gap=10000 -fresh_log_gap=200 -max_requests_batch_size=100 -quorum_reads=false -force_sync=false -compress_logs=true -compress_snapshots_with_zstd_format=true -configuration_change_tries_count=20 -``` - -- `cons`: List full connection/session details for all clients connected to this server. Includes information on numbers of packets received/sent, session id, operation latencies, last operation performed, etc... - -``` - 192.168.1.1:52163(recved=0,sent=0,sid=0xffffffffffffffff,lop=NA,est=1636454787393,to=30000,lzxid=0xffffffffffffffff,lresp=0,llat=0,minlat=0,avglat=0,maxlat=0) - 192.168.1.1:52042(recved=9,sent=18,sid=0x0000000000000001,lop=List,est=1636454739887,to=30000,lcxid=0x0000000000000005,lzxid=0x0000000000000005,lresp=1636454739892,llat=0,minlat=0,avglat=0,maxlat=0) -``` - -- `crst`: Reset connection/session statistics for all connections. - -``` -Connection stats reset. -``` - -- `envi`: Print details about serving environment - -``` -Environment: -clickhouse.keeper.version=v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7 -host.name=ZBMAC-C02D4054M.local -os.name=Darwin -os.arch=x86_64 -os.version=19.6.0 -cpu.count=12 -user.name=root -user.home=/Users/JackyWoo/ -user.dir=/Users/JackyWoo/project/jd/clickhouse/cmake-build-debug/programs/ -user.tmp=/var/folders/b4/smbq5mfj7578f2jzwn602tt40000gn/T/ -``` - - -- `dirs`: Shows the total size of snapshot and log files in bytes - -``` -snapshot_dir_size: 0 -log_dir_size: 3875 -``` - -- `isro`: Tests if server is running in read-only mode. The server will respond with "ro" if in read-only mode or "rw" if not in read-only mode. - -``` -rw -``` - -- `wchs`: Lists brief information on watches for the server. - -``` -1 connections watching 1 paths -Total watches:1 -``` - -- `wchc`: Lists detailed information on watches for the server, by session. This outputs a list of sessions (connections) with associated watches (paths). Note, depending on the number of watches this operation may be expensive (ie impact server performance), use it carefully. - -``` -0x0000000000000001 - /clickhouse/task_queue/ddl -``` - -- `wchp`: Lists detailed information on watches for the server, by path. This outputs a list of paths (znodes) with associated sessions. Note, depending on the number of watches this operation may be expensive (i. e. impact server performance), use it carefully. 
- -``` -/clickhouse/task_queue/ddl - 0x0000000000000001 -``` - -- `dump`: Lists the outstanding sessions and ephemeral nodes. This only works on the leader. - -``` -Sessions dump (2): -0x0000000000000001 -0x0000000000000002 -Sessions with Ephemerals (1): -0x0000000000000001 - /clickhouse/task_queue/ddl -``` - -- `csnp`: Schedule a snapshot creation task. Return the last committed log index of the scheduled snapshot if success or `Failed to schedule snapshot creation task.` if failed. Note that `lgif` command can help you determine whether the snapshot is done. - -``` -100 -``` - -- `lgif`: Keeper log information. `first_log_idx` : my first log index in log store; `first_log_term` : my first log term; `last_log_idx` : my last log index in log store; `last_log_term` : my last log term; `last_committed_log_idx` : my last committed log index in state machine; `leader_committed_log_idx` : leader's committed log index from my perspective; `target_committed_log_idx` : target log index should be committed to; `last_snapshot_idx` : the largest committed log index in last snapshot. - -``` -first_log_idx 1 -first_log_term 1 -last_log_idx 101 -last_log_term 1 -last_committed_log_idx 100 -leader_committed_log_idx 101 -target_committed_log_idx 101 -last_snapshot_idx 50 -``` - -- `rqld`: Request to become new leader. Return `Sent leadership request to leader.` if request sent or `Failed to send leadership request to leader.` if request not sent. Note that if node is already leader the outcome is same as the request is sent. - -``` -Sent leadership request to leader. -``` - -## Migration from ZooKeeper {#migration-from-zookeeper} - -Seamlessly migration from ZooKeeper to ClickHouse Keeper is impossible you have to stop your ZooKeeper cluster, convert data and start ClickHouse Keeper. `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration: - -1. Stop all ZooKeeper nodes. - -2. Optional, but recommended: find ZooKeeper leader node, start and stop it again. It will force ZooKeeper to create a consistent snapshot. - -3. Run `clickhouse-keeper-converter` on a leader, for example: - -```bash -clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots -``` - -4. Copy snapshot to ClickHouse server nodes with a configured `keeper` or start ClickHouse Keeper instead of ZooKeeper. The snapshot must persist on all nodes, otherwise, empty nodes can be faster and one of them can become a leader. - - - -## Recovering after losing quorum - -Because ClickHouse Keeper uses Raft it can tolerate certain amount of node crashes depending on the cluster size. \ -E.g. for a 3-node cluster, it will continue working correctly if only 1 node crashes. - -Cluster configuration can be dynamically configured but there are some limitations. Reconfiguration relies on Raft also -so to add/remove a node from the cluster you need to have a quorum. If you lose too many nodes in your cluster at the same time without any chance -of starting them again, Raft will stop working and not allow you to reconfigure your cluster using the conventional way. - -Nevertheless, ClickHouse Keeper has a recovery mode which allows you to forcefully reconfigure your cluster with only 1 node. -This should be done only as your last resort if you cannot start your nodes again, or start a new instance on the same endpoint. 
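For example, while working through the procedure below, the current role of each node can be checked from the shell with the `mntr` command (the port 9181 and the use of `nc` are assumptions):

```bash
# Pull the node's role out of the mntr output; expect "follower" on the
# newly started nodes and "leader" on the picked node once quorum is back.
echo mntr | nc localhost 9181 | grep zk_server_state
```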
- -Important things to note before continuing: -- Make sure that the failed nodes cannot connect to the cluster again. -- Do not start any of the new nodes until it's specified in the steps. - -After making sure that the above things are true, you need to do following: -1. Pick a single Keeper node to be your new leader. Be aware that the data of that node will be used for the entire cluster so we recommend to use a node with the most up to date state. -2. Before doing anything else, make a backup of the `log_storage_path` and `snapshot_storage_path` folders of the picked node. -3. Reconfigure the cluster on all of the nodes you want to use. -4. Send the four letter command `rcvr` to the node you picked which will move the node to the recovery mode OR stop Keeper instance on the picked node and start it again with the `--force-recovery` argument. -5. One by one, start Keeper instances on the new nodes making sure that `mntr` returns `follower` for the `zk_server_state` before starting the next one. -6. While in the recovery mode, the leader node will return error message for `mntr` command until it achieves quorum with the new nodes and refuse any requests from the client and the followers. -7. After quorum is achieved, the leader node will return to the normal mode of operation, accepting all the requests using Raft - verify with `mntr` which should return `leader` for the `zk_server_state`. diff --git a/docs/en/operations/external-authenticators/kerberos.md b/docs/en/operations/external-authenticators/kerberos.md index 95944e96194..b7a11d7445b 100644 --- a/docs/en/operations/external-authenticators/kerberos.md +++ b/docs/en/operations/external-authenticators/kerberos.md @@ -113,7 +113,7 @@ Note, that now, once user `my_user` uses `kerberos`, Kerberos must be enabled in ### Enabling Kerberos using SQL {#enabling-kerberos-using-sql} -When [SQL-driven Access Control and Account Management](../access-rights.md#access-control) is enabled in ClickHouse, users identified by Kerberos can also be created using SQL statements. +When [SQL-driven Access Control and Account Management](/docs/en/guides/sre/user-management/index.md#access-control) is enabled in ClickHouse, users identified by Kerberos can also be created using SQL statements. ```sql CREATE USER my_user IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM' diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index eba560f6ea5..fa44e6e2978 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -112,7 +112,7 @@ At each login attempt, ClickHouse tries to "bind" to the specified DN defined by Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously. -When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. +When SQL-driven [Access Control and Account Management](/docs/en/guides/sre/user-management/index.md#access-control) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](/docs/en/sql-reference/statements/create/user.md#create-user-statement) statement. 
Query: @@ -124,7 +124,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. To achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file. -At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. +At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](/docs/en/guides/sre/user-management/index.md#access-control) is enabled and roles are created using the [CREATE ROLE](/docs/en/sql-reference/statements/create/role.md#create-role-statement) statement. **Example** @@ -173,7 +173,7 @@ Note that `my_ldap_server` referred in the `ldap` section inside the `user_direc - `roles` — Section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. - If no roles are specified here or assigned during role mapping (below), user will not be able to perform any actions after authentication. - `role_mapping` — Section with LDAP search parameters and mapping rules. - - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged-in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. + - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged-in user. For each entry found during that search, the value of the specified attribute is extracted. 
For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](/docs/en/sql-reference/statements/create/role.md#create-role-statement) statement. - There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. - `base_dn` — Template used to construct the base DN for the LDAP search. - The resulting DN will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{user_dn}` substrings of the template with the actual user name, bind DN, and user DN during each LDAP search. diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md index 2b3c4bdbbdf..04c5840d514 100644 --- a/docs/en/operations/monitoring.md +++ b/docs/en/operations/monitoring.md @@ -2,6 +2,7 @@ slug: /en/operations/monitoring sidebar_position: 45 sidebar_label: Monitoring +description: You can monitor the utilization of hardware resources and also ClickHouse server metrics. --- # Monitoring diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 0424c3520e0..08be318f334 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -2,6 +2,7 @@ slug: /en/operations/server-configuration-parameters/settings sidebar_position: 57 sidebar_label: Server Settings +description: This section contains descriptions of server settings that cannot be changed at the session or query level. --- # Server Settings @@ -275,7 +276,7 @@ Path: - Specify the absolute path or the path relative to the server config file. - The path can contain wildcards \* and ?. -See also “[Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)”. +See also “[Dictionaries](../../sql-reference/dictionaries/index.md)”. **Example** @@ -1025,7 +1026,7 @@ If the number of **idle** threads in the Backups IO Thread pool exceeds `max_bac Possible values: - Positive integer. -- Zero. +- Zero. Default value: `0`. @@ -1917,7 +1918,7 @@ Default value: `/var/lib/clickhouse/access/`. **See also** -- [Access Control and Account Management](../../operations/access-rights.md#access-control) +- [Access Control and Account Management](../../guides/sre/user-management/index.md#access-control) ## user_directories {#user_directories} diff --git a/docs/en/operations/settings/settings-profiles.md b/docs/en/operations/settings/settings-profiles.md index 4527152583f..2f39a75453c 100644 --- a/docs/en/operations/settings/settings-profiles.md +++ b/docs/en/operations/settings/settings-profiles.md @@ -9,7 +9,7 @@ sidebar_label: Settings Profiles A settings profile is a collection of settings grouped under the same name. :::note -ClickHouse also supports [SQL-driven workflow](../../operations/access-rights.md#access-control) for managing settings profiles. We recommend using it. +ClickHouse also supports [SQL-driven workflow](../../guides/sre/user-management/index.md#access-control) for managing settings profiles. We recommend using it. ::: The profile can have any name. You can specify the same profile for different users. The most important thing you can write in the settings profile is `readonly=1`, which ensures read-only access. 
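For instance, once a profile has been defined in `users.xml` (the `web` profile name below is only an assumption), it can be applied for the current session like this:

```bash
# Switch the session to a (hypothetical) profile and run a query under its
# settings and constraints; -n keeps both statements in one session.
clickhouse-client -n --query "SET profile = 'web'; SELECT 1"
```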
diff --git a/docs/en/operations/settings/settings-users.md b/docs/en/operations/settings/settings-users.md index b55d64fc4f7..9b27af61851 100644 --- a/docs/en/operations/settings/settings-users.md +++ b/docs/en/operations/settings/settings-users.md @@ -9,7 +9,7 @@ sidebar_label: User Settings The `users` section of the `user.xml` configuration file contains user settings. :::note -ClickHouse also supports [SQL-driven workflow](../../operations/access-rights.md#access-control) for managing users. We recommend using it. +ClickHouse also supports [SQL-driven workflow](../../guides/sre/user-management/index.md#access-control) for managing users. We recommend using it. ::: Structure of the `users` section: @@ -77,7 +77,7 @@ Password can be specified in plaintext or in SHA256 (hex format). ### access_management {#access_management-user-setting} -This setting enables or disables using of SQL-driven [access control and account management](../../operations/access-rights.md#access-control) for the user. +This setting enables or disables using of SQL-driven [access control and account management](../../guides/sre/user-management/index.md#access-control) for the user. Possible values: diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 3c53f4fd0cf..daaa79e90db 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -2999,7 +2999,7 @@ It can be useful when merges are CPU bounded not IO bounded (performing heavy da ## max_final_threads {#max-final-threads} -Sets the maximum number of parallel threads for the `SELECT` query data read phase with the [FINAL](../../sql-reference/statements/select/from.md/#select-from-final) modifier. +Sets the maximum number of parallel threads for the `SELECT` query data read phase with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier. Possible values: @@ -3094,9 +3094,9 @@ Possible values: Default value: `0`. -## s3_truncate_on_insert +## s3_truncate_on_insert -Enables or disables truncate before inserts in s3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists. +Enables or disables truncate before inserts in s3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists. Possible values: - 0 — `INSERT` query appends new data to the end of the file. @@ -3104,9 +3104,9 @@ Possible values: Default value: `0`. -## hdfs_truncate_on_insert +## hdfs_truncate_on_insert -Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists. +Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists. Possible values: - 0 — `INSERT` query appends new data to the end of the file. @@ -3114,11 +3114,11 @@ Possible values: Default value: `0`. -## engine_file_allow_create_multiple_files +## engine_file_allow_create_multiple_files Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern: -`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc. +`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc. 
Possible values: - 0 — `INSERT` query appends new data to the end of the file. @@ -3126,11 +3126,11 @@ Possible values: Default value: `0`. -## s3_create_new_file_on_insert +## s3_create_new_file_on_insert Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern: -initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc. +initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc. Possible values: - 0 — `INSERT` query appends new data to the end of the file. @@ -3142,7 +3142,7 @@ Default value: `0`. Enables or disables creating a new file on each insert in HDFS engine tables. If enabled, on each insert a new HDFS file will be created with the name, similar to this pattern: -initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc. +initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc. Possible values: - 0 — `INSERT` query appends new data to the end of the file. @@ -3753,7 +3753,7 @@ Default value: `1`. ## optimize_move_to_prewhere_if_final {#optimize_move_to_prewhere_if_final} -Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with [FINAL](../../sql-reference/statements/select/from.md/#select-from-final) modifier. +Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier. Works only for [*MergeTree](../../engines/table-engines/mergetree-family/index.md) tables. @@ -3770,7 +3770,7 @@ Default value: `0`. ## optimize_using_constraints -Use [constraints](../../sql-reference/statements/create/table#constraints) for query optimization. The default is `false`. +Use [constraints](../../sql-reference/statements/create/table.md#constraints) for query optimization. The default is `false`. Possible values: @@ -3778,7 +3778,7 @@ Possible values: ## optimize_append_index -Use [constraints](../../sql-reference/statements/create/table#constraints) in order to append index condition. The default is `false`. +Use [constraints](../../sql-reference/statements/create/table.md#constraints) in order to append index condition. The default is `false`. Possible values: @@ -3786,7 +3786,7 @@ Possible values: ## optimize_substitute_columns -Use [constraints](../../sql-reference/statements/create/table#constraints) for column substitution. The default is `false`. +Use [constraints](../../sql-reference/statements/create/table.md#constraints) for column substitution. The default is `false`. Possible values: @@ -3984,7 +3984,7 @@ Use this setting only for backward compatibility if your use cases depend on old ## final {#final} -Automatically applies [FINAL](../../sql-reference/statements/select/from/#final-modifier) modifier to all tables in a query, to tables where [FINAL](../../sql-reference/statements/select/from/#final-modifier) is applicable, including joined tables and tables in sub-queries, and +Automatically applies [FINAL](../../sql-reference/statements/select/from.md#final-modifier) modifier to all tables in a query, to tables where [FINAL](../../sql-reference/statements/select/from.md#final-modifier) is applicable, including joined tables and tables in sub-queries, and distributed tables. 
Possible values: @@ -4030,7 +4030,7 @@ SELECT * FROM test; ## asterisk_include_materialized_columns {#asterisk_include_materialized_columns} -Include [MATERIALIZED](../../sql-reference/statements/create/table/#materialized) columns for wildcard query (`SELECT *`). +Include [MATERIALIZED](../../sql-reference/statements/create/table.md#materialized) columns for wildcard query (`SELECT *`). Possible values: @@ -4041,7 +4041,7 @@ Default value: `0`. ## asterisk_include_alias_columns {#asterisk_include_alias_columns} -Include [ALIAS](../../sql-reference/statements/create/table/#alias) columns for wildcard query (`SELECT *`). +Include [ALIAS](../../sql-reference/statements/create/table.md#alias) columns for wildcard query (`SELECT *`). Possible values: diff --git a/docs/en/operations/system-tables/dictionaries.md b/docs/en/operations/system-tables/dictionaries.md index 4b256f0de97..ca6b7faaa78 100644 --- a/docs/en/operations/system-tables/dictionaries.md +++ b/docs/en/operations/system-tables/dictionaries.md @@ -3,12 +3,12 @@ slug: /en/operations/system-tables/dictionaries --- # dictionaries -Contains information about [dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). +Contains information about [dictionaries](../../sql-reference/dictionaries/index.md). Columns: - `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries. -- `name` ([String](../../sql-reference/data-types/string.md)) — [Dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md). +- `name` ([String](../../sql-reference/data-types/string.md)) — [Dictionary name](../../sql-reference/dictionaries/index.md). - `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Dictionary UUID. - `status` ([Enum8](../../sql-reference/data-types/enum.md)) — Dictionary status. Possible values: - `NOT_LOADED` — Dictionary was not loaded because it was not used. @@ -18,20 +18,20 @@ Columns: - `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, timeout, dictionary config has changed). - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now. - `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary. -- `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). -- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [key names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary. -- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [key types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary. 
-- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary. -- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary. +- `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/index.md#storig-dictionaries-in-memory). +- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [key names](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-key) provided by the dictionary. +- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [key types](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-key) provided by the dictionary. +- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-attributes) provided by the dictionary. +- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-attributes) provided by the dictionary. - `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary. - `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot. - `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache. - `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of uses for which the value was found. - `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary. - `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). -- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary. -- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. 
-- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. +- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/index.md#dictionary-sources) for the dictionary. +- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/index.md#dictionary-updates) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. +- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/index.md#dictionary-updates) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. - `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary. - `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with dictionary sources and investigate the causes. - `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading. diff --git a/docs/en/operations/system-tables/quotas.md b/docs/en/operations/system-tables/quotas.md index ca8fc4d166f..ffe7a95df5b 100644 --- a/docs/en/operations/system-tables/quotas.md +++ b/docs/en/operations/system-tables/quotas.md @@ -20,7 +20,7 @@ Columns: - `apply_to_all` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Logical value. It shows which users the quota is applied to. Values: - `0` — The quota applies to users specify in the `apply_to_list`. - `1` — The quota applies to all users except those listed in `apply_to_except`. -- `apply_to_list` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — List of user names/[roles](../../operations/access-rights.md#role-management) that the quota should be applied to. +- `apply_to_list` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — List of user names/[roles](../../guides/sre/user-management/index.md#role-management) that the quota should be applied to. - `apply_to_except` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — List of user names/roles that the quota should not apply to. ## See Also {#see-also} diff --git a/docs/en/operations/system-tables/roles.md b/docs/en/operations/system-tables/roles.md index 729c98c89f3..5ef5e765c0f 100644 --- a/docs/en/operations/system-tables/roles.md +++ b/docs/en/operations/system-tables/roles.md @@ -3,7 +3,7 @@ slug: /en/operations/system-tables/roles --- # roles -Contains information about configured [roles](../../operations/access-rights.md#role-management). +Contains information about configured [roles](../../guides/sre/user-management/index.md#role-management). 
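A quick way to peek at this table from the shell is sketched here; the column descriptions follow below:

```bash
# Show every configured role, one record per block for readability.
clickhouse-client --query "SELECT * FROM system.roles FORMAT Vertical"
```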
Columns: diff --git a/docs/en/operations/system-tables/users.md b/docs/en/operations/system-tables/users.md index b8c0403b8d6..385e3151eb7 100644 --- a/docs/en/operations/system-tables/users.md +++ b/docs/en/operations/system-tables/users.md @@ -3,7 +3,7 @@ slug: /en/operations/system-tables/users --- # users -Contains a list of [user accounts](../../operations/access-rights.md#user-account-management) configured at the server. +Contains a list of [user accounts](../../guides/sre/user-management/index.md#user-account-management) configured on the server. Columns: - `name` ([String](../../sql-reference/data-types/string.md)) — User name. diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md index da34a6b7e9c..13353cd8e6a 100644 --- a/docs/en/operations/tips.md +++ b/docs/en/operations/tips.md @@ -126,7 +126,7 @@ Otherwise you may get `Illegal instruction` crashes when hypervisor is run on ol ## ClickHouse Keeper and ZooKeeper {#zookeeper} -ClickHouse Keeper is recommended to replace ZooKeeper for ClickHouse clusters. See the documentation for [ClickHouse Keeper](clickhouse-keeper.md) +ClickHouse Keeper is recommended as a replacement for ZooKeeper in ClickHouse clusters. See the documentation for [ClickHouse Keeper](../guides/sre/keeper/index.md). If you would like to continue using ZooKeeper then it is best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated. @@ -134,7 +134,7 @@ You should never use manually written scripts to transfer data between different If you want to divide an existing ZooKeeper cluster into two, the correct way is to increase the number of its replicas and then reconfigure it as two independent clusters. -You can run ClickHouse Keeper on the same server as ClickHouse in test environments, or in environments with low ingestion rate. +You can run ClickHouse Keeper on the same server as ClickHouse in test environments, or in environments with a low ingestion rate. For production environments, we suggest using separate servers for ClickHouse and ZooKeeper/Keeper, or placing the ClickHouse files and Keeper files on separate disks, because ZooKeeper/Keeper are very sensitive to disk latency and ClickHouse may utilize all available system resources. You can have ZooKeeper observers in an ensemble but ClickHouse servers should not interact with observers.
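As a quick sanity check that a ClickHouse server can actually reach its ZooKeeper/Keeper ensemble, you can query the `system.zookeeper` table. This is a minimal sketch, assuming ZooKeeper/Keeper is configured for the server; the table is only available in that case and requires a `path` condition in `WHERE`:

```sql
-- Lists the top-level znodes visible to this ClickHouse server;
-- an error here usually points to a connectivity or configuration problem.
SELECT name, czxid, mzxid
FROM system.zookeeper
WHERE path = '/';
```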
diff --git a/docs/en/operations/utilities/clickhouse-format.md b/docs/en/operations/utilities/clickhouse-format.md index bf2e618b791..101310cc65e 100644 --- a/docs/en/operations/utilities/clickhouse-format.md +++ b/docs/en/operations/utilities/clickhouse-format.md @@ -27,7 +27,7 @@ $ clickhouse-format --query "select number from numbers(10) where number%2 order Result: -```text +```sql SELECT number FROM numbers(10) WHERE number % 2 @@ -54,7 +54,7 @@ $ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNIO Result: -```text +```sql SELECT * FROM ( @@ -75,7 +75,7 @@ $ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWE Result: -```text +```sql SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END; ``` @@ -87,7 +87,7 @@ $ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWE Result: -```text +```sql SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END; ``` @@ -99,7 +99,7 @@ $ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELE Result: -```text +```sql SELECT * \ FROM \ ( \ diff --git a/docs/en/operations/utilities/clickhouse-local.md b/docs/en/operations/utilities/clickhouse-local.md index 08640b5c16b..6bf1269c1d9 100644 --- a/docs/en/operations/utilities/clickhouse-local.md +++ b/docs/en/operations/utilities/clickhouse-local.md @@ -4,9 +4,9 @@ sidebar_position: 60 sidebar_label: clickhouse-local --- -# clickhouse-local +# clickhouse-local -The `clickhouse-local` program enables you to perform fast processing on local files, without having to deploy and configure the ClickHouse server. It accepts data that represent tables and queries them using [ClickHouse SQL dialect](../../sql-reference/). `clickhouse-local` uses the same core as ClickHouse server, so it supports most of the features and the same set of formats and table engines. +The `clickhouse-local` program enables you to perform fast processing on local files, without having to deploy and configure the ClickHouse server. It accepts data that represent tables and queries them using [ClickHouse SQL dialect](../../sql-reference/index.md). `clickhouse-local` uses the same core as ClickHouse server, so it supports most of the features and the same set of formats and table engines. By default `clickhouse-local` has access to data on the same host, and it does not depend on the server's configuration. It also supports loading server configuration using `--config-file` argument. For temporary data, a unique temporary data directory is created by default. diff --git a/docs/en/operations/utilities/index.md b/docs/en/operations/utilities/index.md index a8c0239c102..b2f66af1084 100644 --- a/docs/en/operations/utilities/index.md +++ b/docs/en/operations/utilities/index.md @@ -1,11 +1,11 @@ --- slug: /en/operations/utilities/ sidebar_position: 56 -sidebar_label: Overview +sidebar_label: Utilities pagination_next: 'en/operations/utilities/clickhouse-copier' --- -# ClickHouse Utilities +# List of tools and utilities - [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without starting the ClickHouse server, similar to how `awk` does this. - [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster. 
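To make the `clickhouse-local` description above more concrete, here is the kind of query text that could be passed to it via `--query`. This is a sketch under assumptions: `data.csv` is a hypothetical header-less input file, and with schema inference the columns typically get default names such as `c1`, `c2`:

```sql
-- Reads a local CSV file through the file() table function and aggregates it.
SELECT
    c1 AS user_id,
    count() AS events
FROM file('data.csv', 'CSV')
GROUP BY user_id
ORDER BY events DESC
LIMIT 10;
```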
diff --git a/docs/en/sql-reference/_category_.yml b/docs/en/sql-reference/_category_.yml index d799ecef539..45eaa6e7c16 100644 --- a/docs/en/sql-reference/_category_.yml +++ b/docs/en/sql-reference/_category_.yml @@ -1,7 +1,7 @@ -position: 15 +position: 1 label: 'SQL Reference' collapsible: true collapsed: true link: - type: doc - id: en/sql-reference/index + type: generated-index + slug: /en/sql-reference diff --git a/docs/en/sql-reference/aggregate-functions/reference/contingency.md b/docs/en/sql-reference/aggregate-functions/reference/contingency.md index e75537778fe..9e89e99e66d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/contingency.md +++ b/docs/en/sql-reference/aggregate-functions/reference/contingency.md @@ -5,7 +5,7 @@ sidebar_position: 350 # contingency -The `contingency` function calculates the [contingency coefficient](https://en.wikipedia.org/wiki/Contingency_table#Cram%C3%A9r's_V_and_the_contingency_coefficient_C), a value that measures the association between two columns in a table. The computation is similar to [the `cramersV` function](./cramersv) but with a different denominator in the square root. +The `contingency` function calculates the [contingency coefficient](https://en.wikipedia.org/wiki/Contingency_table#Cram%C3%A9r's_V_and_the_contingency_coefficient_C), a value that measures the association between two columns in a table. The computation is similar to [the `cramersV` function](./cramersv.md) but with a different denominator in the square root. **Syntax** diff --git a/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md b/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md index 51524033147..651b5e7b5a2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md +++ b/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md @@ -6,7 +6,7 @@ sidebar_position: 352 # cramersVBiasCorrected -Cramér's V is a measure of association between two columns in a table. The result of the [`cramersV` function](./cramersv) ranges from 0 (corresponding to no association between the variables) to 1 and can reach 1 only when each value is completely determined by the other. The function can be heavily biased, so this version of Cramér's V uses the [bias correction](https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V#Bias_correction). +Cramér's V is a measure of association between two columns in a table. The result of the [`cramersV` function](./cramersv.md) ranges from 0 (corresponding to no association between the variables) to 1 and can reach 1 only when each value is completely determined by the other. The function can be heavily biased, so this version of Cramér's V uses the [bias correction](https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V#Bias_correction). diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md index 5546ade1758..5d82d3575fc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md +++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md @@ -19,7 +19,7 @@ Each `value` corresponds to the determinate `timeunit`. The half-life `x` is the **Arguments** - `value` — Value. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). -- `timeunit` — Timeunit. 
[Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). Timeunit is not timestamp (seconds), it's -- an index of the time interval. Can be calculated using [intDiv](../../functions/arithmetic-functions/#intdiva-b). +- `timeunit` — Timeunit. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). Timeunit is not timestamp (seconds), it's -- an index of the time interval. Can be calculated using [intDiv](../../functions/arithmetic-functions.md#intdiva-b). **Parameters** diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index d9099ba5ad3..a21898de9a2 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -7,7 +7,7 @@ sidebar_label: JSON # JSON :::warning -This feature is experimental and is not production ready. If you need to work with JSON documents, consider using [this guide](/docs/en/guides/developer/working-with-json/json-load-data.md) instead. +This feature is experimental and is not production ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead. ::: Stores JavaScript Object Notation (JSON) documents in a single column. diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md b/docs/en/sql-reference/dictionaries/_snippet_dictionary_in_cloud.md similarity index 100% rename from docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md rename to docs/en/sql-reference/dictionaries/_snippet_dictionary_in_cloud.md diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/_category_.yml b/docs/en/sql-reference/dictionaries/external-dictionaries/_category_.yml deleted file mode 100644 index af79ff9af23..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/_category_.yml +++ /dev/null @@ -1,8 +0,0 @@ -position: 37 -label: 'Dictionaries' -collapsible: true -collapsed: true -link: - type: generated-index - title: Dictionaries - slug: /en/sql-reference/dictionaries/external-dictionaries diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md deleted file mode 100644 index ee9cd2c1f2e..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical -sidebar_position: 45 -sidebar_label: Hierarchical dictionaries ---- - -# Hierarchical Dictionaries - -ClickHouse supports hierarchical dictionaries with a [numeric key](../../dictionaries/external-dictionaries/external-dicts-dict-structure.md#numeric-key). - -Look at the following hierarchical structure: - -``` text -0 (Common parent) -│ -├── 1 (Russia) -│ │ -│ └── 2 (Moscow) -│ │ -│ └── 3 (Center) -│ -└── 4 (Great Britain) - │ - └── 5 (London) -``` - -This hierarchy can be expressed as the following dictionary table. 
- -| region_id | parent_region | region_name | -|------------|----------------|---------------| -| 1 | 0 | Russia | -| 2 | 1 | Moscow | -| 3 | 2 | Center | -| 4 | 0 | Great Britain | -| 5 | 4 | London | - -This table contains a column `parent_region` that contains the key of the nearest parent for the element. - -ClickHouse supports the [hierarchical](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#hierarchical-dict-attr) property for [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/) attributes. This property allows you to configure the hierarchical dictionary similar to described above. - -The [dictGetHierarchy](../../../sql-reference/functions/ext-dict-functions.md#dictgethierarchy) function allows you to get the parent chain of an element. - -For our example, the structure of dictionary can be the following: - -``` xml - - - - region_id - - - - parent_region - UInt64 - 0 - true - - - - region_name - String - - - - - -``` diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md deleted file mode 100644 index 4dc6fd33849..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ /dev/null @@ -1,751 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout -sidebar_position: 41 -sidebar_label: Storing Dictionaries in Memory ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -# Storing Dictionaries in Memory - -There are a variety of ways to store dictionaries in memory. - -We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex_key_hashed](#complex-key-hashed), which provide optimal processing speed. - -Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section [cache](#cache). - -There are several ways to improve dictionary performance: - -- Call the function for working with the dictionary after `GROUP BY`. -- Mark attributes to extract as injective. An attribute is called injective if different attribute values correspond to different keys. So when `GROUP BY` uses a function that fetches an attribute value by the key, this function is automatically taken out of `GROUP BY`. - -ClickHouse generates an exception for errors with dictionaries. Examples of errors: - -- The dictionary being accessed could not be loaded. -- Error querying a `cached` dictionary. - -You can view the list of dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table. - - - -The configuration looks like this: - -``` xml - - - ... - - - - - - ... - - -``` - -Corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md): - -``` sql -CREATE DICTIONARY (...) -... -LAYOUT(LAYOUT_TYPE(param value)) -- layout settings -... -``` - -Dictionaries without word `complex-key*` in a layout have a key with [UInt64](../../../sql-reference/data-types/int-uint.md) type, `complex-key*` dictionaries have a composite key (complex, with arbitrary types). - -[UInt64](../../../sql-reference/data-types/int-uint.md) keys in XML dictionaries are defined with `` tag. - -Configuration example (column key_column has UInt64 type): -```xml -... - - - key_column - -... 
-``` - -Composite `complex` keys XML dictionaries are defined `` tag. - -Configuration example of a composite key (key has one element with [String](../../../sql-reference/data-types/string.md) type): -```xml -... - - - - country_code - String - - -... -``` - -## Ways to Store Dictionaries in Memory - -- [flat](#flat) -- [hashed](#dicts-external_dicts_dict_layout-hashed) -- [sparse_hashed](#dicts-external_dicts_dict_layout-sparse_hashed) -- [complex_key_hashed](#complex-key-hashed) -- [complex_key_sparse_hashed](#complex-key-sparse-hashed) -- [hashed_array](#dicts-external_dicts_dict_layout-hashed-array) -- [complex_key_hashed_array](#complex-key-hashed-array) -- [range_hashed](#range-hashed) -- [complex_key_range_hashed](#complex-key-range-hashed) -- [cache](#cache) -- [complex_key_cache](#complex-key-cache) -- [ssd_cache](#ssd-cache) -- [complex_key_ssd_cache](#complex-key-ssd-cache) -- [direct](#direct) -- [complex_key_direct](#complex-key-direct) -- [ip_trie](#ip-trie) - -### flat - -The dictionary is completely stored in memory in the form of flat arrays. How much memory does the dictionary use? The amount is proportional to the size of the largest key (in space used). - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type and the value is limited to `max_array_size` (by default — 500,000). If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary. Dictionary flat arrays initial size is controlled by `initial_array_size` setting (by default — 1024). - -All types of sources are supported. When updating, data (from a file or from a table) is read in it entirety. - -This method provides the best performance among all available methods of storing the dictionary. - -Configuration example: - -``` xml - - - 50000 - 5000000 - - -``` - -or - -``` sql -LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000)) -``` - -### hashed - -The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers In practice, the number of keys can reach tens of millions of items. - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type. - -All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety. - -Configuration example: - -``` xml - - - -``` - -or - -``` sql -LAYOUT(HASHED()) -``` - -If `shards` greater then 1 (default is `1`) the dictionary will load data in parallel, useful if you have huge amount of elements in one dictionary. - -Configuration example: - -``` xml - - - 10 - - 10000 - - -``` - -or - -``` sql -LAYOUT(HASHED(SHARDS 10 [SHARD_LOAD_QUEUE_BACKLOG 10000])) -``` - -### sparse_hashed - -Similar to `hashed`, but uses less memory in favor more CPU usage. - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type. - -Configuration example: - -``` xml - - - -``` - -or - -``` sql -LAYOUT(SPARSE_HASHED()) -``` - -It is also possible to use `shards` for this type of dictionary, and again it is more important for `sparse_hashed` then for `hashed`, since `sparse_hashed` is slower. - -### complex_key_hashed - -This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `hashed`. 
- -Configuration example: - -``` xml - - - 1 - - - -``` - -or - -``` sql -LAYOUT(COMPLEX_KEY_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000])) -``` - -### complex_key_sparse_hashed - -This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to [sparse_hashed](#dicts-external_dicts_dict_layout-sparse_hashed). - -Configuration example: - -``` xml - - - 1 - - -``` - -or - -``` sql -LAYOUT(COMPLEX_KEY_SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000])) -``` - -### hashed_array - -The dictionary is completely stored in memory. Each attribute is stored in an array. The key attribute is stored in the form of a hashed table where value is an index in the attributes array. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items. - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type. - -All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety. - -Configuration example: - -``` xml - - - - -``` - -or - -``` sql -LAYOUT(HASHED_ARRAY()) -``` - -### complex_key_hashed_array - -This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to [hashed_array](#dicts-external_dicts_dict_layout-hashed-array). - -Configuration example: - -``` xml - - - -``` - -or - -``` sql -LAYOUT(COMPLEX_KEY_HASHED_ARRAY()) -``` - -### range_hashed - -The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values. - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type. -This storage method works the same way as hashed and allows using date/time (arbitrary numeric type) ranges in addition to the key. - -Example: The table contains discounts for each advertiser in the format: - -``` text -┌─advertiser_id─┬─discount_start_date─┬─discount_end_date─┬─amount─┐ -│ 123 │ 2015-01-16 │ 2015-01-31 │ 0.25 │ -│ 123 │ 2015-01-01 │ 2015-01-15 │ 0.15 │ -│ 456 │ 2015-01-01 │ 2015-01-15 │ 0.05 │ -└───────────────┴─────────────────────┴───────────────────┴────────┘ -``` - -To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). These elements must contain elements `name` and `type` (if `type` is not specified, the default type will be used - Date). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others). - -:::warning -Values of `range_min` and `range_max` should fit in `Int64` type. -::: - -Example: - -``` xml - - - - min - - - - - advertiser_id - - - discount_start_date - Date - - - discount_end_date - Date - - ... 
-``` - -or - -``` sql -CREATE DICTIONARY discounts_dict ( - advertiser_id UInt64, - discount_start_date Date, - discount_end_date Date, - amount Float64 -) -PRIMARY KEY id -SOURCE(CLICKHOUSE(TABLE 'discounts')) -LIFETIME(MIN 1 MAX 1000) -LAYOUT(RANGE_HASHED(range_lookup_strategy 'max')) -RANGE(MIN discount_start_date MAX discount_end_date) -``` - -To work with these dictionaries, you need to pass an additional argument to the `dictGet` function, for which a range is selected: - -``` sql -dictGet('dict_name', 'attr_name', id, date) -``` -Query example: - -``` sql -SELECT dictGet('discounts_dict', 'amount', 1, '2022-10-20'::Date); -``` - -This function returns the value for the specified `id`s and the date range that includes the passed date. - -Details of the algorithm: - -- If the `id` is not found or a range is not found for the `id`, it returns the default value of the attribute's type. -- If there are overlapping ranges and `range_lookup_strategy=min`, it returns a matching range with minimal `range_min`, if several ranges found, it returns a range with minimal `range_max`, if again several ranges found (several ranges had the same `range_min` and `range_max` it returns a random range of them. -- If there are overlapping ranges and `range_lookup_strategy=max`, it returns a matching range with maximal `range_min`, if several ranges found, it returns a range with maximal `range_max`, if again several ranges found (several ranges had the same `range_min` and `range_max` it returns a random range of them. -- If the `range_max` is `NULL`, the range is open. `NULL` is treated as maximal possible value. For the `range_min` `1970-01-01` or `0` (-MAX_INT) can be used as the open value. - -Configuration example: - -``` xml - - - ... - - - - - - - - Abcdef - - - StartTimeStamp - UInt64 - - - EndTimeStamp - UInt64 - - - XXXType - String - - - - - - -``` - -or - -``` sql -CREATE DICTIONARY somedict( - Abcdef UInt64, - StartTimeStamp UInt64, - EndTimeStamp UInt64, - XXXType String DEFAULT '' -) -PRIMARY KEY Abcdef -RANGE(MIN StartTimeStamp MAX EndTimeStamp) -``` - -Configuration example with overlapping ranges and open ranges: - -```sql -CREATE TABLE discounts -( - advertiser_id UInt64, - discount_start_date Date, - discount_end_date Nullable(Date), - amount Float64 -) -ENGINE = Memory; - -INSERT INTO discounts VALUES (1, '2015-01-01', Null, 0.1); -INSERT INTO discounts VALUES (1, '2015-01-15', Null, 0.2); -INSERT INTO discounts VALUES (2, '2015-01-01', '2015-01-15', 0.3); -INSERT INTO discounts VALUES (2, '2015-01-04', '2015-01-10', 0.4); -INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-15', 0.5); -INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-10', 0.6); - -SELECT * FROM discounts ORDER BY advertiser_id, discount_start_date; -┌─advertiser_id─┬─discount_start_date─┬─discount_end_date─┬─amount─┐ -│ 1 │ 2015-01-01 │ ᴺᵁᴸᴸ │ 0.1 │ -│ 1 │ 2015-01-15 │ ᴺᵁᴸᴸ │ 0.2 │ -│ 2 │ 2015-01-01 │ 2015-01-15 │ 0.3 │ -│ 2 │ 2015-01-04 │ 2015-01-10 │ 0.4 │ -│ 3 │ 1970-01-01 │ 2015-01-15 │ 0.5 │ -│ 3 │ 1970-01-01 │ 2015-01-10 │ 0.6 │ -└───────────────┴─────────────────────┴───────────────────┴────────┘ - --- RANGE_LOOKUP_STRATEGY 'max' - -CREATE DICTIONARY discounts_dict -( - advertiser_id UInt64, - discount_start_date Date, - discount_end_date Nullable(Date), - amount Float64 -) -PRIMARY KEY advertiser_id -SOURCE(CLICKHOUSE(TABLE discounts)) -LIFETIME(MIN 600 MAX 900) -LAYOUT(RANGE_HASHED(RANGE_LOOKUP_STRATEGY 'max')) -RANGE(MIN discount_start_date MAX discount_end_date); - -select 
dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res; -┌─res─┐ -│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null -└─────┘ - -select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; -┌─res─┐ -│ 0.2 │ -- two ranges are matching, range_min 2015-01-15 (0.2) is bigger than 2015-01-01 (0.1) -└─────┘ - -select dictGet('discounts_dict', 'amount', 2, toDate('2015-01-06')) res; -┌─res─┐ -│ 0.4 │ -- two ranges are matching, range_min 2015-01-04 (0.4) is bigger than 2015-01-01 (0.3) -└─────┘ - -select dictGet('discounts_dict', 'amount', 3, toDate('2015-01-01')) res; -┌─res─┐ -│ 0.5 │ -- two ranges are matching, range_min are equal, 2015-01-15 (0.5) is bigger than 2015-01-10 (0.6) -└─────┘ - -DROP DICTIONARY discounts_dict; - --- RANGE_LOOKUP_STRATEGY 'min' - -CREATE DICTIONARY discounts_dict -( - advertiser_id UInt64, - discount_start_date Date, - discount_end_date Nullable(Date), - amount Float64 -) -PRIMARY KEY advertiser_id -SOURCE(CLICKHOUSE(TABLE discounts)) -LIFETIME(MIN 600 MAX 900) -LAYOUT(RANGE_HASHED(RANGE_LOOKUP_STRATEGY 'min')) -RANGE(MIN discount_start_date MAX discount_end_date); - -select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res; -┌─res─┐ -│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null -└─────┘ - -select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; -┌─res─┐ -│ 0.1 │ -- two ranges are matching, range_min 2015-01-01 (0.1) is less than 2015-01-15 (0.2) -└─────┘ - -select dictGet('discounts_dict', 'amount', 2, toDate('2015-01-06')) res; -┌─res─┐ -│ 0.3 │ -- two ranges are matching, range_min 2015-01-01 (0.3) is less than 2015-01-04 (0.4) -└─────┘ - -select dictGet('discounts_dict', 'amount', 3, toDate('2015-01-01')) res; -┌─res─┐ -│ 0.6 │ -- two ranges are matching, range_min are equal, 2015-01-10 (0.6) is less than 2015-01-15 (0.5) -└─────┘ -``` - -### complex_key_range_hashed - -The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values (see [range_hashed](#range-hashed)). This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). - -Configuration example: - -``` sql -CREATE DICTIONARY range_dictionary -( - CountryID UInt64, - CountryKey String, - StartDate Date, - EndDate Date, - Tax Float64 DEFAULT 0.2 -) -PRIMARY KEY CountryID, CountryKey -SOURCE(CLICKHOUSE(TABLE 'date_table')) -LIFETIME(MIN 1 MAX 1000) -LAYOUT(COMPLEX_KEY_RANGE_HASHED()) -RANGE(MIN StartDate MAX EndDate); -``` - -### cache - -The dictionary is stored in a cache that has a fixed number of cells. These cells contain frequently used elements. - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type. - -When searching for a dictionary, the cache is searched first. For each block of data, all keys that are not found in the cache or are outdated are requested from the source using `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. The received data is then written to the cache. - -If keys are not found in dictionary, then update cache task is created and added into update queue. Update queue properties can be controlled with settings `max_update_queue_size`, `update_queue_push_timeout_milliseconds`, `query_wait_timeout_milliseconds`, `max_threads_for_updates`. 
- -For cache dictionaries, the expiration [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used and key becomes expired. The key is re-requested the next time it needs to be used. This behaviour can be configured with setting `allow_read_expired_keys`. - -This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table. - -If setting `allow_read_expired_keys` is set to 1, by default 0. Then dictionary can support asynchronous updates. If a client requests keys and all of them are in cache, but some of them are expired, then dictionary will return expired keys for a client and request them asynchronously from the source. - -To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally. - -All types of sources are supported. - -Example of settings: - -``` xml - - - - 1000000000 - - 0 - - 100000 - - 10 - - 60000 - - 4 - - -``` - -or - -``` sql -LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) -``` - -Set a large enough cache size. You need to experiment to select the number of cells: - -1. Set some value. -2. Run queries until the cache is completely full. -3. Assess memory consumption using the `system.dictionaries` table. -4. Increase or decrease the number of cells until the required memory consumption is reached. - -:::warning -Do not use ClickHouse as a source, because it is slow to process queries with random reads. -::: - -### complex_key_cache - -This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `cache`. - -### ssd_cache - -Similar to `cache`, but stores data on SSD and index in RAM. All cache dictionary settings related to update queue can also be applied to SSD cache dictionaries. - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type. - -``` xml - - - - 4096 - - 16777216 - - 131072 - - 1048576 - - /var/lib/clickhouse/user_files/test_dict - - -``` - -or - -``` sql -LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576 - PATH '/var/lib/clickhouse/user_files/test_dict')) -``` - -### complex_key_ssd_cache - -This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `ssd_cache`. - -### direct - -The dictionary is not stored in memory and directly goes to the source during the processing of a request. - -The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type. - -All types of [sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), except local files, are supported. - -Configuration example: - -``` xml - - - -``` - -or - -``` sql -LAYOUT(DIRECT()) -``` - -### complex_key_direct - -This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `direct`. 
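Returning to the cache layout described earlier on this page: the advice to use a subquery with `LIMIT` and call the dictionary function externally can be sketched as follows; the dictionary, table, and attribute names here are hypothetical:

```sql
-- Applying LIMIT inside the subquery first means dictGet is called only for
-- the rows that are actually returned, not for every row that is scanned.
SELECT dictGet('my_cache_dict', 'attr', id) AS attr
FROM
(
    SELECT id
    FROM big_table
    LIMIT 10
);
```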
- -### ip_trie - -This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN. - -**Example** - -Suppose we have a table in ClickHouse that contains our IP prefixes and mappings: - -```sql -CREATE TABLE my_ip_addresses ( - prefix String, - asn UInt32, - cca2 String -) -ENGINE = MergeTree -PRIMARY KEY prefix; -``` - -```sql -INSERT INTO my_ip_addresses VALUES - ('202.79.32.0/20', 17501, 'NP'), - ('2620:0:870::/48', 3856, 'US'), - ('2a02:6b8:1::/48', 13238, 'RU'), - ('2001:db8::/32', 65536, 'ZZ') -; -``` - -Let's define an `ip_trie` dictionary for this table. The `ip_trie` layout requires a composite key: - -``` xml - - - - prefix - String - - - - asn - UInt32 - - - - cca2 - String - ?? - - ... - - - - - - true - - -``` - -or - -``` sql -CREATE DICTIONARY my_ip_trie_dictionary ( - prefix String, - asn UInt32, - cca2 String DEFAULT '??' -) -PRIMARY KEY prefix -SOURCE(CLICKHOUSE(TABLE 'my_ip_addresses')) -LAYOUT(IP_TRIE) -LIFETIME(3600); -``` - -The key must have only one `String` type attribute that contains an allowed IP prefix. Other types are not supported yet. - -For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys. The syntax is: - -``` sql -dictGetT('dict_name', 'attr_name', tuple(ip)) -``` - -The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6. For example: - -``` sql -select dictGet('my_ip_trie_dictionary', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) -``` - -Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned. - -Data must completely fit into RAM. - -## Related Content - -- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md deleted file mode 100644 index 8e9dbd392aa..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime -sidebar_position: 42 -sidebar_label: Dictionary Updates ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -# Dictionary Updates - -ClickHouse periodically updates the dictionaries. The update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries are defined in the `lifetime` tag in seconds. - -Dictionary updates (other than loading for first use) do not block queries. During updates, the old version of a dictionary is used. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries. - -Example of settings: - - - -``` xml - - ... - 300 - ... - -``` - -or - -``` sql -CREATE DICTIONARY (...) -... -LIFETIME(300) -... -``` - -Setting `0` (`LIFETIME(0)`) prevents dictionaries from updating. - -You can set a time interval for updates, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when updating on a large number of servers. - -Example of settings: - -``` xml - - ... - - 300 - 360 - - ... 
- -``` - -or - -``` sql -LIFETIME(MIN 300 MAX 360) -``` - -If `0` and `0`, ClickHouse does not reload the dictionary by timeout. -In this case, ClickHouse can reload the dictionary earlier if the dictionary configuration file was changed or the `SYSTEM RELOAD DICTIONARY` command was executed. - -When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): - -- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated. -- For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`). -- Dictionaries from other sources are updated every time by default. - -For other sources (ODBC, PostgreSQL, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps: - -- The dictionary table must have a field that always changes when the source data is updated. -- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `` field in the settings for the [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md). - -Example of settings: - -``` xml - - ... - - ... - SELECT update_time FROM dictionary_source where id = 1 - - ... - -``` - -or - -``` sql -... -SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) -... -``` - -For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronious and asynchronious updates are supported. - -It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after the previous update. If `update_field` is specified as part of the dictionary source configuration, value of the previous update time in seconds will be added to the data request. Depends on source type (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, or ODBC) different logic will be applied to `update_field` before request data from an external source. - -- If the source is HTTP then `update_field` will be added as a query parameter with the last update time as the parameter value. -- If the source is Executable then `update_field` will be added as an executable script argument with the last update time as the argument value. -- If the source is ClickHouse, MySQL, PostgreSQL, ODBC there will be an additional part of `WHERE`, where `update_field` is compared as greater or equal with the last update time. - - Per default, this `WHERE`-condition is checked at the highest level of the SQL-Query. Alternatively, the condition can be checked in any other `WHERE`-clause within the query using the `{condition}`-keyword. Example: - ```sql - ... - SOURCE(CLICKHOUSE(... - update_field 'added_time' - QUERY ' - SELECT my_arr.1 AS x, my_arr.2 AS y, creation_time - FROM ( - SELECT arrayZip(x_arr, y_arr) AS my_arr, creation_time - FROM dictionary_source - WHERE {condition} - )' - )) - ... - ``` - -If `update_field` option is set, additional option `update_lag` can be set. 
Value of `update_lag` option is subtracted from previous update time before request updated data. - -Example of settings: - -``` xml - - ... - - ... - added_time - 15 - - ... - -``` - -or - -``` sql -... -SOURCE(CLICKHOUSE(... update_field 'added_time' update_lag 15)) -... -``` - -## Related Content - -- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md deleted file mode 100644 index 8ef19a181e7..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon -sidebar_position: 46 -sidebar_label: Polygon Dictionaries With Grids -title: "Polygon dictionaries" ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -Polygon dictionaries allow you to efficiently search for the polygon containing specified points. -For example: defining a city area by geographical coordinates. - -Example of a polygon dictionary configuration: - - - -``` xml - - - - - key - Array(Array(Array(Array(Float64)))) - - - - - name - String - - - - - value - UInt64 - 0 - - - - - - 1 - - - - ... - -``` - -The corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md#create-dictionary-query): -``` sql -CREATE DICTIONARY polygon_dict_name ( - key Array(Array(Array(Array(Float64)))), - name String, - value UInt64 -) -PRIMARY KEY key -LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1)) -... -``` - -When configuring the polygon dictionary, the key must have one of two types: - -- A simple polygon. It is an array of points. -- MultiPolygon. It is an array of polygons. Each polygon is a two-dimensional array of points. The first element of this array is the outer boundary of the polygon, and subsequent elements specify areas to be excluded from it. - -Points can be specified as an array or a tuple of their coordinates. In the current implementation, only two-dimensional points are supported. - -The user can [upload their own data](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) in all formats supported by ClickHouse. - -There are 3 types of [in-memory storage](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) available: - -- `POLYGON_SIMPLE`. This is a naive implementation, where a linear pass through all polygons is made for each query, and membership is checked for each one without using additional indexes. - -- `POLYGON_INDEX_EACH`. A separate index is built for each polygon, which allows you to quickly check whether it belongs in most cases (optimized for geographical regions). -Also, a grid is superimposed on the area under consideration, which significantly narrows the number of polygons under consideration. -The grid is created by recursively dividing the cell into 16 equal parts and is configured with two parameters. -The division stops when the recursion depth reaches `MAX_DEPTH` or when the cell crosses no more than `MIN_INTERSECTIONS` polygons. -To respond to the query, there is a corresponding cell, and the index for the polygons stored in it is accessed alternately. - -- `POLYGON_INDEX_CELL`. 
This placement also creates the grid described above. The same options are available. For each sheet cell, an index is built on all pieces of polygons that fall into it, which allows you to quickly respond to a request. - -- `POLYGON`. Synonym to `POLYGON_INDEX_CELL`. - -Dictionary queries are carried out using standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with dictionaries. -An important difference is that here the keys will be the points for which you want to find the polygon containing them. - -**Example** - -Example of working with the dictionary defined above: - -``` sql -CREATE TABLE points ( - x Float64, - y Float64 -) -... -SELECT tuple(x, y) AS key, dictGet(dict_name, 'name', key), dictGet(dict_name, 'value', key) FROM points ORDER BY x, y; -``` - -As a result of executing the last command for each point in the 'points' table, a minimum area polygon containing this point will be found, and the requested attributes will be output. - -**Example** - -You can read columns from polygon dictionaries via SELECT query, just turn on the `store_polygon_key_column = 1` in the dictionary configuration or corresponding DDL-query. - -Query: - -``` sql -CREATE TABLE polygons_test_table -( - key Array(Array(Array(Tuple(Float64, Float64)))), - name String -) ENGINE = TinyLog; - -INSERT INTO polygons_test_table VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Value'); - -CREATE DICTIONARY polygons_test_dictionary -( - key Array(Array(Array(Tuple(Float64, Float64)))), - name String -) -PRIMARY KEY key -SOURCE(CLICKHOUSE(TABLE 'polygons_test_table')) -LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1)) -LIFETIME(0); - -SELECT * FROM polygons_test_dictionary; -``` - -Result: - -``` text -┌─key─────────────────────────────┬─name──┐ -│ [[[(3,1),(0,1),(0,-1),(3,-1)]]] │ Value │ -└─────────────────────────────────┴───────┘ -``` - -## Related Content - -- [Exploring massive, real-world data sets: 100+ Years of Weather Records in ClickHouse](https://clickhouse.com/blog/real-world-data-noaa-climate-data) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md deleted file mode 100644 index 897945a6d9d..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ /dev/null @@ -1,847 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources -sidebar_position: 43 -sidebar_label: Dictionary Sources ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -# Dictionary Sources - - - -A dictionary can be connected to ClickHouse from many different sources. - -If the dictionary is configured using an xml-file, the configuration looks like this: - -``` xml - - - ... - - - - - - ... - - ... - -``` - -In case of [DDL-query](../../../sql-reference/statements/create/dictionary.md), the configuration described above will look like: - -``` sql -CREATE DICTIONARY dict_name (...) -... -SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration -... -``` - -The source is configured in the `source` section. 
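For example, a complete DDL definition with its source spelled out might look like the following. This is a minimal sketch; the dictionary, table, and column names are hypothetical:

```sql
-- A flat dictionary fed from a local ClickHouse table; the SOURCE clause is
-- the part this page describes in detail.
CREATE DICTIONARY country_names
(
    id UInt64,
    name String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'countries'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 600);
```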
- -For source types [Local file](#dicts-external_dicts_dict_sources-local_file), [Executable file](#dicts-external_dicts_dict_sources-executable), [HTTP(s)](#dicts-external_dicts_dict_sources-http), [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) -optional settings are available: - -``` xml - - - /opt/dictionaries/os.tsv - TabSeparated - - - 0 - - -``` - -or - -``` sql -SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) -SETTINGS(format_csv_allow_single_quotes = 0) -``` - -Types of sources (`source_type`): - -- [Local file](#dicts-external_dicts_dict_sources-local_file) -- [Executable File](#dicts-external_dicts_dict_sources-executable) -- [Executable Pool](#dicts-external_dicts_dict_sources-executable_pool) -- [HTTP(s)](#dicts-external_dicts_dict_sources-http) -- DBMS - - [ODBC](#odbc) - - [MySQL](#mysql) - - [ClickHouse](#clickhouse) - - [MongoDB](#mongodb) - - [Redis](#redis) - - [Cassandra](#cassandra) - - [PostgreSQL](#postgresql) - -## Local File - -Example of settings: - -``` xml - - - /opt/dictionaries/os.tsv - TabSeparated - - -``` - -or - -``` sql -SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) -``` - -Setting fields: - -- `path` – The absolute path to the file. -- `format` – The file format. All the formats described in [Formats](../../../interfaces/formats.md#formats) are supported. - -When a dictionary with source `FILE` is created via DDL command (`CREATE DICTIONARY ...`), the source file needs to be located in the `user_files` directory to prevent DB users from accessing arbitrary files on the ClickHouse node. - -**See Also** - -- [Dictionary function](../../../sql-reference/table-functions/dictionary.md#dictionary-function) - -## Executable File - -Working with executable files depends on [how the dictionary is stored in memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data. - -Example of settings: - -``` xml - - - cat /opt/dictionaries/os.tsv - TabSeparated - false - - -``` - -Setting fields: - -- `command` — The absolute path to the executable file, or the file name (if the command's directory is in the `PATH`). -- `format` — The file format. All the formats described in [Formats](../../../interfaces/formats.md#formats) are supported. -- `command_termination_timeout` — The executable script should contain a main read-write loop. After the dictionary is destroyed, the pipe is closed, and the executable file will have `command_termination_timeout` seconds to shutdown before ClickHouse will send a SIGTERM signal to the child process. `command_termination_timeout` is specified in seconds. Default value is 10. Optional parameter. -- `command_read_timeout` - Timeout for reading data from command stdout in milliseconds. Default value 10000. Optional parameter. -- `command_write_timeout` - Timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter. -- `implicit_key` — The executable source file can return only values, and the correspondence to the requested keys is determined implicitly — by the order of rows in the result. Default value is false. 
-- `execute_direct` - If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder specified by [user_scripts_path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_scripts_path). Additional script arguments can be specified using a whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c`. Default value is `0`. Optional parameter. -- `send_chunk_header` - controls whether to send row count before sending a chunk of data to process. Optional. Default value is `false`. - -That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute arbitrary binaries on the ClickHouse node. - -## Executable Pool - -Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](external-dicts-dict-layout.md#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, or `complex_key_direct` layouts. - -Executable pool will spawn a pool of processes with the specified command and keep them running until they exit. The program should read data from STDIN while it is available and output the result to STDOUT. It can wait for the next block of data on STDIN. ClickHouse will not close STDIN after processing a block of data, but will pipe another chunk of data when needed. The executable script should be ready for this way of data processing — it should poll STDIN and flush data to STDOUT early. - -Example of settings: - -``` xml - - - while read key; do printf "$key\tData for key $key\n"; done - TabSeparated - 10 - 10 - false - - -``` - -Setting fields: - -- `command` — The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). -- `format` — The file format. All the formats described in “[Formats](../../../interfaces/formats.md#formats)” are supported. -- `pool_size` — Size of pool. If 0 is specified as `pool_size` then there is no pool size restrictions. Default value is `16`. -- `command_termination_timeout` — executable script should contain main read-write loop. After dictionary is destroyed, pipe is closed, and executable file will have `command_termination_timeout` seconds to shutdown, before ClickHouse will send SIGTERM signal to child process. Specified in seconds. Default value is 10. Optional parameter. -- `max_command_execution_time` — Maximum executable script command execution time for processing block of data. Specified in seconds. Default value is 10. Optional parameter. -- `command_read_timeout` - timeout for reading data from command stdout in milliseconds. Default value 10000. Optional parameter. -- `command_write_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter. -- `implicit_key` — The executable source file can return only values, and the correspondence to the requested keys is determined implicitly — by the order of rows in the result. Default value is false. Optional parameter. 
-- `execute_direct` - If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder specified by [user_scripts_path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_scripts_path). Additional script arguments can be specified using whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c`. Default value is `1`. Optional parameter. -- `send_chunk_header` - controls whether to send row count before sending a chunk of data to process. Optional. Default value is `false`. - -That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node. - -## Http(s) - -Working with an HTTP(s) server depends on [how the dictionary is stored in memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. - -Example of settings: - -``` xml - - - http://[::1]/os.tsv - TabSeparated - - user - password - - -
- API-KEY - key -
-
-
- -``` - -or - -``` sql -SOURCE(HTTP( - url 'http://[::1]/os.tsv' - format 'TabSeparated' - credentials(user 'user' password 'password') - headers(header(name 'API-KEY' value 'key')) -)) -``` - -In order for ClickHouse to access an HTTPS resource, you must [configure openSSL](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl) in the server configuration. - -Setting fields: - -- `url` – The source URL. -- `format` – The file format. All the formats described in “[Formats](../../../interfaces/formats.md#formats)” are supported. -- `credentials` – Basic HTTP authentication. Optional parameter. -- `user` – Username required for the authentication. -- `password` – Password required for the authentication. -- `headers` – All custom HTTP headers entries used for the HTTP request. Optional parameter. -- `header` – Single HTTP header entry. -- `name` – Identifiant name used for the header send on the request. -- `value` – Value set for a specific identifiant name. - -When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`) remote hosts for HTTP dictionaries are checked against the contents of `remote_url_allow_hosts` section from config to prevent database users to access arbitrary HTTP server. - -### Known Vulnerability of the ODBC Dictionary Functionality - -:::note -When connecting to the database through the ODBC driver connection parameter `Servername` can be substituted. In this case values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised. -::: - -**Example of insecure use** - -Let’s configure unixODBC for PostgreSQL. Content of `/etc/odbc.ini`: - -``` text -[gregtest] -Driver = /usr/lib/psqlodbca.so -Servername = localhost -PORT = 5432 -DATABASE = test_db -#OPTION = 3 -USERNAME = test -PASSWORD = test -``` - -If you then make a query such as - -``` sql -SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); -``` - -ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`. - -### Example of Connecting Postgresql - -Ubuntu OS. - -Installing unixODBC and the ODBC driver for PostgreSQL: - -``` bash -$ sudo apt-get install -y unixodbc odbcinst odbc-postgresql -``` - -Configuring `/etc/odbc.ini` (or `~/.odbc.ini` if you signed in under a user that runs ClickHouse): - -``` text - [DEFAULT] - Driver = myconnection - - [myconnection] - Description = PostgreSQL connection to my_db - Driver = PostgreSQL Unicode - Database = my_db - Servername = 127.0.0.1 - UserName = username - Password = password - Port = 5432 - Protocol = 9.3 - ReadOnly = No - RowVersioning = No - ShowSystemTables = No - ConnSettings = -``` - -The dictionary configuration in ClickHouse: - -``` xml - - - table_name - - - - - DSN=myconnection - postgresql_table
-
- - - 300 - 360 - - - - - - - id - - - some_column - UInt64 - 0 - - -
-
-``` - -or - -``` sql -CREATE DICTIONARY table_name ( - id UInt64, - some_column UInt64 DEFAULT 0 -) -PRIMARY KEY id -SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table')) -LAYOUT(HASHED()) -LIFETIME(MIN 300 MAX 360) -``` - -You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. - -### Example of Connecting MS SQL Server - -Ubuntu OS. - -Installing the ODBC driver for connecting to MS SQL: - -``` bash -$ sudo apt-get install tdsodbc freetds-bin sqsh -``` - -Configuring the driver: - -```bash - $ cat /etc/freetds/freetds.conf - ... - - [MSSQL] - host = 192.168.56.101 - port = 1433 - tds version = 7.0 - client charset = UTF-8 - - # test TDS connection - $ sqsh -S MSSQL -D database -U user -P password - - - $ cat /etc/odbcinst.ini - - [FreeTDS] - Description = FreeTDS - Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so - Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so - FileUsage = 1 - UsageCount = 5 - - $ cat /etc/odbc.ini - # $ cat ~/.odbc.ini # if you signed in under a user that runs ClickHouse - - [MSSQL] - Description = FreeTDS - Driver = FreeTDS - Servername = MSSQL - Database = test - UID = test - PWD = test - Port = 1433 - - - # (optional) test ODBC connection (to use isql-tool install the [unixodbc](https://packages.debian.org/sid/unixodbc)-package) - $ isql -v MSSQL "user" "password" -``` - -Remarks: -- to determine the earliest TDS version that is supported by a particular SQL Server version, refer to the product documentation or look at [MS-TDS Product Behavior](https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-tds/135d0ebe-5c4c-4a94-99bf-1811eccb9f4a) - -Configuring the dictionary in ClickHouse: - -``` xml - - - test - - - dict
- DSN=MSSQL;UID=test;PWD=test -
- - - - 300 - 360 - - - - - - - - - k - - - s - String - - - -
-
-``` - -or - -``` sql -CREATE DICTIONARY test ( - k UInt64, - s String DEFAULT '' -) -PRIMARY KEY k -SOURCE(ODBC(table 'dict' connection_string 'DSN=MSSQL;UID=test;PWD=test')) -LAYOUT(FLAT()) -LIFETIME(MIN 300 MAX 360) -``` - -## DBMS - -### ODBC - -You can use this method to connect any database that has an ODBC driver. - -Example of settings: - -``` xml - - - DatabaseName - ShemaName.TableName
- DSN=some_parameters - SQL_QUERY - SELECT id, value_1, value_2 FROM SchemaName.TableName
- -``` - -or - -``` sql -SOURCE(ODBC( - db 'DatabaseName' - table 'SchemaName.TableName' - connection_string 'DSN=some_parameters' - invalidate_query 'SQL_QUERY' - query 'SELECT id, value_1, value_2 FROM db_name.table_name' -)) -``` - -Setting fields: - -- `db` – Name of the database. Omit it if the database name is set in the `` parameters. -- `table` – Name of the table and schema if exists. -- `connection_string` – Connection string. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -- `query` – The custom query. Optional parameter. - -:::note -The `table` and `query` fields cannot be used together. And either one of the `table` or `query` fields must be declared. -::: - -ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database. - -If you have a problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) item. - -### Mysql - -Example of settings: - -``` xml - - - 3306 - clickhouse - qwerty - - example01-1 - 1 - - - example01-2 - 1 - - db_name - table_name
- id=10 - SQL_QUERY - true - SELECT id, value_1, value_2 FROM db_name.table_name -
- -``` - -or - -``` sql -SOURCE(MYSQL( - port 3306 - user 'clickhouse' - password 'qwerty' - replica(host 'example01-1' priority 1) - replica(host 'example01-2' priority 1) - db 'db_name' - table 'table_name' - where 'id=10' - invalidate_query 'SQL_QUERY' - fail_on_connection_loss 'true' - query 'SELECT id, value_1, value_2 FROM db_name.table_name' -)) -``` - -Setting fields: - -- `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). - -- `user` – Name of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). - -- `password` – Password of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). - -- `replica` – Section of replica configurations. There can be multiple sections. - - - `replica/host` – The MySQL host. - - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. - -- `db` – Name of the database. - -- `table` – Name of the table. - -- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in MySQL, for example, `id > 10 AND id < 20`. Optional parameter. - -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). - -- `fail_on_connection_loss` – The configuration parameter that controls behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. - -- `query` – The custom query. Optional parameter. - -:::note -The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. -::: - -:::note -There is no explicit parameter `secure`. When establishing an SSL-connection security is mandatory. -::: - -MySQL can be connected to on a local host via sockets. To do this, set `host` and `socket`. - -Example of settings: - -``` xml - - - localhost - /path/to/socket/file.sock - clickhouse - qwerty - db_name - table_name
- id=10 - SQL_QUERY - true - SELECT id, value_1, value_2 FROM db_name.table_name -
- -``` - -or - -``` sql -SOURCE(MYSQL( - host 'localhost' - socket '/path/to/socket/file.sock' - user 'clickhouse' - password 'qwerty' - db 'db_name' - table 'table_name' - where 'id=10' - invalidate_query 'SQL_QUERY' - fail_on_connection_loss 'true' - query 'SELECT id, value_1, value_2 FROM db_name.table_name' -)) -``` - -### ClickHouse - -Example of settings: - -``` xml - - - example01-01-1 - 9000 - default - - default - ids
- id=10 - 1 - SELECT id, value_1, value_2 FROM default.ids -
- -``` - -or - -``` sql -SOURCE(CLICKHOUSE( - host 'example01-01-1' - port 9000 - user 'default' - password '' - db 'default' - table 'ids' - where 'id=10' - secure 1 - query 'SELECT id, value_1, value_2 FROM default.ids' -)); -``` - -Setting fields: - -- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distributed](../../../engines/table-engines/special/distributed.md) table and enter it in subsequent configurations. -- `port` – The port on the ClickHouse server. -- `user` – Name of the ClickHouse user. -- `password` – Password of the ClickHouse user. -- `db` – Name of the database. -- `table` – Name of the table. -- `where` – The selection criteria. May be omitted. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -- `secure` - Use ssl for connection. -- `query` – The custom query. Optional parameter. - -:::note -The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. -::: - -### Mongodb - -Example of settings: - -``` xml - - - localhost - 27017 - - - test - dictionary_source - - -``` - -or - -``` sql -SOURCE(MONGODB( - host 'localhost' - port 27017 - user '' - password '' - db 'test' - collection 'dictionary_source' -)) -``` - -Setting fields: - -- `host` – The MongoDB host. -- `port` – The port on the MongoDB server. -- `user` – Name of the MongoDB user. -- `password` – Password of the MongoDB user. -- `db` – Name of the database. -- `collection` – Name of the collection. - -### Redis - -Example of settings: - -``` xml - - - localhost - 6379 - simple - 0 - - -``` - -or - -``` sql -SOURCE(REDIS( - host 'localhost' - port 6379 - storage_type 'simple' - db_index 0 -)) -``` - -Setting fields: - -- `host` – The Redis host. -- `port` – The port on the Redis server. -- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`. -- `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. - -### Cassandra - -Example of settings: - -``` xml - - - localhost - 9042 - username - qwerty123 - database_name - table_name - 1 - 1 - One - "SomeColumn" = 42 - 8 - SELECT id, value_1, value_2 FROM database_name.table_name - - -``` - -Setting fields: - -- `host` – The Cassandra host or comma-separated list of hosts. -- `port` – The port on the Cassandra servers. If not specified, default port 9042 is used. -- `user` – Name of the Cassandra user. -- `password` – Password of the Cassandra user. -- `keyspace` – Name of the keyspace (database). -- `column_family` – Name of the column family (table). -- `allow_filering` – Flag to allow or not potentially expensive conditions on clustering key columns. Default value is 1. -- `partition_key_prefix` – Number of partition key columns in primary key of the Cassandra table. Required for compose key dictionaries. Order of key columns in the dictionary definition must be the same as in Cassandra. Default value is 1 (the first key column is a partition key and other key columns are clustering key). 
-- `consistency` – Consistency level. Possible values: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default value is `One`. -- `where` – Optional selection criteria. -- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in composite key dictionaries. -- `query` – The custom query. Optional parameter. - -:::note -The `column_family` or `where` fields cannot be used together with the `query` field. And either one of the `column_family` or `query` fields must be declared. -::: - -### PostgreSQL - -Example of settings: - -``` xml - - - 5432 - clickhouse - qwerty - db_name - table_name
- id=10 - SQL_QUERY - SELECT id, value_1, value_2 FROM db_name.table_name -
- -``` - -or - -``` sql -SOURCE(POSTGRESQL( - port 5432 - host 'postgresql-hostname' - user 'postgres_user' - password 'postgres_password' - db 'db_name' - table 'table_name' - replica(host 'example01-1' port 5432 priority 1) - replica(host 'example01-2' port 5432 priority 2) - where 'id=10' - invalidate_query 'SQL_QUERY' - query 'SELECT id, value_1, value_2 FROM db_name.table_name' -)) -``` - -Setting fields: - -- `host` – The host on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside ``). -- `port` – The port on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside ``). -- `user` – Name of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). -- `password` – Password of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). -- `replica` – Section of replica configurations. There can be multiple sections: - - `replica/host` – The PostgreSQL host. - - `replica/port` – The PostgreSQL port. - - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. -- `db` – Name of the database. -- `table` – Name of the table. -- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL. For example, `id > 10 AND id < 20`. Optional parameter. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -- `query` – The custom query. Optional parameter. - -:::note -The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. -::: - -## Null - -A special source that can be used to create dummy (empty) dictionaries. Such dictionaries can useful for tests or with setups with separated data and query nodes at nodes with Distributed tables. - -``` sql -CREATE DICTIONARY null_dict ( - id UInt64, - val UInt8, - default_val UInt8 DEFAULT 123, - nullable_val Nullable(UInt8) -) -PRIMARY KEY id -SOURCE(NULL()) -LAYOUT(FLAT()) -LIFETIME(0); -``` - -## Related Content - -- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md deleted file mode 100644 index 8271a342941..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure -sidebar_position: 44 -sidebar_label: Dictionary Key and Fields ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -# Dictionary Key and Fields - - - -The `structure` clause describes the dictionary key and fields available for queries. - -XML description: - -``` xml - - - - Id - - - - - - - ... - - - -``` - -Attributes are described in the elements: - -- `` — [Key column](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key). 
-- `` — [Data column](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes). There can be a multiple number of attributes. - -DDL query: - -``` sql -CREATE DICTIONARY dict_name ( - Id UInt64, - -- attributes -) -PRIMARY KEY Id -... -``` - -Attributes are described in the query body: - -- `PRIMARY KEY` — [Key column](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) -- `AttrName AttrType` — [Data column](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes). There can be a multiple number of attributes. - -## Key - -ClickHouse supports the following types of keys: - -- Numeric key. `UInt64`. Defined in the `` tag or using `PRIMARY KEY` keyword. -- Composite key. Set of values of different types. Defined in the tag `` or `PRIMARY KEY` keyword. - -An xml structure can contain either `` or ``. DDL-query must contain single `PRIMARY KEY`. - -:::warning -You must not describe key as an attribute. -::: - -### Numeric Key - -Type: `UInt64`. - -Configuration example: - -``` xml - - Id - -``` - -Configuration fields: - -- `name` – The name of the column with keys. - -For DDL-query: - -``` sql -CREATE DICTIONARY ( - Id UInt64, - ... -) -PRIMARY KEY Id -... -``` - -- `PRIMARY KEY` – The name of the column with keys. - -### Composite Key - -The key can be a `tuple` from any types of fields. The [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) in this case must be `complex_key_hashed` or `complex_key_cache`. - -:::tip -A composite key can consist of a single element. This makes it possible to use a string as the key, for instance. -::: - -The key structure is set in the element ``. Key fields are specified in the same format as the dictionary [attributes](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Example: - -``` xml - - - - field1 - String - - - field2 - UInt32 - - ... - -... -``` - -or - -``` sql -CREATE DICTIONARY ( - field1 String, - field2 String - ... -) -PRIMARY KEY field1, field2 -... -``` - -For a query to the `dictGet*` function, a tuple is passed as the key. Example: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`. - -## Attributes - -Configuration example: - -``` xml - - ... - - Name - ClickHouseDataType - - rand64() - true - true - true - - -``` - -or - -``` sql -CREATE DICTIONARY somename ( - Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID -) -``` - -Configuration fields: - -| Tag | Description | Required | -|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| -| `name` | Column name. 
| Yes | -| `type` | ClickHouse data type: [UInt8](../../../sql-reference/data-types/int-uint.md), [UInt16](../../../sql-reference/data-types/int-uint.md), [UInt32](../../../sql-reference/data-types/int-uint.md), [UInt64](../../../sql-reference/data-types/int-uint.md), [Int8](../../../sql-reference/data-types/int-uint.md), [Int16](../../../sql-reference/data-types/int-uint.md), [Int32](../../../sql-reference/data-types/int-uint.md), [Int64](../../../sql-reference/data-types/int-uint.md), [Float32](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md), [UUID](../../../sql-reference/data-types/uuid.md), [Decimal32](../../../sql-reference/data-types/decimal.md), [Decimal64](../../../sql-reference/data-types/decimal.md), [Decimal128](../../../sql-reference/data-types/decimal.md), [Decimal256](../../../sql-reference/data-types/decimal.md),[Date](../../../sql-reference/data-types/date), [Date32](../../../sql-reference/data-types/date32.md), [DateTime](../../../sql-reference/data-types/datetime.md), [DateTime64](../../../sql-reference/data-types/datetime64.md), [String](../../../sql-reference/data-types/string.md), [Array](../../../sql-reference/data-types/array.md).
ClickHouse tries to cast the value from the dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../../sql-reference/data-types/nullable.md) is currently supported for [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache) dictionaries. In [IPTrie](external-dicts-dict-layout.md#ip-trie) dictionaries `Nullable` types are not supported. | Yes | -| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. The [NULL](../../syntax.md#null-literal) value can be used only for the `Nullable` types (see the type description in the previous line). | Yes | | `expression` | [Expression](../../../sql-reference/syntax.md#syntax-expressions) that ClickHouse executes on the value.
The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

Default value: no expression. | No | -| `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md).

Default value: `false`. | No | -| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).
If `true`, ClickHouse can automatically place requests to dictionaries with injection after the `GROUP BY` clause. Usually this significantly reduces the number of such requests.

Default value: `false`. | No | -| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.

Default value: `false`. | No | - -**See Also** - -- [Functions for working with dictionaries](../../../sql-reference/functions/ext-dict-functions.md). - -## Related Content - -- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse) \ No newline at end of file diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md deleted file mode 100644 index a923511ca5e..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict -sidebar_position: 40 -sidebar_label: Configuring a Dictionary ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -# Configuring a Dictionary - - - -If dictionary is configured using xml file, than dictionary configuration has the following structure: - -``` xml - - dict_name - - - - - - - - - - - - - - - - - -``` - -Corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md) has the following structure: - -``` sql -CREATE DICTIONARY dict_name -( - ... -- attributes -) -PRIMARY KEY ... -- complex or single key configuration -SOURCE(...) -- Source configuration -LAYOUT(...) -- Memory layout configuration -LIFETIME(...) -- Lifetime of dictionary in memory -``` - -- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`. -- [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) — Source of the dictionary. -- [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) — Dictionary layout in memory. -- [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. -- [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) — Frequency of dictionary updates. - -## Related Content - -- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse) \ No newline at end of file diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md deleted file mode 100644 index 8621c68b428..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts -sidebar_position: 39 -sidebar_label: General Description ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -# Dictionaries - -:::tip Tutorial -If you are getting started with Dictionaries in ClickHouse we have a tutorial that covers that topic. Take a look [here](/docs/en/tutorial.md). -::: - -You can add your own dictionaries from various data sources. The source for a dictionary can be a ClickHouse table, a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)”. 
- -ClickHouse: - -- Fully or partially stores dictionaries in RAM. -- Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically. -- Allows creating dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md). - -The configuration of dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter. - -Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries_lazy_load](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) setting. - -The [dictionaries](../../../operations/system-tables/dictionaries.md#system_tables-dictionaries) system table contains information about dictionaries configured at server. For each dictionary you can find there: - -- Status of the dictionary. -- Configuration parameters. -- Metrics like amount of RAM allocated for the dictionary or a number of queries since the dictionary was successfully loaded. - - - -## Creating a dictionary with a DDL query - -Dictionaries can be created with [DDL queries](../../../sql-reference/statements/create/dictionary.md), and this is the recommended method because with DDL created dictionaries: -- No additional records are added to server configuration files -- The dictionaries can be worked with as first-class entities, like tables or views -- Data can be read directly, using familiar SELECT rather than dictionary table functions -- The dictionaries can be easily renamed - -## Creating a dictionary with a configuration file - -:::note -Creating a dictionary with a configuration file is not applicable to ClickHouse Cloud. Please use DDL (see above), and create your dictionary as user `default`. -::: - -The dictionary configuration file has the following format: - -``` xml - - An optional element with any content. Ignored by the ClickHouse server. - - - /etc/metrika.xml - - - - - - - - -``` - -You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file. - - -:::note -You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to dictionaries. 
-::: - -## See Also - -- [Configuring a Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) -- [Storing Dictionaries in Memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) -- [Dictionary Updates](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) -- [Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) -- [Dictionary Key and Fields](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md) -- [Functions for Working with Dictionaries](../../../sql-reference/functions/ext-dict-functions.md) - -## Related Content - -- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/regexp-tree.md b/docs/en/sql-reference/dictionaries/external-dictionaries/regexp-tree.md deleted file mode 100644 index 5ad15b11d07..00000000000 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/regexp-tree.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/external-dictionaries/regexp-tree -sidebar_position: 47 -sidebar_label: RegExp Tree Dictionary -title: "RegExp Tree Dictionary" ---- -import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; - -Regexp Tree dictionary stores multiple trees of regular expressions with attributions. Users can retrieve strings in the dictionary. If a string matches the root of the regexp tree, we will collect the corresponding attributes of the matched root and continue to walk the children. If any of the children matches the string, we will collect attributes and rewrite the old ones if conflicts occur, then continue the traverse until we reach leaf nodes. - -Example of the ddl query for creating Regexp Tree dictionary: - - - -```sql -create dictionary regexp_dict -( - regexp String, - name String, - version String -) -PRIMARY KEY(regexp) -SOURCE(YAMLRegExpTree(PATH '/var/lib/clickhouse/user_files/regexp_tree.yaml')) -LAYOUT(regexp_tree) -... -``` - -We only allow `YAMLRegExpTree` to work with regexp_tree dicitionary layout. If you want to use other sources, please set variable `regexp_dict_allow_other_sources` true. - -**Source** - -We introduce a type of source called `YAMLRegExpTree` representing the structure of Regexp Tree dictionary. An Example of a valid yaml config is like: - -```xml -- regexp: 'Linux/(\d+[\.\d]*).+tlinux' - name: 'TencentOS' - version: '\1' - -- regexp: '\d+/tclwebkit(?:\d+[\.\d]*)' - name: 'Andriod' - versions: - - regexp: '33/tclwebkit' - version: '13' - - regexp: '3[12]/tclwebkit' - version: '12' - - regexp: '30/tclwebkit' - version: '11' - - regexp: '29/tclwebkit' - version: '10' -``` - -The key `regexp` represents the regular expression of a tree node. The name of key is same as the dictionary key. The `name` and `version` is user-defined attributions in the dicitionary. The `versions` (which can be any name that not appear in attributions or the key) indicates the children nodes of this tree. - -**Back Reference** - -The value of an attribution could contain a back reference which refers to a capture group of the matched regular expression. Reference number ranges from 1 to 9 and writes as `$1` or `\1`. - -During the query execution, the back reference in the value will be replaced by the matched capture group. 
- -**Query** - -Due to the specialty of Regexp Tree dictionary, we only allow functions `dictGet`, `dictGetOrDefault` and `dictGetOrNull` work with it. - -Example: - -```sql -SELECT dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024'); -``` - -Result: - -``` -┌─dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024')─┐ -│ ('Andriod','12') │ -└─────────────────────────────────────────────────────────────────┘ -``` diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index 9e6eed47d4a..2185e2b31c1 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -1,9 +1,12 @@ --- -slug: /en/sql-reference/dictionaries/ -sidebar_label: Dictionaries +slug: /en/sql-reference/dictionaries +sidebar_label: Defining Dictionaries sidebar_position: 35 --- +import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.md'; +import CloudDetails from '@site/docs/en/sql-reference/dictionaries/_snippet_dictionary_in_cloud.md'; + # Dictionaries A dictionary is a mapping (`key -> attributes`) that is convenient for various types of reference lists. @@ -12,5 +15,2349 @@ ClickHouse supports special functions for working with dictionaries that can be ClickHouse supports: -- [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md). -- [Embedded dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md). +- Dictionaries with a [set of functions](../../sql-reference/functions/ext-dict-functions.md). +- [Embedded dictionaries](#embedded_dictionaries) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md). + + +:::tip Tutorial +If you are getting started with Dictionaries in ClickHouse we have a tutorial that covers that topic. Take a look [here](/docs/en/tutorial.md). +::: + +You can add your own dictionaries from various data sources. The source for a dictionary can be a ClickHouse table, a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Dictionary Sources](#dictionary-sources)”. + +ClickHouse: + +- Fully or partially stores dictionaries in RAM. +- Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically. +- Allows creating dictionaries with xml files or [DDL queries](../../sql-reference/statements/create/dictionary.md). + +The configuration of dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter. + +Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries_lazy_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) setting. + +The [dictionaries](../../operations/system-tables/dictionaries.md#system_tables-dictionaries) system table contains information about dictionaries configured at server. For each dictionary you can find there: + +- Status of the dictionary. +- Configuration parameters. 
+- Metrics like amount of RAM allocated for the dictionary or a number of queries since the dictionary was successfully loaded. + + + +## Creating a dictionary with a DDL query {#creating-a-dictionary-with-a-ddl-query} + +Dictionaries can be created with [DDL queries](../../sql-reference/statements/create/dictionary.md), and this is the recommended method because with DDL created dictionaries: +- No additional records are added to server configuration files +- The dictionaries can be worked with as first-class entities, like tables or views +- Data can be read directly, using familiar SELECT rather than dictionary table functions +- The dictionaries can be easily renamed + +## Creating a dictionary with a configuration file + +:::note +Creating a dictionary with a configuration file is not applicable to ClickHouse Cloud. Please use DDL (see above), and create your dictionary as user `default`. +::: + +The dictionary configuration file has the following format: + +``` xml + + An optional element with any content. Ignored by the ClickHouse server. + + + /etc/metrika.xml + + + + + + + + +``` + +You can [configure](#configuring-a-dictionary) any number of dictionaries in the same file. + + +:::note +You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../sql-reference/functions/other-functions.md) function). This functionality is not related to dictionaries. +::: + +## Configuring a Dictionary {#configuring-a-dictionary} + + + +If dictionary is configured using xml file, than dictionary configuration has the following structure: + +``` xml + + dict_name + + + + + + + + + + + + + + + + + +``` + +Corresponding [DDL-query](../../sql-reference/statements/create/dictionary.md) has the following structure: + +``` sql +CREATE DICTIONARY dict_name +( + ... -- attributes +) +PRIMARY KEY ... -- complex or single key configuration +SOURCE(...) -- Source configuration +LAYOUT(...) -- Memory layout configuration +LIFETIME(...) -- Lifetime of dictionary in memory +``` + +## Storing Dictionaries in Memory {#storig-dictionaries-in-memory} + +There are a variety of ways to store dictionaries in memory. + +We recommend [flat](#flat), [hashed](#hashed) and [complex_key_hashed](#complex_key_hashed), which provide optimal processing speed. + +Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section [cache](#cache). + +There are several ways to improve dictionary performance: + +- Call the function for working with the dictionary after `GROUP BY`. +- Mark attributes to extract as injective. An attribute is called injective if different attribute values correspond to different keys. So when `GROUP BY` uses a function that fetches an attribute value by the key, this function is automatically taken out of `GROUP BY`. + +ClickHouse generates an exception for errors with dictionaries. Examples of errors: + +- The dictionary being accessed could not be loaded. +- Error querying a `cached` dictionary. + +You can view the list of dictionaries and their statuses in the [system.dictionaries](../../operations/system-tables/dictionaries.md) table. + + + +The configuration looks like this: + +``` xml + + + ... + + + + + + ... + + +``` + +Corresponding [DDL-query](../../sql-reference/statements/create/dictionary.md): + +``` sql +CREATE DICTIONARY (...) +... +LAYOUT(LAYOUT_TYPE(param value)) -- layout settings +... 
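+-- for example, a concrete layout clause (described in the sections below) is:
+-- LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000))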
+``` + +Dictionaries without word `complex-key*` in a layout have a key with [UInt64](../../sql-reference/data-types/int-uint.md) type, `complex-key*` dictionaries have a composite key (complex, with arbitrary types). + +[UInt64](../../sql-reference/data-types/int-uint.md) keys in XML dictionaries are defined with `` tag. + +Configuration example (column key_column has UInt64 type): +```xml +... + + + key_column + +... +``` + +Composite `complex` keys XML dictionaries are defined `` tag. + +Configuration example of a composite key (key has one element with [String](../../sql-reference/data-types/string.md) type): +```xml +... + + + + country_code + String + + +... +``` + +## Ways to Store Dictionaries in Memory + +- [flat](#flat) +- [hashed](#hashed) +- [sparse_hashed](#sparse_hashed) +- [complex_key_hashed](#complex_key_hashed) +- [complex_key_sparse_hashed](#complex_key_sparse_hashed) +- [hashed_array](#hashed_array) +- [complex_key_hashed_array](#complex_key_hashed_array) +- [range_hashed](#range_hashed) +- [complex_key_range_hashed](#complex_key_range_hashed) +- [cache](#cache) +- [complex_key_cache](#complex_key_cache) +- [ssd_cache](#ssd_cache) +- [complex_key_ssd_cache](#complex_key_ssd_cache) +- [direct](#direct) +- [complex_key_direct](#complex_key_direct) +- [ip_trie](#ip_trie) + +### flat + +The dictionary is completely stored in memory in the form of flat arrays. How much memory does the dictionary use? The amount is proportional to the size of the largest key (in space used). + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type and the value is limited to `max_array_size` (by default — 500,000). If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary. Dictionary flat arrays initial size is controlled by `initial_array_size` setting (by default — 1024). + +All types of sources are supported. When updating, data (from a file or from a table) is read in it entirety. + +This method provides the best performance among all available methods of storing the dictionary. + +Configuration example: + +``` xml + + + 50000 + 5000000 + + +``` + +or + +``` sql +LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000)) +``` + +### hashed + +The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers In practice, the number of keys can reach tens of millions of items. + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type. + +All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety. + +Configuration example: + +``` xml + + + +``` + +or + +``` sql +LAYOUT(HASHED()) +``` + +If `shards` greater then 1 (default is `1`) the dictionary will load data in parallel, useful if you have huge amount of elements in one dictionary. + +Configuration example: + +``` xml + + + 10 + + 10000 + + +``` + +or + +``` sql +LAYOUT(HASHED(SHARDS 10 [SHARD_LOAD_QUEUE_BACKLOG 10000])) +``` + +### sparse_hashed + +Similar to `hashed`, but uses less memory in favor more CPU usage. + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type. + +Configuration example: + +``` xml + + + +``` + +or + +``` sql +LAYOUT(SPARSE_HASHED()) +``` + +It is also possible to use `shards` for this type of dictionary, and again it is more important for `sparse_hashed` then for `hashed`, since `sparse_hashed` is slower. 
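+
+As a sketch of the sharded variant (following the same pattern as the `hashed` example above; the shard count here is arbitrary), the corresponding DDL clause would be:
+
+``` sql
+LAYOUT(SPARSE_HASHED(SHARDS 4 [SHARD_LOAD_QUEUE_BACKLOG 10000]))
+```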
+ +### complex_key_hashed + +This type of storage is for use with composite [keys](#dictionary-key-and-fields). Similar to `hashed`. + +Configuration example: + +``` xml + + + 1 + + + +``` + +or + +``` sql +LAYOUT(COMPLEX_KEY_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000])) +``` + +### complex_key_sparse_hashed + +This type of storage is for use with composite [keys](#dictionary-key-and-fields). Similar to [sparse_hashed](#sparse_hashed). + +Configuration example: + +``` xml + + + 1 + + +``` + +or + +``` sql +LAYOUT(COMPLEX_KEY_SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000])) +``` + +### hashed_array + +The dictionary is completely stored in memory. Each attribute is stored in an array. The key attribute is stored in the form of a hashed table where value is an index in the attributes array. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items. + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type. + +All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety. + +Configuration example: + +``` xml + + + + +``` + +or + +``` sql +LAYOUT(HASHED_ARRAY()) +``` + +### complex_key_hashed_array + +This type of storage is for use with composite [keys](#dictionary-key-and-fields). Similar to [hashed_array](#hashed_array). + +Configuration example: + +``` xml + + + +``` + +or + +``` sql +LAYOUT(COMPLEX_KEY_HASHED_ARRAY()) +``` + +### range_hashed {#range_hashed} + +The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values. + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type. +This storage method works the same way as hashed and allows using date/time (arbitrary numeric type) ranges in addition to the key. + +Example: The table contains discounts for each advertiser in the format: + +``` text +┌─advertiser_id─┬─discount_start_date─┬─discount_end_date─┬─amount─┐ +│ 123 │ 2015-01-16 │ 2015-01-31 │ 0.25 │ +│ 123 │ 2015-01-01 │ 2015-01-15 │ 0.15 │ +│ 456 │ 2015-01-01 │ 2015-01-15 │ 0.05 │ +└───────────────┴─────────────────────┴───────────────────┴────────┘ +``` + +To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](#dictionary-key-and-fields). These elements must contain elements `name` and `type` (if `type` is not specified, the default type will be used - Date). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others). + +:::warning +Values of `range_min` and `range_max` should fit in `Int64` type. +::: + +Example: + +``` xml + + + + min + + + + + advertiser_id + + + discount_start_date + Date + + + discount_end_date + Date + + ... 
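+    <!-- the DDL form below declares the same range columns with RANGE(MIN discount_start_date MAX discount_end_date) -->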
+``` + +or + +``` sql +CREATE DICTIONARY discounts_dict ( + advertiser_id UInt64, + discount_start_date Date, + discount_end_date Date, + amount Float64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'discounts')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED(range_lookup_strategy 'max')) +RANGE(MIN discount_start_date MAX discount_end_date) +``` + +To work with these dictionaries, you need to pass an additional argument to the `dictGet` function, for which a range is selected: + +``` sql +dictGet('dict_name', 'attr_name', id, date) +``` +Query example: + +``` sql +SELECT dictGet('discounts_dict', 'amount', 1, '2022-10-20'::Date); +``` + +This function returns the value for the specified `id`s and the date range that includes the passed date. + +Details of the algorithm: + +- If the `id` is not found or a range is not found for the `id`, it returns the default value of the attribute's type. +- If there are overlapping ranges and `range_lookup_strategy=min`, it returns a matching range with minimal `range_min`, if several ranges found, it returns a range with minimal `range_max`, if again several ranges found (several ranges had the same `range_min` and `range_max` it returns a random range of them. +- If there are overlapping ranges and `range_lookup_strategy=max`, it returns a matching range with maximal `range_min`, if several ranges found, it returns a range with maximal `range_max`, if again several ranges found (several ranges had the same `range_min` and `range_max` it returns a random range of them. +- If the `range_max` is `NULL`, the range is open. `NULL` is treated as maximal possible value. For the `range_min` `1970-01-01` or `0` (-MAX_INT) can be used as the open value. + +Configuration example: + +``` xml + + + ... + + + + + + + + Abcdef + + + StartTimeStamp + UInt64 + + + EndTimeStamp + UInt64 + + + XXXType + String + + + + + + +``` + +or + +``` sql +CREATE DICTIONARY somedict( + Abcdef UInt64, + StartTimeStamp UInt64, + EndTimeStamp UInt64, + XXXType String DEFAULT '' +) +PRIMARY KEY Abcdef +RANGE(MIN StartTimeStamp MAX EndTimeStamp) +``` + +Configuration example with overlapping ranges and open ranges: + +```sql +CREATE TABLE discounts +( + advertiser_id UInt64, + discount_start_date Date, + discount_end_date Nullable(Date), + amount Float64 +) +ENGINE = Memory; + +INSERT INTO discounts VALUES (1, '2015-01-01', Null, 0.1); +INSERT INTO discounts VALUES (1, '2015-01-15', Null, 0.2); +INSERT INTO discounts VALUES (2, '2015-01-01', '2015-01-15', 0.3); +INSERT INTO discounts VALUES (2, '2015-01-04', '2015-01-10', 0.4); +INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-15', 0.5); +INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-10', 0.6); + +SELECT * FROM discounts ORDER BY advertiser_id, discount_start_date; +┌─advertiser_id─┬─discount_start_date─┬─discount_end_date─┬─amount─┐ +│ 1 │ 2015-01-01 │ ᴺᵁᴸᴸ │ 0.1 │ +│ 1 │ 2015-01-15 │ ᴺᵁᴸᴸ │ 0.2 │ +│ 2 │ 2015-01-01 │ 2015-01-15 │ 0.3 │ +│ 2 │ 2015-01-04 │ 2015-01-10 │ 0.4 │ +│ 3 │ 1970-01-01 │ 2015-01-15 │ 0.5 │ +│ 3 │ 1970-01-01 │ 2015-01-10 │ 0.6 │ +└───────────────┴─────────────────────┴───────────────────┴────────┘ + +-- RANGE_LOOKUP_STRATEGY 'max' + +CREATE DICTIONARY discounts_dict +( + advertiser_id UInt64, + discount_start_date Date, + discount_end_date Nullable(Date), + amount Float64 +) +PRIMARY KEY advertiser_id +SOURCE(CLICKHOUSE(TABLE discounts)) +LIFETIME(MIN 600 MAX 900) +LAYOUT(RANGE_HASHED(RANGE_LOOKUP_STRATEGY 'max')) +RANGE(MIN discount_start_date MAX discount_end_date); + +select 
dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res; +┌─res─┐ +│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null +└─────┘ + +select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; +┌─res─┐ +│ 0.2 │ -- two ranges are matching, range_min 2015-01-15 (0.2) is bigger than 2015-01-01 (0.1) +└─────┘ + +select dictGet('discounts_dict', 'amount', 2, toDate('2015-01-06')) res; +┌─res─┐ +│ 0.4 │ -- two ranges are matching, range_min 2015-01-04 (0.4) is bigger than 2015-01-01 (0.3) +└─────┘ + +select dictGet('discounts_dict', 'amount', 3, toDate('2015-01-01')) res; +┌─res─┐ +│ 0.5 │ -- two ranges are matching, range_min are equal, 2015-01-15 (0.5) is bigger than 2015-01-10 (0.6) +└─────┘ + +DROP DICTIONARY discounts_dict; + +-- RANGE_LOOKUP_STRATEGY 'min' + +CREATE DICTIONARY discounts_dict +( + advertiser_id UInt64, + discount_start_date Date, + discount_end_date Nullable(Date), + amount Float64 +) +PRIMARY KEY advertiser_id +SOURCE(CLICKHOUSE(TABLE discounts)) +LIFETIME(MIN 600 MAX 900) +LAYOUT(RANGE_HASHED(RANGE_LOOKUP_STRATEGY 'min')) +RANGE(MIN discount_start_date MAX discount_end_date); + +select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res; +┌─res─┐ +│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null +└─────┘ + +select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; +┌─res─┐ +│ 0.1 │ -- two ranges are matching, range_min 2015-01-01 (0.1) is less than 2015-01-15 (0.2) +└─────┘ + +select dictGet('discounts_dict', 'amount', 2, toDate('2015-01-06')) res; +┌─res─┐ +│ 0.3 │ -- two ranges are matching, range_min 2015-01-01 (0.3) is less than 2015-01-04 (0.4) +└─────┘ + +select dictGet('discounts_dict', 'amount', 3, toDate('2015-01-01')) res; +┌─res─┐ +│ 0.6 │ -- two ranges are matching, range_min are equal, 2015-01-10 (0.6) is less than 2015-01-15 (0.5) +└─────┘ +``` + +### complex_key_range_hashed + +The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values (see [range_hashed](#range_hashed)). This type of storage is for use with composite [keys](#dictionary-key-and-fields). + +Configuration example: + +``` sql +CREATE DICTIONARY range_dictionary +( + CountryID UInt64, + CountryKey String, + StartDate Date, + EndDate Date, + Tax Float64 DEFAULT 0.2 +) +PRIMARY KEY CountryID, CountryKey +SOURCE(CLICKHOUSE(TABLE 'date_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_RANGE_HASHED()) +RANGE(MIN StartDate MAX EndDate); +``` + +### cache + +The dictionary is stored in a cache that has a fixed number of cells. These cells contain frequently used elements. + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type. + +When searching for a dictionary, the cache is searched first. For each block of data, all keys that are not found in the cache or are outdated are requested from the source using `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. The received data is then written to the cache. + +If keys are not found in dictionary, then update cache task is created and added into update queue. Update queue properties can be controlled with settings `max_update_queue_size`, `update_queue_push_timeout_milliseconds`, `query_wait_timeout_milliseconds`, `max_threads_for_updates`. + +For cache dictionaries, the expiration [lifetime](#dictionary-updates) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used and key becomes expired. 
The key is re-requested the next time it needs to be used. This behaviour can be configured with setting `allow_read_expired_keys`. + +This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the [system.dictionaries](../../operations/system-tables/dictionaries.md) table. + +If setting `allow_read_expired_keys` is set to 1, by default 0. Then dictionary can support asynchronous updates. If a client requests keys and all of them are in cache, but some of them are expired, then dictionary will return expired keys for a client and request them asynchronously from the source. + +To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally. + +All types of sources are supported. + +Example of settings: + +``` xml + + + + 1000000000 + + 0 + + 100000 + + 10 + + 60000 + + 4 + + +``` + +or + +``` sql +LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) +``` + +Set a large enough cache size. You need to experiment to select the number of cells: + +1. Set some value. +2. Run queries until the cache is completely full. +3. Assess memory consumption using the `system.dictionaries` table. +4. Increase or decrease the number of cells until the required memory consumption is reached. + +:::warning +Do not use ClickHouse as a source, because it is slow to process queries with random reads. +::: + +### complex_key_cache + +This type of storage is for use with composite [keys](#dictionary-key-and-fields). Similar to `cache`. + +### ssd_cache + +Similar to `cache`, but stores data on SSD and index in RAM. All cache dictionary settings related to update queue can also be applied to SSD cache dictionaries. + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type. + +``` xml + + + + 4096 + + 16777216 + + 131072 + + 1048576 + + /var/lib/clickhouse/user_files/test_dict + + +``` + +or + +``` sql +LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576 + PATH '/var/lib/clickhouse/user_files/test_dict')) +``` + +### complex_key_ssd_cache + +This type of storage is for use with composite [keys](#dictionary-key-and-fields). Similar to `ssd_cache`. + +### direct + +The dictionary is not stored in memory and directly goes to the source during the processing of a request. + +The dictionary key has the [UInt64](../../sql-reference/data-types/int-uint.md) type. + +All types of [sources](#dictionary-sources), except local files, are supported. + +Configuration example: + +``` xml + + + +``` + +or + +``` sql +LAYOUT(DIRECT()) +``` + +### complex_key_direct + +This type of storage is for use with composite [keys](#dictionary-key-and-fields). Similar to `direct`. + +### ip_trie + +This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN. + +**Example** + +Suppose we have a table in ClickHouse that contains our IP prefixes and mappings: + +```sql +CREATE TABLE my_ip_addresses ( + prefix String, + asn UInt32, + cca2 String +) +ENGINE = MergeTree +PRIMARY KEY prefix; +``` + +```sql +INSERT INTO my_ip_addresses VALUES + ('202.79.32.0/20', 17501, 'NP'), + ('2620:0:870::/48', 3856, 'US'), + ('2a02:6b8:1::/48', 13238, 'RU'), + ('2001:db8::/32', 65536, 'ZZ') +; +``` + +Let's define an `ip_trie` dictionary for this table. 
The `ip_trie` layout requires a composite key: + +``` xml + + + + prefix + String + + + + asn + UInt32 + + + + cca2 + String + ?? + + ... + + + + + + true + + +``` + +or + +``` sql +CREATE DICTIONARY my_ip_trie_dictionary ( + prefix String, + asn UInt32, + cca2 String DEFAULT '??' +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(TABLE 'my_ip_addresses')) +LAYOUT(IP_TRIE) +LIFETIME(3600); +``` + +The key must have only one `String` type attribute that contains an allowed IP prefix. Other types are not supported yet. + +For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys. The syntax is: + +``` sql +dictGetT('dict_name', 'attr_name', tuple(ip)) +``` + +The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6. For example: + +``` sql +select dictGet('my_ip_trie_dictionary', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) +``` + +Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned. + +Data must completely fit into RAM. + +## Dictionary Updates {#dictionary-updates} + +ClickHouse periodically updates the dictionaries. The update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries are defined in the `lifetime` tag in seconds. + +Dictionary updates (other than loading for first use) do not block queries. During updates, the old version of a dictionary is used. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries. + +Example of settings: + + + +``` xml + + ... + 300 + ... + +``` + +or + +``` sql +CREATE DICTIONARY (...) +... +LIFETIME(300) +... +``` + +Setting `0` (`LIFETIME(0)`) prevents dictionaries from updating. + +You can set a time interval for updates, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when updating on a large number of servers. + +Example of settings: + +``` xml + + ... + + 300 + 360 + + ... + +``` + +or + +``` sql +LIFETIME(MIN 300 MAX 360) +``` + +If `0` and `0`, ClickHouse does not reload the dictionary by timeout. +In this case, ClickHouse can reload the dictionary earlier if the dictionary configuration file was changed or the `SYSTEM RELOAD DICTIONARY` command was executed. + +When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](#dictionary-sources): + +- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated. +- For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`). +- Dictionaries from other sources are updated every time by default. + +For other sources (ODBC, PostgreSQL, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps: + +- The dictionary table must have a field that always changes when the source data is updated. +- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. 
Specify the query in the `` field in the settings for the [source](#dictionary-sources). + +Example of settings: + +``` xml + + ... + + ... + SELECT update_time FROM dictionary_source where id = 1 + + ... + +``` + +or + +``` sql +... +SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) +... +``` + +For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronious and asynchronious updates are supported. + +It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after the previous update. If `update_field` is specified as part of the dictionary source configuration, value of the previous update time in seconds will be added to the data request. Depends on source type (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, or ODBC) different logic will be applied to `update_field` before request data from an external source. + +- If the source is HTTP then `update_field` will be added as a query parameter with the last update time as the parameter value. +- If the source is Executable then `update_field` will be added as an executable script argument with the last update time as the argument value. +- If the source is ClickHouse, MySQL, PostgreSQL, ODBC there will be an additional part of `WHERE`, where `update_field` is compared as greater or equal with the last update time. + - Per default, this `WHERE`-condition is checked at the highest level of the SQL-Query. Alternatively, the condition can be checked in any other `WHERE`-clause within the query using the `{condition}`-keyword. Example: + ```sql + ... + SOURCE(CLICKHOUSE(... + update_field 'added_time' + QUERY ' + SELECT my_arr.1 AS x, my_arr.2 AS y, creation_time + FROM ( + SELECT arrayZip(x_arr, y_arr) AS my_arr, creation_time + FROM dictionary_source + WHERE {condition} + )' + )) + ... + ``` + +If `update_field` option is set, additional option `update_lag` can be set. Value of `update_lag` option is subtracted from previous update time before request updated data. + +Example of settings: + +``` xml + + ... + + ... + added_time + 15 + + ... + +``` + +or + +``` sql +... +SOURCE(CLICKHOUSE(... update_field 'added_time' update_lag 15)) +... +``` + +## Dictionary Sources {#dictionary-sources} + + + +A dictionary can be connected to ClickHouse from many different sources. + +If the dictionary is configured using an xml-file, the configuration looks like this: + +``` xml + + + ... + + + + + + ... + + ... + +``` + +In case of [DDL-query](../../sql-reference/statements/create/dictionary.md), the configuration described above will look like: + +``` sql +CREATE DICTIONARY dict_name (...) +... +SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration +... +``` + +The source is configured in the `source` section. 
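+For illustration, here is a minimal end-to-end sketch that combines a source, a layout and a lifetime in a single DDL statement (the table name `my_source_table` and its columns are hypothetical):
+
+``` sql
+CREATE DICTIONARY my_dict
+(
+    id UInt64,
+    value String
+)
+PRIMARY KEY id
+SOURCE(CLICKHOUSE(TABLE 'my_source_table')) -- the source section described above
+LAYOUT(HASHED())
+LIFETIME(MIN 300 MAX 360);
+```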
+ +For source types [Local file](#local_file), [Executable file](#executable), [HTTP(s)](#https), [ClickHouse](#clickhouse) +optional settings are available: + +``` xml + + + /opt/dictionaries/os.tsv + TabSeparated + + + 0 + + +``` + +or + +``` sql +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) +SETTINGS(format_csv_allow_single_quotes = 0) +``` + +Types of sources (`source_type`): + +- [Local file](#local_file) +- [Executable File](#executable) +- [Executable Pool](#executable_pool) +- [HTTP(s)](#http) +- DBMS + - [ODBC](#odbc) + - [MySQL](#mysql) + - [ClickHouse](#clickhouse) + - [MongoDB](#mongodb) + - [Redis](#redis) + - [Cassandra](#cassandra) + - [PostgreSQL](#postgresql) + +## Local File {#local_file} + +Example of settings: + +``` xml + + + /opt/dictionaries/os.tsv + TabSeparated + + +``` + +or + +``` sql +SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated')) +``` + +Setting fields: + +- `path` – The absolute path to the file. +- `format` – The file format. All the formats described in [Formats](../../interfaces/formats.md#formats) are supported. + +When a dictionary with source `FILE` is created via DDL command (`CREATE DICTIONARY ...`), the source file needs to be located in the `user_files` directory to prevent DB users from accessing arbitrary files on the ClickHouse node. + +**See Also** + +- [Dictionary function](../../sql-reference/table-functions/dictionary.md#dictionary-function) + +## Executable File {#executable} + +Working with executable files depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data. + +Example of settings: + +``` xml + + + cat /opt/dictionaries/os.tsv + TabSeparated + false + + +``` + +Setting fields: + +- `command` — The absolute path to the executable file, or the file name (if the command's directory is in the `PATH`). +- `format` — The file format. All the formats described in [Formats](../../interfaces/formats.md#formats) are supported. +- `command_termination_timeout` — The executable script should contain a main read-write loop. After the dictionary is destroyed, the pipe is closed, and the executable file will have `command_termination_timeout` seconds to shutdown before ClickHouse will send a SIGTERM signal to the child process. `command_termination_timeout` is specified in seconds. Default value is 10. Optional parameter. +- `command_read_timeout` - Timeout for reading data from command stdout in milliseconds. Default value 10000. Optional parameter. +- `command_write_timeout` - Timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter. +- `implicit_key` — The executable source file can return only values, and the correspondence to the requested keys is determined implicitly — by the order of rows in the result. Default value is false. +- `execute_direct` - If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder specified by [user_scripts_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_scripts_path). Additional script arguments can be specified using a whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c`. Default value is `0`. 
Optional parameter. +- `send_chunk_header` - controls whether to send row count before sending a chunk of data to process. Optional. Default value is `false`. + +That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute arbitrary binaries on the ClickHouse node. + +## Executable Pool {#executable_pool} + +Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, or `complex_key_direct` layouts. + +Executable pool will spawn a pool of processes with the specified command and keep them running until they exit. The program should read data from STDIN while it is available and output the result to STDOUT. It can wait for the next block of data on STDIN. ClickHouse will not close STDIN after processing a block of data, but will pipe another chunk of data when needed. The executable script should be ready for this way of data processing — it should poll STDIN and flush data to STDOUT early. + +Example of settings: + +``` xml + + + while read key; do printf "$key\tData for key $key\n"; done + TabSeparated + 10 + 10 + false + + +``` + +Setting fields: + +- `command` — The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). +- `format` — The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `pool_size` — Size of pool. If 0 is specified as `pool_size` then there is no pool size restrictions. Default value is `16`. +- `command_termination_timeout` — executable script should contain main read-write loop. After dictionary is destroyed, pipe is closed, and executable file will have `command_termination_timeout` seconds to shutdown, before ClickHouse will send SIGTERM signal to child process. Specified in seconds. Default value is 10. Optional parameter. +- `max_command_execution_time` — Maximum executable script command execution time for processing block of data. Specified in seconds. Default value is 10. Optional parameter. +- `command_read_timeout` - timeout for reading data from command stdout in milliseconds. Default value 10000. Optional parameter. +- `command_write_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter. +- `implicit_key` — The executable source file can return only values, and the correspondence to the requested keys is determined implicitly — by the order of rows in the result. Default value is false. Optional parameter. +- `execute_direct` - If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder specified by [user_scripts_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_scripts_path). Additional script arguments can be specified using whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c`. Default value is `1`. Optional parameter. +- `send_chunk_header` - controls whether to send row count before sending a chunk of data to process. Optional. Default value is `false`. + +That dictionary source can be configured only via XML configuration. 
Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node. + +## Http(s) {#https} + +Working with an HTTP(s) server depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. + +Example of settings: + +``` xml + + + http://[::1]/os.tsv + TabSeparated + + user + password + + +
+ API-KEY + key +
+
+
+ +``` + +or + +``` sql +SOURCE(HTTP( + url 'http://[::1]/os.tsv' + format 'TabSeparated' + credentials(user 'user' password 'password') + headers(header(name 'API-KEY' value 'key')) +)) +``` + +In order for ClickHouse to access an HTTPS resource, you must [configure openSSL](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl) in the server configuration. + +Setting fields: + +- `url` – The source URL. +- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `credentials` – Basic HTTP authentication. Optional parameter. +- `user` – Username required for the authentication. +- `password` – Password required for the authentication. +- `headers` – All custom HTTP headers entries used for the HTTP request. Optional parameter. +- `header` – Single HTTP header entry. +- `name` – Identifiant name used for the header send on the request. +- `value` – Value set for a specific identifiant name. + +When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`) remote hosts for HTTP dictionaries are checked against the contents of `remote_url_allow_hosts` section from config to prevent database users to access arbitrary HTTP server. + +### Known Vulnerability of the ODBC Dictionary Functionality + +:::note +When connecting to the database through the ODBC driver connection parameter `Servername` can be substituted. In this case values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised. +::: + +**Example of insecure use** + +Let’s configure unixODBC for PostgreSQL. Content of `/etc/odbc.ini`: + +``` text +[gregtest] +Driver = /usr/lib/psqlodbca.so +Servername = localhost +PORT = 5432 +DATABASE = test_db +#OPTION = 3 +USERNAME = test +PASSWORD = test +``` + +If you then make a query such as + +``` sql +SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); +``` + +ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`. + +### Example of Connecting Postgresql + +Ubuntu OS. + +Installing unixODBC and the ODBC driver for PostgreSQL: + +``` bash +$ sudo apt-get install -y unixodbc odbcinst odbc-postgresql +``` + +Configuring `/etc/odbc.ini` (or `~/.odbc.ini` if you signed in under a user that runs ClickHouse): + +``` text + [DEFAULT] + Driver = myconnection + + [myconnection] + Description = PostgreSQL connection to my_db + Driver = PostgreSQL Unicode + Database = my_db + Servername = 127.0.0.1 + UserName = username + Password = password + Port = 5432 + Protocol = 9.3 + ReadOnly = No + RowVersioning = No + ShowSystemTables = No + ConnSettings = +``` + +The dictionary configuration in ClickHouse: + +``` xml + + + table_name + + + + + DSN=myconnection + postgresql_table
+
+ + + 300 + 360 + + + + + + + id + + + some_column + UInt64 + 0 + + +
+
+``` + +or + +``` sql +CREATE DICTIONARY table_name ( + id UInt64, + some_column UInt64 DEFAULT 0 +) +PRIMARY KEY id +SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table')) +LAYOUT(HASHED()) +LIFETIME(MIN 300 MAX 360) +``` + +You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. + +### Example of Connecting MS SQL Server + +Ubuntu OS. + +Installing the ODBC driver for connecting to MS SQL: + +``` bash +$ sudo apt-get install tdsodbc freetds-bin sqsh +``` + +Configuring the driver: + +```bash + $ cat /etc/freetds/freetds.conf + ... + + [MSSQL] + host = 192.168.56.101 + port = 1433 + tds version = 7.0 + client charset = UTF-8 + + # test TDS connection + $ sqsh -S MSSQL -D database -U user -P password + + + $ cat /etc/odbcinst.ini + + [FreeTDS] + Description = FreeTDS + Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so + Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so + FileUsage = 1 + UsageCount = 5 + + $ cat /etc/odbc.ini + # $ cat ~/.odbc.ini # if you signed in under a user that runs ClickHouse + + [MSSQL] + Description = FreeTDS + Driver = FreeTDS + Servername = MSSQL + Database = test + UID = test + PWD = test + Port = 1433 + + + # (optional) test ODBC connection (to use isql-tool install the [unixodbc](https://packages.debian.org/sid/unixodbc)-package) + $ isql -v MSSQL "user" "password" +``` + +Remarks: +- to determine the earliest TDS version that is supported by a particular SQL Server version, refer to the product documentation or look at [MS-TDS Product Behavior](https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-tds/135d0ebe-5c4c-4a94-99bf-1811eccb9f4a) + +Configuring the dictionary in ClickHouse: + +``` xml + + + test + + + dict
+ DSN=MSSQL;UID=test;PWD=test +
+ + + + 300 + 360 + + + + + + + + + k + + + s + String + + + +
+
+``` + +or + +``` sql +CREATE DICTIONARY test ( + k UInt64, + s String DEFAULT '' +) +PRIMARY KEY k +SOURCE(ODBC(table 'dict' connection_string 'DSN=MSSQL;UID=test;PWD=test')) +LAYOUT(FLAT()) +LIFETIME(MIN 300 MAX 360) +``` + +## DBMS + +### ODBC + +You can use this method to connect any database that has an ODBC driver. + +Example of settings: + +``` xml + + + DatabaseName + ShemaName.TableName
+ DSN=some_parameters + SQL_QUERY + SELECT id, value_1, value_2 FROM ShemaName.TableName +
+ +``` + +or + +``` sql +SOURCE(ODBC( + db 'DatabaseName' + table 'SchemaName.TableName' + connection_string 'DSN=some_parameters' + invalidate_query 'SQL_QUERY' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' +)) +``` + +Setting fields: + +- `db` – Name of the database. Omit it if the database name is set in the `` parameters. +- `table` – Name of the table and schema if exists. +- `connection_string` – Connection string. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). +- `query` – The custom query. Optional parameter. + +:::note +The `table` and `query` fields cannot be used together. And either one of the `table` or `query` fields must be declared. +::: + +ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database. + +If you have a problems with encodings when using Oracle, see the corresponding [FAQ](/knowledgebase/oracle-odbc) item. + +### Mysql + +Example of settings: + +``` xml + + + 3306 + clickhouse + qwerty + + example01-1 + 1 + + + example01-2 + 1 + + db_name + table_name
+ id=10 + SQL_QUERY + true + SELECT id, value_1, value_2 FROM db_name.table_name +
+ +``` + +or + +``` sql +SOURCE(MYSQL( + port 3306 + user 'clickhouse' + password 'qwerty' + replica(host 'example01-1' priority 1) + replica(host 'example01-2' priority 1) + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' + fail_on_connection_loss 'true' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' +)) +``` + +Setting fields: + +- `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). + +- `user` – Name of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `password` – Password of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `replica` – Section of replica configurations. There can be multiple sections. + + - `replica/host` – The MySQL host. + - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. + +- `db` – Name of the database. + +- `table` – Name of the table. + +- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in MySQL, for example, `id > 10 AND id < 20`. Optional parameter. + +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). + +- `fail_on_connection_loss` – The configuration parameter that controls behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. + +- `query` – The custom query. Optional parameter. + +:::note +The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. +::: + +:::note +There is no explicit parameter `secure`. When establishing an SSL-connection security is mandatory. +::: + +MySQL can be connected to on a local host via sockets. To do this, set `host` and `socket`. + +Example of settings: + +``` xml + + + localhost + /path/to/socket/file.sock + clickhouse + qwerty + db_name + table_name
+ id=10 + SQL_QUERY + true + SELECT id, value_1, value_2 FROM db_name.table_name +
+ +``` + +or + +``` sql +SOURCE(MYSQL( + host 'localhost' + socket '/path/to/socket/file.sock' + user 'clickhouse' + password 'qwerty' + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' + fail_on_connection_loss 'true' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' +)) +``` + +### ClickHouse + +Example of settings: + +``` xml + + + example01-01-1 + 9000 + default + + default + ids
+ id=10 + 1 + SELECT id, value_1, value_2 FROM default.ids +
+ +``` + +or + +``` sql +SOURCE(CLICKHOUSE( + host 'example01-01-1' + port 9000 + user 'default' + password '' + db 'default' + table 'ids' + where 'id=10' + secure 1 + query 'SELECT id, value_1, value_2 FROM default.ids' +)); +``` + +Setting fields: + +- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distributed](../../engines/table-engines/special/distributed.md) table and enter it in subsequent configurations. +- `port` – The port on the ClickHouse server. +- `user` – Name of the ClickHouse user. +- `password` – Password of the ClickHouse user. +- `db` – Name of the database. +- `table` – Name of the table. +- `where` – The selection criteria. May be omitted. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). +- `secure` - Use ssl for connection. +- `query` – The custom query. Optional parameter. + +:::note +The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. +::: + +### Mongodb + +Example of settings: + +``` xml + + + localhost + 27017 + + + test + dictionary_source + + +``` + +or + +``` sql +SOURCE(MONGODB( + host 'localhost' + port 27017 + user '' + password '' + db 'test' + collection 'dictionary_source' +)) +``` + +Setting fields: + +- `host` – The MongoDB host. +- `port` – The port on the MongoDB server. +- `user` – Name of the MongoDB user. +- `password` – Password of the MongoDB user. +- `db` – Name of the database. +- `collection` – Name of the collection. + +### Redis + +Example of settings: + +``` xml + + + localhost + 6379 + simple + 0 + + +``` + +or + +``` sql +SOURCE(REDIS( + host 'localhost' + port 6379 + storage_type 'simple' + db_index 0 +)) +``` + +Setting fields: + +- `host` – The Redis host. +- `port` – The port on the Redis server. +- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`. +- `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. + +### Cassandra + +Example of settings: + +``` xml + + + localhost + 9042 + username + qwerty123 + database_name + table_name + 1 + 1 + One + "SomeColumn" = 42 + 8 + SELECT id, value_1, value_2 FROM database_name.table_name + + +``` + +Setting fields: + +- `host` – The Cassandra host or comma-separated list of hosts. +- `port` – The port on the Cassandra servers. If not specified, default port 9042 is used. +- `user` – Name of the Cassandra user. +- `password` – Password of the Cassandra user. +- `keyspace` – Name of the keyspace (database). +- `column_family` – Name of the column family (table). +- `allow_filering` – Flag to allow or not potentially expensive conditions on clustering key columns. Default value is 1. +- `partition_key_prefix` – Number of partition key columns in primary key of the Cassandra table. Required for compose key dictionaries. Order of key columns in the dictionary definition must be the same as in Cassandra. Default value is 1 (the first key column is a partition key and other key columns are clustering key). +- `consistency` – Consistency level. 
Possible values: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default value is `One`. +- `where` – Optional selection criteria. +- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in compose key dictionaries. +- `query` – The custom query. Optional parameter. + +:::note +The `column_family` or `where` fields cannot be used together with the `query` field. And either one of the `column_family` or `query` fields must be declared. +::: + +### PostgreSQL + +Example of settings: + +``` xml + + + 5432 + clickhouse + qwerty + db_name + table_name
+ id=10 + SQL_QUERY + SELECT id, value_1, value_2 FROM db_name.table_name +
+ +``` + +or + +``` sql +SOURCE(POSTGRESQL( + port 5432 + host 'postgresql-hostname' + user 'postgres_user' + password 'postgres_password' + db 'db_name' + table 'table_name' + replica(host 'example01-1' port 5432 priority 1) + replica(host 'example01-2' port 5432 priority 2) + where 'id=10' + invalidate_query 'SQL_QUERY' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' +)) +``` + +Setting fields: + +- `host` – The host on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside ``). +- `port` – The port on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside ``). +- `user` – Name of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). +- `password` – Password of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). +- `replica` – Section of replica configurations. There can be multiple sections: + - `replica/host` – The PostgreSQL host. + - `replica/port` – The PostgreSQL port. + - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. +- `db` – Name of the database. +- `table` – Name of the table. +- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL. For example, `id > 10 AND id < 20`. Optional parameter. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). +- `query` – The custom query. Optional parameter. + +:::note +The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. +::: + +## Null + +A special source that can be used to create dummy (empty) dictionaries. Such dictionaries can useful for tests or with setups with separated data and query nodes at nodes with Distributed tables. + +``` sql +CREATE DICTIONARY null_dict ( + id UInt64, + val UInt8, + default_val UInt8 DEFAULT 123, + nullable_val Nullable(UInt8) +) +PRIMARY KEY id +SOURCE(NULL()) +LAYOUT(FLAT()) +LIFETIME(0); +``` + +## Dictionary Key and Fields {#dictionary-key-and-fields} + + + +The `structure` clause describes the dictionary key and fields available for queries. + +XML description: + +``` xml + + + + Id + + + + + + + ... + + + +``` + +Attributes are described in the elements: + +- `` — Key column +- `` — Data column: there can be a multiple number of attributes. + +DDL query: + +``` sql +CREATE DICTIONARY dict_name ( + Id UInt64, + -- attributes +) +PRIMARY KEY Id +... +``` + +Attributes are described in the query body: + +- `PRIMARY KEY` — Key column +- `AttrName AttrType` — Data column. There can be a multiple number of attributes. + +## Key + +ClickHouse supports the following types of keys: + +- Numeric key. `UInt64`. Defined in the `` tag or using `PRIMARY KEY` keyword. +- Composite key. Set of values of different types. Defined in the tag `` or `PRIMARY KEY` keyword. + +An xml structure can contain either `` or ``. DDL-query must contain single `PRIMARY KEY`. + +:::warning +You must not describe key as an attribute. +::: + +### Numeric Key + +Type: `UInt64`. + +Configuration example: + +``` xml + + Id + +``` + +Configuration fields: + +- `name` – The name of the column with keys. + +For DDL-query: + +``` sql +CREATE DICTIONARY ( + Id UInt64, + ... 
+) +PRIMARY KEY Id +... +``` + +- `PRIMARY KEY` – The name of the column with keys. + +### Composite Key + +The key can be a `tuple` from any types of fields. The [layout](#storig-dictionaries-in-memory) in this case must be `complex_key_hashed` or `complex_key_cache`. + +:::tip +A composite key can consist of a single element. This makes it possible to use a string as the key, for instance. +::: + +The key structure is set in the element ``. Key fields are specified in the same format as the dictionary [attributes](#dictionary-key-and-fields). Example: + +``` xml + + + + field1 + String + + + field2 + UInt32 + + ... + +... +``` + +or + +``` sql +CREATE DICTIONARY ( + field1 String, + field2 String + ... +) +PRIMARY KEY field1, field2 +... +``` + +For a query to the `dictGet*` function, a tuple is passed as the key. Example: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`. + +## Attributes + +Configuration example: + +``` xml + + ... + + Name + ClickHouseDataType + + rand64() + true + true + true + + +``` + +or + +``` sql +CREATE DICTIONARY somename ( + Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID +) +``` + +Configuration fields: + +| Tag | Description | Required | +|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `name` | Column name. | Yes | +| `type` | ClickHouse data type: [UInt8](../../sql-reference/data-types/int-uint.md), [UInt16](../../sql-reference/data-types/int-uint.md), [UInt32](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md), [Int8](../../sql-reference/data-types/int-uint.md), [Int16](../../sql-reference/data-types/int-uint.md), [Int32](../../sql-reference/data-types/int-uint.md), [Int64](../../sql-reference/data-types/int-uint.md), [Float32](../../sql-reference/data-types/float.md), [Float64](../../sql-reference/data-types/float.md), [UUID](../../sql-reference/data-types/uuid.md), [Decimal32](../../sql-reference/data-types/decimal.md), [Decimal64](../../sql-reference/data-types/decimal.md), [Decimal128](../../sql-reference/data-types/decimal.md), [Decimal256](../../sql-reference/data-types/decimal.md),[Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md), [DateTime64](../../sql-reference/data-types/datetime64.md), [String](../../sql-reference/data-types/string.md), [Array](../../sql-reference/data-types/array.md).
ClickHouse tries to cast the value from the dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../sql-reference/data-types/nullable.md) is currently supported for [Flat](#flat), [Hashed](#hashed), [ComplexKeyHashed](#complex_key_hashed), [Direct](#direct), [ComplexKeyDirect](#complex_key_direct), [RangeHashed](#range_hashed), Polygon, [Cache](#cache), [ComplexKeyCache](#complex_key_cache), [SSDCache](#ssd_cache), [SSDComplexKeyCache](#complex_key_ssd_cache) dictionaries. In [IPTrie](#ip_trie) dictionaries `Nullable` types are not supported. | Yes | +| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. A [NULL](../syntax.md#null) value can be used only for `Nullable` types (see the previous row describing the types). | Yes | +| `expression` | [Expression](../../sql-reference/syntax.md#expressions) that ClickHouse executes on the value.
The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

Default value: no expression. | No | +| `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](#hierarchical-dictionaries).

Default value: `false`. | No | +| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).
If `true`, ClickHouse can automatically place requests to injective dictionaries after the `GROUP BY` clause, which usually reduces the number of such requests significantly.

Default value: `false`. | No | +| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.

Default value: `false`. + +## Hierarchical Dictionaries {#hierarchical-dictionaries} + +ClickHouse supports hierarchical dictionaries with a [numeric key](#numeric-key). + +Look at the following hierarchical structure: + +``` text +0 (Common parent) +│ +├── 1 (Russia) +│ │ +│ └── 2 (Moscow) +│ │ +│ └── 3 (Center) +│ +└── 4 (Great Britain) + │ + └── 5 (London) +``` + +This hierarchy can be expressed as the following dictionary table. + +| region_id | parent_region | region_name | +|------------|----------------|---------------| +| 1 | 0 | Russia | +| 2 | 1 | Moscow | +| 3 | 2 | Center | +| 4 | 0 | Great Britain | +| 5 | 4 | London | + +This table contains a column `parent_region` that contains the key of the nearest parent for the element. + +ClickHouse supports the hierarchical property for external dictionary attributes. This property allows you to configure the hierarchical dictionary similar to described above. + +The [dictGetHierarchy](../../sql-reference/functions/ext-dict-functions.md#dictgethierarchy) function allows you to get the parent chain of an element. + +For our example, the structure of dictionary can be the following: + +``` xml + + + + region_id + + + + parent_region + UInt64 + 0 + true + + + + region_name + String + + + + + +``` + +## Polygon dictionaries {#polygon-dictionaries} + +Polygon dictionaries allow you to efficiently search for the polygon containing specified points. +For example: defining a city area by geographical coordinates. + +Example of a polygon dictionary configuration: + + + +``` xml + + + + + key + Array(Array(Array(Array(Float64)))) + + + + + name + String + + + + + value + UInt64 + 0 + + + + + + 1 + + + + ... + +``` + +The corresponding [DDL-query](../../sql-reference/statements/create/dictionary.md#create-dictionary-query): +``` sql +CREATE DICTIONARY polygon_dict_name ( + key Array(Array(Array(Array(Float64)))), + name String, + value UInt64 +) +PRIMARY KEY key +LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1)) +... +``` + +When configuring the polygon dictionary, the key must have one of two types: + +- A simple polygon. It is an array of points. +- MultiPolygon. It is an array of polygons. Each polygon is a two-dimensional array of points. The first element of this array is the outer boundary of the polygon, and subsequent elements specify areas to be excluded from it. + +Points can be specified as an array or a tuple of their coordinates. In the current implementation, only two-dimensional points are supported. + +The user can upload their own data in all formats supported by ClickHouse. + +There are 3 types of [in-memory storage](#storig-dictionaries-in-memory) available: + +- `POLYGON_SIMPLE`. This is a naive implementation, where a linear pass through all polygons is made for each query, and membership is checked for each one without using additional indexes. + +- `POLYGON_INDEX_EACH`. A separate index is built for each polygon, which allows you to quickly check whether it belongs in most cases (optimized for geographical regions). +Also, a grid is superimposed on the area under consideration, which significantly narrows the number of polygons under consideration. +The grid is created by recursively dividing the cell into 16 equal parts and is configured with two parameters. +The division stops when the recursion depth reaches `MAX_DEPTH` or when the cell crosses no more than `MIN_INTERSECTIONS` polygons. +To respond to the query, there is a corresponding cell, and the index for the polygons stored in it is accessed alternately. 
+ +- `POLYGON_INDEX_CELL`. This placement also creates the grid described above. The same options are available. For each sheet cell, an index is built on all pieces of polygons that fall into it, which allows you to quickly respond to a request. + +- `POLYGON`. Synonym to `POLYGON_INDEX_CELL`. + +Dictionary queries are carried out using standard [functions](../../sql-reference/functions/ext-dict-functions.md) for working with dictionaries. +An important difference is that here the keys will be the points for which you want to find the polygon containing them. + +**Example** + +Example of working with the dictionary defined above: + +``` sql +CREATE TABLE points ( + x Float64, + y Float64 +) +... +SELECT tuple(x, y) AS key, dictGet(dict_name, 'name', key), dictGet(dict_name, 'value', key) FROM points ORDER BY x, y; +``` + +As a result of executing the last command for each point in the 'points' table, a minimum area polygon containing this point will be found, and the requested attributes will be output. + +**Example** + +You can read columns from polygon dictionaries via SELECT query, just turn on the `store_polygon_key_column = 1` in the dictionary configuration or corresponding DDL-query. + +Query: + +``` sql +CREATE TABLE polygons_test_table +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String +) ENGINE = TinyLog; + +INSERT INTO polygons_test_table VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Value'); + +CREATE DICTIONARY polygons_test_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 'polygons_test_table')) +LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1)) +LIFETIME(0); + +SELECT * FROM polygons_test_dictionary; +``` + +Result: + +``` text +┌─key─────────────────────────────┬─name──┐ +│ [[[(3,1),(0,1),(0,-1),(3,-1)]]] │ Value │ +└─────────────────────────────────┴───────┘ +``` + +## RegExp Tree Dictionary {#regexp-tree-dictionary} + +Regexp Tree dictionary stores multiple trees of regular expressions with attributions. Users can retrieve strings in the dictionary. If a string matches the root of the regexp tree, we will collect the corresponding attributes of the matched root and continue to walk the children. If any of the children matches the string, we will collect attributes and rewrite the old ones if conflicts occur, then continue the traverse until we reach leaf nodes. + +Example of the ddl query for creating Regexp Tree dictionary: + + + +```sql +create dictionary regexp_dict +( + regexp String, + name String, + version String +) +PRIMARY KEY(regexp) +SOURCE(YAMLRegExpTree(PATH '/var/lib/clickhouse/user_files/regexp_tree.yaml')) +LAYOUT(regexp_tree) +... +``` + +We only allow `YAMLRegExpTree` to work with regexp_tree dicitionary layout. If you want to use other sources, please set variable `regexp_dict_allow_other_sources` true. + +**Source** + +We introduce a type of source called `YAMLRegExpTree` representing the structure of Regexp Tree dictionary. An Example of a valid yaml config is like: + +```xml +- regexp: 'Linux/(\d+[\.\d]*).+tlinux' + name: 'TencentOS' + version: '\1' + +- regexp: '\d+/tclwebkit(?:\d+[\.\d]*)' + name: 'Andriod' + versions: + - regexp: '33/tclwebkit' + version: '13' + - regexp: '3[12]/tclwebkit' + version: '12' + - regexp: '30/tclwebkit' + version: '11' + - regexp: '29/tclwebkit' + version: '10' +``` + +The key `regexp` represents the regular expression of a tree node. The name of key is same as the dictionary key. 
The `name` and `version` is user-defined attributions in the dicitionary. The `versions` (which can be any name that not appear in attributions or the key) indicates the children nodes of this tree. + +**Back Reference** + +The value of an attribution could contain a back reference which refers to a capture group of the matched regular expression. Reference number ranges from 1 to 9 and writes as `$1` or `\1`. + +During the query execution, the back reference in the value will be replaced by the matched capture group. + +**Query** + +Due to the specialty of Regexp Tree dictionary, we only allow functions `dictGet`, `dictGetOrDefault` and `dictGetOrNull` work with it. + +Example: + +```sql +SELECT dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024'); +``` + +Result: + +``` +┌─dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024')─┐ +│ ('Andriod','12') │ +└─────────────────────────────────────────────────────────────────┘ +``` + +## Embedded Dictionaries {#embedded-dictionaries} + + + +ClickHouse contains a built-in feature for working with a geobase. + +This allows you to: + +- Use a region’s ID to get its name in the desired language. +- Use a region’s ID to get the ID of a city, area, federal district, country, or continent. +- Check whether a region is part of another region. +- Get a chain of parent regions. + +All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with web analytics dictionaries”. + +The internal dictionaries are disabled in the default package. +To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file. + +The geobase is loaded from text files. + +Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory. + +Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory. + +You can also create these files yourself. The file format is as follows: + +`regions_hierarchy*.txt`: TabSeparated (no header), columns: + +- region ID (`UInt32`) +- parent region ID (`UInt32`) +- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types do not have values +- population (`UInt32`) — optional column + +`regions_names_*.txt`: TabSeparated (no header), columns: + +- region ID (`UInt32`) +- region name (`String`) — Can’t contain tabs or line feeds, even escaped ones. + +A flat array is used for storing in RAM. For this reason, IDs shouldn’t be more than a million. + +Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated. +For updates, the file modification times are checked. If a file has changed, the dictionary is updated. +The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter. +Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries. + +We recommend periodically updating the dictionaries with the geobase. 
During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server. + +There are also functions for working with OS identifiers and search engines, but they shouldn’t be used.
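+As a sketch of how the embedded geobase is queried once the files above are configured, the built-in region functions can be used (the numeric region IDs below are placeholders whose meaning depends on the loaded geobase):
+
+``` sql
+SELECT
+    regionToName(toUInt32(213), 'en') AS region_name,    -- name of the region in English
+    regionToCountry(toUInt32(213)) AS country_id,        -- ID of the country that contains the region
+    regionIn(toUInt32(213), toUInt32(225)) AS is_inside,  -- whether region 213 is part of region 225
+    regionHierarchy(toUInt32(213)) AS parent_chain;       -- chain of parent regions
+```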
diff --git a/docs/en/sql-reference/dictionaries/internal-dicts.md b/docs/en/sql-reference/dictionaries/internal-dicts.md deleted file mode 100644 index 11c6ee93aa6..00000000000 --- a/docs/en/sql-reference/dictionaries/internal-dicts.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -slug: /en/sql-reference/dictionaries/internal-dicts -sidebar_position: 39 -sidebar_label: Embedded Dictionaries ---- -import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.md'; - -# Embedded Dictionaries - - - -ClickHouse contains a built-in feature for working with a geobase. - -This allows you to: - -- Use a region’s ID to get its name in the desired language. -- Use a region’s ID to get the ID of a city, area, federal district, country, or continent. -- Check whether a region is part of another region. -- Get a chain of parent regions. - -All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with web analytics dictionaries”. - -The internal dictionaries are disabled in the default package. -To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file. - -The geobase is loaded from text files. - -Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory. - -Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory. - -You can also create these files yourself. The file format is as follows: - -`regions_hierarchy*.txt`: TabSeparated (no header), columns: - -- region ID (`UInt32`) -- parent region ID (`UInt32`) -- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types do not have values -- population (`UInt32`) — optional column - -`regions_names_*.txt`: TabSeparated (no header), columns: - -- region ID (`UInt32`) -- region name (`String`) — Can’t contain tabs or line feeds, even escaped ones. - -A flat array is used for storing in RAM. For this reason, IDs shouldn’t be more than a million. - -Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated. -For updates, the file modification times are checked. If a file has changed, the dictionary is updated. -The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter. -Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries. - -We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server. - -There are also functions for working with OS identifiers and search engines, but they shouldn’t be used. 
diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index ef0475027dd..503ef66143e 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -283,7 +283,7 @@ Result: ``` :::note -The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions) which is `0` by default. +The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings.md#enable-extended-results-for-datetime-functions) which is `0` by default. Behavior for * `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results. @@ -1135,7 +1135,7 @@ SELECT ``` ```response ┌─toYYYYMM(now(), 'US/Eastern')─┐ -│ 202303 │ +│ 202303 │ └───────────────────────────────┘ ``` @@ -1335,7 +1335,7 @@ Similar to formatDateTime, except that it formats datetime in Joda style instead **Replacement fields** -Using replacement fields, you can define a pattern for the resulting string. +Using replacement fields, you can define a pattern for the resulting string. | Placeholder | Description | Presentation | Examples | diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md index b4b7ec5ab21..07226b67601 100644 --- a/docs/en/sql-reference/functions/ext-dict-functions.md +++ b/docs/en/sql-reference/functions/ext-dict-functions.md @@ -6,11 +6,11 @@ sidebar_label: Dictionaries # Functions for Working with Dictionaries -:::note +:::note For dictionaries created with [DDL queries](../../sql-reference/statements/create/dictionary.md), the `dict_name` parameter must be fully specified, like `.`. Otherwise, the current database is used. ::: -For information on connecting and configuring dictionaries, see [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). +For information on connecting and configuring dictionaries, see [Dictionaries](../../sql-reference/dictionaries/index.md). ## dictGet, dictGetOrDefault, dictGetOrNull @@ -31,7 +31,7 @@ dictGetOrNull('dict_name', attr_name, id_expr) **Returned value** -- If ClickHouse parses the attribute successfully in the [attribute’s data type](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), functions return the value of the dictionary attribute that corresponds to `id_expr`. 
+- If ClickHouse parses the attribute successfully in the [attribute’s data type](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-attributes), functions return the value of the dictionary attribute that corresponds to `id_expr`. - If there is no the key, corresponding to `id_expr`, in the dictionary, then: @@ -226,7 +226,7 @@ Result: **See Also** -- [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) +- [Dictionaries](../../sql-reference/dictionaries/index.md) ## dictHas @@ -250,7 +250,7 @@ Type: `UInt8`. ## dictGetHierarchy -Creates an array, containing all the parents of a key in the [hierarchical dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md). +Creates an array, containing all the parents of a key in the [hierarchical dictionary](../../sql-reference/dictionaries/index.md#hierarchical-dictionaries). **Syntax** @@ -436,7 +436,7 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) **Returned value** -- If ClickHouse parses the attribute successfully in the [attribute’s data type](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), functions return the value of the dictionary attribute that corresponds to `id_expr`. +- If ClickHouse parses the attribute successfully in the [attribute’s data type](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-attributes), functions return the value of the dictionary attribute that corresponds to `id_expr`. - If there is no requested `id_expr` in the dictionary then: diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 7146484361e..011b73405c5 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -792,7 +792,7 @@ neighbor(column, offset[, default_value]) The result of the function depends on the affected data blocks and the order of data in the block. -:::warning +:::warning It can reach the neighbor rows only inside the currently processed data block. ::: @@ -902,7 +902,7 @@ Result: Calculates the difference between successive row values ​​in the data block. Returns 0 for the first row and the difference from the previous row for each subsequent row. -:::warning +:::warning It can reach the previous row only inside the currently processed data block. ::: @@ -986,7 +986,7 @@ Each event has a start time and an end time. The start time is included in the e The function calculates the total number of active (concurrent) events for each event start time. -:::warning +:::warning Events must be ordered by the start time in ascending order. If this requirement is violated the function raises an exception. Every data block is processed separately. If events from different data blocks overlap then they can not be processed correctly. ::: @@ -1674,7 +1674,7 @@ Result: Accumulates states of an aggregate function for each row of a data block. -:::warning +:::warning The state is reset for each new data block. ::: @@ -2177,7 +2177,7 @@ Number of digits. Type: [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). -:::note +:::note For `Decimal` values takes into account their scales: calculates result over underlying integer type which is `(value * scale)`. For example: `countDigits(42) = 2`, `countDigits(42.000) = 5`, `countDigits(0.04200) = 4`. I.e. 
you may check decimal overflow for `Decimal64` with `countDecimal(x) > 18`. It's a slow variant of [isDecimalOverflow](#is-decimal-overflow). ::: @@ -2260,7 +2260,7 @@ Result: ## currentProfiles -Returns a list of the current [settings profiles](../../operations/access-rights.md#settings-profiles-management) for the current user. +Returns a list of the current [settings profiles](../../guides/sre/user-management/index.md#settings-profiles-management) for the current user. The command [SET PROFILE](../../sql-reference/statements/set.md#query-set) could be used to change the current setting profile. If the command `SET PROFILE` was not used the function returns the profiles specified at the current user's definition (see [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement)). @@ -2272,7 +2272,7 @@ currentProfiles() **Returned value** -- List of the current user settings profiles. +- List of the current user settings profiles. Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). @@ -2288,7 +2288,7 @@ enabledProfiles() **Returned value** -- List of the enabled settings profiles. +- List of the enabled settings profiles. Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). @@ -2304,7 +2304,7 @@ defaultProfiles() **Returned value** -- List of the default settings profiles. +- List of the default settings profiles. Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). @@ -2320,7 +2320,7 @@ currentRoles() **Returned value** -- List of the current roles for the current user. +- List of the current roles for the current user. Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). @@ -2336,13 +2336,13 @@ enabledRoles() **Returned value** -- List of the enabled roles for the current user. +- List of the enabled roles for the current user. Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). ## defaultRoles -Returns the names of the roles which are enabled by default for the current user when he logins. Initially these are all roles granted to the current user (see [GRANT](../../sql-reference/statements/grant/#grant-select)), but that can be changed with the [SET DEFAULT ROLE](../../sql-reference/statements/set-role.md#set-default-role-statement) statement. +Returns the names of the roles which are enabled by default for the current user when he logins. Initially these are all roles granted to the current user (see [GRANT](../../sql-reference/statements/grant.md#grant-select)), but that can be changed with the [SET DEFAULT ROLE](../../sql-reference/statements/set-role.md#set-default-role-statement) statement. **Syntax** @@ -2352,7 +2352,7 @@ defaultRoles() **Returned value** -- List of the default roles for the current user. +- List of the default roles for the current user. Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). @@ -2499,7 +2499,7 @@ In the following example a configuration with two shards is used. 
The query is e Query: ``` sql -CREATE TABLE shard_num_example (dummy UInt8) +CREATE TABLE shard_num_example (dummy UInt8) ENGINE=Distributed(test_cluster_two_shards_localhost, system, one, dummy); SELECT dummy, shardNum(), shardCount() FROM shard_num_example; ``` diff --git a/docs/en/sql-reference/functions/tuple-functions.md b/docs/en/sql-reference/functions/tuple-functions.md index a31ec3c41d2..c248499be69 100644 --- a/docs/en/sql-reference/functions/tuple-functions.md +++ b/docs/en/sql-reference/functions/tuple-functions.md @@ -22,15 +22,15 @@ tuple(x, y, …) ## tupleElement A function that allows getting a column from a tuple. -‘N’ is the column index, starting from 1. ‘N’ must be a constant. ‘N’ must be a strict postive integer no greater than the size of the tuple. -There is no cost to execute the function. -The function implements the operator `x.N`. +If the second argument is a number `n`, it is the column index, starting from 1. If the second argument is a string `s`, it is the name of the element. An optional third argument can also be provided: if the index is out of bounds or no element with that name exists, the default value is returned instead of an exception being thrown. The second and third arguments, if provided, must be constants. There is no cost to execute the function. + +The function implements the operators `x.n` and `x.s`. **Syntax** ``` sql -tupleElement(tuple, n) +tupleElement(tuple, n/s [, default_value]) ``` ## untuple diff --git a/docs/en/sql-reference/index.md b/docs/en/sql-reference/index.md new file mode 100644 index 00000000000..eddc5b204d9 --- /dev/null +++ b/docs/en/sql-reference/index.md @@ -0,0 +1,22 @@ +--- +keywords: [clickhouse, docs, sql reference, sql statements, sql, syntax] +title: SQL Reference +--- + +import { TwoColumnList } from '/src/components/two_column_list' +import { ClickableSquare } from '/src/components/clickable_square' +import { HorizontalDivide } from '/src/components/horizontal_divide' +import { ViewAllLink } from '/src/components/view_all_link' +import { VideoContainer } from '/src/components/video_container' + +import LinksDeployment from './sql-reference-links.json' + +# ClickHouse SQL Reference + +ClickHouse supports a declarative query language based on SQL that is identical to the ANSI SQL standard in many cases. + +Supported queries include GROUP BY, ORDER BY, subqueries in FROM, JOIN clause, IN operator, window functions and scalar subqueries.
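The reworked `tupleElement` description in the tuple-functions.md hunk above is easier to follow with a concrete query. Below is a minimal sketch, assuming the optional-default form documented in this change behaves as described; the tuple values and element names are purely illustrative.

``` sql
-- Access a named tuple element by index, by name, and with a fallback default
WITH CAST((1, 'alice'), 'Tuple(id UInt8, name String)') AS t
SELECT
    tupleElement(t, 2),              -- by index: returns 'alice'
    tupleElement(t, 'name'),         -- by name: returns 'alice'
    tupleElement(t, 'missing', '')   -- no such element: returns the default ''
```

Note that the second and third arguments are constants here, which matches the constraint stated in the hunk.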
+ + + + \ No newline at end of file diff --git a/docs/en/sql-reference/sql-reference-links.json b/docs/en/sql-reference/sql-reference-links.json new file mode 100644 index 00000000000..3811ad18462 --- /dev/null +++ b/docs/en/sql-reference/sql-reference-links.json @@ -0,0 +1,12 @@ +[ + { + "title": "Statements", + "description": "A list of available SQL statements in ClickHouse", + "url": "/docs/en/sql-reference/statements/" + }, + { + "title": "Database and Table Engines", + "description": "Engines determine where and how your data is stored", + "url": "/docs/en/engines/table-engines" + } +] diff --git a/docs/en/sql-reference/statements/alter/comment.md b/docs/en/sql-reference/statements/alter/comment.md index f8742765619..cc49c6abf80 100644 --- a/docs/en/sql-reference/statements/alter/comment.md +++ b/docs/en/sql-reference/statements/alter/comment.md @@ -16,7 +16,7 @@ ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' **Examples** -Creating a table with comment (for more information, see the [COMMENT] clause(../../../sql-reference/statements/create/table.md#comment-table)): +Creating a table with comment (for more information, see the [COMMENT](../../../sql-reference/statements/create/table.md#comment-table) clause): ``` sql CREATE TABLE table_with_comment diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md index 908d28d7ab1..626d71709ac 100644 --- a/docs/en/sql-reference/statements/alter/projection.md +++ b/docs/en/sql-reference/statements/alter/projection.md @@ -17,7 +17,7 @@ Projections will create internally a new hidden table, this means that more IO a Example, If the projection has defined a different primary key, all the data from the original table will be duplicated. ::: -You can see more technical details about how projections work internally on this [page](/docs/en/guides/improving-query-performance/sparse-primary-indexes/sparse-primary-indexes-multiple.md/#option-3-projections). +You can see more technical details about how projections work internally on this [page](/docs/en/guides/best-practices/sparse-primary-indexes.md/#option-3-projections). ## Example filtering without using primary keys @@ -37,7 +37,7 @@ Using `ALTER TABLE`, we could add the Projection to an existing table: ``` ALTER TABLE visits_order ADD PROJECTION user_name_projection ( SELECT -* +* ORDER BY user_name ) @@ -161,6 +161,6 @@ The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only Also, they are replicated, syncing projections metadata via ClickHouse Keeper or ZooKeeper. -:::note +:::note Projection manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants). 
::: diff --git a/docs/en/sql-reference/statements/create/dictionary.md b/docs/en/sql-reference/statements/create/dictionary.md index e789dd9257f..29c72d62f24 100644 --- a/docs/en/sql-reference/statements/create/dictionary.md +++ b/docs/en/sql-reference/statements/create/dictionary.md @@ -5,7 +5,7 @@ sidebar_label: DICTIONARY title: "CREATE DICTIONARY" --- -Creates a new [dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). +Creates a new [dictionary](../../../sql-reference/dictionaries/index.md) with given [structure](../../../sql-reference/dictionaries/index.md#dictionary-key-and-fields), [source](../../../sql-reference/dictionaries/index.md#dictionary-sources), [layout](../../../sql-reference/dictionaries/index.md#storing-dictionaries-in-memory) and [lifetime](../../../sql-reference/dictionaries/index.md#dictionary-updates). ## Syntax @@ -29,7 +29,7 @@ The dictionary structure consists of attributes. Dictionary attributes are speci `ON CLUSTER` clause allows creating dictionary on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md). -Depending on dictionary [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) one or more attributes can be specified as dictionary keys. +Depending on dictionary [layout](../../../sql-reference/dictionaries/index.md#storing-dictionaries-in-memory) one or more attributes can be specified as dictionary keys. ## SOURCE @@ -125,9 +125,9 @@ LAYOUT(HASHED()) ### Create a dictionary from another database -Please see the details in [Dictionary sources](/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md/#dbms). +Please see the details in [Dictionary sources](/docs/en/sql-reference/dictionaries/index.md#dbms). **See Also** -- For more information, see the [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section. -- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). +- For more information, see the [Dictionaries](../../../sql-reference/dictionaries/index.md) section. +- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [Dictionaries](../../../sql-reference/dictionaries/index.md). diff --git a/docs/en/sql-reference/statements/create/quota.md b/docs/en/sql-reference/statements/create/quota.md index 3952743b480..7c31f93fff7 100644 --- a/docs/en/sql-reference/statements/create/quota.md +++ b/docs/en/sql-reference/statements/create/quota.md @@ -5,7 +5,7 @@ sidebar_label: QUOTA title: "CREATE QUOTA" --- -Creates a [quota](../../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role. +Creates a [quota](../../../guides/sre/user-management/index.md#quotas-management) that can be assigned to a user or a role.
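To make the `CREATE QUOTA` summary above more tangible, here is a minimal sketch; the quota name, the one-hour limit, and the user `robin` are assumptions chosen for illustration only.

``` sql
-- Allow the user robin at most 100 queries per hour
CREATE QUOTA hourly_query_limit
    FOR INTERVAL 1 hour MAX queries = 100
    TO robin;
```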
Syntax: diff --git a/docs/en/sql-reference/statements/create/role.md b/docs/en/sql-reference/statements/create/role.md index 68fdd51e957..9b14e220e1f 100644 --- a/docs/en/sql-reference/statements/create/role.md +++ b/docs/en/sql-reference/statements/create/role.md @@ -5,7 +5,7 @@ sidebar_label: ROLE title: "CREATE ROLE" --- -Creates new [roles](../../../operations/access-rights.md#role-management). Role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role. +Creates new [roles](../../../guides/sre/user-management/index.md#role-management). Role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role. Syntax: @@ -22,7 +22,7 @@ User can have default roles which apply at user login. To set default roles, use To revoke a role, use the [REVOKE](../../../sql-reference/statements/revoke.md) statement. -To delete role, use the [DROP ROLE](../../../sql-reference/statements/drop#drop-role-statement) statement. The deleted role is being automatically revoked from all the users and roles to which it was assigned. +To delete role, use the [DROP ROLE](../../../sql-reference/statements/drop.md#drop-role-statement) statement. The deleted role is being automatically revoked from all the users and roles to which it was assigned. ## Examples diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md index 31ce9221eea..56a57534234 100644 --- a/docs/en/sql-reference/statements/create/row-policy.md +++ b/docs/en/sql-reference/statements/create/row-policy.md @@ -5,9 +5,9 @@ sidebar_label: ROW POLICY title: "CREATE ROW POLICY" --- -Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table. +Creates a [row policy](../../../guides/sre/user-management/index.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table. -:::warning +:::warning Row policies makes sense only for users with readonly access. If user can modify table or copy partitions between tables, it defeats the restrictions of row policies. ::: @@ -31,7 +31,7 @@ In the section `TO` you can provide a list of users and roles this policy should Keyword `ALL` means all the ClickHouse users including current user. Keyword `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost` -:::note +:::note If there are no row policies defined for a table then any user can `SELECT` all the row from the table. Defining one or more row policies for the table makes the access to the table depending on the row policies no matter if those row policies are defined for the current user or not. 
For example, the following policy `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter` diff --git a/docs/en/sql-reference/statements/create/settings-profile.md b/docs/en/sql-reference/statements/create/settings-profile.md index c2424ff6046..8e221a4d82f 100644 --- a/docs/en/sql-reference/statements/create/settings-profile.md +++ b/docs/en/sql-reference/statements/create/settings-profile.md @@ -5,7 +5,7 @@ sidebar_label: SETTINGS PROFILE title: "CREATE SETTINGS PROFILE" --- -Creates [settings profiles](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role. +Creates [settings profiles](../../../guides/sre/user-management/index.md#settings-profiles-management) that can be assigned to a user or a role. Syntax: @@ -27,7 +27,7 @@ CREATE USER robin IDENTIFIED BY 'password'; Create the `max_memory_usage_profile` settings profile with value and constraints for the `max_memory_usage` setting and assign it to user `robin`: ``` sql -CREATE -SETTINGS PROFILE max_memory_usage_profile SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 +CREATE +SETTINGS PROFILE max_memory_usage_profile SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin ``` diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index ed35df9b97a..de39d960476 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -393,15 +393,15 @@ These codecs are designed to make compression more effective by using specific f #### DoubleDelta -`DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +`DoubleDelta(bytes_size)` — Calculates delta of deltas and writes it in compact binary form. Possible `bytes_size` values: 1, 2, 4, 8, the default value is `sizeof(type)` if equal to 1, 2, 4, or 8. In all other cases, it’s 1. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-bit deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). #### Gorilla -`Gorilla` — Calculates XOR between current and previous floating point value and writes it in compact binary form. The smaller the difference between consecutive values is, i.e. the slower the values of the series changes, the better the compression rate. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see section 4.1 in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](https://doi.org/10.14778/2824032.2824078). +`Gorilla(bytes_size)` — Calculates XOR between current and previous floating point value and writes it in compact binary form. 
The smaller the difference between consecutive values is, i.e. the slower the values of the series changes, the better the compression rate. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Possible `bytes_size` values: 1, 2, 4, 8, the default value is `sizeof(type)` if equal to 1, 2, 4, or 8. In all other cases, it’s 1. For additional information, see section 4.1 in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](https://doi.org/10.14778/2824032.2824078). #### FPC -`FPC` - Repeatedly predicts the next floating point value in the sequence using the better of two predictors, then XORs the actual with the predicted value, and leading-zero compresses the result. Similar to Gorilla, this is efficient when storing a series of floating point values that change slowly. For 64-bit values (double), FPC is faster than Gorilla, for 32-bit values your mileage may vary. For a detailed description of the algorithm see [High Throughput Compression of Double-Precision Floating-Point Data](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf). +`FPC(level, float_size)` - Repeatedly predicts the next floating point value in the sequence using the better of two predictors, then XORs the actual with the predicted value, and leading-zero compresses the result. Similar to Gorilla, this is efficient when storing a series of floating point values that change slowly. For 64-bit values (double), FPC is faster than Gorilla, for 32-bit values your mileage may vary. Possible `level` values: 1-28, the default value is 12. Possible `float_size` values: 4, 8, the default value is `sizeof(type)` if type is Float. In all other cases, it’s 4. For a detailed description of the algorithm see [High Throughput Compression of Double-Precision Floating-Point Data](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf). #### T64 @@ -473,7 +473,7 @@ ENGINE = MergeTree ORDER BY x; ClickHouse supports temporary tables which have the following characteristics: - Temporary tables disappear when the session ends, including if the connection is lost. -- A temporary table uses the Memory engine only. +- A temporary table uses the Memory table engine when engine is not specified and it may use any table engine except Replicated and `KeeperMap` engines. - The DB can’t be specified for a temporary table. It is created outside of databases. - Impossible to create a temporary table with distributed DDL query on all cluster servers (by using `ON CLUSTER`): this table exists only in the current session. - If a temporary table has the same name as another one and a query specifies the table name without specifying the DB, the temporary table will be used. @@ -487,7 +487,7 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], ... -) +) [ENGINE = engine] ``` In most cases, temporary tables are not created manually, but when using external data for a query, or for distributed `(GLOBAL) IN`. For more information, see the appropriate sections diff --git a/docs/en/sql-reference/statements/create/user.md b/docs/en/sql-reference/statements/create/user.md index a756b3d4a0d..454195db3fa 100644 --- a/docs/en/sql-reference/statements/create/user.md +++ b/docs/en/sql-reference/statements/create/user.md @@ -5,7 +5,7 @@ sidebar_label: USER title: "CREATE USER" --- -Creates [user accounts](../../../operations/access-rights.md#user-account-management). 
+Creates [user accounts](../../../guides/sre/user-management/index.md#user-account-management). Syntax: diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md index e1987e50af4..7d7b8855d51 100644 --- a/docs/en/sql-reference/statements/delete.md +++ b/docs/en/sql-reference/statements/delete.md @@ -30,12 +30,6 @@ SET allow_experimental_lightweight_delete = true; ::: -An [alternative way to delete rows](./alter/delete.md) in ClickHouse is `ALTER TABLE ... DELETE`, which might be more efficient if you do bulk deletes only occasionally and don't need the operation to be applied instantly. In most use cases the new lightweight `DELETE FROM` behavior will be considerably faster. - -:::warning -Even though deletes are becoming more lightweight in ClickHouse, they should still not be used as aggressively as on an OLTP system. Lightweight deletes are currently efficient for wide parts, but for compact parts, they can be a heavyweight operation, and it may be better to use `ALTER TABLE` for some scenarios. -::: - :::note `DELETE FROM` requires the `ALTER DELETE` privilege: ```sql @@ -51,7 +45,7 @@ The idea behind Lightweight Delete is that when a `DELETE FROM table ...` query The mask is implemented as a hidden `_row_exists` system column that stores True for all visible rows and False for deleted ones. This column is only present in a part if some rows in this part were deleted. In other words, the column is not persisted when it has all values equal to True. ## SELECT query -When the column is present `SELECT ... FROM table WHERE condition` query internally is extended by an additional predicate on `_row_exists` and becomes similar to +When the column is present `SELECT ... FROM table WHERE condition` query internally is extended by an additional predicate on `_row_exists` and becomes similar to ```sql SELECT ... FROM table PREWHERE _row_exists WHERE condition ``` diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index aa87b1ef613..5f1513d3f44 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -22,7 +22,7 @@ System log tables can be also attached back (e.g. `query_log`, `text_log`, etc). Note that you can not detach permanently the table which is already detached (temporary). But you can attach it back and then detach permanently again. -Also you can not [DROP](../../sql-reference/statements/drop#drop-table) the detached table, or [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as detached permanently, or replace it with the other table with [RENAME TABLE](../../sql-reference/statements/rename.md) query. +Also you can not [DROP](../../sql-reference/statements/drop.md#drop-table) the detached table, or [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as detached permanently, or replace it with the other table with [RENAME TABLE](../../sql-reference/statements/rename.md) query. The `SYNC` modifier executes the action without delay. 
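The `_row_exists` mask described in the delete.md hunk above can be illustrated with a short sketch; the table `hits` and its columns are hypothetical, and the second query only mirrors the internal rewrite rather than something users need to run.

``` sql
-- A lightweight delete marks matching rows via the hidden _row_exists column
DELETE FROM hits WHERE CounterID = 42;

-- Conceptually, subsequent reads behave as if the query were rewritten like this
SELECT count()
FROM hits
PREWHERE _row_exists
WHERE EventDate >= '2023-01-01';
```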
diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index 3383ea70a2b..1d9b2c9ea30 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -105,7 +105,8 @@ Hierarchy of privileges: - [CREATE](#grant-create) - `CREATE DATABASE` - `CREATE TABLE` - - `CREATE TEMPORARY TABLE` + - `CREATE ARBITRARY TEMPORARY TABLE` + - `CREATE TEMPORARY TABLE` - `CREATE VIEW` - `CREATE DICTIONARY` - `CREATE FUNCTION` @@ -313,7 +314,8 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A - `CREATE`. Level: `GROUP` - `CREATE DATABASE`. Level: `DATABASE` - `CREATE TABLE`. Level: `TABLE` - - `CREATE TEMPORARY TABLE`. Level: `GLOBAL` + - `CREATE ARBITRARY TEMPORARY TABLE`. Level: `GLOBAL` + - `CREATE TEMPORARY TABLE`. Level: `GLOBAL` - `CREATE VIEW`. Level: `VIEW` - `CREATE DICTIONARY`. Level: `DICTIONARY` diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index f2d590d196b..354ab95c598 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -4,7 +4,7 @@ sidebar_position: 33 sidebar_label: INSERT INTO --- -# INSERT INTO Statement +# INSERT INTO Statement Inserts data into a table. @@ -89,7 +89,7 @@ INSERT INTO t FORMAT TabSeparated 22 Qwerty ``` -You can insert data separately from the query by using the command-line client or the HTTP interface. For more information, see the section “[Interfaces](../../interfaces)”. +You can insert data separately from the query by using the [command-line client](/docs/en/integrations/sql-clients/clickhouse-client-local) or the [HTTP interface](/docs/en/interfaces/http/). :::note If you want to specify `SETTINGS` for `INSERT` query then you have to do it _before_ `FORMAT` clause since everything after `FORMAT format_name` is treated as data. For example: @@ -129,7 +129,7 @@ To insert a default value instead of `NULL` into a column with not nullable data INSERT INTO [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name ``` -Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause. +Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause. Compressed files are supported. The compression type is detected by the extension of the file name. Or it can be explicitly specified in a `COMPRESSION` clause. Supported types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`. @@ -191,7 +191,7 @@ INSERT INTO [TABLE] FUNCTION table_func ... 
``` sql CREATE TABLE simple_table (id UInt32, text String) ENGINE=MergeTree() ORDER BY id; -INSERT INTO TABLE FUNCTION remote('localhost', default.simple_table) +INSERT INTO TABLE FUNCTION remote('localhost', default.simple_table) VALUES (100, 'inserted via remote()'); SELECT * FROM simple_table; ``` diff --git a/docs/en/sql-reference/statements/select/array-join.md b/docs/en/sql-reference/statements/select/array-join.md index a1b5e0cdb36..b8e6be24798 100644 --- a/docs/en/sql-reference/statements/select/array-join.md +++ b/docs/en/sql-reference/statements/select/array-join.md @@ -146,7 +146,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS ma └───────┴─────────┴───┴─────┴────────┘ ``` -The example below uses the [arrayEnumerate](../../../sql-reference/functions/array-functions#array_functions-arrayenumerate) function: +The example below uses the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function: ``` sql SELECT s, arr, a, num, arrayEnumerate(arr) @@ -166,8 +166,8 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; Multiple arrays with different sizes can be joined by using: `SETTINGS enable_unaligned_array_join = 1`. Example: ```sql -SELECT s, arr, a, b -FROM arrays_test ARRAY JOIN arr as a, [['a','b'],['c']] as b +SELECT s, arr, a, b +FROM arrays_test ARRAY JOIN arr as a, [['a','b'],['c']] as b SETTINGS enable_unaligned_array_join = 1; ``` @@ -278,7 +278,7 @@ ARRAY JOIN nest AS n; └───────┴─────┴─────┴─────────┴────────────┘ ``` -Example of using the [arrayEnumerate](../../../sql-reference/functions/array-functions#array_functions-arrayenumerate) function: +Example of using the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function: ``` sql SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num diff --git a/docs/en/sql-reference/statements/select/group-by.md b/docs/en/sql-reference/statements/select/group-by.md index 2a4b06660c7..1018b24f50b 100644 --- a/docs/en/sql-reference/statements/select/group-by.md +++ b/docs/en/sql-reference/statements/select/group-by.md @@ -8,12 +8,12 @@ sidebar_label: GROUP BY `GROUP BY` clause switches the `SELECT` query into an aggregation mode, which works as follows: - `GROUP BY` clause contains a list of expressions (or a single expression, which is considered to be the list of length one). This list acts as a “grouping key”, while each individual expression will be referred to as a “key expression”. -- All the expressions in the [SELECT](../../../sql-reference/statements/select/index.md), [HAVING](../../../sql-reference/statements/select/having), and [ORDER BY](../../../sql-reference/statements/select/order-by.md) clauses **must** be calculated based on key expressions **or** on [aggregate functions](../../../sql-reference/aggregate-functions/index.md) over non-key expressions (including plain columns). In other words, each column selected from the table must be used either in a key expression or inside an aggregate function, but not both. +- All the expressions in the [SELECT](../../../sql-reference/statements/select/index.md), [HAVING](../../../sql-reference/statements/select/having.md), and [ORDER BY](../../../sql-reference/statements/select/order-by.md) clauses **must** be calculated based on key expressions **or** on [aggregate functions](../../../sql-reference/aggregate-functions/index.md) over non-key expressions (including plain columns). 
In other words, each column selected from the table must be used either in a key expression or inside an aggregate function, but not both. - Result of aggregating `SELECT` query will contain as many rows as there were unique values of “grouping key” in source table. Usually, this significantly reduces the row count, often by orders of magnitude, but not necessarily: row count stays the same if all “grouping key” values were distinct. When you want to group data in the table by column numbers instead of column names, enable the setting [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments). -:::note +:::note There’s an additional way to run aggregation over a table. If a query contains table columns only inside aggregate functions, the `GROUP BY clause` can be omitted, and aggregation by an empty set of keys is assumed. Such queries always return exactly one row. ::: @@ -57,8 +57,8 @@ The subtotals are calculated in the reverse order: at first subtotals are calcul In the subtotals rows the values of already "grouped" key expressions are set to `0` or empty line. -:::note -Mind that [HAVING](../../../sql-reference/statements/select/having) clause can affect the subtotals results. +:::note +Mind that [HAVING](../../../sql-reference/statements/select/having.md) clause can affect the subtotals results. ::: **Example** @@ -125,8 +125,8 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH ROLLUP; In the subtotals rows the values of all "grouped" key expressions are set to `0` or empty line. -:::note -Mind that [HAVING](../../../sql-reference/statements/select/having) clause can affect the subtotals results. +:::note +Mind that [HAVING](../../../sql-reference/statements/select/having.md) clause can affect the subtotals results. ::: **Example** @@ -226,11 +226,11 @@ This extra row is only produced in `JSON*`, `TabSeparated*`, and `Pretty*` forma - In `Template` format, the row is output according to specified template. - In the other formats it is not available. -:::note -totals is output in the results of `SELECT` queries, and is not output in `INSERT INTO ... SELECT`. +:::note +totals is output in the results of `SELECT` queries, and is not output in `INSERT INTO ... SELECT`. ::: -`WITH TOTALS` can be run in different ways when [HAVING](../../../sql-reference/statements/select/having) is present. The behavior depends on the `totals_mode` setting. +`WITH TOTALS` can be run in different ways when [HAVING](../../../sql-reference/statements/select/having.md) is present. The behavior depends on the `totals_mode` setting. ### Configuring Totals Processing diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index 5a8893f6f28..f65e40dede5 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -4,7 +4,7 @@ sidebar_position: 32 sidebar_label: SELECT --- -# SELECT Query +# SELECT Query `SELECT` queries perform data retrieval. By default, the requested data is returned to the client, while in conjunction with [INSERT INTO](../../../sql-reference/statements/insert-into.md) it can be forwarded to a different table. 
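Since the group-by.md hunks above discuss key expressions, aggregates over non-key columns, and `WITH ROLLUP` subtotals, a compact example may help; the `sales` table and its columns are assumptions made for illustration.

``` sql
-- year and month are key expressions; amount appears only inside an aggregate.
-- WITH ROLLUP appends per-year subtotal rows and a grand-total row.
SELECT year, month, sum(amount) AS total
FROM sales
GROUP BY year, month WITH ROLLUP
ORDER BY year, month;
```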
@@ -44,7 +44,7 @@ Specifics of each optional clause are covered in separate sections, which are li - [WHERE clause](../../../sql-reference/statements/select/where.md) - [GROUP BY clause](../../../sql-reference/statements/select/group-by.md) - [LIMIT BY clause](../../../sql-reference/statements/select/limit-by.md) -- [HAVING clause](../../../sql-reference/statements/select/having) +- [HAVING clause](../../../sql-reference/statements/select/having.md) - [LIMIT clause](../../../sql-reference/statements/select/limit.md) - [OFFSET clause](../../../sql-reference/statements/select/offset.md) - [UNION clause](../../../sql-reference/statements/select/union.md) diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 62d3e9fd69a..49bd2672874 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -1,6 +1,6 @@ --- slug: /en/sql-reference/statements/select/join -sidebar_label: JOIN +sidebar_label: Joining Tables --- # JOIN Clause @@ -282,7 +282,7 @@ Each time a query is run with the same `JOIN`, the subquery is run again because In some cases, it is more efficient to use [IN](../../../sql-reference/operators/in.md) instead of `JOIN`. -If you need a `JOIN` for joining with dimension tables (these are relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient due to the fact that the right table is re-accessed for every query. For such cases, there is a “dictionaries” feature that you should use instead of `JOIN`. For more information, see the [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section. +If you need a `JOIN` for joining with dimension tables (these are relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient due to the fact that the right table is re-accessed for every query. For such cases, there is a “dictionaries” feature that you should use instead of `JOIN`. For more information, see the [Dictionaries](../../../sql-reference/dictionaries/index.md) section. ### Memory Limitations diff --git a/docs/en/sql-reference/statements/show.md b/docs/en/sql-reference/statements/show.md index 18b019dd017..a9f0aedccdf 100644 --- a/docs/en/sql-reference/statements/show.md +++ b/docs/en/sql-reference/statements/show.md @@ -198,7 +198,7 @@ Result: ## SHOW DICTIONARIES -Displays a list of [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). +Displays a list of [Dictionaries](../../sql-reference/dictionaries/index.md). ``` sql SHOW DICTIONARIES [FROM ] [LIKE ''] [LIMIT ] [INTO OUTFILE ] [FORMAT ] @@ -293,7 +293,7 @@ SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] ## SHOW USERS -Returns a list of [user account](../../operations/access-rights.md#user-account-management) names. To view user accounts parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users). +Returns a list of [user account](../../guides/sre/user-management/index.md#user-account-management) names. To view user accounts parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users). ### Syntax @@ -303,7 +303,7 @@ SHOW USERS ## SHOW ROLES -Returns a list of [roles](../../operations/access-rights.md#role-management). 
To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants). +Returns a list of [roles](../../guides/sre/user-management/index.md#role-management). To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants). ### Syntax @@ -312,7 +312,7 @@ SHOW [CURRENT|ENABLED] ROLES ``` ## SHOW PROFILES -Returns a list of [setting profiles](../../operations/access-rights.md#settings-profiles-management). To view user accounts parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles). +Returns a list of [setting profiles](../../guides/sre/user-management/index.md#settings-profiles-management). To view user accounts parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles). ### Syntax @@ -322,7 +322,7 @@ SHOW [SETTINGS] PROFILES ## SHOW POLICIES -Returns a list of [row policies](../../operations/access-rights.md#row-policy-management) for the specified table. To view user accounts parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies). +Returns a list of [row policies](../../guides/sre/user-management/index.md#row-policy-management) for the specified table. To view user accounts parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies). ### Syntax @@ -332,7 +332,7 @@ SHOW [ROW] POLICIES [ON [db.]table] ## SHOW QUOTAS -Returns a list of [quotas](../../operations/access-rights.md#quotas-management). To view quotas parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas). +Returns a list of [quotas](../../guides/sre/user-management/index.md#quotas-management). To view quotas parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas). ### Syntax @@ -351,7 +351,7 @@ SHOW [CURRENT] QUOTA ``` ## SHOW ACCESS -Shows all [users](../../operations/access-rights.md#user-account-management), [roles](../../operations/access-rights.md#role-management), [profiles](../../operations/access-rights.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges). +Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges). ### Syntax diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index f9f55acfcec..101e7c72bcb 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -8,7 +8,7 @@ sidebar_label: SYSTEM ## RELOAD EMBEDDED DICTIONARIES -Reload all [Internal dictionaries](../../sql-reference/dictionaries/internal-dicts.md). +Reload all [Internal dictionaries](../../sql-reference/dictionaries/index.md). By default, internal dictionaries are disabled. 
Always returns `Ok.` regardless of the result of the internal dictionary update. @@ -369,7 +369,7 @@ SYSTEM DROP FILESYSTEM CACHE It's too heavy and has potential for misuse. ::: -Will do sync syscall. +Will do sync syscall. ```sql SYSTEM SYNC FILE CACHE diff --git a/docs/en/sql-reference/table-functions/dictionary.md b/docs/en/sql-reference/table-functions/dictionary.md index 8a8cba8ff24..ab511843d63 100644 --- a/docs/en/sql-reference/table-functions/dictionary.md +++ b/docs/en/sql-reference/table-functions/dictionary.md @@ -5,7 +5,7 @@ sidebar_label: dictionary function title: dictionary --- -Displays the [dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. Works the same way as [Dictionary](../../engines/table-engines/special/dictionary.md) engine. +Displays the [dictionary](../../sql-reference/dictionaries/index.md) data as a ClickHouse table. Works the same way as [Dictionary](../../engines/table-engines/special/dictionary.md) engine. **Syntax** diff --git a/docs/en/sql-reference/table-functions/executable.md b/docs/en/sql-reference/table-functions/executable.md index 635188763cf..22c74eb8cfa 100644 --- a/docs/en/sql-reference/table-functions/executable.md +++ b/docs/en/sql-reference/table-functions/executable.md @@ -85,7 +85,7 @@ The response looks like: ## Passing Query Results to a Script -Be sure to check out the example in the `Executable` table engine on [how to pass query results to a script](../../engines/table-engines/special/executable#passing-query-results-to-a-script). Here is how you execute the same script in that example using the `executable` table function: +Be sure to check out the example in the `Executable` table engine on [how to pass query results to a script](../../engines/table-engines/special/executable.md#passing-query-results-to-a-script). Here is how you execute the same script in that example using the `executable` table function: ```sql SELECT * FROM executable( diff --git a/docs/en/sql-reference/table-functions/mongodb.md b/docs/en/sql-reference/table-functions/mongodb.md index dd063ae1796..706ab68fee4 100644 --- a/docs/en/sql-reference/table-functions/mongodb.md +++ b/docs/en/sql-reference/table-functions/mongodb.md @@ -70,5 +70,5 @@ SELECT * FROM mongodb( **See Also** -- [The `MongoDB` table engine](../../engines/table-engines/integrations/mongodb.md) -- [Using MongoDB as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources/#mongodb) +- [The `MongoDB` table engine](/docs/en/engines/table-engines/integrations/mongodb.md) +- [Using MongoDB as a dictionary source](/docs/en/sql-reference/dictionaries/index.md#mongodb) diff --git a/docs/en/sql-reference/table-functions/mysql.md b/docs/en/sql-reference/table-functions/mysql.md index b995319c645..64ddcd86f7f 100644 --- a/docs/en/sql-reference/table-functions/mysql.md +++ b/docs/en/sql-reference/table-functions/mysql.md @@ -56,7 +56,7 @@ SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', A table object with the same columns as the original MySQL table. -:::note +:::note In the `INSERT` query to distinguish table function `mysql(...)` from table name with column names list, you must use keywords `FUNCTION` or `TABLE FUNCTION`. See examples below. 
::: @@ -110,4 +110,4 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123'); **See Also** - [The ‘MySQL’ table engine](../../engines/table-engines/integrations/mysql.md) -- [Using MySQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql) +- [Using MySQL as a dictionary source](../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-mysql) diff --git a/docs/en/sql-reference/table-functions/odbc.md b/docs/en/sql-reference/table-functions/odbc.md index 7e13424bc8a..397a9ba6c89 100644 --- a/docs/en/sql-reference/table-functions/odbc.md +++ b/docs/en/sql-reference/table-functions/odbc.md @@ -101,5 +101,5 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') ## See Also -- [ODBC dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC dictionaries](../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-odbc) - [ODBC table engine](../../engines/table-engines/integrations/odbc.md). diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 87fc6ecb234..6cd13acaa77 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -27,7 +27,7 @@ postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`]) A table object with the same columns as the original PostgreSQL table. -:::note +:::note In the `INSERT` query to distinguish table function `postgresql(...)` from table name with column names list you must use keywords `FUNCTION` or `TABLE FUNCTION`. See examples below. ::: @@ -43,7 +43,7 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp PostgreSQL Array types converts into ClickHouse arrays. -:::note +:::note Be careful, in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows. 
::: @@ -130,7 +130,7 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) **See Also** - [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md) -- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) +- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-postgresql) ## Related content - Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres) diff --git a/docs/ru/engines/database-engines/materialized-mysql.md b/docs/ru/engines/database-engines/materialized-mysql.md index c214e08dce1..df56b7a0bd6 100644 --- a/docs/ru/engines/database-engines/materialized-mysql.md +++ b/docs/ru/engines/database-engines/materialized-mysql.md @@ -97,7 +97,7 @@ CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', ### DDL-запросы {#ddl-queries} -DDL-запросы в MySQL конвертируются в соответствующие DDL-запросы в ClickHouse ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop), [RENAME](../../sql-reference/statements/rename.md)). Если ClickHouse не может конвертировать какой-либо DDL-запрос, он его игнорирует. +DDL-запросы в MySQL конвертируются в соответствующие DDL-запросы в ClickHouse ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md)). Если ClickHouse не может конвертировать какой-либо DDL-запрос, он его игнорирует. ### Репликация данных {#data-replication} diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index 24e0f8dbbb8..ef17a370dc6 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -89,7 +89,7 @@ ORDER BY expr - `min_merge_bytes_to_use_direct_io` — минимальный объём данных при слиянии, необходимый для прямого (небуферизованного) чтения/записи (direct I/O) на диск. При слиянии частей данных ClickHouse вычисляет общий объём хранения всех данных, подлежащих слиянию. Если общий объём хранения всех данных для чтения превышает `min_bytes_to_use_direct_io` байт, тогда ClickHouse использует флаг `O_DIRECT` при чтении данных с диска. Если `min_merge_bytes_to_use_direct_io = 0`, тогда прямой ввод-вывод отключен. Значение по умолчанию: `10 * 1024 * 1024 * 1024` байтов. - `merge_with_ttl_timeout` — минимальное время в секундах перед повторным слиянием для удаления данных с истекшим TTL. По умолчанию: `14400` секунд (4 часа). - `merge_with_recompression_ttl_timeout` — минимальное время в секундах перед повторным слиянием для повторного сжатия данных с истекшим TTL. По умолчанию: `14400` секунд (4 часа). - - `try_fetch_recompressed_part_timeout` — время ожидания (в секундах) перед началом слияния с повторным сжатием. В течение этого времени ClickHouse пытается извлечь сжатую часть из реплики, которая назначила это слияние. Значение по умолчанию: `7200` секунд (2 часа). + - `try_fetch_recompressed_part_timeout` — время ожидания (в секундах) перед началом слияния с повторным сжатием. 
В течение этого времени ClickHouse пытается извлечь сжатую часть из реплики, которая назначила это слияние. Значение по умолчанию: `7200` секунд (2 часа). - `write_final_mark` — включает или отключает запись последней засечки индекса в конце куска данных, указывающей за последний байт. По умолчанию — 1. Не отключайте её. - `merge_max_block_size` — максимальное количество строк в блоке для операций слияния. Значение по умолчанию: 8192. - `storage_policy` — политика хранения данных. Смотрите [Хранение данных таблицы на нескольких блочных устройствах](#table_engine-mergetree-multiple-volumes). @@ -337,7 +337,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 Поддерживаемые типы данных: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`. - Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall). + Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall). 
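The mergetree.md hunk above lists the functions that can make use of `set` and token bloom-filter data-skipping indexes. A minimal sketch of such indexes, with hypothetical table and column names, might look like this.

``` sql
-- Hypothetical table with a set index and a token bloom-filter index
CREATE TABLE skipping_index_example
(
    u64 UInt64,
    s String,
    INDEX idx_u64 (u64) TYPE set(100) GRANULARITY 4,
    INDEX idx_s (s) TYPE tokenbf_v1(4096, 3, 0) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY u64;

-- hasToken can be served by the tokenbf_v1 index; IN can use the set index
SELECT count() FROM skipping_index_example WHERE hasToken(s, 'error') AND u64 IN (1, 2, 3);
```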
**Примеры** @@ -361,14 +361,14 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT | [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | | [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | | [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [in](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | | [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | | [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | | [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [empty](../../../sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../../sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | Функции с постоянным агрументом, который меньше, чем размер ngram не могут использовать индекс `ngrambf_v1` для оптимизации запроса. @@ -396,7 +396,7 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT Проекции не поддерживаются для запросов `SELECT` с модификатором [FINAL](../../../sql-reference/statements/select/from.md#select-from-final). ### Запрос проекции {#projection-query} -Запрос проекции — это то, что определяет проекцию. Такой запрос неявно выбирает данные из родительской таблицы. +Запрос проекции — это то, что определяет проекцию. Такой запрос неявно выбирает данные из родительской таблицы. **Синтаксис** ```sql @@ -406,9 +406,9 @@ SELECT [GROUP BY] [ORDER BY] Проекции можно изменить или удалить с помощью запроса [ALTER](../../../sql-reference/statements/alter/projection.md). ### Хранение проекции {#projection-storage} -Проекции хранятся в каталоге куска данных. Это похоже на хранение индексов, но используется подкаталог, в котором хранится анонимный кусок таблицы `MergeTree`. Таблица создается запросом определения проекции. -Если присутствует секция `GROUP BY`, то используется движок [AggregatingMergeTree](aggregatingmergetree.md), а все агрегатные функции преобразуются в `AggregateFunction`. -Если присутствует секция `ORDER BY`, таблица `MergeTree` использует ее в качестве выражения для первичного ключа. +Проекции хранятся в каталоге куска данных. Это похоже на хранение индексов, но используется подкаталог, в котором хранится анонимный кусок таблицы `MergeTree`. Таблица создается запросом определения проекции. +Если присутствует секция `GROUP BY`, то используется движок [AggregatingMergeTree](aggregatingmergetree.md), а все агрегатные функции преобразуются в `AggregateFunction`. 
+Если присутствует секция `ORDER BY`, таблица `MergeTree` использует ее в качестве выражения для первичного ключа. Во время процесса слияния кусок данных проекции объединяется с помощью процедуры слияния хранилища. Контрольная сумма куска данных родительской таблицы включает кусок данных проекции. Другие процедуры аналогичны индексам пропуска данных. ### Анализ запросов {#projection-query-analysis} @@ -499,7 +499,7 @@ TTL expr За каждым `TTL` выражением может следовать тип действия, которое выполняется после достижения времени, соответствующего результату `TTL` выражения: - `DELETE` - удалить данные (действие по умолчанию); -- `RECOMPRESS codec_name` - повторно сжать данные с помощью кодека `codec_name`; +- `RECOMPRESS codec_name` - повторно сжать данные с помощью кодека `codec_name`; - `TO DISK 'aaa'` - переместить данные на диск `aaa`; - `TO VOLUME 'bbb'` - переместить данные на том `bbb`; - `GROUP BY` - агрегировать данные. @@ -679,7 +679,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); - `policy_name_N` — название политики. Названия политик должны быть уникальны. - `volume_name_N` — название тома. Названия томов должны быть уникальны. - `disk` — диск, находящийся внутри тома. -- `max_data_part_size_bytes` — максимальный размер куска данных, который может находиться на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том. +- `max_data_part_size_bytes` — максимальный размер куска данных, который может находиться на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том. - `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`, если совокупный размер всех партов недостаточен, будут перемещены все парты. - `prefer_not_to_merge` — Отключает слияние кусков данных, хранящихся на данном томе. Если данная настройка включена, то слияние данных, хранящихся на данном томе, не допускается. Это позволяет контролировать работу ClickHouse с медленными дисками. diff --git a/docs/ru/engines/table-engines/special/buffer.md b/docs/ru/engines/table-engines/special/buffer.md index 4987dafc11f..574d9273088 100644 --- a/docs/ru/engines/table-engines/special/buffer.md +++ b/docs/ru/engines/table-engines/special/buffer.md @@ -66,4 +66,4 @@ CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10 Таблицы типа Buffer используются в тех случаях, когда от большого количества серверов поступает слишком много INSERT-ов в единицу времени, и нет возможности заранее самостоятельно буферизовать данные перед вставкой, в результате чего, INSERT-ы не успевают выполняться. 
-Заметим, что даже для таблиц типа Buffer не имеет смысла вставлять данные по одной строке, так как таким образом будет достигнута скорость всего лишь в несколько тысяч строк в секунду, тогда как при вставке более крупными блоками, достижимо более миллиона строк в секунду (смотрите раздел [«Производительность»](../../../introduction/performance/). +Заметим, что даже для таблиц типа Buffer не имеет смысла вставлять данные по одной строке, так как таким образом будет достигнута скорость всего лишь в несколько тысяч строк в секунду, тогда как при вставке более крупными блоками, достижимо более миллиона строк в секунду (смотрите раздел [«Производительность»](../../../introduction/performance.md). diff --git a/docs/ru/faq/operations/multi-region-replication.md b/docs/ru/faq/operations/multi-region-replication.md index bfe3231c247..eb53a69e7f6 100644 --- a/docs/ru/faq/operations/multi-region-replication.md +++ b/docs/ru/faq/operations/multi-region-replication.md @@ -10,4 +10,4 @@ The short answer is "yes". However, we recommend keeping latency between all reg Configuration-wise there's no difference compared to single-region replication, simply use hosts that are located in different locations for replicas. -For more information, see [full article on data replication](../../engines/table-engines/mergetree-family/replication/). +For more information, see [full article on data replication](../../engines/table-engines/mergetree-family/replication.md). diff --git a/docs/ru/getting-started/tutorial.md b/docs/ru/getting-started/tutorial.md index 803da2952fd..60a7463f70f 100644 --- a/docs/ru/getting-started/tutorial.md +++ b/docs/ru/getting-started/tutorial.md @@ -477,7 +477,7 @@ clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv ``` -ClickHouse has a lot of [settings to tune](../operations/settings/) and one way to specify them in console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what do they mean and what the defaults are is to query the `system.settings` table: +ClickHouse has a lot of [settings to tune](../operations/settings/index.md) and one way to specify them in console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what do they mean and what the defaults are is to query the `system.settings` table: ``` sql SELECT name, value, changed, description diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 59c77d082cf..bef5c223281 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -974,7 +974,7 @@ Array представлены как длина в формате varint (unsig столбцы из входных данных будут сопоставлены со столбцами таблицы по их именам, столбцы с неизвестными именами будут пропущены, если включен параметр [input_format_skip_unknown_fields](../operations/settings/settings.md#input_format_skip_unknown_fields). В противном случае первая строка будет пропущена. ::: - + ## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes} То же самое что [RowBinary](#rowbinary), но добавляется заголовок: @@ -1326,7 +1326,7 @@ ClickHouse поддерживает настраиваемую точность Неподдерживаемые типы данных Parquet: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. 
-Типы данных столбцов в ClickHouse могут отличаться от типов данных соответствующих полей файла в формате Parquet. При вставке данных ClickHouse интерпретирует типы данных в соответствии с таблицей выше, а затем [приводит](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) данные к тому типу, который установлен для столбца таблицы. +Типы данных столбцов в ClickHouse могут отличаться от типов данных соответствующих полей файла в формате Parquet. При вставке данных ClickHouse интерпретирует типы данных в соответствии с таблицей выше, а затем [приводит](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) данные к тому типу, который установлен для столбца таблицы. ### Вставка и выборка данных {#inserting-and-selecting-data} @@ -1386,7 +1386,7 @@ ClickHouse поддерживает настраиваемую точность Неподдерживаемые типы данных Arrow: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. -Типы данных столбцов в ClickHouse могут отличаться от типов данных соответствующих полей файла в формате Arrow. При вставке данных ClickHouse интерпретирует типы данных в соответствии с таблицей выше, а затем [приводит](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) данные к тому типу, который установлен для столбца таблицы. +Типы данных столбцов в ClickHouse могут отличаться от типов данных соответствующих полей файла в формате Arrow. При вставке данных ClickHouse интерпретирует типы данных в соответствии с таблицей выше, а затем [приводит](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) данные к тому типу, который установлен для столбца таблицы. ### Вставка данных {#inserting-data-arrow} @@ -1444,7 +1444,7 @@ ClickHouse поддерживает настраиваемую точность Неподдерживаемые типы данных ORC: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. -Типы данных столбцов в таблицах ClickHouse могут отличаться от типов данных для соответствующих полей ORC. При вставке данных ClickHouse интерпретирует типы данных ORC согласно таблице соответствия, а затем [приводит](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) данные к типу, установленному для столбца таблицы ClickHouse. +Типы данных столбцов в таблицах ClickHouse могут отличаться от типов данных для соответствующих полей ORC. При вставке данных ClickHouse интерпретирует типы данных ORC согласно таблице соответствия, а затем [приводит](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) данные к типу, установленному для столбца таблицы ClickHouse. ### Вставка данных {#inserting-data-2} diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index 62e97e3f61d..b8c5ee77f0c 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -243,7 +243,7 @@ $ echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: pass Если пользователь не задан,то используется `default`. Если пароль не задан, то используется пустой пароль. Также в параметрах URL вы можете указать любые настройки, которые будут использованы для обработки одного запроса, или целые профили настроек. Пример:http://localhost:8123/?profile=web&max_rows_to_read=1000000000&query=SELECT+1 -Подробнее смотрите в разделе [Настройки](../operations/settings/). +Подробнее смотрите в разделе [Настройки](../operations/settings/index.md). ``` bash $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' 
--data-binary @- diff --git a/docs/ru/operations/optimizing-performance/sampling-query-profiler.md b/docs/ru/operations/optimizing-performance/sampling-query-profiler.md index c77f6a1f290..3d5ec993fdf 100644 --- a/docs/ru/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/ru/operations/optimizing-performance/sampling-query-profiler.md @@ -30,7 +30,7 @@ To analyze the `trace_log` system table: - Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by the whole stack traces. -If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope). +If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui.md#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope). ## Example {#example} diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index e29b9def9d4..4b1d8ce717f 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -47,7 +47,7 @@ ClickHouse перезагружает встроенные словари с з - `min_part_size` - Минимальный размер части таблицы. - `min_part_size_ratio` - Отношение размера минимальной части таблицы к полному размеру таблицы. - `method` - Метод сжатия. Возможные значения: `lz4`, `lz4hc`, `zstd`,`deflate_qpl`. -- `level` – Уровень сжатия. См. [Кодеки](../../sql-reference/statements/create/table/#create-query-common-purpose-codecs). +- `level` – Уровень сжатия. См. [Кодеки](../../sql-reference/statements/create/table.md#create-query-common-purpose-codecs). Можно сконфигурировать несколько разделов ``. @@ -152,7 +152,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part ## custom_settings_prefixes {#custom_settings_prefixes} -Список префиксов для [пользовательских настроек](../../operations/settings/#custom_settings). Префиксы должны перечисляться через запятую. +Список префиксов для [пользовательских настроек](../../operations/settings/index.md#custom_settings). Префиксы должны перечисляться через запятую. **Пример** @@ -162,7 +162,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part **См. также** -- [Пользовательские настройки](../../operations/settings#custom_settings) +- [Пользовательские настройки](../../operations/settings/index.md#custom_settings) ## core_dump {#server_configuration_parameters-core_dump} diff --git a/docs/ru/operations/system-tables/information_schema.md b/docs/ru/operations/system-tables/information_schema.md index 6a9b8134dad..691fec19039 100644 --- a/docs/ru/operations/system-tables/information_schema.md +++ b/docs/ru/operations/system-tables/information_schema.md @@ -178,7 +178,7 @@ table_type: BASE TABLE - `view_definition` ([String](../../sql-reference/data-types/string.md)) — `SELECT` запрос для представления. - `check_option` ([String](../../sql-reference/data-types/string.md)) — `NONE`, нет проверки. - `is_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, представление не обновляется. 
-- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — показывает является ли представление [материализованным](../../sql-reference/statements/create/view/#materialized). Возможные значения: +- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — показывает является ли представление [материализованным](../../sql-reference/statements/create/view.md#materialized). Возможные значения: - `NO` — создано обычное представление. - `YES` — создано материализованное представление. - `is_trigger_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, триггер не обновляется. diff --git a/docs/ru/operations/system-tables/replicated_fetches.md b/docs/ru/operations/system-tables/replicated_fetches.md index 0b91a02cf14..c13f058aae1 100644 --- a/docs/ru/operations/system-tables/replicated_fetches.md +++ b/docs/ru/operations/system-tables/replicated_fetches.md @@ -68,4 +68,4 @@ thread_id: 54 **Смотрите также** -- [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system/#query-language-system-replicated) +- [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md#query-language-system-replicated) diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md index d3185f4fcb0..73de78d1c15 100644 --- a/docs/ru/operations/utilities/clickhouse-benchmark.md +++ b/docs/ru/operations/utilities/clickhouse-benchmark.md @@ -60,7 +60,7 @@ clickhouse-benchmark [keys] < queries_file; - `--stage=WORD` — стадия обработки запроса на сервере. ClickHouse останавливает обработку запроса и возвращает ответ `clickhouse-benchmark` на заданной стадии. Возможные значения: `complete`, `fetch_columns`, `with_mergeable_state`. Значение по умолчанию: `complete`. - `--help` — показывает справку. -Если нужно применить [настройки](../../operations/settings/) для запросов, их можно передать как ключ `--= SETTING_VALUE`. Например, `--max_memory_usage=1048576`. +Если нужно применить [настройки](../../operations/settings/index.md) для запросов, их можно передать как ключ `--= SETTING_VALUE`. Например, `--max_memory_usage=1048576`. ## Вывод {#clickhouse-benchmark-output} diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index b513c51397e..e8d4a3ee9fd 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -27,9 +27,9 @@ DateTime([timezone]) Консольный клиент ClickHouse по умолчанию использует часовой пояс сервера, если для значения `DateTime` часовой пояс не был задан в явном виде при инициализации типа данных. Чтобы использовать часовой пояс клиента, запустите [clickhouse-client](../../interfaces/cli.md) с параметром `--use_client_time_zone`. -ClickHouse отображает значения в зависимости от значения параметра [date\_time\_output\_format](../../operations/settings/#settings-date_time_output_format). Текстовый формат по умолчанию `YYYY-MM-DD hh:mm:ss`. Кроме того, вы можете поменять отображение с помощью функции [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime). +ClickHouse отображает значения в зависимости от значения параметра [date\_time\_output\_format](../../operations/settings/index.md#settings-date_time_output_format). Текстовый формат по умолчанию `YYYY-MM-DD hh:mm:ss`. Кроме того, вы можете поменять отображение с помощью функции [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime). 
-При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date_time_input_format](../../operations/settings/#settings-date_time_input_format). +При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date_time_input_format](../../operations/settings/index.md#settings-date_time_input_format). ## Примеры {#primery} @@ -119,8 +119,8 @@ FROM dt - [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md) - [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) - [Функции для работы с массивами](../../sql-reference/functions/array-functions.md) -- [Настройка `date_time_input_format`](../../operations/settings/#settings-date_time_input_format) -- [Настройка `date_time_output_format`](../../operations/settings/) +- [Настройка `date_time_input_format`](../../operations/settings/index.md#settings-date_time_input_format) +- [Настройка `date_time_output_format`](../../operations/settings/index.md) - [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) - [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime) - [Тип данных `Date`](date.md) diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 8fbcaf9568b..a7e8a478edb 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -268,7 +268,7 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp; ``` :::note -Тип возвращаемого значения описанными далее функциями `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` определяется конфигурационным параметром [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions) имеющим по умолчанию значение `0`. +Тип возвращаемого значения описанными далее функциями `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` определяется конфигурационным параметром [enable_extended_results_for_datetime_functions](../../operations/settings/settings.md#enable-extended-results-for-datetime-functions) имеющим по умолчанию значение `0`. Поведение для * `enable_extended_results_for_datetime_functions = 0`: Функции `toStartOf*`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime`. Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime`. Хотя эти функции могут принимать значения типа `Date32` или `DateTime64` в качестве аргумента, при обработке аргумента вне нормального диапазона значений (`1970` - `2148` для `Date` и `1970-01-01 00:00:00`-`2106-02-07 08:28:15` для `DateTime`) будет получен некорректный результат. diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index f457b54ae28..de54f1b3607 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -2136,7 +2136,7 @@ countDigits(x) :::note "Примечание" Для `Decimal` значений учитывается их масштаб: вычисляется результат по базовому целочисленному типу, полученному как `(value * scale)`. 
Например: `countDigits(42) = 2`, `countDigits(42.000) = 5`, `countDigits(0.04200) = 4`. То есть вы можете проверить десятичное переполнение для `Decimal64` с помощью `countDecimal(x) > 18`. Это медленный вариант [isDecimalOverflow](#is-decimal-overflow). ::: - + **Пример** Запрос: @@ -2297,7 +2297,7 @@ enabledRoles() ## defaultRoles {#default-roles} -Возвращает имена ролей, которые задаются по умолчанию для текущего пользователя при входе в систему. Изначально это все роли, которые разрешено использовать текущему пользователю (см. [GRANT](../../sql-reference/statements/grant/#grant-select)). Список ролей по умолчанию может быть изменен с помощью выражения [SET DEFAULT ROLE](../../sql-reference/statements/set-role.md#set-default-role-statement). +Возвращает имена ролей, которые задаются по умолчанию для текущего пользователя при входе в систему. Изначально это все роли, которые разрешено использовать текущему пользователю (см. [GRANT](../../sql-reference/statements/grant.md#grant-select)). Список ролей по умолчанию может быть изменен с помощью выражения [SET DEFAULT ROLE](../../sql-reference/statements/set-role.md#set-default-role-statement). **Синтаксис** diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index 7a930b529ed..64eae49be6c 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -260,8 +260,8 @@ ENGINE = MergeTree() Кодеки шифрования: -- `CODEC('AES-128-GCM-SIV')` — Зашифровывает данные с помощью AES-128 в режиме [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV. -- `CODEC('AES-256-GCM-SIV')` — Зашифровывает данные с помощью AES-256 в режиме GCM-SIV. +- `CODEC('AES-128-GCM-SIV')` — Зашифровывает данные с помощью AES-128 в режиме [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV. +- `CODEC('AES-256-GCM-SIV')` — Зашифровывает данные с помощью AES-256 в режиме GCM-SIV. Эти кодеки используют фиксированный одноразовый ключ шифрования. Таким образом, это детерминированное шифрование. Оно совместимо с поддерживающими дедупликацию движками, в частности, [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md). Однако у шифрования имеется недостаток: если дважды зашифровать один и тот же блок данных, текст на выходе получится одинаковым, и злоумышленник, у которого есть доступ к диску, заметит эту эквивалентность (при этом доступа к содержимому он не получит). @@ -274,10 +274,10 @@ ENGINE = MergeTree() **Пример** ```sql -CREATE TABLE mytable +CREATE TABLE mytable ( x String Codec(AES_128_GCM_SIV) -) +) ENGINE = MergeTree ORDER BY x; ``` @@ -287,10 +287,10 @@ ENGINE = MergeTree ORDER BY x; **Пример** ```sql -CREATE TABLE mytable +CREATE TABLE mytable ( x String Codec(Delta, LZ4, AES_128_GCM_SIV) -) +) ENGINE = MergeTree ORDER BY x; ``` @@ -299,7 +299,7 @@ ENGINE = MergeTree ORDER BY x; ClickHouse поддерживает временные таблицы со следующими характеристиками: - Временные таблицы исчезают после завершения сессии, в том числе при обрыве соединения. -- Временная таблица использует только модуль памяти. +- Временная таблица использует движок таблиц Memory когда движок не указан и она может использовать любой движок таблиц за исключением движков Replicated и `KeeperMap`. - Невозможно указать базу данных для временной таблицы. Она создается вне баз данных. 
- Невозможно создать временную таблицу распределённым DDL запросом на всех серверах кластера (с опцией `ON CLUSTER`): такая таблица существует только в рамках существующей сессии. - Если временная таблица имеет то же имя, что и некоторая другая, то, при упоминании в запросе без указания БД, будет использована временная таблица. @@ -313,7 +313,7 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], ... -) +) [ENGINE = engine] ``` В большинстве случаев, временные таблицы создаются не вручную, а при использовании внешних данных для запроса, или при распределённом `(GLOBAL) IN`. Подробнее см. соответствующие разделы diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index 7c281634c98..73c63850750 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -107,7 +107,8 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - [CREATE](#grant-create) - `CREATE DATABASE` - `CREATE TABLE` - - `CREATE TEMPORARY TABLE` + - `CREATE ARBITRARY TEMPORARY TABLE` + - `CREATE TEMPORARY TABLE` - `CREATE VIEW` - `CREATE DICTIONARY` - `CREATE FUNCTION` @@ -314,7 +315,8 @@ GRANT INSERT(x,y) ON db.table TO john - `CREATE`. Уровень: `GROUP` - `CREATE DATABASE`. Уровень: `DATABASE` - `CREATE TABLE`. Уровень: `TABLE` - - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL` + - `CREATE ARBITRARY TEMPORARY TABLE`. Уровень: `GLOBAL` + - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL` - `CREATE VIEW`. Уровень: `VIEW` - `CREATE DICTIONARY`. Уровень: `DICTIONARY` diff --git a/docs/ru/sql-reference/statements/select/array-join.md b/docs/ru/sql-reference/statements/select/array-join.md index 9d2dbf54a2b..6c7fcbba7cc 100644 --- a/docs/ru/sql-reference/statements/select/array-join.md +++ b/docs/ru/sql-reference/statements/select/array-join.md @@ -146,7 +146,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS ma └───────┴─────────┴───┴─────┴────────┘ ``` -В приведенном ниже примере используется функция [arrayEnumerate](../../../sql-reference/functions/array-functions#array_functions-arrayenumerate): +В приведенном ниже примере используется функция [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate): ``` sql SELECT s, arr, a, num, arrayEnumerate(arr) @@ -259,7 +259,7 @@ ARRAY JOIN nest AS n; └───────┴─────┴─────┴─────────┴────────────┘ ``` -Пример использования функции [arrayEnumerate](../../../sql-reference/functions/array-functions#array_functions-arrayenumerate): +Пример использования функции [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate): ``` sql SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num diff --git a/docs/zh/development/continuous-integration.md b/docs/zh/development/continuous-integration.md index a52d77a7a33..56e3e1dfd50 100644 --- a/docs/zh/development/continuous-integration.md +++ b/docs/zh/development/continuous-integration.md @@ -34,7 +34,7 @@ git push ## 描述信息检查 {#description-check} 检查pull请求的描述是否符合[PULL_REQUEST_TEMPLATE.md](https://github.com/ClickHouse/ClickHouse/blob/master/.github/PULL_REQUEST_TEMPLATE.md)模板. -您必须为您的更改指定一个更改日志类别(例如,Bug修复), 并且为[CHANGELOG.md](../whats-new/changelog/)编写一条用户可读的消息用来描述更改. +您必须为您的更改指定一个更改日志类别(例如,Bug修复), 并且为[CHANGELOG.md](../whats-new/changelog/index.md)编写一条用户可读的消息用来描述更改. ## 推送到DockerHub {#push-to-dockerhub} 生成用于构建和测试的docker映像, 然后将它们推送到DockerHub. 
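A minimal sketch of the temporary-table behaviour and the new `CREATE ARBITRARY TEMPORARY TABLE` privilege touched in the hunks above, assuming a server version that accepts an explicit engine for temporary tables; the table, column and user names are illustrative only:

```sql
-- Without an ENGINE clause the temporary table uses the Memory engine.
CREATE TEMPORARY TABLE tmp_ids (id UInt64);

-- Any table engine except the Replicated family and KeeperMap may be given explicitly.
CREATE TEMPORARY TABLE tmp_ids_mt (id UInt64) ENGINE = MergeTree ORDER BY id;

-- The second form is what the new GLOBAL-level privilege above is meant to cover.
GRANT CREATE ARBITRARY TEMPORARY TABLE ON *.* TO john;
```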
diff --git a/docs/zh/engines/database-engines/index.md b/docs/zh/engines/database-engines/index.md index 0b24590686e..2839f819671 100644 --- a/docs/zh/engines/database-engines/index.md +++ b/docs/zh/engines/database-engines/index.md @@ -16,7 +16,7 @@ sidebar_position: 27 - [MaterializeMySQL](../../engines/database-engines/materialized-mysql.md) -- [Lazy](../../engines/database-engines/lazy) +- [Lazy](../../engines/database-engines/lazy.md) - [Atomic](../../engines/database-engines/atomic.md) diff --git a/docs/zh/engines/database-engines/materialize-mysql.md b/docs/zh/engines/database-engines/materialize-mysql.md index 10049017c71..b7ee3a038b8 100644 --- a/docs/zh/engines/database-engines/materialize-mysql.md +++ b/docs/zh/engines/database-engines/materialize-mysql.md @@ -38,8 +38,8 @@ ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'passwor - `max_wait_time_when_mysql_unavailable` — 当MySQL不可用时重试间隔(毫秒)。负值禁止重试。默认值: `1000`. - `allows_query_when_mysql_lost` — 当mysql丢失时,允许查询物化表。默认值: `0` (`false`). ``` -CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***') - SETTINGS +CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***') + SETTINGS allows_query_when_mysql_lost=true, max_wait_time_when_mysql_unavailable=10000; ``` @@ -97,7 +97,7 @@ CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', ### DDL查询 {#ddl-queries} -MySQL DDL查询转换为相应的ClickHouse DDL查询([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop), [RENAME](../../sql-reference/statements/rename.md))。如果ClickHouse无法解析某个DDL查询,则该查询将被忽略。 +MySQL DDL查询转换为相应的ClickHouse DDL查询([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md))。如果ClickHouse无法解析某个DDL查询,则该查询将被忽略。 ### Data Replication {#data-replication} @@ -148,9 +148,9 @@ mysql> SELECT * FROM test; ``` ```text -+---+------+------+ ++---+------+------+ | a | b | c | -+---+------+------+ ++---+------+------+ | 2 | 222 | Wow! | +---+------+------+ ``` @@ -177,9 +177,9 @@ SELECT * FROM mysql.test; ``` ``` text -┌─a─┬──b─┐ -│ 1 │ 11 │ -│ 2 │ 22 │ +┌─a─┬──b─┐ +│ 1 │ 11 │ +│ 2 │ 22 │ └───┴────┘ ``` @@ -190,7 +190,7 @@ SELECT * FROM mysql.test; ``` ``` text -┌─a─┬───b─┬─c────┐ -│ 2 │ 222 │ Wow! │ +┌─a─┬───b─┬─c────┐ +│ 2 │ 222 │ Wow! │ └───┴─────┴──────┘ ``` diff --git a/docs/zh/engines/database-engines/materialized-mysql.md b/docs/zh/engines/database-engines/materialized-mysql.md index c34d3a6f20d..4cc4ae58840 100644 --- a/docs/zh/engines/database-engines/materialized-mysql.md +++ b/docs/zh/engines/database-engines/materialized-mysql.md @@ -109,7 +109,7 @@ MySQL中的Time 类型,会被ClickHouse转换成微秒来存储 ### DDL Queries {#ddl-queries} -MySQL DDL 语句会被转换成对应的ClickHouse DDL 语句,比如: ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop), [RENAME](../../sql-reference/statements/rename.md)). 如果ClickHouse 无法解析某些语句DDL 操作,则会跳过。 +MySQL DDL 语句会被转换成对应的ClickHouse DDL 语句,比如: ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md)). 
如果ClickHouse 无法解析某些语句DDL 操作,则会跳过。 ### 数据复制 {#data-replication} @@ -152,17 +152,17 @@ ClickHouse只有一个物理排序,由 `order by` 条件决定。要创建一 这些是你可以对MaterializedMySQL表重写的模式转换操作: * 修改列类型。必须与原始类型兼容,否则复制将失败。例如,可以将`UInt32`列修改为`UInt64`,不能将 `String` 列修改为 `Array(String)`。 - * 修改 [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl). + * 修改 [column TTL](../table-engines/mergetree-family/mergetree.md#mergetree-column-ttl). * 修改 [column compression codec](../../sql-reference/statements/create/table.mdx#codecs). * 增加 [ALIAS columns](../../sql-reference/statements/create/table.mdx#alias). - * 增加 [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes) - * 增加 [projections](../table-engines/mergetree-family/mergetree/#projections). + * 增加 [skipping indexes](../table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes) + * 增加 [projections](../table-engines/mergetree-family/mergetree.md#projections). 请注意,当使用 `SELECT ... FINAL ` (MaterializedMySQL默认是这样做的) 时,预测优化是被禁用的,所以这里是受限的, `INDEX ... TYPE hypothesis `[在v21.12的博客文章中描述]](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)可能在这种情况下更有用。 - * 修改 [PARTITION BY](../table-engines/mergetree-family/custom-partitioning-key/) - * 修改 [ORDER BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) - * 修改 [PRIMARY KEY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) - * 增加 [SAMPLE BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) - * 增加 [table TTL](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) + * 修改 [PARTITION BY](../table-engines/mergetree-family/custom-partitioning-key.md) + * 修改 [ORDER BY](../table-engines/mergetree-family/mergetree.md#mergetree-query-clauses) + * 修改 [PRIMARY KEY](../table-engines/mergetree-family/mergetree.md#mergetree-query-clauses) + * 增加 [SAMPLE BY](../table-engines/mergetree-family/mergetree.md#mergetree-query-clauses) + * 增加 [table TTL](../table-engines/mergetree-family/mergetree.md#mergetree-query-clauses) ```sql CREATE DATABASE db_name ENGINE = MaterializedMySQL(...) 
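The MergeTree features that the override list above links to (column TTL, data-skipping indexes, projections, PARTITION BY / ORDER BY) combine on a plain MergeTree table roughly as follows; a hedged sketch with invented table and column names, not an example taken from the diff:

```sql
CREATE TABLE db.events
(
    d    DateTime,
    user UInt64,
    note String TTL d + INTERVAL 1 MONTH,                          -- column TTL
    INDEX idx_note note TYPE tokenbf_v1(4096, 3, 0) GRANULARITY 4, -- data-skipping index
    PROJECTION p_by_user (SELECT user, count() GROUP BY user)      -- projection
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)                                           -- PARTITION BY
ORDER BY (user, d);                                                -- ORDER BY / primary key
```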
diff --git a/docs/zh/engines/table-engines/mergetree-family/mergetree.md b/docs/zh/engines/table-engines/mergetree-family/mergetree.md index 1fcf64fcd25..54524388650 100644 --- a/docs/zh/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/mergetree.md @@ -192,7 +192,7 @@ ClickHouse 会为每个数据片段创建一个索引文件来存储这些标记 ClickHouse 不要求主键唯一,所以您可以插入多条具有相同主键的行。 -您可以在`PRIMARY KEY`与`ORDER BY`条件中使用`可为空的`类型的表达式,但强烈建议不要这么做。为了启用这项功能,请打开[allow_nullable_key](../../../operations/settings/#allow-nullable-key),[NULLS_LAST](../../../sql-reference/statements/select/order-by.md/#sorting-of-special-values)规则也适用于`ORDER BY`条件中有NULL值的情况下。 +您可以在`PRIMARY KEY`与`ORDER BY`条件中使用`可为空的`类型的表达式,但强烈建议不要这么做。为了启用这项功能,请打开[allow_nullable_key](../../../operations/settings/index.md#allow-nullable-key),[NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values)规则也适用于`ORDER BY`条件中有NULL值的情况下。 ### 主键的选择 {#zhu-jian-de-xuan-ze} @@ -330,7 +330,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 支持的数据类型:`Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`。 - 以下函数会用到这个索引: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions) + 以下函数会用到这个索引: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md) ``` sql INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 @@ -353,14 +353,14 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达 | [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | | [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | | [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [in](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | | [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | | [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | | [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [empty](../../../sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../../sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | 
hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | 常量参数小于 ngram 大小的函数不能使用 `ngrambf_v1` 进行查询优化。 diff --git a/docs/zh/faq/general/why-clickhouse-is-so-fast.md b/docs/zh/faq/general/why-clickhouse-is-so-fast.md index a30b56adb9a..1962b8b90c2 100644 --- a/docs/zh/faq/general/why-clickhouse-is-so-fast.md +++ b/docs/zh/faq/general/why-clickhouse-is-so-fast.md @@ -9,7 +9,7 @@ sidebar_position: 8 It was designed to be fast. Query execution performance has always been a top priority during the development process, but other important characteristics like user-friendliness, scalability, and security were also considered so ClickHouse could become a real production system. -ClickHouse was initially built as a prototype to do just a single task well: to filter and aggregate data as fast as possible. That’s what needs to be done to build a typical analytical report and that’s what a typical [GROUP BY](../../sql-reference/statements/select/group-by/) query does. ClickHouse team has made several high-level decisions that combined made achieving this task possible: +ClickHouse was initially built as a prototype to do just a single task well: to filter and aggregate data as fast as possible. That’s what needs to be done to build a typical analytical report and that’s what a typical [GROUP BY](../../sql-reference/statements/select/group-by.md) query does. ClickHouse team has made several high-level decisions that combined made achieving this task possible: Column-oriented storage : Source data often contain hundreds or even thousands of columns, while a report can use just a few of them. The system needs to avoid reading unnecessary columns, or most expensive disk read operations would be wasted. diff --git a/docs/zh/faq/integration/index.md b/docs/zh/faq/integration/index.md index 6678956a0b3..3a3f97761f3 100644 --- a/docs/zh/faq/integration/index.md +++ b/docs/zh/faq/integration/index.md @@ -1,5 +1,5 @@ --- -slug: /zh/faq/integration/ +slug: /zh/faq/integration/ title: 关于集成ClickHouse和其他系统的问题 toc_hidden_folder: true sidebar_position: 4 @@ -17,6 +17,6 @@ sidebar_label: Integration !!! info "没看到你要找的东西吗?" - 查看[其他faq类别](../../faq/)或浏览左边栏中的主要文档文章。 + 查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。 {## [原文](https://clickhouse.com/docs/en/faq/integration/) ##} \ No newline at end of file diff --git a/docs/zh/faq/integration/json-import.md b/docs/zh/faq/integration/json-import.md index 861abacc1e1..2d5c687316d 100644 --- a/docs/zh/faq/integration/json-import.md +++ b/docs/zh/faq/integration/json-import.md @@ -7,29 +7,29 @@ sidebar_position: 11 # How to Import JSON Into ClickHouse? {#how-to-import-json-into-clickhouse} -ClickHouse supports a wide range of [data formats for input and output](../../interfaces/formats/). There are multiple JSON variations among them, but the most commonly used for data ingestion is [JSONEachRow](../../interfaces/formats/#jsoneachrow). It expects one JSON object per row, each object separated by a newline. +ClickHouse supports a wide range of [data formats for input and output](../../interfaces/formats.md). There are multiple JSON variations among them, but the most commonly used for data ingestion is [JSONEachRow](../../interfaces/formats.md#jsoneachrow). It expects one JSON object per row, each object separated by a newline. 
## Examples {#examples} -Using [HTTP interface](../../interfaces/http/): +Using [HTTP interface](../../interfaces/http.md): ``` bash $ echo '{"foo":"bar"}' | curl 'http://localhost:8123/?query=INSERT%20INTO%20test%20FORMAT%20JSONEachRow' --data-binary @- ``` -Using [CLI interface](../../interfaces/cli/): +Using [CLI interface](../../interfaces/cli.md): ``` bash $ echo '{"foo":"bar"}' | clickhouse-client --query="INSERT INTO test FORMAT JSONEachRow" ``` -Instead of inserting data manually, you might consider to use one of [client libraries](../../interfaces/) instead. +Instead of inserting data manually, you might consider to use one of [client libraries](../../interfaces/index.md) instead. ## Useful Settings {#useful-settings} - `input_format_skip_unknown_fields` allows to insert JSON even if there were additional fields not present in table schema (by discarding them). -- `input_format_import_nested_json` allows to insert nested JSON objects into columns of [Nested](../../sql-reference/data-types/nested-data-structures/nested/) type. +- `input_format_import_nested_json` allows to insert nested JSON objects into columns of [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) type. -:::note +:::note Settings are specified as `GET` parameters for the HTTP interface or as additional command-line arguments prefixed with `--` for the `CLI` interface. ::: \ No newline at end of file diff --git a/docs/zh/faq/operations/delete-old-data.md b/docs/zh/faq/operations/delete-old-data.md index b2229058cad..24181116bab 100644 --- a/docs/zh/faq/operations/delete-old-data.md +++ b/docs/zh/faq/operations/delete-old-data.md @@ -19,7 +19,7 @@ The key advantage of this approach is that it does not need any external system TTL can also be used to move data not only to [/dev/null](https://en.wikipedia.org/wiki/Null_device), but also between different storage systems, like from SSD to HDD. ::: -More details on [configuring TTL](../../engines/table-engines/mergetree-family/mergetree/#table_engine-mergetree-ttl). +More details on [configuring TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). ## ALTER DELETE {#alter-delete} @@ -41,4 +41,4 @@ More details on [manipulating partitions](../../sql-reference/statements/alter/p It’s rather radical to drop all data from a table, but in some cases it might be exactly what you need. -More details on [table truncation](../../sql-reference/statements/truncate/). +More details on [table truncation](../../sql-reference/statements/truncate.md). \ No newline at end of file diff --git a/docs/zh/faq/operations/index.md b/docs/zh/faq/operations/index.md index 071cc872e4e..153eda6199a 100644 --- a/docs/zh/faq/operations/index.md +++ b/docs/zh/faq/operations/index.md @@ -1,5 +1,5 @@ --- -slug: /zh/faq/operations/ +slug: /zh/faq/operations/ title: 关于操作ClickHouse服务器和集群的问题 toc_hidden_folder: true sidebar_position: 3 @@ -13,9 +13,9 @@ sidebar_label: Operations - [如果想在生产环境部署,需要用哪个版本的 ClickHouse 呢?](../../faq/operations/production.md) - [是否可能从 ClickHouse 数据表中删除所有旧的数据记录?](../../faq/operations/delete-old-data.md) - [ClickHouse支持多区域复制吗?](../../faq/operations/multi-region-replication.md) - + !!! info "没看到你要找的东西吗?" 
- 查看[其他faq类别](../../faq/)或浏览左边栏中的主要文档文章。 + 查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。 {## [原文](https://clickhouse.com/docs/en/faq/production/) ##} diff --git a/docs/zh/faq/operations/production.md b/docs/zh/faq/operations/production.md index cc5cf6b9614..90db050e8d3 100644 --- a/docs/zh/faq/operations/production.md +++ b/docs/zh/faq/operations/production.md @@ -67,6 +67,6 @@ For production use, there are two key options: `stable` and `lts`. Here is some Many teams who initially thought that `lts` is the way to go, often switch to `stable` anyway because of some recent feature that’s important for their product. -:::warning -One more thing to keep in mind when upgrading ClickHouse: we’re always keeping eye on compatibility across releases, but sometimes it’s not reasonable to keep and some minor details might change. So make sure you check the [changelog](../../whats-new/changelog/) before upgrading to see if there are any notes about backward-incompatible changes. +:::warning +One more thing to keep in mind when upgrading ClickHouse: we’re always keeping eye on compatibility across releases, but sometimes it’s not reasonable to keep and some minor details might change. So make sure you check the [changelog](../../whats-new/changelog/index.md) before upgrading to see if there are any notes about backward-incompatible changes. ::: \ No newline at end of file diff --git a/docs/zh/faq/use-cases/index.md b/docs/zh/faq/use-cases/index.md index 75ef26368a3..ff0d873b37f 100644 --- a/docs/zh/faq/use-cases/index.md +++ b/docs/zh/faq/use-cases/index.md @@ -14,6 +14,6 @@ sidebar_label: 使用案例 - [我能把 ClickHouse 当做Key-value 键值存储来使用吗?](../../faq/use-cases/key-value.md) !!! info "没找到您所需要的内容?" - 请查看[其他常见问题类别](../../faq/)或浏览左侧边栏中的主要文档文章。 + 请查看[其他常见问题类别](../../faq/index.md)或浏览左侧边栏中的主要文档文章。 {## [原始文档](https://clickhouse.com/docs/en/faq/use-cases/) ##} diff --git a/docs/zh/getting-started/example-datasets/recipes.mdx b/docs/zh/getting-started/example-datasets/recipes.mdx index b7ed92962c5..b7f8fe8eafd 100644 --- a/docs/zh/getting-started/example-datasets/recipes.mdx +++ b/docs/zh/getting-started/example-datasets/recipes.mdx @@ -1,5 +1,5 @@ ---- -slug: /zh/getting-started/example-datasets/recipes +--- +slug: /zh/getting-started/example-datasets/recipes sidebar_label: 食谱数据集 title: "食谱数据集" --- @@ -8,8 +8,8 @@ RecipeNLG 数据集可在 [此处](https://recipenlg.cs.put.poznan.pl/dataset) ## 下载并解压数据集 -1. 进入下载页面[https://recipenlg.cs.put.poznan.pl/dataset](https://recipenlg.cs.put.poznan.pl/dataset)。 -2. 接受条款和条件并下载 zip 文件。 +1. 进入下载页面[https://recipenlg.cs.put.poznan.pl/dataset](https://recipenlg.cs.put.poznan.pl/dataset)。 +2. 接受条款和条件并下载 zip 文件。 3. 
使用 `unzip` 解压 zip 文件,得到 `full_dataset.csv` 文件。 ## 创建表 @@ -49,13 +49,13 @@ clickhouse-client --query " 这是一个展示如何解析自定义 CSV,这其中涉及了许多调整。 -说明: -- 数据集为 CSV 格式,但在插入时需要一些预处理;使用表函数 [input](../../sql-reference/table-functions/input.md) 进行预处理; -- CSV 文件的结构在表函数 `input` 的参数中指定; -- 字段 `num`(行号)是不需要的 - 可以忽略并从文件中进行解析; -- 使用 `FORMAT CSVWithNames`,因为标题不包含第一个字段的名称,因此 CSV 中的标题将被忽略(通过命令行参数 `--input_format_with_names_use_header 0`); -- 文件仅使用双引号将 CSV 字符串括起来;一些字符串没有用双引号括起来,单引号也不能被解析为括起来的字符串 - 所以添加`--format_csv_allow_single_quote 0`参数接受文件中的单引号; -- 由于某些 CSV 的字符串的开头包含 `\M/` 因此无法被解析; CSV 中唯一可能以反斜杠开头的值是 `\N`,这个值被解析为 SQL NULL。通过添加`--input_format_allow_errors_num 10`参数,允许在导入过程中跳过 10 个格式错误; +说明: +- 数据集为 CSV 格式,但在插入时需要一些预处理;使用表函数 [input](../../sql-reference/table-functions/input.md) 进行预处理; +- CSV 文件的结构在表函数 `input` 的参数中指定; +- 字段 `num`(行号)是不需要的 - 可以忽略并从文件中进行解析; +- 使用 `FORMAT CSVWithNames`,因为标题不包含第一个字段的名称,因此 CSV 中的标题将被忽略(通过命令行参数 `--input_format_with_names_use_header 0`); +- 文件仅使用双引号将 CSV 字符串括起来;一些字符串没有用双引号括起来,单引号也不能被解析为括起来的字符串 - 所以添加`--format_csv_allow_single_quote 0`参数接受文件中的单引号; +- 由于某些 CSV 的字符串的开头包含 `\M/` 因此无法被解析; CSV 中唯一可能以反斜杠开头的值是 `\N`,这个值被解析为 SQL NULL。通过添加`--input_format_allow_errors_num 10`参数,允许在导入过程中跳过 10 个格式错误; - 在数据集中的 Ingredients、directions 和 NER 字段为数组;但这些数组并没有以一般形式表示:这些字段作为 JSON 序列化为字符串,然后放入 CSV 中 - 在导入是将它们解析为字符串,然后使用 [JSONExtract](../../sql-reference/functions/json-functions.md ) 函数将其转换为数组。 ## 验证插入的数据 @@ -80,7 +80,7 @@ SELECT count() FROM recipes; ### 按配方数量排列的顶级组件: -在此示例中,我们学习如何使用 [arrayJoin](../../sql-reference/functions/array-join/) 函数将数组扩展为行的集合。 +在此示例中,我们学习如何使用 [arrayJoin](../../sql-reference/functions/array-join.md) 函数将数组扩展为行的集合。 请求: @@ -185,7 +185,7 @@ LIMIT 10 10 rows in set. Elapsed: 0.215 sec. Processed 2.23 million rows, 1.48 GB (10.35 million rows/s., 6.86 GB/s.) ``` -在此示例中,我们使用 [has](../../sql-reference/functions/array-functions/#hasarr-elem) 函数来按过滤数组类型元素并按 directions 的数量进行排序。 +在此示例中,我们使用 [has](../../sql-reference/functions/array-functions.md#hasarr-elem) 函数来按过滤数组类型元素并按 directions 的数量进行排序。 有一个婚礼蛋糕需要整个126个步骤来制作!显示 directions: diff --git a/docs/zh/guides/improving-query-performance/skipping-indexes.md b/docs/zh/guides/improving-query-performance/skipping-indexes.md index f5889898c2c..f9f43e46927 100644 --- a/docs/zh/guides/improving-query-performance/skipping-indexes.md +++ b/docs/zh/guides/improving-query-performance/skipping-indexes.md @@ -89,7 +89,7 @@ SELECT * FROM skip_table WHERE my_value IN (125, 700) 下图是更直观的展示,这就是如何读取和选择my_value为125的4096行,以及如何跳过以下行而不从磁盘读取: -![Simple Skip](../../../en/guides/improving-query-performance/images/simple_skip.svg) +![Simple Skip](../../../en/guides/best-practices/images/simple_skip.svg) 通过在执行查询时启用跟踪,用户可以看到关于跳数索引使用情况的详细信息。在clickhouse-client中设置send_logs_level: @@ -126,7 +126,7 @@ Bloom filter是一种数据结构,它允许对集合成员进行高效的是 * 基本的**bloom_filter**接受一个可选参数,该参数表示在0到1之间允许的“假阳性”率(如果未指定,则使用.025)。 * 更专业的**tokenbf_v1**。需要三个参数,用来优化布隆过滤器:(1)过滤器的大小字节(大过滤器有更少的假阳性,有更高的存储成本),(2)哈希函数的个数(更多的散列函数可以减少假阳性)。(3)布隆过滤器哈希函数的种子。有关这些参数如何影响布隆过滤器功能的更多细节,请参阅 [这里](https://hur.st/bloomfilter/) 。此索引仅适用于String、FixedString和Map类型的数据。输入表达式被分割为由非字母数字字符分隔的字符序列。例如,列值`This is a candidate for a "full text" search`将被分割为`This` `is` `a` `candidate` `for` `full` `text` `search`。它用于LIKE、EQUALS、in、hasToken()和类似的长字符串中单词和其他值的搜索。例如,一种可能的用途是在非结构的应用程序日志行列中搜索少量的类名或行号。 - + * 更专业的**ngrambf_v1**。该索引的功能与tokenbf_v1相同。在Bloom filter设置之前需要一个额外的参数,即要索引的ngram的大小。一个ngram是长度为n的任何字符串,比如如果n是4,`A short string`会被分割为`A sh`` sho`, `shor`, `hort`, `ort s`, `or st`, `r str`, ` stri`, `trin`, `ring`。这个索引对于文本搜索也很有用,特别是没有单词间断的语言,比如中文。 ### 跳数索引函数 @@ -150,7 
+150,7 @@ Bloom filter是一种数据结构,它允许对集合成员进行高效的是 考虑以下数据分布: -![Bad Skip!](../../../en/guides/improving-query-performance/images/bad_skip_1.svg) +![Bad Skip!](../../../en/guides/best-practices/images/bad_skip_1.svg) 假设主键/顺序是时间戳,并且在visitor_id上有一个索引。考虑下面的查询: diff --git a/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md b/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md index 18b23a79f86..27b6679e2c1 100644 --- a/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md +++ b/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md @@ -1,5 +1,5 @@ --- -slug: /zh/guides/improving-query-performance/sparse-primary-indexes +slug: /zh/guides/best-practices sidebar_label: 主键稀疏索引 sidebar_position: 20 --- @@ -19,21 +19,21 @@ sidebar_position: 20 :::note 这篇文章主要关注稀疏索引。 -如果想了解二级跳数索引,请查看[教程](./skipping-indexes.md). +如果想了解二级跳数索引,请查看[教程](./skipping-indexes.md). ::: -## 数据集 +## 数据集 在本文中,我们将使用一个匿名的web流量数据集。 -- 我们将使用样本数据集中的887万行(事件)的子集。 +- 我们将使用样本数据集中的887万行(事件)的子集。 - 未压缩的数据大小为887万个事件和大约700mb。当存储在ClickHouse时,压缩为200mb。 - 在我们的子集中,每行包含三列,表示在特定时间(EventTime列)单击URL (URL列)的互联网用户(UserID列)。 通过这三个列,我们已经可以制定一些典型的web分析查询,如: - + - 某个用户点击次数最多的前10个url是什么? - 点击某个URL次数最多的前10名用户是谁? - 用户点击特定URL的最频繁时间(比如一周中的几天)是什么? @@ -44,7 +44,7 @@ sidebar_position: 20 ## 全表扫描 -为了了解在没有主键的情况下如何对数据集执行查询,我们通过执行以下SQL DDL语句(使用MergeTree表引擎)创建了一个表: +为了了解在没有主键的情况下如何对数据集执行查询,我们通过执行以下SQL DDL语句(使用MergeTree表引擎)创建了一个表: ```sql CREATE TABLE hits_NoPrimaryKey @@ -70,11 +70,11 @@ FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz') WHERE URL != ''; ``` 结果: -```response +```response Ok. 0 rows in set. Elapsed: 145.993 sec. Processed 8.87 million rows, 18.40 GB (60.78 thousand rows/s., 126.06 MB/s.) -``` +``` ClickHouse客户端输出了执行结果,插入了887万行数据。 @@ -102,7 +102,7 @@ ORDER BY Count DESC LIMIT 10; ``` 结果: -```response +```response ┌─URL────────────────────────────┬─Count─┐ │ http://auto.ru/chatay-barana.. │ 170 │ │ http://auto.ru/chatay-id=371...│ 52 │ @@ -117,10 +117,10 @@ LIMIT 10; └────────────────────────────────┴───────┘ 10 rows in set. Elapsed: 0.022 sec. -// highlight-next-line -Processed 8.87 million rows, +// highlight-next-line +Processed 8.87 million rows, 70.45 MB (398.53 million rows/s., 3.17 GB/s.) -``` +``` ClickHouse客户端输出表明,ClickHouse执行了一个完整的表扫描!我们的表的887万行中的每一行都被加载到ClickHouse中,这不是可扩展的。 @@ -131,7 +131,7 @@ ClickHouse客户端输出表明,ClickHouse执行了一个完整的表扫描! ## 包含主键的表 -创建一个包含联合主键UserID和URL列的表: +创建一个包含联合主键UserID和URL列的表: ```sql CREATE TABLE hits_UserID_URL @@ -141,7 +141,7 @@ CREATE TABLE hits_UserID_URL `EventTime` DateTime ) ENGINE = MergeTree -// highlight-next-line +// highlight-next-line PRIMARY KEY (UserID, URL) ORDER BY (UserID, URL, EventTime) SETTINGS index_granularity = 8192, index_granularity_bytes = 0; @@ -190,7 +190,7 @@ FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz') WHERE URL != ''; ``` 结果: -```response +```response 0 rows in set. Elapsed: 149.432 sec. Processed 8.87 million rows, 18.40 GB (59.38 thousand rows/s., 123.16 MB/s.) 
``` @@ -219,7 +219,7 @@ FROM system.parts WHERE (table = 'hits_UserID_URL') AND (active = 1) FORMAT Vertical; ``` - + 结果: ```response @@ -237,7 +237,7 @@ bytes_on_disk: 207.07 MiB ``` 客户端输出表明: - + - 表数据以wide format存储在一个特定目录,每个列有一个数据文件和mark文件。 - 表有887万行数据。 - 未压缩的数据有733.28 MB。 @@ -278,8 +278,8 @@ bytes_on_disk: 207.07 MiB ## 数据按照主键排序存储在磁盘上 -上面创建的表有: -- 联合主键 (UserID, URL) +上面创建的表有: +- 联合主键 (UserID, URL) - 联合排序键 (UserID, URL, EventTime)。 :::note @@ -293,7 +293,7 @@ bytes_on_disk: 207.07 MiB ::: -插入的行按照主键列(以及排序键的附加EventTime列)的字典序(从小到大)存储在磁盘上。 +插入的行按照主键列(以及排序键的附加EventTime列)的字典序(从小到大)存储在磁盘上。 :::note ClickHouse允许插入具有相同主键列的多行数据。在这种情况下(参见下图中的第1行和第2行),最终的顺序是由指定的排序键决定的,这里是EventTime列的值。 @@ -307,7 +307,7 @@ ClickHouse允许插入具有相同主键列的多行数据。在这种情况下( - 然后是URL, - 最后是EventTime: - + UserID.bin,URL.bin,和EventTime.bin是UserIDURL,和EventTime列的数据文件。
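To see the granule selection described above without reading the trace log, `EXPLAIN` with `indexes = 1` reports which parts and marks the primary index keeps; a sketch against the table from these hunks (the exact output layout and counts depend on the server version and the loaded data):

```sql
EXPLAIN indexes = 1
SELECT URL, count(URL) AS Count
FROM hits_UserID_URL
WHERE UserID = 749927693
GROUP BY URL
ORDER BY Count DESC
LIMIT 10;
-- The PrimaryKey block of the output shows the selected parts and how many
-- of the 1083 granules remain after the binary search over the index marks.
```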
@@ -331,7 +331,7 @@ UserID.bin,URL.bin,和EventTime.bin是UserID 下图显示了如何将表中的887万行(列值)组织成1083个颗粒,这是表的DDL语句包含设置index_granularity(设置为默认值8192)的结果。 - + 第一个(根据磁盘上的物理顺序)8192行(它们的列值)在逻辑上属于颗粒0,然后下一个8192行(它们的列值)属于颗粒1,以此类推。 @@ -355,21 +355,21 @@ UserID.bin,URL.bin,和EventTime.bin是UserID 下面的图显示了索引存储了每个颗粒的最小主键列值(在上面的图中用橙色标记的值)。 例如: -- 第一个索引条目(下图中的“mark 0”)存储上图中颗粒0的主键列的最小值, -- 第二个索引条目(下图中的“mark 1”)存储上图中颗粒1的主键列的最小值,以此类推。 +- 第一个索引条目(下图中的“mark 0”)存储上图中颗粒0的主键列的最小值, +- 第二个索引条目(下图中的“mark 1”)存储上图中颗粒1的主键列的最小值,以此类推。 - + -在我们的表中,索引总共有1083个条目,887万行数据和1083个颗粒: +在我们的表中,索引总共有1083个条目,887万行数据和1083个颗粒: - + :::note - 最后一个索引条目(上图中的“mark 1082”)存储了上图中颗粒1082的主键列的最大值。 - 索引条目(索引标记)不是基于表中的特定行,而是基于颗粒。例如,对于上图中的索引条目‘mark 0’,在我们的表中没有UserID为240.923且URL为“goal://metry=10000467796a411…”的行,相反,对于该表,有一个颗粒0,在该颗粒中,最小UserID值是240.923,最小URL值是“goal://metry=10000467796a411…”,这两个值来自不同的行。 -- 主索引文件完全加载到主内存中。如果文件大于可用的空闲内存空间,则ClickHouse将发生错误。 +- 主索引文件完全加载到主内存中。如果文件大于可用的空闲内存空间,则ClickHouse将发生错误。 ::: @@ -377,11 +377,11 @@ UserID.bin,URL.bin,和EventTime.bin是UserID - UserID index marks:
主索引中存储的UserID值按升序排序。
上图中的‘mark 1’指示颗粒1中所有表行的UserID值,以及随后所有颗粒中的UserID值,都保证大于或等于4.073.710。 - + [正如我们稍后将看到的](#query-on-userid-fast), 当查询对主键的第一列进行过滤时,此全局有序使ClickHouse能够对第一个键列的索引标记使用二分查找算法。 -- URL index marks:
- 主键列UserIDURL有相同的基数,这意味着第一列之后的所有主键列的索引标记通常只表示每个颗粒的数据范围。
+- URL index marks:
+ 主键列UserIDURL有相同的基数,这意味着第一列之后的所有主键列的索引标记通常只表示每个颗粒的数据范围。
例如,‘mark 0’中的URL列所有的值都大于等于goal://metry=10000467796a411..., 然后颗粒1中的URL并不是如此,这是因为‘mark 1‘与‘mark 0‘具有不同的UserID列值。 稍后我们将更详细地讨论这对查询执行性能的影响。 @@ -401,7 +401,7 @@ GROUP BY URL ORDER BY Count DESC LIMIT 10; ``` - + 结果: @@ -420,8 +420,8 @@ LIMIT 10; └────────────────────────────────┴───────┘ 10 rows in set. Elapsed: 0.005 sec. -// highlight-next-line -Processed 8.19 thousand rows, +// highlight-next-line +Processed 8.19 thousand rows, 740.18 KB (1.53 million rows/s., 138.59 MB/s.) ``` @@ -431,13 +431,13 @@ ClickHouse客户端的输出显示,没有进行全表扫描,只有8.19万行 如果trace logging打开了,那ClickHouse服务端日志会显示ClickHouse正在对1083个UserID索引标记执行二分查找以便识别可能包含UserID列值为749927693的行的颗粒。这需要19个步骤,平均时间复杂度为O(log2 n): ```response ...Executor): Key condition: (column 0 in [749927693, 749927693]) -// highlight-next-line +// highlight-next-line ...Executor): Running binary search on index range for part all_1_9_2 (1083 marks) ...Executor): Found (LEFT) boundary mark: 176 ...Executor): Found (RIGHT) boundary mark: 177 ...Executor): Found continuous range in 19 steps ...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, -// highlight-next-line +// highlight-next-line 1/1083 marks by primary key, 1 marks to read from 1 ranges ...Reading ...approx. 8192 rows starting from 1441792 ``` @@ -451,7 +451,7 @@ ClickHouse客户端的输出显示,没有进行全表扫描,只有8.19万行

-Mark 176 was identified (the 'found left boundary mark' is inclusive, the 'found right boundary mark' is exclusive), and therefore all 8192 rows from granule 176 (which starts at row 1.441.792 - we will see that later on in this article) are then streamed into ClickHouse in order to find the actual rows with a UserID column value of 749927693. +Mark 176 was identified (the 'found left boundary mark' is inclusive, the 'found right boundary mark' is exclusive), and therefore all 8192 rows from granule 176 (which starts at row 1.441.792 - we will see that later on in this article) are then streamed into ClickHouse in order to find the actual rows with a UserID column value of 749927693.

@@ -465,7 +465,7 @@ GROUP BY URL ORDER BY Count DESC LIMIT 10; ``` - + 结果如下: ```response @@ -507,15 +507,15 @@ LIMIT 10; 在**第二阶段(数据读取中)**, ClickHouse定位所选的颗粒,以便将它们的所有行流到ClickHouse引擎中,以便找到实际匹配查询的行。 -我们将在下一节更详细地讨论第二阶段。 +我们将在下一节更详细地讨论第二阶段。 ## 标记文件用来定位颗粒 -下图描述了上表主索引文件的一部分。 +下图描述了上表主索引文件的一部分。 - + 如上所述,通过对索引的1083个UserID标记进行二分搜索,确定了第176个标记。因此,它对应的颗粒176可能包含UserID列值为749.927.693的行。 @@ -537,7 +537,7 @@ LIMIT 10; 下图显示了三个标记文件UserID.mrk、URL.mrk、EventTime.mrk,为表的UserID、URL和EventTime列存储颗粒的物理位置。 - + 我们已经讨论了主索引是一个扁平的未压缩数组文件(primary.idx),其中包含从0开始编号的索引标记。 @@ -545,9 +545,9 @@ LIMIT 10; 一旦ClickHouse确定并选择了可能包含查询所需的匹配行的颗粒的索引标记,就可以在标记文件数组中查找,以获得颗粒的物理位置。 -每个特定列的标记文件条目以偏移量的形式存储两个位置: +每个特定列的标记文件条目以偏移量的形式存储两个位置: -- 第一个偏移量(上图中的'block_offset')是在包含所选颗粒的压缩版本的压缩列数据文件中定位块。这个压缩块可能包含几个压缩的颗粒。所定位的压缩文件块在读取时被解压到内存中。 +- 第一个偏移量(上图中的'block_offset')是在包含所选颗粒的压缩版本的压缩列数据文件中定位块。这个压缩块可能包含几个压缩的颗粒。所定位的压缩文件块在读取时被解压到内存中。 - 标记文件的第二个偏移量(上图中的“granule_offset”)提供了颗粒在解压数据块中的位置。 @@ -576,7 +576,7 @@ LIMIT 10; 下面的图表和文本说明了我们的查询示例,ClickHouse如何在UserID.bin数据文件中定位176颗粒。 - + 我们在本文前面讨论过,ClickHouse选择了主索引标记176,因此176颗粒可能包含查询所需的匹配行。 @@ -624,7 +624,7 @@ LIMIT 10; ``` 结果是: -```response +```response ┌─────UserID─┬─Count─┐ │ 2459550954 │ 3741 │ │ 1084649151 │ 2484 │ @@ -639,26 +639,26 @@ LIMIT 10; └────────────┴───────┘ 10 rows in set. Elapsed: 0.086 sec. -// highlight-next-line -Processed 8.81 million rows, +// highlight-next-line +Processed 8.81 million rows, 799.69 MB (102.11 million rows/s., 9.27 GB/s.) -``` +``` 客户端输出表明,尽管URL列是联合主键的一部分,ClickHouse几乎执行了一一次全表扫描!ClickHouse从表的887万行中读取881万行。 如果启用了trace日志,那么ClickHouse服务日志文件显示,ClickHouse在1083个URL索引标记上使用了通用的排除搜索,以便识别那些可能包含URL列值为"http://public_search"的行。 -```response -...Executor): Key condition: (column 1 in ['http://public_search', +```response +...Executor): Key condition: (column 1 in ['http://public_search', 'http://public_search']) -// highlight-next-line -...Executor): Used generic exclusion search over index for part all_1_9_2 +// highlight-next-line +...Executor): Used generic exclusion search over index for part all_1_9_2 with 1537 steps ...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, -// highlight-next-line +// highlight-next-line 1076/1083 marks by primary key, 1076 marks to read from 5 ranges ...Executor): Reading approx. 8814592 rows with 10 streams -``` -我们可以在上面的跟踪日志示例中看到,1083个颗粒中有1076个(通过标记)被选中,因为可能包含具有匹配URL值的行。 +``` +我们可以在上面的跟踪日志示例中看到,1083个颗粒中有1076个(通过标记)被选中,因为可能包含具有匹配URL值的行。 这将导致881万行被读取到ClickHouse引擎中(通过使用10个流并行地读取),以便识别实际包含URL值"http://public_search"的行。 @@ -672,7 +672,7 @@ Processed 8.81 million rows,
- 通用排除搜索算法 + 通用排除搜索算法

@@ -693,7 +693,7 @@ Processed 8.81 million rows, 假设UserID具有较低的基数。在这种情况下,相同的UserID值很可能分布在多个表行和颗粒上,从而分布在索引标记上。对于具有相同UserID的索引标记,索引标记的URL值按升序排序(因为表行首先按UserID排序,然后按URL排序)。这使得有效的过滤如下所述: - + 在上图中,我们的抽象样本数据的颗粒选择过程有三种不同的场景: @@ -704,13 +704,13 @@ Processed 8.81 million rows, 3. 可以排除URL值大于W3的索引标记2和3,因为主索引的索引标记存储了每个颗粒的最小键列值,因此颗粒2和3不可能包含URL值W3。 - + **前缀主键高基数** 当UserID具有较高的基数时,相同的UserID值不太可能分布在多个表行和颗粒上。这意味着索引标记的URL值不是单调递增的: - + 正如在上面的图表中所看到的,所有URL值小于W3的标记都被选中,以便将其关联的颗粒的行加载到ClickHouse引擎中。 @@ -745,7 +745,7 @@ ALTER TABLE hits_UserID_URL MATERIALIZE INDEX url_skipping_index; ``` ClickHouse现在创建了一个额外的索引来存储—每组4个连续的颗粒(注意上面ALTER TABLE语句中的GRANULARITY 4子句)—最小和最大的URL值: - + 第一个索引条目(上图中的mark 0)存储属于表的前4个颗粒的行的最小和最大URL值。 @@ -786,15 +786,15 @@ ClickHouse现在创建了一个额外的索引来存储—每组4个连续的颗 当创建有不同主键的第二个表时,查询必须显式地发送给最适合查询的表版本,并且必须显式地插入新数据到两个表中,以保持表的同步: - + 在物化视图中,额外的表被隐藏,数据自动在两个表之间保持同步: - + projection方式是最透明的选项,因为除了自动保持隐藏的附加表与数据变化同步外,ClickHouse还会自动选择最有效的表版本进行查询: - + 下面我们使用真实的例子详细讨论下这三种方式。 @@ -813,7 +813,7 @@ CREATE TABLE hits_URL_UserID `EventTime` DateTime ) ENGINE = MergeTree -// highlight-next-line +// highlight-next-line PRIMARY KEY (URL, UserID) ORDER BY (URL, UserID, EventTime) SETTINGS index_granularity = 8192, index_granularity_bytes = 0; @@ -822,10 +822,10 @@ SETTINGS index_granularity = 8192, index_granularity_bytes = 0; 写入887万行源表数据: ```sql -INSERT INTO hits_URL_UserID +INSERT INTO hits_URL_UserID SELECT * from hits_UserID_URL; ``` - + 结果: ```response @@ -841,10 +841,10 @@ OPTIMIZE TABLE hits_URL_UserID FINAL; 因为我们切换了主键中列的顺序,插入的行现在以不同的字典顺序存储在磁盘上(与我们的原始表相比),因此该表的1083个颗粒也包含了与以前不同的值: - + 主键索引如下: - + 现在计算最频繁点击URL"http://public_search"的前10名用户,这时候的查询速度是明显加快的: ```sql @@ -856,7 +856,7 @@ GROUP BY UserID ORDER BY Count DESC LIMIT 10; ``` - + 结果: @@ -875,8 +875,8 @@ LIMIT 10; └────────────┴───────┘ 10 rows in set. Elapsed: 0.017 sec. -// highlight-next-line -Processed 319.49 thousand rows, +// highlight-next-line +Processed 319.49 thousand rows, 11.38 MB (18.41 million rows/s., 655.75 MB/s.) ``` @@ -887,15 +887,15 @@ Processed 319.49 thousand rows, 将URL作为主索引的第一列,ClickHouse现在对索引标记运行二分搜索。ClickHouse服务器日志文件中对应的跟踪日志: ```response -...Executor): Key condition: (column 0 in ['http://public_search', +...Executor): Key condition: (column 0 in ['http://public_search', 'http://public_search']) -// highlight-next-line +// highlight-next-line ...Executor): Running binary search on index range for part all_1_9_2 (1083 marks) ...Executor): Found (LEFT) boundary mark: 644 ...Executor): Found (RIGHT) boundary mark: 683 ...Executor): Found continuous range in 19 steps ...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, -// highlight-next-line +// highlight-next-line 39/1083 marks by primary key, 39 marks to read from 1 ranges ...Executor): Reading approx. 319488 rows with 2 streams ``` @@ -920,7 +920,7 @@ GROUP BY URL ORDER BY Count DESC LIMIT 10; ``` - + 结果 ```response @@ -938,8 +938,8 @@ LIMIT 10; └────────────────────────────────┴───────┘ 10 rows in set. Elapsed: 0.024 sec. -// highlight-next-line -Processed 8.02 million rows, +// highlight-next-line +Processed 8.02 million rows, 73.04 MB (340.26 million rows/s., 3.10 GB/s.) 
``` @@ -947,10 +947,10 @@ Processed 8.02 million rows, ```response ...Executor): Key condition: (column 1 in [749927693, 749927693]) // highlight-next-line -...Executor): Used generic exclusion search over index for part all_1_9_2 +...Executor): Used generic exclusion search over index for part all_1_9_2 with 1453 steps ...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, -// highlight-next-line +// highlight-next-line 980/1083 marks by primary key, 980 marks to read from 23 ranges ...Executor): Reading approx. 8028160 rows with 10 streams ``` @@ -960,7 +960,7 @@ Processed 8.02 million rows, 现在我们有了两张表。优化了对UserID和URL的查询过滤,分别: - + @@ -981,7 +981,7 @@ ORDER BY (URL, UserID, EventTime) POPULATE AS SELECT * FROM hits_UserID_URL; ``` - + 结果: ```response @@ -993,20 +993,20 @@ Ok. :::note - 我们在视图的主键中切换键列的顺序(与原始表相比) - 物化视图由一个隐藏表支持,该表的行顺序和主索引基于给定的主键定义 -- 我们使用POPULATE关键字,以便用源表hits_UserID_URL中的所有887万行立即导入新的物化视图 +- 我们使用POPULATE关键字,以便用源表hits_UserID_URL中的所有887万行立即导入新的物化视图 - 如果在源表hits_UserID_URL中插入了新行,那么这些行也会自动插入到隐藏表中 - 实际上,隐式创建的隐藏表的行顺序和主索引与我们上面显式创建的辅助表相同: - + ClickHouse将隐藏表的列数据文件(.bin)、标记文件(.mrk2)和主索引(primary.idx)存储在ClickHouse服务器的数据目录的一个特殊文件夹中: - + ::: @@ -1021,7 +1021,7 @@ GROUP BY UserID ORDER BY Count DESC LIMIT 10; ``` - + 结果: ```response @@ -1039,8 +1039,8 @@ LIMIT 10; └────────────┴───────┘ 10 rows in set. Elapsed: 0.026 sec. -// highlight-next-line -Processed 335.87 thousand rows, +// highlight-next-line +Processed 335.87 thousand rows, 13.54 MB (12.91 million rows/s., 520.38 MB/s.) ``` @@ -1049,13 +1049,13 @@ Processed 335.87 thousand rows, ClickHouse服务器日志文件中相应的跟踪日志确认了ClickHouse正在对索引标记运行二分搜索: ```response -...Executor): Key condition: (column 0 in ['http://public_search', +...Executor): Key condition: (column 0 in ['http://public_search', 'http://public_search']) // highlight-next-line ...Executor): Running binary search on index range ... ... ...Executor): Selected 4/4 parts by partition key, 4 parts by primary key, -// highlight-next-line +// highlight-next-line 41/1083 marks by primary key, 41 marks to read from 4 ranges ...Executor): Reading approx. 335872 rows with 4 streams ``` @@ -1095,11 +1095,11 @@ ALTER TABLE hits_UserID_URL - 查询总是(从语法上)针对源表hits_UserID_URL,但是如果隐藏表的行顺序和主索引允许更有效地执行查询,那么将使用该隐藏表 - 实际上,隐式创建的隐藏表的行顺序和主索引与我们显式创建的辅助表相同: - + ClickHouse将隐藏表的列数据文件(.bin)、标记文件(.mrk2)和主索引(primary.idx)存储在一个特殊的文件夹中(在下面的截图中用橙色标记),紧挨着源表的数据文件、标记文件和主索引文件: - + ::: 由投影创建的隐藏表(以及它的主索引)现在可以(隐式地)用于显著加快URL列上查询过滤的执行。注意,查询在语法上针对投影的源表。 @@ -1113,7 +1113,7 @@ GROUP BY UserID ORDER BY Count DESC LIMIT 10; ``` - + 结果: ```response @@ -1130,8 +1130,8 @@ LIMIT 10; │ 765730816 │ 536 │ └────────────┴───────┘ -10 rows in set. Elapsed: 0.029 sec. -// highlight-next-line +10 rows in set. Elapsed: 0.029 sec. +// highlight-next-line Processed 319.49 thousand rows, 1 1.38 MB (11.05 million rows/s., 393.58 MB/s.) ``` @@ -1142,16 +1142,16 @@ ClickHouse服务器日志文件中跟踪日志确认了ClickHouse正在对索引 ```response -...Executor): Key condition: (column 0 in ['http://public_search', +...Executor): Key condition: (column 0 in ['http://public_search', 'http://public_search']) -// highlight-next-line +// highlight-next-line ...Executor): Running binary search on index range for part prj_url_userid (1083 marks) ...Executor): ... 
// highlight-next-line ...Executor): Choose complete Normal projection prj_url_userid ...Executor): projection required columns: URL, UserID ...Executor): Selected 1/1 parts by partition key, 1 parts by primary key, -// highlight-next-line +// highlight-next-line 39/1083 marks by primary key, 39 marks to read from 1 ranges ...Executor): Reading approx. 319488 rows with 2 streams ``` diff --git a/docs/zh/interfaces/http.md b/docs/zh/interfaces/http.md index e0c12193a6a..c7a0f355a92 100644 --- a/docs/zh/interfaces/http.md +++ b/docs/zh/interfaces/http.md @@ -96,7 +96,7 @@ ECT 1 , expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception ``` -默认情况下,返回的数据是`TabSeparated`格式的,更多信息,见[Formats](../interfaces/formats/)部分。 +默认情况下,返回的数据是`TabSeparated`格式的,更多信息,见[Formats](../interfaces/formats.md)部分。 您可以使用查询的FORMAT子句来设置其他格式。 diff --git a/docs/zh/operations/optimizing-performance/sampling-query-profiler.md b/docs/zh/operations/optimizing-performance/sampling-query-profiler.md index 4206274ec0d..5d31ab9b245 100644 --- a/docs/zh/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/zh/operations/optimizing-performance/sampling-query-profiler.md @@ -32,7 +32,7 @@ ClickHouse运行允许分析查询执行的采样探查器。 使用探查器, - 使用 `addressToLine`, `addressToSymbol` 和 `demangle` [内省功能](../../sql-reference/functions/introspection.md) 获取函数名称及其在ClickHouse代码中的位置。 要获取某些查询的配置文件,您需要从以下内容汇总数据 `trace_log` 桌子 您可以通过单个函数或整个堆栈跟踪聚合数据。 -如果你需要想象 `trace_log` 信息,尝试 [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) 和 [测速镜](https://github.com/laplab/clickhouse-speedscope). +如果你需要想象 `trace_log` 信息,尝试 [flamegraph](../../interfaces/third-party/gui.md#clickhouse-flamegraph) 和 [测速镜](https://github.com/laplab/clickhouse-speedscope). ## 示例 {#example} diff --git a/docs/zh/operations/settings/settings-users.md b/docs/zh/operations/settings/settings-users.md index 3fb97bbddb2..d7fe5bad3c3 100644 --- a/docs/zh/operations/settings/settings-users.md +++ b/docs/zh/operations/settings/settings-users.md @@ -11,7 +11,7 @@ sidebar_label: "\u7528\u6237\u8BBE\u7F6E" `user.xml` 中的 `users` 配置段包含了用户配置 :::note -ClickHouse还支持 [SQL驱动的工作流](../access-rights.md#access-control) 用于管理用户。 我们建议使用它。 +ClickHouse还支持 [SQL驱动的工作流](/docs/en/operations/access-rights#access-control) 用于管理用户。 我们建议使用它。 ::: `users` 配置段的结构: @@ -79,7 +79,7 @@ ClickHouse还支持 [SQL驱动的工作流](../access-rights.md#access-control) ### access_management {#access_management-user-setting} -此设置可为用户启用或禁用 SQL-driven [访问控制和帐户管理](../access-rights.md#access-control) 。 +此设置可为用户启用或禁用 SQL-driven [访问控制和帐户管理](/docs/en/operations/access-rights#access-control) 。 可能的值: diff --git a/docs/zh/operations/system-tables/data_type_families.md b/docs/zh/operations/system-tables/data_type_families.md index 18e9455476d..f0e3a9ef896 100644 --- a/docs/zh/operations/system-tables/data_type_families.md +++ b/docs/zh/operations/system-tables/data_type_families.md @@ -3,7 +3,7 @@ slug: /zh/operations/system-tables/data_type_families --- # system.data_type_families {#system_tables-data_type_families} -包含有关受支持的[数据类型](../../sql-reference/data-types/)的信息. +包含有关受支持的[数据类型](../../sql-reference/data-types/index.md)的信息. 
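The hunk above only retargets the relative link for the `system.data_type_families` page; as a quick illustration of the table that page describes (its column list follows right after this note), here is a query sketch. The `alias_to` column name is taken from the current ClickHouse documentation rather than from this diff, so treat it as an assumption.

```sql
-- Illustrative query against system.data_type_families; the alias_to column
-- name is assumed from the ClickHouse docs, it is not shown in this diff.
SELECT name, alias_to
FROM system.data_type_families
WHERE alias_to = 'String';
```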
列字段包括: diff --git a/docs/zh/operations/system-tables/replicated_fetches.md b/docs/zh/operations/system-tables/replicated_fetches.md index 7fd517c72ab..c6c37759755 100644 --- a/docs/zh/operations/system-tables/replicated_fetches.md +++ b/docs/zh/operations/system-tables/replicated_fetches.md @@ -68,4 +68,4 @@ thread_id: 54 **另请参阅** -- [管理 ReplicatedMergeTree 表](../../sql-reference/statements/system/#query-language-system-replicated) +- [管理 ReplicatedMergeTree 表](../../sql-reference/statements/system.md#query-language-system-replicated) diff --git a/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index 8431b5a1110..f0672d4fe45 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -20,7 +20,7 @@ groupArrayInsertAt(default_x, size)(x, pos); **参数** -- `x` — 要插入的值。生成所[支持的数据类型](../../../sql-reference/data-types/index.md)(数据)的[表达式](../../../sql-reference/syntax#syntax-expressions)。 +- `x` — 要插入的值。生成所[支持的数据类型](../../../sql-reference/data-types/index.md)(数据)的[表达式](../../../sql-reference/syntax.md#syntax-expressions)。 - `pos` — 指定元素 `x` 将被插入的位置。 数组中的索引编号从零开始。 [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges). - `default_x` — 在空位置替换的默认值。可选参数。生成 `x` 数据类型 (数据) 的[表达式](../../../sql-reference/syntax.md#syntax-expressions)。 如果 `default_x` 未定义,则 [默认值](../../../sql-reference/statements/create.md#create-default-values) 被使用。 - `size`— 结果数组的长度。可选参数。如果使用该参数,必须指定默认值 `default_x` 。 [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges)。 diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.mdx b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md similarity index 59% rename from docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.mdx rename to docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md index 0c924feda73..fe70d29f8da 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.mdx +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md @@ -5,6 +5,4 @@ sidebar_label: Polygon Dictionaries With Grids title: "Polygon dictionaries" --- -import Content from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md'; - - +View the [english Dictionaries doc page for details](../../../../en/sql-reference/dictionaries/index.md). diff --git a/docs/zh/sql-reference/statements/create/database.md b/docs/zh/sql-reference/statements/create/database.md index 2c6e53c0f06..3e5b71fb196 100644 --- a/docs/zh/sql-reference/statements/create/database.md +++ b/docs/zh/sql-reference/statements/create/database.md @@ -27,4 +27,4 @@ ClickHouse在指定集群的所有服务器上创建`db_name`数据库。 更多 ### ENGINE {#engine} -[MySQL](../../../engines/database-engines/mysql.md) 允许您从远程MySQL服务器检索数据. 默认情况下,ClickHouse使用自己的[database engine](../../../engines/database-engines/index.md). 还有一个[lazy](../../../engines/database-engines/lazy)引擎. +[MySQL](../../../engines/database-engines/mysql.md) 允许您从远程MySQL服务器检索数据. 默认情况下,ClickHouse使用自己的[database engine](../../../engines/database-engines/index.md). 还有一个[lazy](../../../engines/database-engines/lazy.md)引擎. 
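The link fixes above end with the `CREATE DATABASE ... ENGINE` section, which mentions the MySQL and Lazy database engines. A minimal sketch of what that section describes, with placeholder host, credentials, and database names:

```sql
-- MySQL database engine: exposes tables of a remote MySQL server
-- (host, database and credentials below are placeholders).
CREATE DATABASE mysql_proxy
ENGINE = MySQL('mysql-host:3306', 'source_db', 'user', 'password');

-- Lazy database engine: keeps tables in RAM only for the given number
-- of seconds after the last access.
CREATE DATABASE logs_lazy
ENGINE = Lazy(3600);
```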
diff --git a/docs/zh/sql-reference/statements/create/view.md b/docs/zh/sql-reference/statements/create/view.md index a000c69f1ef..8ce2d20a10c 100644 --- a/docs/zh/sql-reference/statements/create/view.md +++ b/docs/zh/sql-reference/statements/create/view.md @@ -63,7 +63,7 @@ ClickHouse 中的物化视图更像是插入触发器。 如果视图查询中 视图看起来与普通表相同。 例如,它们列在`SHOW TABLES`查询的结果中。 -删除视图,使用[DROP VIEW](../../../sql-reference/statements/drop#drop-view). `DROP TABLE`也适用于视图。 +删除视图,使用[DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). `DROP TABLE`也适用于视图。 ## Live View (实验性) {#live-view} diff --git a/docs/zh/sql-reference/statements/index.md b/docs/zh/sql-reference/statements/index.md index cf51dadc8f1..989c368ebc4 100644 --- a/docs/zh/sql-reference/statements/index.md +++ b/docs/zh/sql-reference/statements/index.md @@ -20,7 +20,7 @@ sidebar_position: 31 - [CHECK TABLE](../../sql-reference/statements/check-table.mdx) - [DESCRIBE TABLE](../../sql-reference/statements/describe-table.mdx) - [DETACH](../../sql-reference/statements/detach.mdx) -- [DROP](../../sql-reference/statements/drop) +- [DROP](../../sql-reference/statements/drop.md) - [EXISTS](../../sql-reference/statements/exists.md) - [KILL](../../sql-reference/statements/kill.mdx) - [OPTIMIZE](../../sql-reference/statements/optimize.mdx) diff --git a/docs/zh/sql-reference/statements/select/array-join.md b/docs/zh/sql-reference/statements/select/array-join.md index b0352a7bb0a..4162a39f399 100644 --- a/docs/zh/sql-reference/statements/select/array-join.md +++ b/docs/zh/sql-reference/statements/select/array-join.md @@ -146,7 +146,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS ma └───────┴─────────┴───┴─────┴────────┘ ``` -下面的例子使用 [arrayEnumerate](../../../sql-reference/functions/array-functions#array_functions-arrayenumerate) 功能: +下面的例子使用 [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) 功能: ``` sql SELECT s, arr, a, num, arrayEnumerate(arr) @@ -259,7 +259,7 @@ ARRAY JOIN nest AS n; └───────┴─────┴─────┴─────────┴────────────┘ ``` -使用功能 [arrayEnumerate](../../../sql-reference/functions/array-functions#array_functions-arrayenumerate) 的例子: +使用功能 [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) 的例子: ``` sql SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num diff --git a/docs/zh/sql-reference/statements/select/group-by.md b/docs/zh/sql-reference/statements/select/group-by.md index 29c72ce7e45..86511470538 100644 --- a/docs/zh/sql-reference/statements/select/group-by.md +++ b/docs/zh/sql-reference/statements/select/group-by.md @@ -8,7 +8,7 @@ sidebar_label: GROUP BY `GROUP BY` 子句将 `SELECT` 查询结果转换为聚合模式,其工作原理如下: - `GROUP BY` 子句包含表达式列表(或单个表达式 -- 可以认为是长度为1的列表)。 这份名单充当 “grouping key”,而每个单独的表达式将被称为 “key expressions”. 
-- 在所有的表达式在 [SELECT](../../../sql-reference/statements/select/index.md), [HAVING](../../../sql-reference/statements/select/having),和 [ORDER BY](../../../sql-reference/statements/select/order-by.md) 子句中 **必须** 基于键表达式进行计算 **或** 上 [聚合函数](../../../sql-reference/aggregate-functions/index.md) 在非键表达式(包括纯列)上。 换句话说,从表中选择的每个列必须用于键表达式或聚合函数内,但不能同时使用。 +- 在所有的表达式在 [SELECT](../../../sql-reference/statements/select/index.md), [HAVING](../../../sql-reference/statements/select/having.md),和 [ORDER BY](../../../sql-reference/statements/select/order-by.md) 子句中 **必须** 基于键表达式进行计算 **或** 上 [聚合函数](../../../sql-reference/aggregate-functions/index.md) 在非键表达式(包括纯列)上。 换句话说,从表中选择的每个列必须用于键表达式或聚合函数内,但不能同时使用。 - 聚合结果 `SELECT` 查询将包含尽可能多的行,因为有唯一值 “grouping key” 在源表中。 通常这会显着减少行数,通常是数量级,但不一定:如果所有行数保持不变 “grouping key” 值是不同的。 :::note @@ -58,7 +58,7 @@ sidebar_label: GROUP BY - 在 `Pretty*` 格式时,该行在主结果之后作为单独的表输出。 - 在其他格式中,它不可用。 -`WITH TOTALS` 可以以不同的方式运行时 [HAVING](../../../sql-reference/statements/select/having) 是存在的。 该行为取决于 `totals_mode` 设置。 +`WITH TOTALS` 可以以不同的方式运行时 [HAVING](../../../sql-reference/statements/select/having.md) 是存在的。 该行为取决于 `totals_mode` 设置。 ### 配置总和处理 {#configuring-totals-processing} diff --git a/docs/zh/sql-reference/statements/select/index.md b/docs/zh/sql-reference/statements/select/index.md index 2d4044cbd20..fdf196e198b 100644 --- a/docs/zh/sql-reference/statements/select/index.md +++ b/docs/zh/sql-reference/statements/select/index.md @@ -41,7 +41,7 @@ SELECT [DISTINCT] expr_list - [WHERE 子句](../../../sql-reference/statements/select/where.md) - [GROUP BY 子句](../../../sql-reference/statements/select/group-by.md) - [LIMIT BY 子句](../../../sql-reference/statements/select/limit-by.md) -- [HAVING 子句](../../../sql-reference/statements/select/having) +- [HAVING 子句](../../../sql-reference/statements/select/having.md) - [SELECT 子句](#select-clause) - [DISTINCT 子句](../../../sql-reference/statements/select/distinct.md) - [LIMIT 子句](../../../sql-reference/statements/select/limit.md) diff --git a/packages/clickhouse-server.service b/packages/clickhouse-server.service index 090461df988..7742d8b278a 100644 --- a/packages/clickhouse-server.service +++ b/packages/clickhouse-server.service @@ -18,7 +18,7 @@ Group=clickhouse Restart=always RestartSec=30 # Since ClickHouse is systemd aware default 1m30sec may not be enough -TimeoutStartSec=infinity +TimeoutStartSec=0 # %p is resolved to the systemd unit name RuntimeDirectory=%p ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=%t/%p/%p.pid diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 5b97daf2998..47017a94cb5 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -400,10 +400,6 @@ endif () add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_BUNDLE}) -if (USE_GDB_ADD_INDEX) - add_custom_command(TARGET clickhouse POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} clickhouse COMMENT "Adding .gdb-index to clickhouse" VERBATIM) -endif() - if (USE_BINARY_HASH) add_custom_command(TARGET clickhouse POST_BUILD COMMAND ./clickhouse hash-binary > hash && ${OBJCOPY_PATH} --add-section .clickhouse.hash=hash clickhouse COMMENT "Adding section '.clickhouse.hash' to clickhouse binary" VERBATIM) endif() diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index b60138b5692..cc25747702a 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -66,6 +66,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) using namespace DB; namespace po = 
boost::program_options; + bool print_stacktrace = false; try { po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); @@ -84,6 +85,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) ("level", po::value(), "compression level for codecs specified via flags") ("none", "use no compression instead of LZ4") ("stat", "print block statistics of compressed data") + ("stacktrace", "print stacktrace of exception") ; po::positional_options_description positional_desc; @@ -107,6 +109,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) bool use_deflate_qpl = options.count("deflate_qpl"); bool stat_mode = options.count("stat"); bool use_none = options.count("none"); + print_stacktrace = options.count("stacktrace"); unsigned block_size = options["block-size"].as(); std::vector codecs; if (options.count("codec")) @@ -188,11 +191,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) /// Compression CompressedWriteBuffer to(*wb, codec, block_size); copyData(*rb, to); + to.finalize(); } } catch (...) { - std::cerr << getCurrentExceptionMessage(true) << '\n'; + std::cerr << getCurrentExceptionMessage(print_stacktrace) << '\n'; return getCurrentExceptionCode(); } diff --git a/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt index f649e81c50a..118610e4dcd 100644 --- a/programs/odbc-bridge/CMakeLists.txt +++ b/programs/odbc-bridge/CMakeLists.txt @@ -35,10 +35,6 @@ target_link_libraries(clickhouse-odbc-bridge PRIVATE set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro) -if (USE_GDB_ADD_INDEX) - add_custom_command(TARGET clickhouse-odbc-bridge POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} ../clickhouse-odbc-bridge COMMENT "Adding .gdb-index to clickhouse-odbc-bridge" VERBATIM) -endif() - if (SPLIT_DEBUG_SYMBOLS) clickhouse_split_debug_symbols(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-odbc-bridge) else() diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h index f57cc2886e3..c73c0499fbe 100644 --- a/src/Access/Common/AccessType.h +++ b/src/Access/Common/AccessType.h @@ -15,6 +15,7 @@ enum class AccessType /// node_type either specifies access type's level (GLOBAL/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS), /// or specifies that the access type is a GROUP of other access types; /// parent_group_name is the name of the group containing this access type (or NONE if there is no such group). +/// NOTE A parent group must be declared AFTER all its children. 
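A note on the access-control change in the hunk below: `CREATE_TEMPORARY_TABLE` is re-parented under a new `CREATE_ARBITRARY_TEMPORARY_TABLE` type that gates temporary tables with an arbitrary table engine. Assuming the usual mapping of access-type identifiers to GRANT keywords (underscores become spaces; the SQL surface is not part of this diff), the new grant would be used roughly like this:

```sql
-- Hypothetical usage sketch; the keyword is inferred from the
-- CREATE_ARBITRARY_TEMPORARY_TABLE identifier added in the hunk below.
GRANT CREATE ARBITRARY TEMPORARY TABLE ON *.* TO some_user;
```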
#define APPLY_FOR_ACCESS_TYPES(M) \ M(SHOW_DATABASES, "", DATABASE, SHOW) /* allows to execute SHOW DATABASES, SHOW CREATE DATABASE, USE ; implicitly enabled by any grant on the database */\ @@ -86,8 +87,10 @@ enum class AccessType M(CREATE_VIEW, "", VIEW, CREATE) /* allows to execute {CREATE|ATTACH} VIEW; implicitly enabled by the grant CREATE_TABLE */\ M(CREATE_DICTIONARY, "", DICTIONARY, CREATE) /* allows to execute {CREATE|ATTACH} DICTIONARY */\ - M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables; + M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE_ARBITRARY_TEMPORARY_TABLE) /* allows to create and manipulate temporary tables; implicitly enabled by the grant CREATE_TABLE on any table */ \ + M(CREATE_ARBITRARY_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables + with arbitrary table engine */\ M(CREATE_FUNCTION, "", GLOBAL, CREATE) /* allows to execute CREATE FUNCTION */ \ M(CREATE_NAMED_COLLECTION, "", GLOBAL, CREATE) /* allows to execute CREATE NAMED COLLECTION */ \ M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \ diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index fbaacb2263b..cc51183c51f 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -81,6 +81,11 @@ namespace if ((level == 0) && (max_flags_with_children & create_table)) res |= create_temporary_table; + /// CREATE TABLE (on any database/table) => CREATE_ARBITRARY_TEMPORARY_TABLE (global) + static const AccessFlags create_arbitrary_temporary_table = AccessType::CREATE_ARBITRARY_TEMPORARY_TABLE; + if ((level == 0) && (max_flags_with_children & create_table)) + res |= create_arbitrary_temporary_table; + /// ALTER_TTL => ALTER_MATERIALIZE_TTL static const AccessFlags alter_ttl = AccessType::ALTER_TTL; static const AccessFlags alter_materialize_ttl = AccessType::ALTER_MATERIALIZE_TTL; diff --git a/src/AggregateFunctions/UniqExactSet.h b/src/AggregateFunctions/UniqExactSet.h index 4a3ef576e4d..916dfe4a424 100644 --- a/src/AggregateFunctions/UniqExactSet.h +++ b/src/AggregateFunctions/UniqExactSet.h @@ -54,10 +54,10 @@ public: { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); setThreadName("UniqExactMerger"); while (true) diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp index 0a757f94a49..1ebc7cb3bb9 100644 --- a/src/Backups/BackupIO_S3.cpp +++ b/src/Backups/BackupIO_S3.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -68,7 +69,9 @@ namespace settings.auth_settings.use_environment_credentials.value_or( context->getConfigRef().getBool("s3.use_environment_credentials", false)), settings.auth_settings.use_insecure_imds_request.value_or( - context->getConfigRef().getBool("s3.use_insecure_imds_request", false))); + context->getConfigRef().getBool("s3.use_insecure_imds_request", false)), + settings.auth_settings.expiration_window_seconds.value_or( + context->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS))); } Aws::Vector listObjects(S3::Client & client, const S3::URI & s3_uri, const String & file_name) diff --git a/src/Backups/BackupUtils.cpp b/src/Backups/BackupUtils.cpp index 9ff91050177..c6a0840964b 100644 --- a/src/Backups/BackupUtils.cpp +++ b/src/Backups/BackupUtils.cpp @@ -89,13 +89,13 
@@ void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries if (!--num_active_jobs) event.notify_all(); if (async) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); try { if (async && thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); if (async) setThreadName("BackupWorker"); @@ -154,13 +154,13 @@ void restoreTablesData(DataRestoreTasks && tasks, ThreadPool & thread_pool) if (!--num_active_jobs) event.notify_all(); if (async) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); try { if (async && thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); if (async) setThreadName("RestoreWorker"); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 876b1d1906d..aa6ea9a46db 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -117,6 +117,11 @@ namespace ProfileEvents extern const Event SystemTimeMicroseconds; } +namespace +{ +constexpr UInt64 THREAD_GROUP_ID = 0; +} + namespace DB { @@ -195,8 +200,19 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src) } }; std::map rows_by_name; + for (size_t src_row = 0; src_row < src.rows(); ++src_row) { + /// Filter out threads stats, use stats from thread group + /// Exactly stats from thread group is stored to the table system.query_log + /// The stats from threads are less useful. + /// They take more records, they need to be combined, + /// there even could be several records from one thread. + /// Server doesn't send it any more to the clients, so this code left for compatible + auto thread_id = src_array_thread_id[src_row]; + if (thread_id != THREAD_GROUP_ID) + continue; + Id id{ src_column_name.getDataAt(src_row), src_column_host_name.getDataAt(src_row), @@ -204,16 +220,6 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src) rows_by_name[id] = src_row; } - /// Filter out snapshots - std::set thread_id_filter_mask; - for (size_t i = 0; i < src_array_thread_id.size(); ++i) - { - if (src_array_thread_id[i] != 0) - { - thread_id_filter_mask.emplace(i); - } - } - /// Merge src into dst. for (size_t dst_row = 0; dst_row < dst_rows; ++dst_row) { @@ -225,10 +231,6 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src) if (auto it = rows_by_name.find(id); it != rows_by_name.end()) { size_t src_row = it->second; - if (thread_id_filter_mask.contains(src_row)) - { - continue; - } dst_array_current_time[dst_row] = src_array_current_time[src_row]; @@ -249,11 +251,6 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src) /// Copy rows from src that dst does not contains. 
for (const auto & [id, pos] : rows_by_name) { - if (thread_id_filter_mask.contains(pos)) - { - continue; - } - for (size_t col = 0; col < src.columns(); ++col) { mutable_columns[col]->insert((*src.getByPosition(col).column)[pos]); @@ -1080,13 +1077,18 @@ void ClientBase::onProfileEvents(Block & block) const auto * user_time_name = ProfileEvents::getName(ProfileEvents::UserTimeMicroseconds); const auto * system_time_name = ProfileEvents::getName(ProfileEvents::SystemTimeMicroseconds); - HostToThreadTimesMap thread_times; + HostToTimesMap thread_times; for (size_t i = 0; i < rows; ++i) { auto thread_id = array_thread_id[i]; auto host_name = host_names.getDataAt(i).toString(); - if (thread_id != 0) - progress_indication.addThreadIdToList(host_name, thread_id); + + /// In ProfileEvents packets thread id 0 specifies common profiling information + /// for all threads executing current query on specific host. So instead of summing per thread + /// consumption it's enough to look for data with thread id 0. + if (thread_id != THREAD_GROUP_ID) + continue; + auto event_name = names.getDataAt(i); auto value = array_values[i]; @@ -1095,11 +1097,11 @@ void ClientBase::onProfileEvents(Block & block) continue; if (event_name == user_time_name) - thread_times[host_name][thread_id].user_ms = value; + thread_times[host_name].user_ms = value; else if (event_name == system_time_name) - thread_times[host_name][thread_id].system_ms = value; + thread_times[host_name].system_ms = value; else if (event_name == MemoryTracker::USAGE_EVENT_NAME) - thread_times[host_name][thread_id].memory_usage = value; + thread_times[host_name].memory_usage = value; } progress_indication.updateThreadEventData(thread_times); diff --git a/src/Client/LineReader.cpp b/src/Client/LineReader.cpp index f49e48be617..04b387c9f7d 100644 --- a/src/Client/LineReader.cpp +++ b/src/Client/LineReader.cpp @@ -12,9 +12,7 @@ #include -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif namespace { diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index d696070aa41..5b20d98aa01 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -11,13 +11,21 @@ M(ReplicatedSend, "Number of data parts being sent to replicas") \ M(ReplicatedChecks, "Number of data parts checking for consistency") \ M(BackgroundMergesAndMutationsPoolTask, "Number of active merges and mutations in an associated background pool") \ + M(BackgroundMergesAndMutationsPoolSize, "Limit on number of active merges and mutations in an associated background pool") \ M(BackgroundFetchesPoolTask, "Number of active fetches in an associated background pool") \ + M(BackgroundFetchesPoolSize, "Limit on number of simultaneous fetches in an associated background pool") \ M(BackgroundCommonPoolTask, "Number of active tasks in an associated background pool") \ + M(BackgroundCommonPoolSize, "Limit on number of tasks in an associated background pool") \ M(BackgroundMovePoolTask, "Number of active tasks in BackgroundProcessingPool for moves") \ + M(BackgroundMovePoolSize, "Limit on number of tasks in BackgroundProcessingPool for moves") \ M(BackgroundSchedulePoolTask, "Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.") \ + M(BackgroundSchedulePoolSize, "Limit on number of tasks in BackgroundSchedulePool. 
This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.") \ M(BackgroundBufferFlushSchedulePoolTask, "Number of active tasks in BackgroundBufferFlushSchedulePool. This pool is used for periodic Buffer flushes") \ + M(BackgroundBufferFlushSchedulePoolSize, "Limit on number of tasks in BackgroundBufferFlushSchedulePool") \ M(BackgroundDistributedSchedulePoolTask, "Number of active tasks in BackgroundDistributedSchedulePool. This pool is used for distributed sends that is done in background.") \ + M(BackgroundDistributedSchedulePoolSize, "Limit on number of tasks in BackgroundDistributedSchedulePool") \ M(BackgroundMessageBrokerSchedulePoolTask, "Number of active tasks in BackgroundProcessingPool for message streaming") \ + M(BackgroundMessageBrokerSchedulePoolSize, "Limit on number of tasks in BackgroundProcessingPool for message streaming") \ M(CacheDictionaryUpdateQueueBatches, "Number of 'batches' (a set of keys) in update queue in CacheDictionaries.") \ M(CacheDictionaryUpdateQueueKeys, "Exact number of keys in update queue in CacheDictionaries.") \ M(DiskSpaceReservedForMerge, "Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts.") \ diff --git a/src/Common/CurrentThread.cpp b/src/Common/CurrentThread.cpp index 188e78fe69b..7fd82426522 100644 --- a/src/Common/CurrentThread.cpp +++ b/src/Common/CurrentThread.cpp @@ -57,6 +57,23 @@ void CurrentThread::updateProgressOut(const Progress & value) current_thread->progress_out.incrementPiecewiseAtomically(value); } +std::shared_ptr CurrentThread::getInternalTextLogsQueue() +{ + /// NOTE: this method could be called at early server startup stage + if (unlikely(!current_thread)) + return nullptr; + + return current_thread->getInternalTextLogsQueue(); +} + +InternalProfileEventsQueuePtr CurrentThread::getInternalProfileEventsQueue() +{ + if (unlikely(!current_thread)) + return nullptr; + + return current_thread->getInternalProfileEventsQueue(); +} + void CurrentThread::attachInternalTextLogsQueue(const std::shared_ptr & logs_queue, LogsLevel client_logs_level) { @@ -65,42 +82,6 @@ void CurrentThread::attachInternalTextLogsQueue(const std::shared_ptrattachInternalTextLogsQueue(logs_queue, client_logs_level); } -void CurrentThread::setFatalErrorCallback(std::function callback) -{ - /// It does not make sense to set a callback for sending logs to a client if there's no thread status - chassert(current_thread); - current_thread->setFatalErrorCallback(callback); -} - -std::shared_ptr CurrentThread::getInternalTextLogsQueue() -{ - /// NOTE: this method could be called at early server startup stage - if (unlikely(!current_thread)) - return nullptr; - - if (current_thread->getCurrentState() == ThreadStatus::ThreadState::Died) - return nullptr; - - return current_thread->getInternalTextLogsQueue(); -} - -void CurrentThread::attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & queue) -{ - if (unlikely(!current_thread)) - return; - current_thread->attachInternalProfileEventsQueue(queue); -} - -InternalProfileEventsQueuePtr CurrentThread::getInternalProfileEventsQueue() -{ - if (unlikely(!current_thread)) - return nullptr; - - if (current_thread->getCurrentState() == ThreadStatus::ThreadState::Died) - return nullptr; - - return current_thread->getInternalProfileEventsQueue(); -} ThreadGroupStatusPtr CurrentThread::getGroup() { @@ -110,4 +91,12 @@ ThreadGroupStatusPtr 
CurrentThread::getGroup() return current_thread->getThreadGroup(); } +std::string_view CurrentThread::getQueryId() +{ + if (unlikely(!current_thread)) + return {}; + + return current_thread->getQueryId(); +} + } diff --git a/src/Common/CurrentThread.h b/src/Common/CurrentThread.h index f4975e800ca..3b16163b1ba 100644 --- a/src/Common/CurrentThread.h +++ b/src/Common/CurrentThread.h @@ -5,6 +5,7 @@ #include #include +#include namespace ProfileEvents @@ -48,7 +49,7 @@ public: static void attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & queue); static InternalProfileEventsQueuePtr getInternalProfileEventsQueue(); - static void setFatalErrorCallback(std::function callback); + static void attachQueryForLog(const String & query_); /// Makes system calls to update ProfileEvents that contain info from rusage and taskstats static void updatePerformanceCounters(); @@ -65,31 +66,20 @@ public: static void updateProgressIn(const Progress & value); static void updateProgressOut(const Progress & value); - /// Query management: - - /// Call from master thread as soon as possible (e.g. when thread accepted connection) - static void initializeQuery(); - /// You must call one of these methods when create a query child thread: /// Add current thread to a group associated with the thread group - static void attachTo(const ThreadGroupStatusPtr & thread_group); + static void attachToGroup(const ThreadGroupStatusPtr & thread_group); /// Is useful for a ThreadPool tasks - static void attachToIfDetached(const ThreadGroupStatusPtr & thread_group); + static void attachToGroupIfDetached(const ThreadGroupStatusPtr & thread_group); + + /// Non-master threads call this method in destructor automatically + static void detachFromGroupIfNotDetached(); /// Update ProfileEvents and dumps info to system.query_thread_log static void finalizePerformanceCounters(); /// Returns a non-empty string if the thread is attached to a query - static std::string_view getQueryId() - { - if (unlikely(!current_thread)) - return {}; - return current_thread->getQueryId(); - } - - /// Non-master threads call this method in destructor automatically - static void detachQuery(); - static void detachQueryIfNotDetached(); + static std::string_view getQueryId(); /// Initializes query with current thread as master thread in constructor, and detaches it in destructor struct QueryScope : private boost::noncopyable @@ -101,13 +91,6 @@ public: void logPeakMemoryUsage(); bool log_peak_memory_usage_in_destructor = true; }; - -private: - static void defaultThreadDeleter(); - - /// Sets query_context for current thread group - /// Can by used only through QueryScope - static void attachQueryContext(ContextPtr query_context); }; } diff --git a/src/Common/ProgressIndication.cpp b/src/Common/ProgressIndication.cpp index b049edcdcf7..df8778eb0d1 100644 --- a/src/Common/ProgressIndication.cpp +++ b/src/Common/ProgressIndication.cpp @@ -15,24 +15,6 @@ /// http://en.wikipedia.org/wiki/ANSI_escape_code #define CLEAR_TO_END_OF_LINE "\033[K" - -namespace -{ - constexpr UInt64 ALL_THREADS = 0; - - UInt64 aggregateCPUUsageNs(DB::ThreadIdToTimeMap times) - { - constexpr UInt64 us_to_ns = 1000; - return us_to_ns * std::accumulate(times.begin(), times.end(), 0ull, - [](UInt64 acc, const auto & elem) - { - if (elem.first == ALL_THREADS) - return acc; - return acc + elem.second.time(); - }); - } -} - namespace DB { @@ -58,7 +40,7 @@ void ProgressIndication::resetProgress() { std::lock_guard lock(profile_events_mutex); 
cpu_usage_meter.reset(getElapsedNanoseconds()); - thread_data.clear(); + hosts_data.clear(); } } @@ -71,25 +53,17 @@ void ProgressIndication::setFileProgressCallback(ContextMutablePtr context, Writ }); } -void ProgressIndication::addThreadIdToList(String const & host, UInt64 thread_id) +void ProgressIndication::updateThreadEventData(HostToTimesMap & new_hosts_data) { std::lock_guard lock(profile_events_mutex); - auto & thread_to_times = thread_data[host]; - if (thread_to_times.contains(thread_id)) - return; - thread_to_times[thread_id] = {}; -} - -void ProgressIndication::updateThreadEventData(HostToThreadTimesMap & new_thread_data) -{ - std::lock_guard lock(profile_events_mutex); + constexpr UInt64 us_to_ns = 1000; UInt64 total_cpu_ns = 0; - for (auto & new_host_map : new_thread_data) + for (auto & new_host : new_hosts_data) { - total_cpu_ns += aggregateCPUUsageNs(new_host_map.second); - thread_data[new_host_map.first] = std::move(new_host_map.second); + total_cpu_ns += us_to_ns * new_host.second.time(); + hosts_data[new_host.first] = new_host.second; } cpu_usage_meter.add(getElapsedNanoseconds(), total_cpu_ns); } @@ -104,16 +78,10 @@ ProgressIndication::MemoryUsage ProgressIndication::getMemoryUsage() const { std::lock_guard lock(profile_events_mutex); - return std::accumulate(thread_data.cbegin(), thread_data.cend(), MemoryUsage{}, + return std::accumulate(hosts_data.cbegin(), hosts_data.cend(), MemoryUsage{}, [](MemoryUsage const & acc, auto const & host_data) { - UInt64 host_usage = 0; - // In ProfileEvents packets thread id 0 specifies common profiling information - // for all threads executing current query on specific host. So instead of summing per thread - // memory consumption it's enough to look for data with thread id 0. - if (auto it = host_data.second.find(ALL_THREADS); it != host_data.second.end()) - host_usage = it->second.memory_usage; - + UInt64 host_usage = host_data.second.memory_usage; return MemoryUsage{.total = acc.total + host_usage, .max = std::max(acc.max, host_usage)}; }); } diff --git a/src/Common/ProgressIndication.h b/src/Common/ProgressIndication.h index 717de5debb9..af5d69c0255 100644 --- a/src/Common/ProgressIndication.h +++ b/src/Common/ProgressIndication.h @@ -24,8 +24,7 @@ struct ThreadEventData UInt64 memory_usage = 0; }; -using ThreadIdToTimeMap = std::unordered_map; -using HostToThreadTimesMap = std::unordered_map; +using HostToTimesMap = std::unordered_map; class ProgressIndication { @@ -56,9 +55,7 @@ public: /// How much seconds passed since query execution start. 
double elapsedSeconds() const { return getElapsedNanoseconds() / 1e9; } - void addThreadIdToList(String const & host, UInt64 thread_id); - - void updateThreadEventData(HostToThreadTimesMap & new_thread_data); + void updateThreadEventData(HostToTimesMap & new_hosts_data); private: double getCPUUsage(); @@ -91,7 +88,7 @@ private: bool write_progress_on_update = false; EventRateMeter cpu_usage_meter{static_cast(clock_gettime_ns()), 2'000'000'000 /*ns*/}; // average cpu utilization last 2 second - HostToThreadTimesMap thread_data; + HostToTimesMap hosts_data; /// In case of all of the above: /// - clickhouse-local /// - input_format_parallel_parsing=true @@ -99,7 +96,7 @@ private: /// /// It is possible concurrent access to the following: /// - writeProgress() (class properties) (guarded with progress_mutex) - /// - thread_data/cpu_usage_meter (guarded with profile_events_mutex) + /// - hosts_data/cpu_usage_meter (guarded with profile_events_mutex) mutable std::mutex profile_events_mutex; mutable std::mutex progress_mutex; }; diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp index df6f860e588..fd6ef12693d 100644 --- a/src/Common/ThreadFuzzer.cpp +++ b/src/Common/ThreadFuzzer.cpp @@ -34,9 +34,7 @@ M(int, pthread_mutex_unlock, pthread_mutex_t * arg) #endif -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif namespace DB { diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index b5a48c48ffe..11f35bc7a6b 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -1,6 +1,5 @@ #include #include -#include #include #include #include @@ -11,19 +10,12 @@ #include #include -#include #include namespace DB { - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - thread_local ThreadStatus constinit * current_thread = nullptr; #if !defined(SANITIZER) @@ -71,24 +63,9 @@ static thread_local ThreadStack alt_stack; static thread_local bool has_alt_stack = false; #endif - -std::vector ThreadGroupStatus::getProfileEventsCountersAndMemoryForThreads() -{ - std::lock_guard guard(mutex); - - /// It is OK to move it, since it is enough to report statistics for the thread at least once. 
- auto stats = std::move(finished_threads_counters_memory); - for (auto * thread : threads) - { - stats.emplace_back(ProfileEventsCountersAndMemory{ - thread->performance_counters.getPartiallyAtomicSnapshot(), - thread->memory_tracker.get(), - thread->thread_id, - }); - } - - return stats; -} +ThreadGroupStatus::ThreadGroupStatus() + : master_thread_id(CurrentThread::get().thread_id) +{} ThreadStatus::ThreadStatus() : thread_id{getThreadId()} @@ -144,6 +121,63 @@ ThreadStatus::ThreadStatus() #endif } +ThreadGroupStatusPtr ThreadStatus::getThreadGroup() const +{ + return thread_group; +} + +const String & ThreadStatus::getQueryId() const +{ + return query_id_from_query_context; +} + +ContextPtr ThreadStatus::getQueryContext() const +{ + return query_context.lock(); +} + +ContextPtr ThreadStatus::getGlobalContext() const +{ + return global_context.lock(); +} + +void ThreadGroupStatus::attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue, LogsLevel logs_level) +{ + std::lock_guard lock(mutex); + shared_data.logs_queue_ptr = logs_queue; + shared_data.client_logs_level = logs_level; +} + +void ThreadStatus::attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue, + LogsLevel logs_level) +{ + local_data.logs_queue_ptr = logs_queue; + local_data.client_logs_level = logs_level; + + if (thread_group) + thread_group->attachInternalTextLogsQueue(logs_queue, logs_level); +} + +InternalTextLogsQueuePtr ThreadStatus::getInternalTextLogsQueue() const +{ + return local_data.logs_queue_ptr.lock(); +} + +InternalProfileEventsQueuePtr ThreadStatus::getInternalProfileEventsQueue() const +{ + return local_data.profile_queue_ptr.lock(); +} + +const String & ThreadStatus::getQueryForLog() const +{ + return local_data.query_for_logs; +} + +LogsLevel ThreadStatus::getClientLogsLevel() const +{ + return local_data.client_logs_level; +} + void ThreadStatus::flushUntrackedMemory() { if (untracked_memory == 0) @@ -157,24 +191,11 @@ ThreadStatus::~ThreadStatus() { flushUntrackedMemory(); - if (thread_group) - { - ThreadGroupStatus::ProfileEventsCountersAndMemory counters - { - performance_counters.getPartiallyAtomicSnapshot(), - memory_tracker.get(), - thread_id - }; - - std::lock_guard guard(thread_group->mutex); - thread_group->finished_threads_counters_memory.emplace_back(std::move(counters)); - thread_group->threads.erase(this); - } - /// It may cause segfault if query_context was destroyed, but was not detached auto query_context_ptr = query_context.lock(); - assert((!query_context_ptr && query_id.empty()) || (query_context_ptr && query_id == query_context_ptr->getCurrentQueryId())); + assert((!query_context_ptr && getQueryId().empty()) || (query_context_ptr && getQueryId() == query_context_ptr->getCurrentQueryId())); + /// detachGroup if it was attached if (deleter) deleter(); @@ -198,71 +219,25 @@ void ThreadStatus::updatePerformanceCounters() } } -void ThreadStatus::assertState(ThreadState permitted_state, const char * description) const -{ - if (getCurrentState() == permitted_state) - return; - - if (description) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected thread state {}: {}", getCurrentState(), description); - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected thread state {}", getCurrentState()); -} - -void ThreadStatus::attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue, - LogsLevel client_logs_level) -{ - logs_queue_ptr = logs_queue; - - if (!thread_group) - return; - - std::lock_guard lock(thread_group->mutex); - 
thread_group->logs_queue_ptr = logs_queue; - thread_group->client_logs_level = client_logs_level; -} - -void ThreadStatus::attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & profile_queue) -{ - profile_queue_ptr = profile_queue; - - if (!thread_group) - return; - - std::lock_guard lock(thread_group->mutex); - thread_group->profile_queue_ptr = profile_queue; -} - -void ThreadStatus::setFatalErrorCallback(std::function callback) -{ - /// It does not make sense to set a callback for sending logs to a client if there's no thread group - chassert(thread_group); - std::lock_guard lock(thread_group->mutex); - fatal_error_callback = std::move(callback); - thread_group->fatal_error_callback = fatal_error_callback; -} - void ThreadStatus::onFatalError() { - /// No thread group - no callback - if (!thread_group) - return; - - std::lock_guard lock(thread_group->mutex); if (fatal_error_callback) fatal_error_callback(); } ThreadStatus * MainThreadStatus::main_thread = nullptr; + MainThreadStatus & MainThreadStatus::getInstance() { static MainThreadStatus thread_status; return thread_status; } + MainThreadStatus::MainThreadStatus() { main_thread = current_thread; } + MainThreadStatus::~MainThreadStatus() { main_thread = nullptr; diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index e620413c8eb..77c924f9650 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -57,46 +57,66 @@ using ThreadStatusPtr = ThreadStatus *; * Create via CurrentThread::initializeQuery (for queries) or directly (for various background tasks). * Use via CurrentThread::getGroup. */ +class ThreadGroupStatus; +using ThreadGroupStatusPtr = std::shared_ptr; + class ThreadGroupStatus { public: - struct ProfileEventsCountersAndMemory - { - ProfileEvents::Counters::Snapshot counters; - Int64 memory_usage; - UInt64 thread_id; - }; + ThreadGroupStatus(); + using FatalErrorCallback = std::function; + ThreadGroupStatus(ContextPtr query_context_, FatalErrorCallback fatal_error_callback_ = {}); - mutable std::mutex mutex; + /// The first thread created this thread group + const UInt64 master_thread_id; + + /// Set up at creation, no race when reading + const ContextWeakPtr query_context; + const ContextWeakPtr global_context; + + const FatalErrorCallback fatal_error_callback; ProfileEvents::Counters performance_counters{VariableContext::Process}; MemoryTracker memory_tracker{VariableContext::Process}; - ContextWeakPtr query_context; - ContextWeakPtr global_context; + struct SharedData + { + InternalProfileEventsQueueWeakPtr profile_queue_ptr; - InternalTextLogsQueueWeakPtr logs_queue_ptr; - InternalProfileEventsQueueWeakPtr profile_queue_ptr; - std::function fatal_error_callback; + InternalTextLogsQueueWeakPtr logs_queue_ptr; + LogsLevel client_logs_level = LogsLevel::none; + String query_for_logs; + UInt64 normalized_query_hash = 0; + }; + + SharedData getSharedData() + { + /// Critical section for making the copy of shared_data + std::lock_guard lock(mutex); + return shared_data; + } + + /// Mutation shared data + void attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue, LogsLevel logs_level); + void attachQueryForLog(const String & query_, UInt64 normalized_hash = 0); + void attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & profile_queue); + + /// When new query starts, new thread group is created for it, current thread becomes master thread of the query + static ThreadGroupStatusPtr createForQuery(ContextPtr query_context_, FatalErrorCallback 
fatal_error_callback_ = {}); + + std::vector getInvolvedThreadIds() const; + void linkThread(UInt64 thread_it); + +private: + mutable std::mutex mutex; + + /// Set up at creation, no race when reading + SharedData shared_data; + /// Set of all thread ids which has been attached to the group std::unordered_set thread_ids; - std::unordered_set threads; - - /// The first thread created this thread group - UInt64 master_thread_id = 0; - - LogsLevel client_logs_level = LogsLevel::none; - - String query; - UInt64 normalized_query_hash = 0; - - std::vector finished_threads_counters_memory; - - std::vector getProfileEventsCountersAndMemoryForThreads(); }; -using ThreadGroupStatusPtr = std::shared_ptr; - /** * We use **constinit** here to tell the compiler the current_thread variable is initialized. * If we didn't help the compiler, then it would most likely add a check before every use of the variable to initialize it if needed. @@ -124,12 +144,11 @@ public: /// TODO: merge them into common entity ProfileEvents::Counters performance_counters{VariableContext::Thread}; - /// Points to performance_counters by default. /// Could be changed to point to another object to calculate performance counters for some narrow scope. ProfileEvents::Counters * current_performance_counters{&performance_counters}; - MemoryTracker memory_tracker{VariableContext::Thread}; + MemoryTracker memory_tracker{VariableContext::Thread}; /// Small amount of untracked memory (per thread atomic-less counter) Int64 untracked_memory = 0; /// Each thread could new/delete memory in range of (-untracked_memory_limit, untracked_memory_limit) without access to common counters. @@ -139,93 +158,70 @@ public: Progress progress_in; Progress progress_out; - using Deleter = std::function; - Deleter deleter; - -protected: +private: /// Group of threads, to which this thread attached ThreadGroupStatusPtr thread_group; - std::atomic thread_state{ThreadState::DetachedFromQuery}; - /// Is set once ContextWeakPtr global_context; /// Use it only from current thread ContextWeakPtr query_context; - String query_id; + /// Is used to send logs from logs_queue to client in case of fatal errors. + using FatalErrorCallback = std::function; + FatalErrorCallback fatal_error_callback; - /// A logs queue used by TCPHandler to pass logs to a client - InternalTextLogsQueueWeakPtr logs_queue_ptr; - - InternalProfileEventsQueueWeakPtr profile_queue_ptr; + ThreadGroupStatus::SharedData local_data; bool performance_counters_finalized = false; - UInt64 query_start_time_nanoseconds = 0; - UInt64 query_start_time_microseconds = 0; - time_t query_start_time = 0; - size_t queries_started = 0; + + String query_id_from_query_context; + /// Requires access to query_id. + friend class MemoryTrackerThreadSwitcher; + void setQueryId(const String & query_id_) + { + query_id_from_query_context = query_id_; + } + + struct TimePoint + { + void setUp(); + UInt64 nanoseconds() const; + UInt64 microseconds() const; + UInt64 seconds() const; + + std::chrono::time_point point; + }; + + TimePoint query_start_time{}; // CPU and Real time query profilers std::unique_ptr query_profiler_real; std::unique_ptr query_profiler_cpu; - Poco::Logger * log = nullptr; - - friend class CurrentThread; - /// Use ptr not to add extra dependencies in the header std::unique_ptr last_rusage; std::unique_ptr taskstats; - /// Is used to send logs from logs_queue to client in case of fatal errors. 
- std::function fatal_error_callback; - /// See setInternalThread() bool internal_thread = false; - /// Requires access to query_id. - friend class MemoryTrackerThreadSwitcher; - void setQueryId(const String & query_id_) - { - query_id = query_id_; - } + /// This is helpful for cut linking dependencies for clickhouse_common_io + using Deleter = std::function; + Deleter deleter; + + Poco::Logger * log = nullptr; public: ThreadStatus(); ~ThreadStatus(); - ThreadGroupStatusPtr getThreadGroup() const - { - return thread_group; - } + ThreadGroupStatusPtr getThreadGroup() const; - enum ThreadState - { - DetachedFromQuery = 0, /// We just created thread or it is a background thread - AttachedToQuery, /// Thread executes enqueued query - Died, /// Thread does not exist - }; + const String & getQueryId() const; - int getCurrentState() const - { - return thread_state.load(std::memory_order_relaxed); - } - - std::string_view getQueryId() const - { - return query_id; - } - - auto getQueryContext() const - { - return query_context.lock(); - } - - auto getGlobalContext() const - { - return global_context.lock(); - } + ContextPtr getQueryContext() const; + ContextPtr getGlobalContext() const; /// "Internal" ThreadStatus is used for materialized views for separate /// tracking into system.query_views_log @@ -243,39 +239,30 @@ public: /// query. void setInternalThread(); - /// Starts new query and create new thread group for it, current thread becomes master thread of the query - void initializeQuery(); - /// Attaches slave thread to existing thread group - void attachQuery(const ThreadGroupStatusPtr & thread_group_, bool check_detached = true); + void attachToGroup(const ThreadGroupStatusPtr & thread_group_, bool check_detached = true); + + /// Detaches thread from the thread group and the query, dumps performance counters if they have not been dumped + void detachFromGroup(); /// Returns pointer to the current profile counters to restore them back. /// Note: consequent call with new scope will detach previous scope. ProfileEvents::Counters * attachProfileCountersScope(ProfileEvents::Counters * performance_counters_scope); - InternalTextLogsQueuePtr getInternalTextLogsQueue() const - { - return thread_state == Died ? nullptr : logs_queue_ptr.lock(); - } - void attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue, LogsLevel client_logs_level); - - InternalProfileEventsQueuePtr getInternalProfileEventsQueue() const - { - return thread_state == Died ? nullptr : profile_queue_ptr.lock(); - } + InternalTextLogsQueuePtr getInternalTextLogsQueue() const; + LogsLevel getClientLogsLevel() const; void attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & profile_queue); + InternalProfileEventsQueuePtr getInternalProfileEventsQueue() const; - /// Callback that is used to trigger sending fatal error messages to client. 
- void setFatalErrorCallback(std::function callback); + void attachQueryForLog(const String & query_); + const String & getQueryForLog() const; + + /// Proper cal for fatal_error_callback void onFatalError(); - /// Sets query context for current master thread and its thread group - /// NOTE: query_context have to be alive until detachQuery() is called - void attachQueryContext(ContextPtr query_context); - /// Update several ProfileEvents counters void updatePerformanceCounters(); @@ -285,14 +272,11 @@ public: /// Set the counters last usage to now void resetPerformanceCountersLastUsage(); - /// Detaches thread from the thread group and the query, dumps performance counters if they have not been dumped - void detachQuery(bool exit_if_already_detached = false, bool thread_exits = false); - void logToQueryViewsLog(const ViewRuntimeData & vinfo); void flushUntrackedMemory(); -protected: +private: void applyQuerySettings(); void initPerformanceCounters(); @@ -301,14 +285,9 @@ protected: void finalizeQueryProfiler(); - void logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database, std::chrono::time_point now); + void logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database); - - void assertState(ThreadState permitted_state, const char * description = nullptr) const; - - -private: - void setupState(const ThreadGroupStatusPtr & thread_group_); + void attachToGroupImpl(const ThreadGroupStatusPtr & thread_group_); }; /** diff --git a/src/Common/UTF8Helpers.h b/src/Common/UTF8Helpers.h index 623a62a6f79..1dac8f60c5e 100644 --- a/src/Common/UTF8Helpers.h +++ b/src/Common/UTF8Helpers.h @@ -11,9 +11,7 @@ #if defined(__aarch64__) && defined(__ARM_NEON) # include -# ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic ignored "-Wreserved-identifier" -# endif +# pragma clang diagnostic ignored "-Wreserved-identifier" #endif diff --git a/src/Common/ZooKeeper/ZooKeeperLock.cpp b/src/Common/ZooKeeper/ZooKeeperLock.cpp index 1200dcdb533..a52c942a35f 100644 --- a/src/Common/ZooKeeper/ZooKeeperLock.cpp +++ b/src/Common/ZooKeeper/ZooKeeperLock.cpp @@ -41,6 +41,16 @@ ZooKeeperLock::~ZooKeeperLock() } } +bool ZooKeeperLock::isLocked() const +{ + return locked; +} + +const std::string & ZooKeeperLock::getLockPath() const +{ + return lock_path; +} + void ZooKeeperLock::unlock() { if (!locked) diff --git a/src/Common/ZooKeeper/ZooKeeperLock.h b/src/Common/ZooKeeper/ZooKeeperLock.h index f249e69dcc3..755ca1333b8 100644 --- a/src/Common/ZooKeeper/ZooKeeperLock.h +++ b/src/Common/ZooKeeper/ZooKeeperLock.h @@ -37,6 +37,8 @@ public: void unlock(); bool tryLock(); + bool isLocked() const; + const std::string & getLockPath() const; private: zkutil::ZooKeeperPtr zookeeper; diff --git a/src/Common/examples/int_hashes_perf.cpp b/src/Common/examples/int_hashes_perf.cpp index fecfa0adba8..2b260dca809 100644 --- a/src/Common/examples/int_hashes_perf.cpp +++ b/src/Common/examples/int_hashes_perf.cpp @@ -1,6 +1,4 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #if defined (OS_LINUX) # include diff --git a/src/Common/logger_useful.h b/src/Common/logger_useful.h index 2a0c582331d..ba1e2e7789b 100644 --- a/src/Common/logger_useful.h +++ b/src/Common/logger_useful.h @@ -34,7 +34,7 @@ namespace { \ auto _logger = ::getLogger(logger); \ const bool _is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \ - (DB::CurrentThread::getGroup()->client_logs_level >= (priority)); \ + (DB::CurrentThread::get().getClientLogsLevel() >= 
(priority)); \ if (_is_clients_log || _logger->is((PRIORITY))) \ { \ std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \ diff --git a/src/Common/memcmpSmall.h b/src/Common/memcmpSmall.h index e95a21b836d..e0b232a3485 100644 --- a/src/Common/memcmpSmall.h +++ b/src/Common/memcmpSmall.h @@ -502,9 +502,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size) #elif defined(__aarch64__) && defined(__ARM_NEON) # include -# ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic ignored "-Wreserved-identifier" -# endif +# pragma clang diagnostic ignored "-Wreserved-identifier" inline uint64_t getNibbleMask(uint8x16_t res) { diff --git a/src/Common/memcpySmall.h b/src/Common/memcpySmall.h index 4f38095c7f1..5eaa1203f05 100644 --- a/src/Common/memcpySmall.h +++ b/src/Common/memcpySmall.h @@ -8,9 +8,7 @@ #if defined(__aarch64__) && defined(__ARM_NEON) # include -# ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic ignored "-Wreserved-identifier" -# endif +# pragma clang diagnostic ignored "-Wreserved-identifier" #endif /** memcpy function could work suboptimal if all the following conditions are met: diff --git a/src/Compression/CompressionCodecDelta.cpp b/src/Compression/CompressionCodecDelta.cpp index 6d6078b9ee1..37f9230da14 100644 --- a/src/Compression/CompressionCodecDelta.cpp +++ b/src/Compression/CompressionCodecDelta.cpp @@ -193,7 +193,8 @@ void registerCodecDelta(CompressionCodecFactory & factory) UInt8 method_code = static_cast(CompressionMethodByte::Delta); auto codec_builder = [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr { - UInt8 delta_bytes_size = 0; + /// Default bytes size is 1. + UInt8 delta_bytes_size = 1; if (arguments && !arguments->children.empty()) { @@ -202,8 +203,8 @@ void registerCodecDelta(CompressionCodecFactory & factory) const auto children = arguments->children; const auto * literal = children[0]->as(); - if (!literal) - throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "Delta codec argument must be integer"); + if (!literal || literal->value.getType() != Field::Types::Which::UInt64) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "Delta codec argument must be unsigned integer"); size_t user_bytes_size = literal->value.safeGet(); if (user_bytes_size != 1 && user_bytes_size != 2 && user_bytes_size != 4 && user_bytes_size != 8) diff --git a/src/Compression/CompressionCodecDoubleDelta.cpp b/src/Compression/CompressionCodecDoubleDelta.cpp index 782675dfd32..40271726697 100644 --- a/src/Compression/CompressionCodecDoubleDelta.cpp +++ b/src/Compression/CompressionCodecDoubleDelta.cpp @@ -1,13 +1,11 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #include #include #include #include #include -#include +#include #include #include @@ -31,7 +29,7 @@ namespace DB /** DoubleDelta column codec implementation. * * Based on Gorilla paper: http://www.vldb.org/pvldb/vol8/p1816-teller.pdf, which was extended - * to support 64bit types. The drawback is 1 extra bit for 32-byte wide deltas: 5-bit prefix + * to support 64bit types. The drawback is 1 extra bit for 32-bit wide deltas: 5-bit prefix * instead of 4-bit prefix. 
* * This codec is best used against monotonic integer sequences with constant (or almost constant) @@ -145,6 +143,8 @@ namespace ErrorCodes extern const int CANNOT_COMPRESS; extern const int CANNOT_DECOMPRESS; extern const int BAD_ARGUMENTS; + extern const int ILLEGAL_SYNTAX_FOR_CODEC_TYPE; + extern const int ILLEGAL_CODEC_PARAMETER; } namespace @@ -549,10 +549,28 @@ void registerCodecDoubleDelta(CompressionCodecFactory & factory) factory.registerCompressionCodecWithType("DoubleDelta", method_code, [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr { - if (arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Codec DoubleDelta does not accept any arguments"); + /// Default bytes size is 1. + UInt8 data_bytes_size = 1; + if (arguments && !arguments->children.empty()) + { + if (arguments->children.size() > 1) + throw Exception(ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE, "DoubleDelta codec must have 1 parameter, given {}", arguments->children.size()); + + const auto children = arguments->children; + const auto * literal = children[0]->as(); + if (!literal || literal->value.getType() != Field::Types::Which::UInt64) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "DoubleDelta codec argument must be unsigned integer"); + + size_t user_bytes_size = literal->value.safeGet(); + if (user_bytes_size != 1 && user_bytes_size != 2 && user_bytes_size != 4 && user_bytes_size != 8) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "Argument value for DoubleDelta codec can be 1, 2, 4 or 8, given {}", user_bytes_size); + data_bytes_size = static_cast(user_bytes_size); + } + else if (column_type) + { + data_bytes_size = getDataBytesSize(column_type); + } - UInt8 data_bytes_size = column_type ? getDataBytesSize(column_type) : 0; return std::make_shared(data_bytes_size); }); } diff --git a/src/Compression/CompressionCodecFPC.cpp b/src/Compression/CompressionCodecFPC.cpp index 31b12b762c8..8c3e518ed62 100644 --- a/src/Compression/CompressionCodecFPC.cpp +++ b/src/Compression/CompressionCodecFPC.cpp @@ -109,28 +109,42 @@ void registerCodecFPC(CompressionCodecFactory & factory) auto method_code = static_cast(CompressionMethodByte::FPC); auto codec_builder = [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr { - UInt8 float_width = 0; + /// Set default float width to 4. 
+ UInt8 float_width = 4; if (column_type != nullptr) float_width = getFloatBytesSize(*column_type); UInt8 level = CompressionCodecFPC::DEFAULT_COMPRESSION_LEVEL; if (arguments && !arguments->children.empty()) { - if (arguments->children.size() > 1) + if (arguments->children.size() > 2) { throw Exception(ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE, - "FPC codec must have 1 parameter, given {}", arguments->children.size()); + "FPC codec must have from 0 to 2 parameters, given {}", arguments->children.size()); } const auto * literal = arguments->children.front()->as(); - if (!literal) - throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "FPC codec argument must be integer"); + if (!literal || literal->value.getType() != Field::Types::Which::UInt64) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "FPC codec argument must be unsigned integer"); level = literal->value.safeGet(); if (level < 1 || level > CompressionCodecFPC::MAX_COMPRESSION_LEVEL) throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "FPC codec level must be between {} and {}", 1, static_cast(CompressionCodecFPC::MAX_COMPRESSION_LEVEL)); + + if (arguments->children.size() == 2) + { + literal = arguments->children[1]->as(); + if (!literal || !isInt64OrUInt64FieldType(literal->value.getType())) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "FPC codec argument must be unsigned integer"); + + size_t user_float_width = literal->value.safeGet(); + if (user_float_width != 4 && user_float_width != 8) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "Float size for FPC codec can be 4 or 8, given {}", user_float_width); + float_width = static_cast(user_float_width); + } } + return std::make_shared(float_width, level); }; factory.registerCompressionCodecWithType("FPC", method_code, codec_builder); diff --git a/src/Compression/CompressionCodecGorilla.cpp b/src/Compression/CompressionCodecGorilla.cpp index d68648bd83c..2c6f862d38b 100644 --- a/src/Compression/CompressionCodecGorilla.cpp +++ b/src/Compression/CompressionCodecGorilla.cpp @@ -1,12 +1,11 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #include #include #include #include #include +#include #include #include #include @@ -134,6 +133,8 @@ namespace ErrorCodes extern const int CANNOT_COMPRESS; extern const int CANNOT_DECOMPRESS; extern const int BAD_ARGUMENTS; + extern const int ILLEGAL_SYNTAX_FOR_CODEC_TYPE; + extern const int ILLEGAL_CODEC_PARAMETER; } namespace @@ -445,10 +446,28 @@ void registerCodecGorilla(CompressionCodecFactory & factory) UInt8 method_code = static_cast(CompressionMethodByte::Gorilla); auto codec_builder = [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr { - if (arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Codec Gorilla does not accept any arguments"); + /// Default bytes size is 1 + UInt8 data_bytes_size = 1; + if (arguments && !arguments->children.empty()) + { + if (arguments->children.size() > 1) + throw Exception(ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE, "Gorilla codec must have 1 parameter, given {}", arguments->children.size()); + + const auto children = arguments->children; + const auto * literal = children[0]->as(); + if (!literal || literal->value.getType() != Field::Types::Which::UInt64) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "Gorilla codec argument must be unsigned integer"); + + size_t user_bytes_size = literal->value.safeGet(); + if (user_bytes_size != 1 && user_bytes_size != 2 && user_bytes_size != 4 && 
user_bytes_size != 8) + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "Argument value for Gorilla codec can be 1, 2, 4 or 8, given {}", user_bytes_size); + data_bytes_size = static_cast(user_bytes_size); + } + else if (column_type) + { + data_bytes_size = getDataBytesSize(column_type); + } - UInt8 data_bytes_size = column_type ? getDataBytesSize(column_type) : 0; return std::make_shared(data_bytes_size); }; factory.registerCompressionCodecWithType("Gorilla", method_code, codec_builder); diff --git a/src/Compression/CompressionCodecT64.cpp b/src/Compression/CompressionCodecT64.cpp index e7f1615128a..1f8331c8a5f 100644 --- a/src/Compression/CompressionCodecT64.cpp +++ b/src/Compression/CompressionCodecT64.cpp @@ -33,7 +33,8 @@ public: Bit }; - CompressionCodecT64(TypeIndex type_idx_, Variant variant_); + // type_idx_ is required for compression, but not for decompression. + CompressionCodecT64(std::optional type_idx_, Variant variant_); uint8_t getMethodByte() const override; @@ -53,7 +54,7 @@ protected: bool isGenericCompression() const override { return false; } private: - TypeIndex type_idx; + std::optional type_idx; Variant variant; }; @@ -91,9 +92,12 @@ enum class MagicNumber : uint8_t IPv4 = 21, }; -MagicNumber serializeTypeId(TypeIndex type_id) +MagicNumber serializeTypeId(std::optional type_id) { - switch (type_id) + if (!type_id) + throw Exception(ErrorCodes::CANNOT_COMPRESS, "T64 codec doesn't support compression without information about column type"); + + switch (*type_id) { case TypeIndex::UInt8: return MagicNumber::UInt8; case TypeIndex::UInt16: return MagicNumber::UInt16; @@ -115,7 +119,7 @@ MagicNumber serializeTypeId(TypeIndex type_id) break; } - throw Exception(ErrorCodes::LOGICAL_ERROR, "Type is not supported by T64 codec: {}", static_cast(type_id)); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Type is not supported by T64 codec: {}", static_cast(*type_id)); } TypeIndex deserializeTypeId(uint8_t serialized_type_id) @@ -632,7 +636,7 @@ UInt32 CompressionCodecT64::doCompressData(const char * src, UInt32 src_size, ch memcpy(dst, &cookie, 1); dst += 1; - switch (baseType(type_idx)) + switch (baseType(*type_idx)) { case TypeIndex::Int8: return 1 + compressData(src, src_size, dst, variant); @@ -699,7 +703,7 @@ uint8_t CompressionCodecT64::getMethodByte() const return codecId(); } -CompressionCodecT64::CompressionCodecT64(TypeIndex type_idx_, Variant variant_) +CompressionCodecT64::CompressionCodecT64(std::optional type_idx_, Variant variant_) : type_idx(type_idx_) , variant(variant_) { @@ -712,7 +716,7 @@ CompressionCodecT64::CompressionCodecT64(TypeIndex type_idx_, Variant variant_) void CompressionCodecT64::updateHash(SipHash & hash) const { getCodecDesc()->updateTreeHash(hash); - hash.update(type_idx); + hash.update(type_idx.value_or(TypeIndex::Nothing)); hash.update(variant); } @@ -742,9 +746,14 @@ void registerCodecT64(CompressionCodecFactory & factory) throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, "Wrong modification for T64: {}", name); } - auto type_idx = typeIdx(type); - if (type && type_idx == TypeIndex::Nothing) - throw Exception(ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE, "T64 codec is not supported for specified type {}", type->getName()); + std::optional type_idx; + if (type) + { + type_idx = typeIdx(type); + if (type_idx == TypeIndex::Nothing) + throw Exception( + ErrorCodes::ILLEGAL_SYNTAX_FOR_CODEC_TYPE, "T64 codec is not supported for specified type {}", type->getName()); + } return std::make_shared(type_idx, variant); }; diff --git 
a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp index 7b47324a890..cabeb13e2f8 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.cpp +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -103,7 +104,8 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo auth_settings.server_side_encryption_customer_key_base64, std::move(headers), auth_settings.use_environment_credentials.value_or(false), - auth_settings.use_insecure_imds_request.value_or(false)); + auth_settings.use_insecure_imds_request.value_or(false), + auth_settings.expiration_window_seconds.value_or(S3::DEFAULT_EXPIRATION_WINDOW_SECONDS)); auto new_client = std::make_shared(std::move(new_uri), std::move(auth_settings), std::move(client)); diff --git a/src/Core/BackgroundSchedulePool.cpp b/src/Core/BackgroundSchedulePool.cpp index 993cfb6ef04..5384ee7f961 100644 --- a/src/Core/BackgroundSchedulePool.cpp +++ b/src/Core/BackgroundSchedulePool.cpp @@ -149,8 +149,9 @@ Coordination::WatchCallback BackgroundSchedulePoolTaskInfo::getWatchCallback() } -BackgroundSchedulePool::BackgroundSchedulePool(size_t size_, CurrentMetrics::Metric tasks_metric_, const char *thread_name_) +BackgroundSchedulePool::BackgroundSchedulePool(size_t size_, CurrentMetrics::Metric tasks_metric_, CurrentMetrics::Metric size_metric_, const char *thread_name_) : tasks_metric(tasks_metric_) + , size_metric(size_metric_, size_) , thread_name(thread_name_) { LOG_INFO(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), "Create BackgroundSchedulePool with {} threads", size_); @@ -177,6 +178,8 @@ void BackgroundSchedulePool::increaseThreadsCount(size_t new_threads_count) threads.resize(new_threads_count); for (size_t i = old_threads_count; i < new_threads_count; ++i) threads[i] = ThreadFromGlobalPoolNoTracingContextPropagation([this] { threadFunction(); }); + + size_metric.changeTo(new_threads_count); } diff --git a/src/Core/BackgroundSchedulePool.h b/src/Core/BackgroundSchedulePool.h index 0fb70b1f715..ef6fbfa68e9 100644 --- a/src/Core/BackgroundSchedulePool.h +++ b/src/Core/BackgroundSchedulePool.h @@ -54,7 +54,7 @@ public: void increaseThreadsCount(size_t new_threads_count); /// thread_name_ cannot be longer then 13 bytes (2 bytes is reserved for "/D" suffix for delayExecutionThreadFunction()) - BackgroundSchedulePool(size_t size_, CurrentMetrics::Metric tasks_metric_, const char *thread_name_); + BackgroundSchedulePool(size_t size_, CurrentMetrics::Metric tasks_metric_, CurrentMetrics::Metric size_metric_, const char *thread_name_); ~BackgroundSchedulePool(); private: @@ -91,6 +91,7 @@ private: DelayedTasks delayed_tasks; CurrentMetrics::Metric tasks_metric; + CurrentMetrics::Increment size_metric; std::string thread_name; }; diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 18c4c0d97a0..e050124e497 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -1,6 +1,4 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #include #include @@ -312,12 +310,8 @@ private: /// It will allow client to see failure messages directly. 
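/// Illustrative sketch (not part of the patch): the crash-log code below no longer locks the
/// thread group to obtain the query text. The query is attached to the thread once per query
/// via CurrentThread::attachQueryForLog() (see the ProcessList::insert change further down)
/// and read back with ThreadStatus::getQueryForLog(). A minimal, hypothetical helper showing
/// the read side, assuming the ClickHouse headers below:
#include <Common/CurrentThread.h>
#include <Common/ThreadStatus.h>

static std::string queryTextForCrashLog()
{
    /// Empty if no ThreadStatus is attached to this thread.
    if (!DB::CurrentThread::isInitialized())
        return {};
    return DB::CurrentThread::get().getQueryForLog();
}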
if (thread_ptr) { - query_id = std::string(thread_ptr->getQueryId()); - - if (auto thread_group = thread_ptr->getThreadGroup()) - { - query = DB::toOneLineQuery(thread_group->query); - } + query_id = thread_ptr->getQueryId(); + query = thread_ptr->getQueryForLog(); if (auto logs_queue = thread_ptr->getInternalTextLogsQueue()) { diff --git a/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp index fda0bbe8032..3ede69d5362 100644 --- a/src/Databases/DatabaseMemory.cpp +++ b/src/Databases/DatabaseMemory.cpp @@ -26,7 +26,12 @@ namespace ErrorCodes DatabaseMemory::DatabaseMemory(const String & name_, ContextPtr context_) : DatabaseWithOwnTablesBase(name_, "DatabaseMemory(" + name_ + ")", context_) , data_path("data/" + escapeForFileName(database_name) + "/") -{} +{ + /// Temporary database should not have any data on the moment of its creation + /// In case of sudden server shutdown remove database folder of temporary database + if (name_ == DatabaseCatalog::TEMPORARY_DATABASE) + removeDataPath(context_); +} void DatabaseMemory::createTable( ContextPtr /*context*/, @@ -71,8 +76,7 @@ void DatabaseMemory::dropTable( if (table->storesDataOnDisk()) { - assert(getDatabaseName() != DatabaseCatalog::TEMPORARY_DATABASE); - fs::path table_data_dir{getTableDataPath(table_name)}; + fs::path table_data_dir{fs::path{getContext()->getPath()} / getTableDataPath(table_name)}; if (fs::exists(table_data_dir)) fs::remove_all(table_data_dir); } @@ -80,7 +84,6 @@ void DatabaseMemory::dropTable( catch (...) { std::lock_guard lock{mutex}; - assert(database_name != DatabaseCatalog::TEMPORARY_DATABASE); attachTableUnlocked(table_name, table); throw; } @@ -129,10 +132,15 @@ UUID DatabaseMemory::tryGetTableUUID(const String & table_name) const return UUIDHelpers::Nil; } +void DatabaseMemory::removeDataPath(ContextPtr local_context) +{ + std::filesystem::remove_all(local_context->getPath() + data_path); +} + void DatabaseMemory::drop(ContextPtr local_context) { /// Remove data on explicit DROP DATABASE - std::filesystem::remove_all(local_context->getPath() + data_path); + removeDataPath(local_context); } void DatabaseMemory::alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) diff --git a/src/Databases/DatabaseMemory.h b/src/Databases/DatabaseMemory.h index 6262543b0c1..0f703a0b46e 100644 --- a/src/Databases/DatabaseMemory.h +++ b/src/Databases/DatabaseMemory.h @@ -53,6 +53,8 @@ public: std::vector> getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const override; private: + void removeDataPath(ContextPtr local_context); + const String data_path; using NameToASTCreate = std::unordered_map; NameToASTCreate create_queries TSA_GUARDED_BY(mutex); diff --git a/src/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp index 309a3dda2e4..d6c9ac50dbe 100644 --- a/src/Dictionaries/HashedDictionary.cpp +++ b/src/Dictionaries/HashedDictionary.cpp @@ -75,7 +75,7 @@ public: pool.scheduleOrThrowOnError([this, shard, thread_group = CurrentThread::getGroup()] { if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); setThreadName("HashedDictLoad"); threadWorker(shard); @@ -224,7 +224,7 @@ HashedDictionary::~HashedDictionary() pool.trySchedule([&container, thread_group = CurrentThread::getGroup()] { if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); 
setThreadName("HashedDictDtor"); if constexpr (sparse) diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index e0e4735f519..1c3bb857798 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -152,7 +153,8 @@ std::unique_ptr getClient( config.getString(config_prefix + ".server_side_encryption_customer_key_base64", ""), {}, config.getBool(config_prefix + ".use_environment_credentials", config.getBool("s3.use_environment_credentials", false)), - config.getBool(config_prefix + ".use_insecure_imds_request", config.getBool("s3.use_insecure_imds_request", false))); + config.getBool(config_prefix + ".use_insecure_imds_request", config.getBool("s3.use_insecure_imds_request", false)), + config.getBool(config_prefix + ".expiration_window_seconds", config.getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS))); } } diff --git a/src/Disks/getDiskConfigurationFromAST.cpp b/src/Disks/getDiskConfigurationFromAST.cpp index e6b08046036..4b1323b4db8 100644 --- a/src/Disks/getDiskConfigurationFromAST.cpp +++ b/src/Disks/getDiskConfigurationFromAST.cpp @@ -83,4 +83,24 @@ DiskConfigurationPtr getDiskConfigurationFromAST(const std::string & root_name, return conf; } + +ASTs convertDiskConfigurationToAST(const Poco::Util::AbstractConfiguration & configuration, const std::string & config_path) +{ + ASTs result; + + Poco::Util::AbstractConfiguration::Keys keys; + configuration.keys(config_path, keys); + + for (const auto & key : keys) + { + result.push_back( + makeASTFunction( + "equals", + std::make_shared(key), + std::make_shared(configuration.getString(config_path + "." + key)))); + } + + return result; +} + } diff --git a/src/Disks/getDiskConfigurationFromAST.h b/src/Disks/getDiskConfigurationFromAST.h index 1f9d7c1bfe6..5697955e914 100644 --- a/src/Disks/getDiskConfigurationFromAST.h +++ b/src/Disks/getDiskConfigurationFromAST.h @@ -25,4 +25,12 @@ using DiskConfigurationPtr = Poco::AutoPtr; */ DiskConfigurationPtr getDiskConfigurationFromAST(const std::string & root_name, const ASTs & disk_args, ContextPtr context); +/// The same as above function, but return XML::Document for easier modification of result configuration. +[[ maybe_unused ]] Poco::AutoPtr getDiskConfigurationFromASTImpl(const std::string & root_name, const ASTs & disk_args, ContextPtr context); + +/* + * A reverse function. 
+ */ +[[ maybe_unused ]] ASTs convertDiskConfigurationToAST(const Poco::Util::AbstractConfiguration & configuration, const std::string & config_path); + } diff --git a/src/Functions/FunctionsCodingIP.cpp b/src/Functions/FunctionsCodingIP.cpp index 4784368db9b..fb54fb951d1 100644 --- a/src/Functions/FunctionsCodingIP.cpp +++ b/src/Functions/FunctionsCodingIP.cpp @@ -1,7 +1,5 @@ #include -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #include diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 55003044ff5..f832bf404a8 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -803,7 +803,7 @@ struct ConvertImpl, DataTypeNumber, Name, Con } }; -static ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) +static inline ColumnUInt8::MutablePtr copyNullMap(ColumnPtr col) { ColumnUInt8::MutablePtr null_map = nullptr; if (const auto * col_null = checkAndGetColumn(col.get())) diff --git a/src/Functions/GatherUtils/CMakeLists.txt b/src/Functions/GatherUtils/CMakeLists.txt index fe600f86d07..376ca6613d8 100644 --- a/src/Functions/GatherUtils/CMakeLists.txt +++ b/src/Functions/GatherUtils/CMakeLists.txt @@ -4,14 +4,6 @@ add_headers_and_sources(clickhouse_functions_gatherutils .) add_library(clickhouse_functions_gatherutils ${clickhouse_functions_gatherutils_sources} ${clickhouse_functions_gatherutils_headers}) target_link_libraries(clickhouse_functions_gatherutils PRIVATE dbms) -if (HAS_SUGGEST_OVERRIDE) - target_compile_definitions(clickhouse_functions_gatherutils PUBLIC HAS_SUGGEST_OVERRIDE) -endif() - -if (HAS_SUGGEST_DESTRUCTOR_OVERRIDE) - target_compile_definitions(clickhouse_functions_gatherutils PUBLIC HAS_SUGGEST_DESTRUCTOR_OVERRIDE) -endif() - if (OMIT_HEAVY_DEBUG_SYMBOLS) target_compile_options(clickhouse_functions_gatherutils PRIVATE "-g0") endif() diff --git a/src/Functions/GatherUtils/Sources.h b/src/Functions/GatherUtils/Sources.h index af6fc84beba..b75239d8678 100644 --- a/src/Functions/GatherUtils/Sources.h +++ b/src/Functions/GatherUtils/Sources.h @@ -141,12 +141,8 @@ struct NumericArraySource : public ArraySourceImpl> /// The methods can be virtual or not depending on the template parameter. See IStringSource. 
#pragma GCC diagnostic push -#ifdef HAS_SUGGEST_OVERRIDE -# pragma GCC diagnostic ignored "-Wsuggest-override" -#endif -#ifdef HAS_SUGGEST_DESTRUCTOR_OVERRIDE -# pragma GCC diagnostic ignored "-Wsuggest-destructor-override" -#endif +#pragma GCC diagnostic ignored "-Wsuggest-override" +#pragma GCC diagnostic ignored "-Wsuggest-destructor-override" template struct ConstSource : public Base diff --git a/src/Functions/toValidUTF8.cpp b/src/Functions/toValidUTF8.cpp index e509b59a23e..528cef93dd3 100644 --- a/src/Functions/toValidUTF8.cpp +++ b/src/Functions/toValidUTF8.cpp @@ -13,9 +13,7 @@ #if defined(__aarch64__) && defined(__ARM_NEON) # include -# ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic ignored "-Wreserved-identifier" -# endif +# pragma clang diagnostic ignored "-Wreserved-identifier" #endif namespace DB diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 417e985159d..65df93413dd 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -12,9 +12,7 @@ #include -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif namespace ProfileEvents { diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index e14b3ae9129..809a2404746 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -18,9 +18,7 @@ #if defined(__aarch64__) && defined(__ARM_NEON) # include -# ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic ignored "-Wreserved-identifier" -# endif +# pragma clang diagnostic ignored "-Wreserved-identifier" #endif namespace DB diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index aba884948da..e80e58314c7 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -564,7 +564,8 @@ std::unique_ptr ClientFactory::create( // NOLINT const String & server_side_encryption_customer_key_base64, HTTPHeaderEntries headers, bool use_environment_credentials, - bool use_insecure_imds_request) + bool use_insecure_imds_request, + uint64_t expiration_window_seconds) { PocoHTTPClientConfiguration client_configuration = cfg_; client_configuration.updateSchemeAndRegion(); @@ -592,7 +593,8 @@ std::unique_ptr ClientFactory::create( // NOLINT client_configuration, std::move(credentials), use_environment_credentials, - use_insecure_imds_request); + use_insecure_imds_request, + expiration_window_seconds); client_configuration.retryStrategy = std::make_shared(std::move(client_configuration.retryStrategy)); return Client::create( diff --git a/src/IO/S3/Client.h b/src/IO/S3/Client.h index 7ac97555dd1..0e102a1859d 100644 --- a/src/IO/S3/Client.h +++ b/src/IO/S3/Client.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -228,7 +229,8 @@ public: const String & server_side_encryption_customer_key_base64, HTTPHeaderEntries headers, bool use_environment_credentials, - bool use_insecure_imds_request); + bool use_insecure_imds_request, + uint64_t expiration_window_seconds = DEFAULT_EXPIRATION_WINDOW_SECONDS); PocoHTTPClientConfiguration createClientConfiguration( const String & force_region, diff --git a/src/IO/S3/Credentials.cpp b/src/IO/S3/Credentials.cpp index 4b9fa59ea2a..f6675961ddc 100644 --- a/src/IO/S3/Credentials.cpp +++ b/src/IO/S3/Credentials.cpp @@ -21,6 +21,21 @@ namespace DB::S3 { +namespace +{ + +bool areCredentialsEmptyOrExpired(const Aws::Auth::AWSCredentials & credentials, uint64_t expiration_window_seconds) +{ + if (credentials.IsEmpty()) + return true; + + const Aws::Utils::DateTime now = 
Aws::Utils::DateTime::Now(); + return now >= credentials.GetExpiration() - std::chrono::seconds(expiration_window_seconds); +} + + +} + AWSEC2MetadataClient::AWSEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration, const char * endpoint_) : Aws::Internal::AWSHttpResourceClient(client_configuration) , endpoint(endpoint_) @@ -270,8 +285,10 @@ void AWSInstanceProfileCredentialsProvider::refreshIfExpired() Reload(); } -AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider::AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider(DB::S3::PocoHTTPClientConfiguration & aws_client_configuration) +AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider::AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider( + DB::S3::PocoHTTPClientConfiguration & aws_client_configuration, uint64_t expiration_window_seconds_) : logger(&Poco::Logger::get("AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider")) + , expiration_window_seconds(expiration_window_seconds_) { // check environment variables String tmp_region = Aws::Environment::GetEnv("AWS_DEFAULT_REGION"); @@ -388,16 +405,12 @@ void AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider::Reload() void AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider::refreshIfExpired() { Aws::Utils::Threading::ReaderLockGuard guard(m_reloadLock); - if (!credentials.IsExpiredOrEmpty()) - { + if (!areCredentialsEmptyOrExpired(credentials, expiration_window_seconds)) return; - } guard.UpgradeToWriterLock(); - if (!credentials.IsExpiredOrEmpty()) // double-checked lock to avoid refreshing twice - { + if (!areCredentialsEmptyOrExpired(credentials, expiration_window_seconds)) // double-checked lock to avoid refreshing twice return; - } Reload(); } @@ -406,7 +419,8 @@ S3CredentialsProviderChain::S3CredentialsProviderChain( const DB::S3::PocoHTTPClientConfiguration & configuration, const Aws::Auth::AWSCredentials & credentials, bool use_environment_credentials, - bool use_insecure_imds_request) + bool use_insecure_imds_request, + uint64_t expiration_window_seconds) { auto * logger = &Poco::Logger::get("S3CredentialsProviderChain"); @@ -439,7 +453,7 @@ S3CredentialsProviderChain::S3CredentialsProviderChain( configuration.for_disk_s3, configuration.get_request_throttler, configuration.put_request_throttler); - AddProvider(std::make_shared(aws_client_configuration)); + AddProvider(std::make_shared(aws_client_configuration, expiration_window_seconds)); } AddProvider(std::make_shared()); diff --git a/src/IO/S3/Credentials.h b/src/IO/S3/Credentials.h index f786810726d..d6214c5e2fa 100644 --- a/src/IO/S3/Credentials.h +++ b/src/IO/S3/Credentials.h @@ -17,6 +17,8 @@ namespace DB::S3 { +inline static constexpr uint64_t DEFAULT_EXPIRATION_WINDOW_SECONDS = 120; + class AWSEC2MetadataClient : public Aws::Internal::AWSHttpResourceClient { static constexpr char EC2_SECURITY_CREDENTIALS_RESOURCE[] = "/latest/meta-data/iam/security-credentials"; @@ -97,9 +99,11 @@ class AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider : public Aws::Auth::AWS /// See STSAssumeRoleWebIdentityCredentialsProvider. 
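/// Illustrative sketch (not part of the patch): the new expiration_window_seconds setting makes
/// credentials count as expired `window` seconds before their real expiration time, so they are
/// refreshed ahead of time (DEFAULT_EXPIRATION_WINDOW_SECONDS = 120 by default). The check,
/// simplified to plain std::chrono types (the real code above uses Aws::Auth::AWSCredentials and
/// Aws::Utils::DateTime):
#include <chrono>
#include <cstdint>

static bool isExpiredWithinWindow(std::chrono::system_clock::time_point expiration,
                                  uint64_t expiration_window_seconds = 120)
{
    const auto now = std::chrono::system_clock::now();
    return now >= expiration - std::chrono::seconds(expiration_window_seconds);
}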
public: - explicit AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider(DB::S3::PocoHTTPClientConfiguration & aws_client_configuration); + explicit AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider( + DB::S3::PocoHTTPClientConfiguration & aws_client_configuration, uint64_t expiration_window_seconds_); Aws::Auth::AWSCredentials GetAWSCredentials() override; + protected: void Reload() override; @@ -114,14 +118,19 @@ private: Aws::String token; bool initialized = false; Poco::Logger * logger; + uint64_t expiration_window_seconds; }; class S3CredentialsProviderChain : public Aws::Auth::AWSCredentialsProviderChain { public: - S3CredentialsProviderChain(const DB::S3::PocoHTTPClientConfiguration & configuration, const Aws::Auth::AWSCredentials & credentials, bool use_environment_credentials, bool use_insecure_imds_request); + S3CredentialsProviderChain( + const DB::S3::PocoHTTPClientConfiguration & configuration, + const Aws::Auth::AWSCredentials & credentials, + bool use_environment_credentials, + bool use_insecure_imds_request, + uint64_t expiration_window_seconds); }; - } #endif diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index aa8de07c3f4..4acc31ca472 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -85,6 +85,10 @@ AuthSettings AuthSettings::loadFromConfig(const std::string & config_elem, const if (config.has(config_elem + ".use_insecure_imds_request")) use_insecure_imds_request = config.getBool(config_elem + ".use_insecure_imds_request"); + std::optional expiration_window_seconds; + if (config.has(config_elem + ".expiration_window_seconds")) + expiration_window_seconds = config.getUInt64(config_elem + ".expiration_window_seconds"); + HTTPHeaderEntries headers; Poco::Util::AbstractConfiguration::Keys subconfig_keys; config.keys(config_elem, subconfig_keys); @@ -107,7 +111,8 @@ AuthSettings AuthSettings::loadFromConfig(const std::string & config_elem, const std::move(server_side_encryption_customer_key_base64), std::move(headers), use_environment_credentials, - use_insecure_imds_request + use_insecure_imds_request, + expiration_window_seconds }; } @@ -127,6 +132,7 @@ void AuthSettings::updateFrom(const AuthSettings & from) server_side_encryption_customer_key_base64 = from.server_side_encryption_customer_key_base64; use_environment_credentials = from.use_environment_credentials; use_insecure_imds_request = from.use_insecure_imds_request; + expiration_window_seconds = from.expiration_window_seconds; } } diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h index 7f277176632..ff948c065f8 100644 --- a/src/IO/S3Common.h +++ b/src/IO/S3Common.h @@ -84,6 +84,7 @@ struct AuthSettings std::optional use_environment_credentials; std::optional use_insecure_imds_request; + std::optional expiration_window_seconds; bool operator==(const AuthSettings & other) const = default; diff --git a/src/IO/WriteBufferValidUTF8.cpp b/src/IO/WriteBufferValidUTF8.cpp index 4c8e172f43c..b72bc627220 100644 --- a/src/IO/WriteBufferValidUTF8.cpp +++ b/src/IO/WriteBufferValidUTF8.cpp @@ -8,9 +8,7 @@ #if defined(__aarch64__) && defined(__ARM_NEON) # include -# ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic ignored "-Wreserved-identifier" -# endif +# pragma clang diagnostic ignored "-Wreserved-identifier" #endif namespace DB diff --git a/src/IO/tests/gtest_bit_io.cpp b/src/IO/tests/gtest_bit_io.cpp index 6a88f7baa64..6ad6bec894e 100644 --- a/src/IO/tests/gtest_bit_io.cpp +++ b/src/IO/tests/gtest_bit_io.cpp @@ -1,6 +1,4 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored 
"-Wreserved-identifier" -#endif #include #include diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 09c2eebfdd6..0f7cb961e34 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -2309,10 +2309,10 @@ BlocksList Aggregator::prepareBlocksAndFillTwoLevelImpl( { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); BlocksList blocks; while (true) @@ -3030,10 +3030,10 @@ void Aggregator::mergeBlocks(BucketToBlocks bucket_to_blocks, AggregatedDataVari { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); for (Block & block : bucket_to_blocks[bucket]) { diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index cf1d5203bf7..2cfa55f0d87 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -129,13 +129,21 @@ namespace CurrentMetrics { extern const Metric ContextLockWait; extern const Metric BackgroundMovePoolTask; + extern const Metric BackgroundMovePoolSize; extern const Metric BackgroundSchedulePoolTask; + extern const Metric BackgroundSchedulePoolSize; extern const Metric BackgroundBufferFlushSchedulePoolTask; + extern const Metric BackgroundBufferFlushSchedulePoolSize; extern const Metric BackgroundDistributedSchedulePoolTask; + extern const Metric BackgroundDistributedSchedulePoolSize; extern const Metric BackgroundMessageBrokerSchedulePoolTask; + extern const Metric BackgroundMessageBrokerSchedulePoolSize; extern const Metric BackgroundMergesAndMutationsPoolTask; + extern const Metric BackgroundMergesAndMutationsPoolSize; extern const Metric BackgroundFetchesPoolTask; + extern const Metric BackgroundFetchesPoolSize; extern const Metric BackgroundCommonPoolTask; + extern const Metric BackgroundCommonPoolSize; } namespace DB @@ -2175,6 +2183,7 @@ BackgroundSchedulePool & Context::getBufferFlushSchedulePool() const shared->buffer_flush_schedule_pool = std::make_unique( background_buffer_flush_schedule_pool_size, CurrentMetrics::BackgroundBufferFlushSchedulePoolTask, + CurrentMetrics::BackgroundBufferFlushSchedulePoolSize, "BgBufSchPool"); } @@ -2226,6 +2235,7 @@ BackgroundSchedulePool & Context::getSchedulePool() const shared->schedule_pool = std::make_unique( background_schedule_pool_size, CurrentMetrics::BackgroundSchedulePoolTask, + CurrentMetrics::BackgroundSchedulePoolSize, "BgSchPool"); } @@ -2246,6 +2256,7 @@ BackgroundSchedulePool & Context::getDistributedSchedulePool() const shared->distributed_schedule_pool = std::make_unique( background_distributed_schedule_pool_size, CurrentMetrics::BackgroundDistributedSchedulePoolTask, + CurrentMetrics::BackgroundDistributedSchedulePoolSize, "BgDistSchPool"); } @@ -2266,6 +2277,7 @@ BackgroundSchedulePool & Context::getMessageBrokerSchedulePool() const shared->message_broker_schedule_pool = std::make_unique( background_message_broker_schedule_pool_size, CurrentMetrics::BackgroundMessageBrokerSchedulePoolTask, + CurrentMetrics::BackgroundMessageBrokerSchedulePoolSize, "BgMBSchPool"); } @@ -3826,6 +3838,7 @@ void Context::initializeBackgroundExecutorsIfNeeded() /*max_threads_count*/background_pool_size, 
/*max_tasks_count*/background_pool_size * background_merges_mutations_concurrency_ratio, CurrentMetrics::BackgroundMergesAndMutationsPoolTask, + CurrentMetrics::BackgroundMergesAndMutationsPoolSize, background_merges_mutations_scheduling_policy ); LOG_INFO(shared->log, "Initialized background executor for merges and mutations with num_threads={}, num_tasks={}, scheduling_policy={}", @@ -3836,7 +3849,8 @@ void Context::initializeBackgroundExecutorsIfNeeded() "Move", background_move_pool_size, background_move_pool_size, - CurrentMetrics::BackgroundMovePoolTask + CurrentMetrics::BackgroundMovePoolTask, + CurrentMetrics::BackgroundMovePoolSize ); LOG_INFO(shared->log, "Initialized background executor for move operations with num_threads={}, num_tasks={}", background_move_pool_size, background_move_pool_size); @@ -3845,7 +3859,8 @@ void Context::initializeBackgroundExecutorsIfNeeded() "Fetch", background_fetches_pool_size, background_fetches_pool_size, - CurrentMetrics::BackgroundFetchesPoolTask + CurrentMetrics::BackgroundFetchesPoolTask, + CurrentMetrics::BackgroundFetchesPoolSize ); LOG_INFO(shared->log, "Initialized background executor for fetches with num_threads={}, num_tasks={}", background_fetches_pool_size, background_fetches_pool_size); @@ -3854,7 +3869,8 @@ void Context::initializeBackgroundExecutorsIfNeeded() "Common", background_common_pool_size, background_common_pool_size, - CurrentMetrics::BackgroundCommonPoolTask + CurrentMetrics::BackgroundCommonPoolTask, + CurrentMetrics::BackgroundCommonPoolSize ); LOG_INFO(shared->log, "Initialized background executor for common operations (e.g. clearing old parts) with num_threads={}, num_tasks={}", background_common_pool_size, background_common_pool_size); diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 975e0da66ce..b11a973c7b7 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -121,9 +121,16 @@ TemporaryTableHolder::~TemporaryTableHolder() { if (id != UUIDHelpers::Nil) { - auto table = getTable(); - table->flushAndShutdown(); - temporary_tables->dropTable(getContext(), "_tmp_" + toString(id)); + try + { + auto table = getTable(); + table->flushAndShutdown(); + temporary_tables->dropTable(getContext(), "_tmp_" + toString(id)); + } + catch (...) + { + tryLogCurrentException("TemporaryTableHolder"); + } } } @@ -140,7 +147,6 @@ StoragePtr TemporaryTableHolder::getTable() const return table; } - void DatabaseCatalog::initializeAndLoadTemporaryDatabase() { drop_delay_sec = getContext()->getConfigRef().getInt("database_atomic_delay_before_drop_table_sec", default_drop_delay_sec); diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 4200373018d..88645ff72af 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -235,6 +235,21 @@ public: void checkTableCanBeRemovedOrRenamed(const StorageID & table_id, bool check_referential_dependencies, bool check_loading_dependencies, bool is_drop_database = false) const; + + struct TableMarkedAsDropped + { + StorageID table_id = StorageID::createEmpty(); + StoragePtr table; + String metadata_path; + time_t drop_time{}; + }; + using TablesMarkedAsDropped = std::list; + + TablesMarkedAsDropped getTablesMarkedDropped() + { + std::lock_guard lock(tables_marked_dropped_mutex); + return tables_marked_dropped; + } private: // The global instance of database catalog. unique_ptr is to allow // deferred initialization. 
Thought I'd use std::optional, but I can't @@ -263,15 +278,6 @@ private: return uuid.toUnderType().items[0] >> (64 - bits_for_first_level); } - struct TableMarkedAsDropped - { - StorageID table_id = StorageID::createEmpty(); - StoragePtr table; - String metadata_path; - time_t drop_time{}; - }; - using TablesMarkedAsDropped = std::list; - void dropTableDataTask(); void dropTableFinally(const TableMarkedAsDropped & table); diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 9931ae97286..4c9f47e5915 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -848,6 +848,23 @@ std::string ExpressionActions::dumpActions() const return ss.str(); } +void ExpressionActions::describeActions(WriteBuffer & out, std::string_view prefix) const +{ + bool first = true; + + for (const auto & action : actions) + { + out << prefix << (first ? "Actions: " : " "); + out << action.toString() << '\n'; + first = false; + } + + out << prefix << "Positions:"; + for (const auto & pos : result_positions) + out << ' ' << pos; + out << '\n'; +} + JSONBuilder::ItemPtr ExpressionActions::toTree() const { auto inputs_array = std::make_unique(); diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index 11957997a30..db6670c50b9 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -109,6 +109,9 @@ public: const Block & getSampleBlock() const { return sample_block; } std::string dumpActions() const; + + void describeActions(WriteBuffer & out, std::string_view prefix) const; + JSONBuilder::ItemPtr toTree() const; static NameAndTypePair getSmallestColumn(const NamesAndTypesList & columns); diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index 9858b27d57a..04a116ec0c7 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -971,11 +971,11 @@ private: { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); LOG_TRACE(log, "Start loading object '{}'", name); try diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index b03049a209f..5c2ad548c93 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -107,39 +107,4 @@ void FillingRow::initFromDefaults(size_t from_pos) row[i] = getFillDescription(i).fill_from; } -void insertFromFillingRow(MutableColumns & filling_columns, MutableColumns & interpolate_columns, MutableColumns & other_columns, - const FillingRow & filling_row, const Block & interpolate_block) -{ - for (size_t i = 0, size = filling_columns.size(); i < size; ++i) - { - if (filling_row[i].isNull()) - { - filling_columns[i]->insertDefault(); - } - else - { - filling_columns[i]->insert(filling_row[i]); - } - } - - if (size_t size = interpolate_block.columns()) - { - Columns columns = interpolate_block.getColumns(); - for (size_t i = 0; i < size; ++i) - interpolate_columns[i]->insertFrom(*columns[i]->convertToFullColumnIfConst(), 0); - } - else - for (const auto & interpolate_column : interpolate_columns) - interpolate_column->insertDefault(); - - for (const auto & other_column : other_columns) - other_column->insertDefault(); -} - -void copyRowFromColumns(MutableColumns & dest, const Columns & source, size_t row_num) -{ - for (size_t i = 0, size = 
source.size(); i < size; ++i) - dest[i]->insertFrom(*source[i], row_num); -} - } diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index 331c237285b..8d47094d0de 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -39,8 +39,4 @@ private: SortDescription sort_description; }; -void insertFromFillingRow(MutableColumns & filling_columns, MutableColumns & interpolate_columns, MutableColumns & other_columns, - const FillingRow & filling_row, const Block & interpolate_block); -void copyRowFromColumns(MutableColumns & dest, const Columns & source, size_t row_num); - } diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index fba985da41c..b4376426700 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -495,7 +495,7 @@ size_t HashJoin::getTotalByteCount() const if (!data) return 0; -#ifdef NDEBUG +#ifndef NDEBUG size_t debug_blocks_allocated_size = 0; for (const auto & block : data->blocks) debug_blocks_allocated_size += block.allocatedBytes(); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index c352280b7ed..7a4d65a4d57 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -940,23 +940,32 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const if (create.temporary) { - if (create.storage && create.storage->engine && create.storage->engine->name != "Memory") - throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables can only be created with ENGINE = Memory, not {}", - create.storage->engine->name); - /// It's possible if some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not. /// It makes sense when default_table_engine setting is used, but not for temporary tables. /// For temporary tables we ignore this setting to allow CREATE TEMPORARY TABLE query without specifying ENGINE - /// even if setting is set to MergeTree or something like that (otherwise MergeTree will be substituted and query will fail). 
- if (create.storage && !create.storage->engine) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Invalid storage definition for temporary table: must be either ENGINE = Memory or empty"); - auto engine_ast = std::make_shared(); - engine_ast->name = "Memory"; - engine_ast->no_empty_args = true; - auto storage_ast = std::make_shared(); - storage_ast->set(storage_ast->engine, engine_ast); - create.set(create.storage, storage_ast); + if (!create.cluster.empty()) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with ON CLUSTER clause"); + + if (create.storage) + { + if (create.storage->engine) + { + if (create.storage->engine->name.starts_with("Replicated") || create.storage->engine->name == "KeeperMap") + throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with Replicated or KeeperMap table engines"); + } + else + throw Exception(ErrorCodes::INCORRECT_QUERY, "Invalid storage definition for temporary table"); + } + else + { + auto engine_ast = std::make_shared(); + engine_ast->name = "Memory"; + engine_ast->no_empty_args = true; + auto storage_ast = std::make_shared(); + storage_ast->set(storage_ast->engine, engine_ast); + create.set(create.storage, storage_ast); + } return; } @@ -1284,8 +1293,21 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, if (create.if_not_exists && getContext()->tryResolveStorageID({"", create.getTable()}, Context::ResolveExternal)) return false; + DatabasePtr database = DatabaseCatalog::instance().getDatabase(DatabaseCatalog::TEMPORARY_DATABASE); + String temporary_table_name = create.getTable(); - auto temporary_table = TemporaryTableHolder(getContext(), properties.columns, properties.constraints, query_ptr); + auto creator = [&](const StorageID & table_id) + { + return StorageFactory::instance().get(create, + database->getTableDataPath(table_id.getTableName()), + getContext(), + getContext()->getGlobalContext(), + properties.columns, + properties.constraints, + false); + }; + auto temporary_table = TemporaryTableHolder(getContext(), creator, query_ptr); + getContext()->getSessionContext()->addExternalTable(temporary_table_name, std::move(temporary_table)); return true; } @@ -1712,7 +1734,13 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const else { if (create.temporary) - required_access.emplace_back(AccessType::CREATE_TEMPORARY_TABLE); + { + /// Currently default table engine for temporary tables is Memory. default_table_engine does not affect temporary tables. 
+ if (create.storage && create.storage->engine && create.storage->engine->name != "Memory") + required_access.emplace_back(AccessType::CREATE_ARBITRARY_TEMPORARY_TABLE); + else + required_access.emplace_back(AccessType::CREATE_TEMPORARY_TABLE); + } else { if (create.replace_table) diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index f4507de5ac7..e16403bed67 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -282,11 +282,6 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(const String & table_name, else if (kind == ASTDropQuery::Kind::Drop) { context_handle->removeExternalTable(table_name); - table->flushAndShutdown(); - auto table_lock = table->lockExclusively(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); - /// Delete table data - table->drop(); - table->is_dropped = true; } else if (kind == ASTDropQuery::Kind::Detach) { diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index b792ea538ae..5db39ece2e5 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -203,10 +203,10 @@ ProcessList::insert(const String & query_, const IAST * ast, ContextMutablePtr q ProcessListForUser & user_process_list = user_process_list_it->second; /// Actualize thread group info + CurrentThread::attachQueryForLog(query_); auto thread_group = CurrentThread::getGroup(); if (thread_group) { - std::lock_guard lock_thread_group(thread_group->mutex); thread_group->performance_counters.setParent(&user_process_list.user_performance_counters); thread_group->memory_tracker.setParent(&user_process_list.user_memory_tracker); if (user_process_list.user_temp_data_on_disk) @@ -214,8 +214,6 @@ ProcessList::insert(const String & query_, const IAST * ast, ContextMutablePtr q query_context->setTempDataOnDisk(std::make_shared( user_process_list.user_temp_data_on_disk, settings.max_temporary_data_on_disk_size_for_query)); } - thread_group->query = query_; - thread_group->normalized_query_hash = normalizedQueryHash(query_); /// Set query-level memory trackers thread_group->memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage); @@ -576,10 +574,7 @@ QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_even res.peak_memory_usage = thread_group->memory_tracker.getPeak(); if (get_thread_list) - { - std::lock_guard lock(thread_group->mutex); - res.thread_ids.assign(thread_group->thread_ids.begin(), thread_group->thread_ids.end()); - } + res.thread_ids = thread_group->getInvolvedThreadIds(); if (get_profile_events) res.profile_counters = std::make_shared(thread_group->performance_counters.getPartiallyAtomicSnapshot()); diff --git a/src/Interpreters/ProfileEventsExt.cpp b/src/Interpreters/ProfileEventsExt.cpp index 0f6b52b2611..7fbbe3c662b 100644 --- a/src/Interpreters/ProfileEventsExt.cpp +++ b/src/Interpreters/ProfileEventsExt.cpp @@ -113,34 +113,10 @@ void getProfileEvents( block = std::move(temp_columns); MutableColumns columns = block.mutateColumns(); auto thread_group = CurrentThread::getGroup(); - auto const current_thread_id = CurrentThread::get().thread_id; - std::vector snapshots; ThreadIdToCountersSnapshot new_snapshots; + ProfileEventsSnapshot group_snapshot; { - auto stats = thread_group->getProfileEventsCountersAndMemoryForThreads(); - snapshots.reserve(stats.size()); - - for (auto & stat : stats) - { - auto const thread_id = stat.thread_id; - if (thread_id == current_thread_id) - 
continue; - auto current_time = time(nullptr); - auto previous_snapshot = last_sent_snapshots.find(thread_id); - auto increment = - previous_snapshot != last_sent_snapshots.end() - ? CountersIncrement(stat.counters, previous_snapshot->second) - : CountersIncrement(stat.counters); - snapshots.push_back(ProfileEventsSnapshot{ - thread_id, - std::move(increment), - stat.memory_usage, - current_time - }); - new_snapshots[thread_id] = std::move(stat.counters); - } - group_snapshot.thread_id = 0; group_snapshot.current_time = time(nullptr); group_snapshot.memory_usage = thread_group->memory_tracker.get(); @@ -154,11 +130,6 @@ void getProfileEvents( } last_sent_snapshots = std::move(new_snapshots); - for (auto & snapshot : snapshots) - { - dumpProfileEvents(snapshot, columns, server_display_name); - dumpMemoryTracker(snapshot, columns, server_display_name); - } dumpProfileEvents(group_snapshot, columns, server_display_name); dumpMemoryTracker(group_snapshot, columns, server_display_name); diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index da4f97c1483..b4e1da2c697 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -40,13 +41,96 @@ namespace ErrorCodes extern const int CANNOT_SET_THREAD_PRIORITY; } +ThreadGroupStatus::ThreadGroupStatus(ContextPtr query_context_, FatalErrorCallback fatal_error_callback_) + : master_thread_id(CurrentThread::get().thread_id) + , query_context(query_context_) + , global_context(query_context_->getGlobalContext()) + , fatal_error_callback(fatal_error_callback_) +{} + +std::vector ThreadGroupStatus::getInvolvedThreadIds() const +{ + std::vector res; + + { + std::lock_guard lock(mutex); + res.assign(thread_ids.begin(), thread_ids.end()); + } + + return res; +} + +void ThreadGroupStatus::linkThread(UInt64 thread_it) +{ + std::lock_guard lock(mutex); + thread_ids.insert(thread_it); +} + +ThreadGroupStatusPtr ThreadGroupStatus::createForQuery(ContextPtr query_context_, std::function fatal_error_callback_) +{ + auto group = std::make_shared(query_context_, std::move(fatal_error_callback_)); + group->memory_tracker.setDescription("(for query)"); + return group; +} + +void ThreadGroupStatus::attachQueryForLog(const String & query_, UInt64 normalized_hash) +{ + auto hash = normalized_hash ? 
normalized_hash : normalizedQueryHash(query_); + + std::lock_guard lock(mutex); + shared_data.query_for_logs = query_; + shared_data.normalized_query_hash = hash; +} + +void ThreadStatus::attachQueryForLog(const String & query_) +{ + local_data.query_for_logs = query_; + local_data.normalized_query_hash = normalizedQueryHash(query_); + + if (!thread_group) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No thread group attached to the thread {}", thread_id); + + thread_group->attachQueryForLog(local_data.query_for_logs, local_data.normalized_query_hash); +} + +void ThreadGroupStatus::attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & profile_queue) +{ + std::lock_guard lock(mutex); + shared_data.profile_queue_ptr = profile_queue; +} + +void ThreadStatus::attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & profile_queue) +{ + if (!thread_group) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No thread group attached to the thread {}", thread_id); + + local_data.profile_queue_ptr = profile_queue; + thread_group->attachInternalProfileEventsQueue(profile_queue); +} + +void CurrentThread::attachInternalProfileEventsQueue(const InternalProfileEventsQueuePtr & queue) +{ + if (unlikely(!current_thread)) + return; + current_thread->attachInternalProfileEventsQueue(queue); +} + +void CurrentThread::attachQueryForLog(const String & query_) +{ + if (unlikely(!current_thread)) + return; + current_thread->attachQueryForLog(query_); +} + void ThreadStatus::applyQuerySettings() { auto query_context_ptr = query_context.lock(); - assert(query_context_ptr); + if (!query_context_ptr) + return; + const Settings & settings = query_context_ptr->getSettingsRef(); - query_id = query_context_ptr->getCurrentQueryId(); + query_id_from_query_context = query_context_ptr->getCurrentQueryId(); initQueryProfiler(); untracked_memory_limit = settings.max_untracked_memory; @@ -68,67 +152,64 @@ void ThreadStatus::applyQuerySettings() #endif } - -void ThreadStatus::attachQueryContext(ContextPtr query_context_) +void ThreadStatus::attachToGroupImpl(const ThreadGroupStatusPtr & thread_group_) { - query_context = query_context_; - - if (global_context.expired()) - global_context = query_context_->getGlobalContext(); - - if (thread_group) - { - std::lock_guard lock(thread_group->mutex); - - thread_group->query_context = query_context; - if (thread_group->global_context.expired()) - thread_group->global_context = global_context; - } - - applyQuerySettings(); -} - -void CurrentThread::defaultThreadDeleter() -{ - if (unlikely(!current_thread)) - return; - current_thread->detachQuery(true, true); -} - -void ThreadStatus::setupState(const ThreadGroupStatusPtr & thread_group_) -{ - assertState(ThreadState::DetachedFromQuery, __PRETTY_FUNCTION__); - /// Attach or init current thread to thread group and copy useful information from it thread_group = thread_group_; + thread_group->linkThread(thread_id); performance_counters.setParent(&thread_group->performance_counters); memory_tracker.setParent(&thread_group->memory_tracker); - { - std::lock_guard lock(thread_group->mutex); + query_context = thread_group->query_context; + global_context = thread_group->global_context; - /// NOTE: thread may be attached multiple times if it is reused from a thread pool. 
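/// Illustrative usage sketch (not part of the patch): the NOTE above (a pooled thread may attach
/// more than once) is what the renamed CurrentThread::attachToGroupIfDetached() /
/// detachFromGroupIfNotDetached() pair handles; the call sites updated earlier in this diff
/// (Aggregator, HashedDictionary, ExternalLoader) all follow the same pattern. `pool` is the
/// surrounding ThreadPool and the thread name is a placeholder; requires
/// <Common/scope_guard_safe.h>, <Common/setThreadName.h> and <Common/CurrentThread.h>:
pool.scheduleOrThrowOnError([thread_group = CurrentThread::getGroup()]
{
    SCOPE_EXIT_SAFE(
        if (thread_group)
            CurrentThread::detachFromGroupIfNotDetached();
    );
    if (thread_group)
        CurrentThread::attachToGroupIfDetached(thread_group);

    setThreadName("ExampleWorker");
    /// ... the work now runs under the query's memory tracker and profile counters ...
});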
- thread_group->thread_ids.insert(thread_id); - thread_group->threads.insert(this); + fatal_error_callback = thread_group->fatal_error_callback; - logs_queue_ptr = thread_group->logs_queue_ptr; - fatal_error_callback = thread_group->fatal_error_callback; - query_context = thread_group->query_context; - profile_queue_ptr = thread_group->profile_queue_ptr; - - if (global_context.expired()) - global_context = thread_group->global_context; - } - - if (auto query_context_ptr = query_context.lock()) - { - applyQuerySettings(); - } + local_data = thread_group->getSharedData(); + applyQuerySettings(); initPerformanceCounters(); +} - thread_state = ThreadState::AttachedToQuery; +void ThreadStatus::detachFromGroup() +{ + if (!thread_group) + return; + + LockMemoryExceptionInThread lock_memory_tracker(VariableContext::Global); + + /// flush untracked memory before resetting memory_tracker parent + flushUntrackedMemory(); + + finalizeQueryProfiler(); + finalizePerformanceCounters(); + + performance_counters.setParent(&ProfileEvents::global_counters); + + memory_tracker.reset(); + memory_tracker.setParent(thread_group->memory_tracker.getParent()); + + thread_group.reset(); + + query_id_from_query_context.clear(); + query_context.reset(); + + local_data = {}; + + fatal_error_callback = {}; + +#if defined(OS_LINUX) + if (os_thread_priority) + { + LOG_TRACE(log, "Resetting nice"); + + if (0 != setpriority(PRIO_PROCESS, static_cast(thread_id), 0)) + LOG_ERROR(log, "Cannot 'setpriority' back to zero: {}", errnoToString()); + + os_thread_priority = 0; + } +#endif } void ThreadStatus::setInternalThread() @@ -137,28 +218,19 @@ void ThreadStatus::setInternalThread() internal_thread = true; } -void ThreadStatus::initializeQuery() +void ThreadStatus::attachToGroup(const ThreadGroupStatusPtr & thread_group_, bool check_detached) { - setupState(std::make_shared()); - - /// No need to lock on mutex here - thread_group->memory_tracker.setDescription("(for query)"); - thread_group->master_thread_id = thread_id; -} - -void ThreadStatus::attachQuery(const ThreadGroupStatusPtr & thread_group_, bool check_detached) -{ - if (thread_state == ThreadState::AttachedToQuery) - { - if (check_detached) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't attach query to the thread, it is already attached"); - return; - } + if (thread_group && check_detached) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't attach query to the thread, it is already attached"); if (!thread_group_) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to attach to nullptr thread group"); - setupState(thread_group_); + if (thread_group) + return; + + deleter = [this] () { detachFromGroup(); }; + attachToGroupImpl(thread_group_); } ProfileEvents::Counters * ThreadStatus::attachProfileCountersScope(ProfileEvents::Counters * performance_counters_scope) @@ -178,6 +250,26 @@ ProfileEvents::Counters * ThreadStatus::attachProfileCountersScope(ProfileEvents return prev_counters; } +void ThreadStatus::TimePoint::setUp() +{ + point = std::chrono::system_clock::now(); +} + +UInt64 ThreadStatus::TimePoint::nanoseconds() const +{ + return timeInNanoseconds(point); +} + +UInt64 ThreadStatus::TimePoint::microseconds() const +{ + return timeInMicroseconds(point); +} + +UInt64 ThreadStatus::TimePoint::seconds() const +{ + return timeInSeconds(point); +} + void ThreadStatus::initPerformanceCounters() { performance_counters_finalized = false; @@ -188,16 +280,9 @@ void ThreadStatus::initPerformanceCounters() memory_tracker.resetCounters(); 
memory_tracker.setDescription("(for thread)"); - // query_start_time_{microseconds, nanoseconds} are all constructed from the same time point - // to ensure that they are all equal up to the precision of a second. - const auto now = std::chrono::system_clock::now(); + query_start_time.setUp(); - query_start_time_nanoseconds = timeInNanoseconds(now); - query_start_time = timeInSeconds(now); - query_start_time_microseconds = timeInMicroseconds(now); - ++queries_started; - - // query_start_time_nanoseconds cannot be used here since RUsageCounters expect CLOCK_MONOTONIC + // query_start_time.nanoseconds cannot be used here since RUsageCounters expect CLOCK_MONOTONIC *last_rusage = RUsageCounters::current(); if (!internal_thread) @@ -272,11 +357,11 @@ void ThreadStatus::finalizePerformanceCounters() if (settings.log_queries && settings.log_query_threads) { const auto now = std::chrono::system_clock::now(); - Int64 query_duration_ms = (timeInMicroseconds(now) - query_start_time_microseconds) / 1000; + Int64 query_duration_ms = std::chrono::duration_cast(now - query_start_time.point).count(); if (query_duration_ms >= settings.log_queries_min_query_duration_ms.totalMilliseconds()) { if (auto thread_log = global_context_ptr->getQueryThreadLog()) - logToQueryThreadLog(*thread_log, query_context_ptr->getCurrentDatabase(), now); + logToQueryThreadLog(*thread_log, query_context_ptr->getCurrentDatabase()); } } } @@ -331,77 +416,20 @@ void ThreadStatus::finalizeQueryProfiler() query_profiler_cpu.reset(); } -void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits) -{ - LockMemoryExceptionInThread lock_memory_tracker(VariableContext::Global); - - if (exit_if_already_detached && thread_state == ThreadState::DetachedFromQuery) - { - thread_state = thread_exits ? ThreadState::Died : ThreadState::DetachedFromQuery; - return; - } - - assertState(ThreadState::AttachedToQuery, __PRETTY_FUNCTION__); - - finalizeQueryProfiler(); - finalizePerformanceCounters(); - - /// Detach from thread group - { - std::lock_guard guard(thread_group->mutex); - thread_group->threads.erase(this); - } - performance_counters.setParent(&ProfileEvents::global_counters); - - flushUntrackedMemory(); - - memory_tracker.reset(); - memory_tracker.setParent(thread_group->memory_tracker.getParent()); - - query_id.clear(); - query_context.reset(); - - /// The memory of thread_group->finished_threads_counters_memory is temporarily moved to this vector, which is deallocated out of critical section. - std::vector move_to_temp; - - /// Avoid leaking of ThreadGroupStatus::finished_threads_counters_memory - /// (this is in case someone uses system thread but did not call getProfileEventsCountersAndMemoryForThreads()) - { - std::lock_guard guard(thread_group->mutex); - move_to_temp = std::move(thread_group->finished_threads_counters_memory); - } - - thread_group.reset(); - - thread_state = thread_exits ? 
ThreadState::Died : ThreadState::DetachedFromQuery; - -#if defined(OS_LINUX) - if (os_thread_priority) - { - LOG_TRACE(log, "Resetting nice"); - - if (0 != setpriority(PRIO_PROCESS, static_cast(thread_id), 0)) - LOG_ERROR(log, "Cannot 'setpriority' back to zero: {}", errnoToString()); - - os_thread_priority = 0; - } -#endif -} - -void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database, std::chrono::time_point now) +void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database) { QueryThreadLogElement elem; // construct current_time and current_time_microseconds using the same time point // so that the two times will always be equal up to a precision of a second. - auto current_time = timeInSeconds(now); - auto current_time_microseconds = timeInMicroseconds(now); + TimePoint current_time; + current_time.setUp(); - elem.event_time = current_time; - elem.event_time_microseconds = current_time_microseconds; - elem.query_start_time = query_start_time; - elem.query_start_time_microseconds = query_start_time_microseconds; - elem.query_duration_ms = (timeInNanoseconds(now) - query_start_time_nanoseconds) / 1000000U; + elem.event_time = current_time.seconds(); + elem.event_time_microseconds = current_time.microseconds(); + elem.query_start_time = query_start_time.seconds(); + elem.query_start_time_microseconds = query_start_time.microseconds(); + elem.query_duration_ms = std::chrono::duration_cast(current_time.point - query_start_time.point).count(); elem.read_rows = progress_in.read_rows.load(std::memory_order_relaxed); elem.read_bytes = progress_in.read_bytes.load(std::memory_order_relaxed); @@ -417,13 +445,9 @@ void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String elem.current_database = current_database; if (thread_group) { - { - std::lock_guard lock(thread_group->mutex); - - elem.master_thread_id = thread_group->master_thread_id; - elem.query = thread_group->query; - elem.normalized_query_hash = thread_group->normalized_query_hash; - } + elem.master_thread_id = thread_group->master_thread_id; + elem.query = local_data.query_for_logs; + elem.normalized_query_hash = local_data.normalized_query_hash; } auto query_context_ptr = query_context.lock(); @@ -457,6 +481,7 @@ void ThreadStatus::logToQueryViewsLog(const ViewRuntimeData & vinfo) auto query_context_ptr = query_context.lock(); if (!query_context_ptr) return; + auto views_log = query_context_ptr->getQueryViewsLog(); if (!views_log) return; @@ -467,7 +492,7 @@ void ThreadStatus::logToQueryViewsLog(const ViewRuntimeData & vinfo) element.event_time_microseconds = timeInMicroseconds(vinfo.runtime_stats->event_time); element.view_duration_ms = vinfo.runtime_stats->elapsed_ms; - element.initial_query_id = query_id; + element.initial_query_id = query_id_from_query_context; element.view_name = vinfo.table_id.getFullTableName(); element.view_uuid = vinfo.table_id.uuid; element.view_type = vinfo.runtime_stats->type; @@ -475,16 +500,14 @@ void ThreadStatus::logToQueryViewsLog(const ViewRuntimeData & vinfo) element.view_query = getCleanQueryAst(vinfo.query, query_context_ptr); element.view_target = vinfo.runtime_stats->target_name; - auto events = std::make_shared(performance_counters.getPartiallyAtomicSnapshot()); element.read_rows = progress_in.read_rows.load(std::memory_order_relaxed); element.read_bytes = progress_in.read_bytes.load(std::memory_order_relaxed); element.written_rows = progress_out.written_rows.load(std::memory_order_relaxed); 
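Note: in finalizePerformanceCounters() and logToQueryThreadLog() the hand-rolled unit arithmetic (subtracting microsecond counters and dividing by 1000, or nanosecond counters by 1000000U) is replaced with std::chrono::duration_cast over the stored time points. A small example of the equivalence, assuming two system_clock time points 1.5 seconds apart:

    #include <cassert>
    #include <chrono>
    #include <cstdint>

    int main()
    {
        using namespace std::chrono;

        const auto start = system_clock::now();
        const auto now = start + milliseconds(1500);

        // Old style: convert both points to microseconds since epoch, subtract, divide.
        const std::int64_t start_us = duration_cast<microseconds>(start.time_since_epoch()).count();
        const std::int64_t now_us = duration_cast<microseconds>(now.time_since_epoch()).count();
        const std::int64_t duration_ms_manual = (now_us - start_us) / 1000;

        // New style: subtract the time points and let duration_cast do the unit conversion.
        const std::int64_t duration_ms = duration_cast<milliseconds>(now - start).count();

        assert(duration_ms_manual == duration_ms && duration_ms == 1500);
        return 0;
    }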
element.written_bytes = progress_out.written_bytes.load(std::memory_order_relaxed); element.peak_memory_usage = memory_tracker.getPeak() > 0 ? memory_tracker.getPeak() : 0; if (query_context_ptr->getSettingsRef().log_profile_events != 0) - { - element.profile_counters = events; - } + element.profile_counters = std::make_shared( + performance_counters.getPartiallyAtomicSnapshot()); element.status = vinfo.runtime_stats->event_status; element.exception_code = 0; @@ -499,35 +522,18 @@ void ThreadStatus::logToQueryViewsLog(const ViewRuntimeData & vinfo) views_log->add(element); } -void CurrentThread::initializeQuery() +void CurrentThread::attachToGroup(const ThreadGroupStatusPtr & thread_group) { if (unlikely(!current_thread)) return; - current_thread->initializeQuery(); - current_thread->deleter = CurrentThread::defaultThreadDeleter; + current_thread->attachToGroup(thread_group, true); } -void CurrentThread::attachTo(const ThreadGroupStatusPtr & thread_group) +void CurrentThread::attachToGroupIfDetached(const ThreadGroupStatusPtr & thread_group) { if (unlikely(!current_thread)) return; - current_thread->attachQuery(thread_group, true); - current_thread->deleter = CurrentThread::defaultThreadDeleter; -} - -void CurrentThread::attachToIfDetached(const ThreadGroupStatusPtr & thread_group) -{ - if (unlikely(!current_thread)) - return; - current_thread->attachQuery(thread_group, false); - current_thread->deleter = CurrentThread::defaultThreadDeleter; -} - -void CurrentThread::attachQueryContext(ContextPtr query_context) -{ - if (unlikely(!current_thread)) - return; - current_thread->attachQueryContext(query_context); + current_thread->attachToGroup(thread_group, false); } void CurrentThread::finalizePerformanceCounters() @@ -537,28 +543,20 @@ void CurrentThread::finalizePerformanceCounters() current_thread->finalizePerformanceCounters(); } -void CurrentThread::detachQuery() +void CurrentThread::detachFromGroupIfNotDetached() { if (unlikely(!current_thread)) return; - current_thread->detachQuery(false); + current_thread->detachFromGroup(); } -void CurrentThread::detachQueryIfNotDetached() -{ - if (unlikely(!current_thread)) - return; - current_thread->detachQuery(true); -} - - CurrentThread::QueryScope::QueryScope(ContextMutablePtr query_context, std::function fatal_error_callback) { - CurrentThread::initializeQuery(); - CurrentThread::attachQueryContext(query_context); if (!query_context->hasQueryContext()) query_context->makeQueryContext(); - setFatalErrorCallback(fatal_error_callback); + + auto group = ThreadGroupStatus::createForQuery(query_context, std::move(fatal_error_callback)); + CurrentThread::attachToGroup(group); } CurrentThread::QueryScope::QueryScope(ContextPtr query_context, std::function fatal_error_callback) @@ -567,9 +565,8 @@ CurrentThread::QueryScope::QueryScope(ContextPtr query_context, std::functionmakeQueryContext(); query_context->setCurrentQueryId(query_id); + chassert(&DB::CurrentThread::get() == &thread_status); DB::CurrentThread::QueryScope query_scope_holder(query_context); DB::FileCacheSettings settings; @@ -398,8 +399,8 @@ TEST_F(FileCacheTest, get) auto query_context_1 = DB::Context::createCopy(getContext().context); query_context_1->makeQueryContext(); query_context_1->setCurrentQueryId("query_id_1"); + chassert(&DB::CurrentThread::get() == &thread_status_1); DB::CurrentThread::QueryScope query_scope_holder_1(query_context_1); - thread_status_1.attachQueryContext(query_context_1); auto holder_2 = cache.getOrSet(key, 25, 5, {}); /// Get [25, 29] once again. 
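Note: the call sites touched below (threadPoolCallbackRunner, the pipeline executors, the parallel formatting and parsing threads) all follow one pattern: attach the worker thread to the query's thread group at the top of the thread function and detach inside a SCOPE_EXIT_SAFE guard so the detach also runs when the body throws. The sketch below expresses the same guarantee as an RAII guard in plain C++; AttachGuard and the printed messages are illustrative only, the patch itself uses CurrentThread::attachToGroup / detachFromGroupIfNotDetached inside ClickHouse's SCOPE_EXIT_SAFE macro.

    #include <iostream>
    #include <memory>
    #include <thread>

    struct ThreadGroup {};  // stand-in for the real thread group object
    using ThreadGroupPtr = std::shared_ptr<ThreadGroup>;

    // RAII guard: attach on construction, always detach on destruction,
    // including when the worker body exits via an exception.
    class AttachGuard
    {
    public:
        explicit AttachGuard(const ThreadGroupPtr & group) : attached(group != nullptr)
        {
            if (attached)
                std::cout << "attach to group\n";   // CurrentThread::attachToGroup(group) in the patch
        }

        ~AttachGuard()
        {
            if (attached)
                std::cout << "detach from group\n"; // CurrentThread::detachFromGroupIfNotDetached()
        }

        AttachGuard(const AttachGuard &) = delete;
        AttachGuard & operator=(const AttachGuard &) = delete;

    private:
        bool attached;
    };

    int main()
    {
        auto group = std::make_shared<ThreadGroup>();

        std::thread worker([group]
        {
            AttachGuard guard(group);
            // ... run the pipeline / format / parse work here ...
        });
        worker.join();
        return 0;
    }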
auto segments_2 = fromHolder(holder_2); @@ -467,8 +468,8 @@ TEST_F(FileCacheTest, get) auto query_context_1 = DB::Context::createCopy(getContext().context); query_context_1->makeQueryContext(); query_context_1->setCurrentQueryId("query_id_1"); + chassert(&DB::CurrentThread::get() == &thread_status_1); DB::CurrentThread::QueryScope query_scope_holder_1(query_context_1); - thread_status_1.attachQueryContext(query_context_1); auto holder_2 = cache.getOrSet(key, 3, 23, {}); /// Get [3, 25] once again auto segments_2 = fromHolder(*holder); diff --git a/src/Interpreters/threadPoolCallbackRunner.h b/src/Interpreters/threadPoolCallbackRunner.h index 1d2f67e8e59..6151f7b1de6 100644 --- a/src/Interpreters/threadPoolCallbackRunner.h +++ b/src/Interpreters/threadPoolCallbackRunner.h @@ -22,7 +22,7 @@ ThreadPoolCallbackRunner threadPoolCallbackRunner(ThreadPool & auto task = std::make_shared>([thread_group, thread_name, callback = std::move(callback)]() mutable -> Result { if (thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); SCOPE_EXIT_SAFE({ { @@ -33,7 +33,7 @@ ThreadPoolCallbackRunner threadPoolCallbackRunner(ThreadPool & } if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); }); diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 72353a42a87..a6354cd0e81 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -2151,8 +2151,9 @@ bool ParserTTLElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_set("SET"); ParserKeyword s_recompress("RECOMPRESS"); ParserKeyword s_codec("CODEC"); - ParserToken s_comma(TokenType::Comma); - ParserToken s_eq(TokenType::Equals); + ParserKeyword s_materialize("MATERIALIZE"); + ParserKeyword s_remove("REMOVE"); + ParserKeyword s_modify("MODIFY"); ParserIdentifier parser_identifier; ParserStringLiteral parser_string_literal; @@ -2160,8 +2161,11 @@ bool ParserTTLElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserExpressionList parser_keys_list(false); ParserCodec parser_codec; - ParserList parser_assignment_list( - std::make_unique(), std::make_unique(TokenType::Comma)); + if (s_materialize.checkWithoutMoving(pos, expected) || + s_remove.checkWithoutMoving(pos, expected) || + s_modify.checkWithoutMoving(pos, expected)) + + return false; ASTPtr ttl_expr; if (!parser_exp.parse(pos, ttl_expr, expected)) @@ -2219,6 +2223,9 @@ bool ParserTTLElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (s_set.ignore(pos)) { + ParserList parser_assignment_list( + std::make_unique(), std::make_unique(TokenType::Comma)); + if (!parser_assignment_list.parse(pos, group_by_assignments, expected)) return false; } diff --git a/src/Processors/Executors/CompletedPipelineExecutor.cpp b/src/Processors/Executors/CompletedPipelineExecutor.cpp index 22b924337c5..e624ecd52de 100644 --- a/src/Processors/Executors/CompletedPipelineExecutor.cpp +++ b/src/Processors/Executors/CompletedPipelineExecutor.cpp @@ -36,14 +36,14 @@ static void threadFunction(CompletedPipelineExecutor::Data & data, ThreadGroupSt { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); setThreadName("QueryCompPipeEx"); try { if (thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); data.executor->execute(num_threads); } diff --git 
a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index f1e044e470b..313a5139581 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -308,12 +308,12 @@ void PipelineExecutor::spawnThreads() SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); setThreadName("QueryPipelineEx"); if (thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); try { diff --git a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp index 0a7a9025b30..95ed4eb813d 100644 --- a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp @@ -71,14 +71,14 @@ static void threadFunction(PullingAsyncPipelineExecutor::Data & data, ThreadGrou { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); setThreadName("QueryPullPipeEx"); try { if (thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); data.executor->execute(num_threads); } diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp index 4478f1548a4..3aec7608e6d 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp @@ -101,14 +101,14 @@ static void threadFunction(PushingAsyncPipelineExecutor::Data & data, ThreadGrou { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); setThreadName("QueryPushPipeEx"); try { if (thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); data.executor->execute(num_threads); } diff --git a/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp b/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp index 96ed2a7021e..27fd7ac1575 100644 --- a/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp +++ b/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp @@ -1,6 +1,4 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #include "ArrowBufferedStreams.h" diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp index 35d4dd9ddcd..3fc57ca1c1e 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp @@ -100,11 +100,11 @@ namespace DB { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); setThreadName("Collector"); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); try { @@ -161,11 +161,11 @@ namespace DB { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); setThreadName("Formatter"); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); try { diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 
293bf4f73f3..5ba32251a71 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -12,10 +12,10 @@ void ParallelParsingInputFormat::segmentatorThreadFunction(ThreadGroupStatusPtr { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachTo(thread_group); + CurrentThread::attachToGroup(thread_group); setThreadName("Segmentator"); try @@ -62,10 +62,10 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); const auto parser_unit_number = current_ticket_number % processing_units.size(); auto & unit = processing_units[parser_unit_number]; diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp index 9bf351442b2..69dfa05899b 100644 --- a/src/Processors/QueryPlan/AggregatingStep.cpp +++ b/src/Processors/QueryPlan/AggregatingStep.cpp @@ -38,7 +38,6 @@ static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_ return ITransformingStep::Traits { { - .preserves_distinct_columns = false, /// Actually, we may check that distinct names are in aggregation keys .returns_single_stream = should_produce_results_in_order_of_bucket_number, .preserves_number_of_streams = false, .preserves_sorting = false, diff --git a/src/Processors/QueryPlan/ArrayJoinStep.cpp b/src/Processors/QueryPlan/ArrayJoinStep.cpp index bd1908a4a6d..23a0a756f0d 100644 --- a/src/Processors/QueryPlan/ArrayJoinStep.cpp +++ b/src/Processors/QueryPlan/ArrayJoinStep.cpp @@ -14,7 +14,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = false, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = false, diff --git a/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp b/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp index 53dcec9ef0a..07137e87736 100644 --- a/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp +++ b/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp @@ -40,7 +40,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/CreatingSetsStep.cpp b/src/Processors/QueryPlan/CreatingSetsStep.cpp index 23e0a17a31b..b696b77ccfe 100644 --- a/src/Processors/QueryPlan/CreatingSetsStep.cpp +++ b/src/Processors/QueryPlan/CreatingSetsStep.cpp @@ -21,7 +21,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/CubeStep.cpp b/src/Processors/QueryPlan/CubeStep.cpp index 03f952ac782..0c632c346c7 100644 --- a/src/Processors/QueryPlan/CubeStep.cpp +++ b/src/Processors/QueryPlan/CubeStep.cpp @@ -14,7 +14,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = false, .returns_single_stream = true, 
.preserves_number_of_streams = false, .preserves_sorting = false, @@ -32,9 +31,6 @@ CubeStep::CubeStep(const DataStream & input_stream_, Aggregator::Params params_, , final(final_) , use_nulls(use_nulls_) { - /// Aggregation keys are distinct - for (const auto & key : params.keys) - output_stream->distinct_columns.insert(key); } ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, bool use_nulls, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number) @@ -89,9 +85,5 @@ void CubeStep::updateOutputStream() { output_stream = createOutputStream( input_streams.front(), generateOutputHeader(params.getHeader(input_streams.front().header, final), params.keys, use_nulls), getDataStreamTraits()); - - /// Aggregation keys are distinct - for (const auto & key : params.keys) - output_stream->distinct_columns.insert(key); } } diff --git a/src/Processors/QueryPlan/DistinctStep.cpp b/src/Processors/QueryPlan/DistinctStep.cpp index 323ef0bbdab..15ed02b700e 100644 --- a/src/Processors/QueryPlan/DistinctStep.cpp +++ b/src/Processors/QueryPlan/DistinctStep.cpp @@ -10,28 +10,13 @@ namespace DB { -static bool checkColumnsAlreadyDistinct(const Names & columns, const NameSet & distinct_names) -{ - if (distinct_names.empty()) - return false; - - /// Now we need to check that distinct_names is a subset of columns. - std::unordered_set columns_set(columns.begin(), columns.end()); - for (const auto & name : distinct_names) - if (!columns_set.contains(name)) - return false; - - return true; -} - -static ITransformingStep::Traits getTraits(bool pre_distinct, bool already_distinct_columns) +static ITransformingStep::Traits getTraits(bool pre_distinct) { return ITransformingStep::Traits { { - .preserves_distinct_columns = already_distinct_columns, /// Will be calculated separately otherwise - .returns_single_stream = !pre_distinct && !already_distinct_columns, - .preserves_number_of_streams = pre_distinct || already_distinct_columns, + .returns_single_stream = !pre_distinct, + .preserves_number_of_streams = pre_distinct, .preserves_sorting = true, /// Sorting is preserved indeed because of implementation. }, { @@ -62,34 +47,23 @@ DistinctStep::DistinctStep( : ITransformingStep( input_stream_, input_stream_.header, - getTraits(pre_distinct_, checkColumnsAlreadyDistinct(columns_, input_stream_.distinct_columns))) + getTraits(pre_distinct_)) , set_size_limits(set_size_limits_) , limit_hint(limit_hint_) , columns(columns_) , pre_distinct(pre_distinct_) , optimize_distinct_in_order(optimize_distinct_in_order_) { - if (!output_stream->distinct_columns.empty() /// Columns already distinct, do nothing - && (!pre_distinct /// Main distinct - || input_stream_.has_single_port)) /// pre_distinct for single port works as usual one - { - /// Build distinct set. 
- for (const auto & name : columns) - output_stream->distinct_columns.insert(name); - } } void DistinctStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - const auto & input_stream = input_streams.back(); - if (checkColumnsAlreadyDistinct(columns, input_stream.distinct_columns)) - return; - if (!pre_distinct) pipeline.resize(1); if (optimize_distinct_in_order) { + const auto & input_stream = input_streams.back(); const SortDescription distinct_sort_desc = getSortDescription(input_stream.sort_description, columns); if (!distinct_sort_desc.empty()) { @@ -197,16 +171,7 @@ void DistinctStep::updateOutputStream() output_stream = createOutputStream( input_streams.front(), input_streams.front().header, - getTraits(pre_distinct, checkColumnsAlreadyDistinct(columns, input_streams.front().distinct_columns)).data_stream_traits); - - if (!output_stream->distinct_columns.empty() /// Columns already distinct, do nothing - && (!pre_distinct /// Main distinct - || input_streams.front().has_single_port)) /// pre_distinct for single port works as usual one - { - /// Build distinct set. - for (const auto & name : columns) - output_stream->distinct_columns.insert(name); - } + getTraits(pre_distinct).data_stream_traits); } } diff --git a/src/Processors/QueryPlan/ExpressionStep.cpp b/src/Processors/QueryPlan/ExpressionStep.cpp index dcfa6e5a891..250a1733caa 100644 --- a/src/Processors/QueryPlan/ExpressionStep.cpp +++ b/src/Processors/QueryPlan/ExpressionStep.cpp @@ -15,7 +15,6 @@ static ITransformingStep::Traits getTraits(const ActionsDAGPtr & actions, const return ITransformingStep::Traits { { - .preserves_distinct_columns = !actions->hasArrayJoin(), .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = actions->isSortingPreserved(header, sort_description), @@ -33,8 +32,6 @@ ExpressionStep::ExpressionStep(const DataStream & input_stream_, const ActionsDA getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description)) , actions_dag(actions_dag_) { - /// Some columns may be removed by expression. - updateDistinctColumns(output_stream->header, output_stream->distinct_columns); } void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) @@ -63,22 +60,9 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu void ExpressionStep::describeActions(FormatSettings & settings) const { - String prefix(settings.offset, ' '); - bool first = true; - + String prefix(settings.offset, settings.indent_char); auto expression = std::make_shared(actions_dag); - for (const auto & action : expression->getActions()) - { - settings.out << prefix << (first ? 
"Actions: " - : " "); - first = false; - settings.out << action.toString() << '\n'; - } - - settings.out << prefix << "Positions:"; - for (const auto & pos : expression->getResultPositions()) - settings.out << ' ' << pos; - settings.out << '\n'; + expression->describeActions(settings.out, prefix); } void ExpressionStep::describeActions(JSONBuilder::JSONMap & map) const diff --git a/src/Processors/QueryPlan/ExtremesStep.cpp b/src/Processors/QueryPlan/ExtremesStep.cpp index 4524b9883d6..010a82072cf 100644 --- a/src/Processors/QueryPlan/ExtremesStep.cpp +++ b/src/Processors/QueryPlan/ExtremesStep.cpp @@ -9,7 +9,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/FillingStep.cpp b/src/Processors/QueryPlan/FillingStep.cpp index 13b7ca625fb..20d7d6d0f8f 100644 --- a/src/Processors/QueryPlan/FillingStep.cpp +++ b/src/Processors/QueryPlan/FillingStep.cpp @@ -17,7 +17,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = false, /// TODO: it seem to actually be true. Check it later. .returns_single_stream = true, .preserves_number_of_streams = true, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 4699a7c1908..dc837446a96 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -23,7 +23,6 @@ static ITransformingStep::Traits getTraits(const ActionsDAGPtr & expression, con return ITransformingStep::Traits { { - .preserves_distinct_columns = !expression->hasArrayJoin(), /// I suppose it actually never happens .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = preserves_sorting, @@ -51,8 +50,6 @@ FilterStep::FilterStep( , filter_column_name(std::move(filter_column_name_)) , remove_filter_column(remove_filter_column_) { - /// TODO: it would be easier to remove all expressions from filter step. It should only filter by column name. - updateDistinctColumns(output_stream->header, output_stream->distinct_columns); } void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) @@ -82,27 +79,15 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ void FilterStep::describeActions(FormatSettings & settings) const { - String prefix(settings.offset, ' '); + String prefix(settings.offset, settings.indent_char); settings.out << prefix << "Filter column: " << filter_column_name; if (remove_filter_column) settings.out << " (removed)"; settings.out << '\n'; - bool first = true; auto expression = std::make_shared(actions_dag); - for (const auto & action : expression->getActions()) - { - settings.out << prefix << (first ? 
"Actions: " - : " "); - first = false; - settings.out << action.toString() << '\n'; - } - - settings.out << prefix << "Positions:"; - for (const auto & pos : expression->getResultPositions()) - settings.out << ' ' << pos; - settings.out << '\n'; + expression->describeActions(settings.out, prefix); } void FilterStep::describeActions(JSONBuilder::JSONMap & map) const diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index 316ecff9c2e..a608c6f8058 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -23,11 +23,6 @@ class DataStream public: Block header; - /// Tuples with those columns are distinct. - /// It doesn't mean that columns are distinct separately. - /// Removing any column from this list breaks this invariant. - NameSet distinct_columns = {}; - /// QueryPipeline has single port. Totals or extremes ports are not counted. bool has_single_port = false; @@ -51,8 +46,7 @@ public: bool hasEqualPropertiesWith(const DataStream & other) const { - return distinct_columns == other.distinct_columns - && has_single_port == other.has_single_port + return has_single_port == other.has_single_port && sort_description == other.sort_description && (sort_description.empty() || sort_scope == other.sort_scope); } diff --git a/src/Processors/QueryPlan/ITransformingStep.cpp b/src/Processors/QueryPlan/ITransformingStep.cpp index 195fa9ad68c..9ecfdb0af22 100644 --- a/src/Processors/QueryPlan/ITransformingStep.cpp +++ b/src/Processors/QueryPlan/ITransformingStep.cpp @@ -20,9 +20,6 @@ DataStream ITransformingStep::createOutputStream( { DataStream output_stream{.header = std::move(output_header)}; - if (stream_traits.preserves_distinct_columns) - output_stream.distinct_columns = input_stream.distinct_columns; - output_stream.has_single_port = stream_traits.returns_single_stream || (input_stream.has_single_port && stream_traits.preserves_number_of_streams); @@ -50,21 +47,6 @@ QueryPipelineBuilderPtr ITransformingStep::updatePipeline(QueryPipelineBuilders return std::move(pipelines.front()); } -void ITransformingStep::updateDistinctColumns(const Block & res_header, NameSet & distinct_columns) -{ - if (distinct_columns.empty()) - return; - - for (const auto & column : distinct_columns) - { - if (!res_header.has(column)) - { - distinct_columns.clear(); - break; - } - } -} - void ITransformingStep::describePipeline(FormatSettings & settings) const { IQueryPlanStep::describePipeline(processors, settings); diff --git a/src/Processors/QueryPlan/ITransformingStep.h b/src/Processors/QueryPlan/ITransformingStep.h index 1513b4307f8..77de668fbdb 100644 --- a/src/Processors/QueryPlan/ITransformingStep.h +++ b/src/Processors/QueryPlan/ITransformingStep.h @@ -18,11 +18,6 @@ public: /// They are specified in constructor and cannot be changed. struct DataStreamTraits { - /// Keep distinct_columns unchanged. - /// Examples: true for LimitStep, false for ExpressionStep with ARRAY JOIN - /// It some columns may be removed from result header, call updateDistinctColumns - bool preserves_distinct_columns; - /// True if pipeline has single output port after this step. 
/// Examples: MergeSortingStep, AggregatingStep bool returns_single_stream; @@ -69,8 +64,6 @@ public: input_streams.emplace_back(std::move(input_stream)); updateOutputStream(); - - updateDistinctColumns(output_stream->header, output_stream->distinct_columns); } void describePipeline(FormatSettings & settings) const override; @@ -83,9 +76,6 @@ public: } protected: - /// Clear distinct_columns if res_header doesn't contain all of them. - static void updateDistinctColumns(const Block & res_header, NameSet & distinct_columns); - /// Create output stream from header and traits. static DataStream createOutputStream( const DataStream & input_stream, diff --git a/src/Processors/QueryPlan/JoinStep.cpp b/src/Processors/QueryPlan/JoinStep.cpp index 6e212a53bc6..2ff8f161e99 100644 --- a/src/Processors/QueryPlan/JoinStep.cpp +++ b/src/Processors/QueryPlan/JoinStep.cpp @@ -83,7 +83,6 @@ static ITransformingStep::Traits getStorageJoinTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = false, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = false, diff --git a/src/Processors/QueryPlan/LimitByStep.cpp b/src/Processors/QueryPlan/LimitByStep.cpp index 39086e995fc..8b4abecc12c 100644 --- a/src/Processors/QueryPlan/LimitByStep.cpp +++ b/src/Processors/QueryPlan/LimitByStep.cpp @@ -12,7 +12,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = true, .preserves_number_of_streams = false, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/LimitStep.cpp b/src/Processors/QueryPlan/LimitStep.cpp index 144ac16f0d5..5e5a7387832 100644 --- a/src/Processors/QueryPlan/LimitStep.cpp +++ b/src/Processors/QueryPlan/LimitStep.cpp @@ -12,7 +12,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/MergingAggregatedStep.cpp b/src/Processors/QueryPlan/MergingAggregatedStep.cpp index e4fc332a1fd..8b5f21442b1 100644 --- a/src/Processors/QueryPlan/MergingAggregatedStep.cpp +++ b/src/Processors/QueryPlan/MergingAggregatedStep.cpp @@ -24,7 +24,6 @@ static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_ return ITransformingStep::Traits { { - .preserves_distinct_columns = false, .returns_single_stream = should_produce_results_in_order_of_bucket_number, .preserves_number_of_streams = false, .preserves_sorting = false, @@ -62,10 +61,6 @@ MergingAggregatedStep::MergingAggregatedStep( , should_produce_results_in_order_of_bucket_number(should_produce_results_in_order_of_bucket_number_) , memory_bound_merging_of_aggregation_results_enabled(memory_bound_merging_of_aggregation_results_enabled_) { - /// Aggregation keys are distinct - for (const auto & key : params.keys) - output_stream->distinct_columns.insert(key); - if (memoryBoundMergingWillBeUsed() && should_produce_results_in_order_of_bucket_number) { output_stream->sort_description = group_by_sort_description; @@ -157,10 +152,6 @@ void MergingAggregatedStep::describeActions(JSONBuilder::JSONMap & map) const void MergingAggregatedStep::updateOutputStream() { output_stream = createOutputStream(input_streams.front(), params.getHeader(input_streams.front().header, final), getDataStreamTraits()); - - /// Aggregation keys are distinct - for (const auto & key : params.keys) - 
output_stream->distinct_columns.insert(key); } bool MergingAggregatedStep::memoryBoundMergingWillBeUsed() const diff --git a/src/Processors/QueryPlan/MergingAggregatedStep.h b/src/Processors/QueryPlan/MergingAggregatedStep.h index 2dea289ca89..3a7e2b66183 100644 --- a/src/Processors/QueryPlan/MergingAggregatedStep.h +++ b/src/Processors/QueryPlan/MergingAggregatedStep.h @@ -27,6 +27,7 @@ public: bool memory_bound_merging_of_aggregation_results_enabled_); String getName() const override { return "MergingAggregated"; } + const Aggregator::Params & getParams() const { return params; } void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; diff --git a/src/Processors/QueryPlan/OffsetStep.cpp b/src/Processors/QueryPlan/OffsetStep.cpp index e0c70ba2f28..4bbe81f9169 100644 --- a/src/Processors/QueryPlan/OffsetStep.cpp +++ b/src/Processors/QueryPlan/OffsetStep.cpp @@ -12,7 +12,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp index 02725dc3122..c9a0270f6e7 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -100,24 +101,29 @@ namespace logDebug("aggregation_keys", aggregation_keys); logDebug("aggregation_keys size", aggregation_keys.size()); logDebug("distinct_columns size", distinct_columns.size()); - if (aggregation_keys.size() != distinct_columns.size()) - return false; - /// compare columns of two DISTINCTs + std::set original_distinct_columns; for (const auto & column : distinct_columns) { logDebug("distinct column name", column); const auto * alias_node = getOriginalNodeForOutputAlias(path_actions, String(column)); if (!alias_node) { - logDebug("original name for alias is not found for", column); - return false; + logDebug("original name for alias is not found", column); + original_distinct_columns.insert(column); } - - logDebug("alias result name", alias_node->result_name); - if (std::find(cbegin(aggregation_keys), cend(aggregation_keys), alias_node->result_name) == aggregation_keys.cend()) + else { - logDebug("alias result name is not found in aggregation keys", alias_node->result_name); + logDebug("alias result name", alias_node->result_name); + original_distinct_columns.insert(alias_node->result_name); + } + } + /// if aggregation keys are part of distinct columns then rows already distinct + for (const auto & key : aggregation_keys) + { + if (!original_distinct_columns.contains(key)) + { + logDebug("aggregation key NOT found: {}", key); return false; } } @@ -176,7 +182,7 @@ namespace while (!node->children.empty()) { const IQueryPlanStep * current_step = node->step.get(); - if (typeid_cast(current_step)) + if (typeid_cast(current_step) || typeid_cast(current_step)) { aggregation_before_distinct = current_step; break; @@ -208,6 +214,9 @@ namespace if (const auto * aggregating_step = typeid_cast(aggregation_before_distinct); aggregating_step) return compareAggregationKeysWithDistinctColumns(aggregating_step->getParams().keys, distinct_columns, actions); + else if (const auto * merging_aggregated_step = typeid_cast(aggregation_before_distinct); + 
merging_aggregated_step) + return compareAggregationKeysWithDistinctColumns(merging_aggregated_step->getParams().keys, distinct_columns, actions); } return false; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 51afe96545d..753bb070c47 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -340,57 +340,55 @@ Pipe ReadFromMergeTree::readFromPool( / max_block_size * max_block_size / fixed_index_granularity; } - bool all_parts_are_remote = true; - bool all_parts_are_local = true; - for (const auto & part : parts_with_range) - { - const bool is_remote = part.data_part->isStoredOnRemoteDisk(); - all_parts_are_local &= !is_remote; - all_parts_are_remote &= is_remote; - } + bool all_parts_are_remote = true; + bool all_parts_are_local = true; + for (const auto & part : parts_with_range) + { + const bool is_remote = part.data_part->isStoredOnRemoteDisk(); + all_parts_are_local &= !is_remote; + all_parts_are_remote &= is_remote; + } - MergeTreeReadPoolPtr pool; + MergeTreeReadPoolPtr pool; - if ((all_parts_are_remote - && settings.allow_prefetched_read_pool_for_remote_filesystem - && MergeTreePrefetchedReadPool::checkReadMethodAllowed(reader_settings.read_settings.remote_fs_method)) - || (all_parts_are_local - && settings.allow_prefetched_read_pool_for_local_filesystem - && MergeTreePrefetchedReadPool::checkReadMethodAllowed(reader_settings.read_settings.local_fs_method))) - { - pool = std::make_shared( - max_streams, - sum_marks, - min_marks_for_concurrent_read, - std::move(parts_with_range), - storage_snapshot, - prewhere_info, - actions_settings, - required_columns, - virt_column_names, - settings.preferred_block_size_bytes, - reader_settings, - context, - use_uncompressed_cache, - all_parts_are_remote, - *data.getSettings()); - } - else - { - pool = std::make_shared( - max_streams, - sum_marks, - min_marks_for_concurrent_read, - std::move(parts_with_range), - storage_snapshot, - prewhere_info, - actions_settings, - reader_settings, - required_columns, - virt_column_names, - context, - false); - } + if ((all_parts_are_remote && settings.allow_prefetched_read_pool_for_remote_filesystem + && MergeTreePrefetchedReadPool::checkReadMethodAllowed(reader_settings.read_settings.remote_fs_method)) + || (all_parts_are_local && settings.allow_prefetched_read_pool_for_local_filesystem + && MergeTreePrefetchedReadPool::checkReadMethodAllowed(reader_settings.read_settings.local_fs_method))) + { + pool = std::make_shared( + max_streams, + sum_marks, + min_marks_for_concurrent_read, + std::move(parts_with_range), + storage_snapshot, + prewhere_info, + actions_settings, + required_columns, + virt_column_names, + settings.preferred_block_size_bytes, + reader_settings, + context, + use_uncompressed_cache, + all_parts_are_remote, + *data.getSettings()); + } + else + { + pool = std::make_shared( + max_streams, + sum_marks, + min_marks_for_concurrent_read, + std::move(parts_with_range), + storage_snapshot, + prewhere_info, + actions_settings, + reader_settings, + required_columns, + virt_column_names, + context, + false); + } auto * logger = &Poco::Logger::get(data.getLogName() + " (SelectExecutor)"); LOG_DEBUG(logger, "Reading approx. 
{} rows with {} streams", total_rows, max_streams); @@ -1732,6 +1730,36 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << prefix << "Parts: " << result.index_stats.back().num_parts_after << '\n'; format_settings.out << prefix << "Granules: " << result.index_stats.back().num_granules_after << '\n'; } + + if (prewhere_info) + { + format_settings.out << prefix << "Prewhere info" << '\n'; + format_settings.out << prefix << "Need filter: " << prewhere_info->need_filter << '\n'; + + prefix.push_back(format_settings.indent_char); + prefix.push_back(format_settings.indent_char); + + if (prewhere_info->prewhere_actions) + { + format_settings.out << prefix << "Prewhere filter" << '\n'; + format_settings.out << prefix << "Prewhere filter column: " << prewhere_info->prewhere_column_name; + if (prewhere_info->remove_prewhere_column) + format_settings.out << " (removed)"; + format_settings.out << '\n'; + + auto expression = std::make_shared(prewhere_info->prewhere_actions); + expression->describeActions(format_settings.out, prefix); + } + + if (prewhere_info->row_level_filter) + { + format_settings.out << prefix << "Row level filter" << '\n'; + format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; + + auto expression = std::make_shared(prewhere_info->row_level_filter); + expression->describeActions(format_settings.out, prefix); + } + } } void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const @@ -1743,6 +1771,35 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const map.add("Parts", result.index_stats.back().num_parts_after); map.add("Granules", result.index_stats.back().num_granules_after); } + + if (prewhere_info) + { + std::unique_ptr prewhere_info_map = std::make_unique(); + prewhere_info_map->add("Need filter", prewhere_info->need_filter); + + if (prewhere_info->prewhere_actions) + { + std::unique_ptr prewhere_filter_map = std::make_unique(); + prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); + prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); + auto expression = std::make_shared(prewhere_info->prewhere_actions); + prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); + + prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); + } + + if (prewhere_info->row_level_filter) + { + std::unique_ptr row_level_filter_map = std::make_unique(); + row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); + auto expression = std::make_shared(prewhere_info->row_level_filter); + row_level_filter_map->add("Row level filter expression", expression->toTree()); + + prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); + } + + map.add("Prewhere info", std::move(prewhere_info_map)); + } } void ReadFromMergeTree::describeIndexes(FormatSettings & format_settings) const diff --git a/src/Processors/QueryPlan/RollupStep.cpp b/src/Processors/QueryPlan/RollupStep.cpp index 3305f24602f..136690ccfc0 100644 --- a/src/Processors/QueryPlan/RollupStep.cpp +++ b/src/Processors/QueryPlan/RollupStep.cpp @@ -11,7 +11,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = false, .returns_single_stream = true, .preserves_number_of_streams = false, .preserves_sorting = false, @@ -29,9 +28,6 @@ RollupStep::RollupStep(const DataStream & 
input_stream_, Aggregator::Params para , final(final_) , use_nulls(use_nulls_) { - /// Aggregation keys are distinct - for (const auto & key : params.keys) - output_stream->distinct_columns.insert(key); } ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, bool use_nulls, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number); @@ -54,10 +50,6 @@ void RollupStep::updateOutputStream() { output_stream = createOutputStream( input_streams.front(), appendGroupingSetColumn(params.getHeader(input_streams.front().header, final)), getDataStreamTraits()); - - /// Aggregation keys are distinct - for (const auto & key : params.keys) - output_stream->distinct_columns.insert(key); } diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 0308e320e3a..0ab8e091e05 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -45,7 +45,6 @@ static ITransformingStep::Traits getTraits(size_t limit) return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = true, .preserves_number_of_streams = false, .preserves_sorting = false, diff --git a/src/Processors/QueryPlan/TotalsHavingStep.cpp b/src/Processors/QueryPlan/TotalsHavingStep.cpp index 63991655426..d1bd70fd0b2 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.cpp +++ b/src/Processors/QueryPlan/TotalsHavingStep.cpp @@ -14,7 +14,6 @@ static ITransformingStep::Traits getTraits(bool has_filter) return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = true, .preserves_number_of_streams = false, .preserves_sorting = true, diff --git a/src/Processors/QueryPlan/WindowStep.cpp b/src/Processors/QueryPlan/WindowStep.cpp index 92e9948c4c7..d313b210854 100644 --- a/src/Processors/QueryPlan/WindowStep.cpp +++ b/src/Processors/QueryPlan/WindowStep.cpp @@ -15,7 +15,6 @@ static ITransformingStep::Traits getTraits() return ITransformingStep::Traits { { - .preserves_distinct_columns = true, .returns_single_stream = false, .preserves_number_of_streams = true, .preserves_sorting = true, diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index 83dfc01e6b0..3abd2ac3346 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -100,10 +100,10 @@ struct ManyAggregatedData { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); setThreadName("AggregDestruct"); }); diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 503a40ae0e5..4a729863200 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -259,6 +259,114 @@ IProcessor::Status FillingTransform::prepare() return ISimpleTransform::prepare(); } +void FillingTransform::interpolate(const MutableColumns & result_columns, Block & interpolate_block) +{ + if (interpolate_description) + { + interpolate_block.clear(); + + if (!input_positions.empty()) + { + /// populate calculation block with required columns with values from previous row + for (const auto & [col_pos, name_type] : input_positions) + { + MutableColumnPtr column = name_type.type->createColumn(); + const auto * 
res_column = result_columns[col_pos].get(); + size_t size = res_column->size(); + if (size == 0) /// this is the first row in current chunk + { + /// take value from last row of previous chunk if exists, else use default + if (last_row.size() > col_pos && !last_row[col_pos]->empty()) + column->insertFrom(*last_row[col_pos], 0); + else + column->insertDefault(); + } + else /// take value from previous row of current chunk + column->insertFrom(*res_column, size - 1); + + interpolate_block.insert({std::move(column), name_type.type, name_type.name}); + } + interpolate_actions->execute(interpolate_block); + } + else /// all INTERPOLATE expressions are constants + { + size_t n = 1; + interpolate_actions->execute(interpolate_block, n); + } + } +} + +using MutableColumnRawPtrs = std::vector; + +static void insertFromFillingRow(const MutableColumnRawPtrs & filling_columns, const MutableColumnRawPtrs & interpolate_columns, const MutableColumnRawPtrs & other_columns, + const FillingRow & filling_row, const Block & interpolate_block) +{ + for (size_t i = 0, size = filling_columns.size(); i < size; ++i) + { + if (filling_row[i].isNull()) + filling_columns[i]->insertDefault(); + else + filling_columns[i]->insert(filling_row[i]); + } + + if (size_t size = interpolate_block.columns()) + { + Columns columns = interpolate_block.getColumns(); + for (size_t i = 0; i < size; ++i) + interpolate_columns[i]->insertFrom(*columns[i]->convertToFullColumnIfConst(), 0); + } + else + for (auto * interpolate_column : interpolate_columns) + interpolate_column->insertDefault(); + + for (auto * other_column : other_columns) + other_column->insertDefault(); +} + +static void copyRowFromColumns(const MutableColumnRawPtrs & dest, const Columns & source, size_t row_num) +{ + for (size_t i = 0, size = source.size(); i < size; ++i) + dest[i]->insertFrom(*source[i], row_num); +} + +static void initColumnsByPositions( + const Columns & input_columns, + Columns & input_columns_by_positions, + const MutableColumns & output_columns, + MutableColumnRawPtrs & output_columns_by_position, + const std::vector & positions) +{ + for (size_t pos : positions) + { + input_columns_by_positions.push_back(input_columns[pos]); + output_columns_by_position.push_back(output_columns[pos].get()); + } +} + +void FillingTransform::initColumns( + const Columns & input_columns, + Columns & input_fill_columns, + Columns & input_interpolate_columns, + Columns & input_other_columns, + MutableColumns & output_columns, + MutableColumnRawPtrs & output_fill_columns, + MutableColumnRawPtrs & output_interpolate_columns, + MutableColumnRawPtrs & output_other_columns) +{ + Columns non_const_columns; + non_const_columns.reserve(input_columns.size()); + + for (const auto & column : input_columns) + non_const_columns.push_back(column->convertToFullColumnIfConst()); + + for (const auto & column : non_const_columns) + output_columns.push_back(column->cloneEmpty()->assumeMutable()); + + initColumnsByPositions(non_const_columns, input_fill_columns, output_columns, output_fill_columns, fill_column_positions); + initColumnsByPositions( + non_const_columns, input_interpolate_columns, output_columns, output_interpolate_columns, interpolate_column_positions); + initColumnsByPositions(non_const_columns, input_other_columns, output_columns, output_other_columns, other_column_positions); +} void FillingTransform::transform(Chunk & chunk) { @@ -268,97 +376,58 @@ void FillingTransform::transform(Chunk & chunk) Columns old_fill_columns; Columns old_interpolate_columns; Columns 
old_other_columns; - MutableColumns res_fill_columns; - MutableColumns res_interpolate_columns; - MutableColumns res_other_columns; - - std::vector> res_map; - res_map.resize(input.getHeader().columns()); - - auto init_columns_by_positions = [&res_map](const Columns & old_columns, Columns & new_columns, - MutableColumns & new_mutable_columns, const Positions & positions) - { - for (size_t pos : positions) - { - auto old_column = old_columns[pos]->convertToFullColumnIfConst(); - new_columns.push_back(old_column); - res_map[pos] = {&new_mutable_columns, new_mutable_columns.size()}; - new_mutable_columns.push_back(old_column->cloneEmpty()->assumeMutable()); - } - }; + MutableColumnRawPtrs res_fill_columns; + MutableColumnRawPtrs res_interpolate_columns; + MutableColumnRawPtrs res_other_columns; + MutableColumns result_columns; Block interpolate_block; - auto interpolate = [&]() - { - if (interpolate_description) - { - interpolate_block.clear(); - - if (!input_positions.empty()) - { - /// populate calculation block with required columns with values from previous row - for (const auto & [col_pos, name_type] : input_positions) - { - MutableColumnPtr column = name_type.type->createColumn(); - auto [res_columns, pos] = res_map[col_pos]; - size_t size = (*res_columns)[pos]->size(); - if (size == 0) /// this is the first row in current chunk - { - /// take value from last row of previous chunk if exists, else use default - if (last_row.size() > col_pos && !last_row[col_pos]->empty()) - column->insertFrom(*last_row[col_pos], 0); - else - column->insertDefault(); - } - else /// take value from previous row of current chunk - column->insertFrom(*(*res_columns)[pos], size - 1); - - interpolate_block.insert({std::move(column), name_type.type, name_type.name}); - } - interpolate_actions->execute(interpolate_block); - } - else /// all INTERPOLATE expressions are constants - { - size_t n = 1; - interpolate_actions->execute(interpolate_block, n); - } - } - }; - if (generate_suffix) { const auto & empty_columns = input.getHeader().getColumns(); - init_columns_by_positions(empty_columns, old_fill_columns, res_fill_columns, fill_column_positions); - init_columns_by_positions(empty_columns, old_interpolate_columns, res_interpolate_columns, interpolate_column_positions); - init_columns_by_positions(empty_columns, old_other_columns, res_other_columns, other_column_positions); + initColumns( + empty_columns, + old_fill_columns, + old_interpolate_columns, + old_other_columns, + result_columns, + res_fill_columns, + res_interpolate_columns, + res_other_columns); if (first) filling_row.initFromDefaults(); if (should_insert_first && filling_row < next_row) { - interpolate(); + interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, filling_row, interpolate_block); } - interpolate(); + interpolate(result_columns, interpolate_block); while (filling_row.next(next_row)) { insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, filling_row, interpolate_block); - interpolate(); + interpolate(result_columns, interpolate_block); } - setResultColumns(chunk, res_fill_columns, res_interpolate_columns, res_other_columns); + size_t num_output_rows = result_columns[0]->size(); + chunk.setColumns(std::move(result_columns), num_output_rows); return; } - size_t num_rows = chunk.getNumRows(); + const size_t num_rows = chunk.getNumRows(); auto old_columns = chunk.detachColumns(); - - init_columns_by_positions(old_columns, 
old_fill_columns, res_fill_columns, fill_column_positions); - init_columns_by_positions(old_columns, old_interpolate_columns, res_interpolate_columns, interpolate_column_positions); - init_columns_by_positions(old_columns, old_other_columns, res_other_columns, other_column_positions); + initColumns( + old_columns, + old_fill_columns, + old_interpolate_columns, + old_other_columns, + result_columns, + res_fill_columns, + res_interpolate_columns, + res_other_columns); if (first) { @@ -372,7 +441,7 @@ void FillingTransform::transform(Chunk & chunk) filling_row.initFromDefaults(i); if (less(fill_from, current_value, filling_row.getDirection(i))) { - interpolate(); + interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, filling_row, interpolate_block); } break; @@ -386,7 +455,7 @@ void FillingTransform::transform(Chunk & chunk) { should_insert_first = next_row < filling_row; - for (size_t i = 0; i < filling_row.size(); ++i) + for (size_t i = 0, size = filling_row.size(); i < size; ++i) { auto current_value = (*old_fill_columns[i])[row_ind]; const auto & fill_to = filling_row.getFillDescription(i).fill_to; @@ -401,15 +470,15 @@ void FillingTransform::transform(Chunk & chunk) /// and probably we need to insert it to block. if (should_insert_first && filling_row < next_row) { - interpolate(); + interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, filling_row, interpolate_block); } - interpolate(); + interpolate(result_columns, interpolate_block); while (filling_row.next(next_row)) { insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, filling_row, interpolate_block); - interpolate(); + interpolate(result_columns, interpolate_block); } copyRowFromColumns(res_fill_columns, old_fill_columns, row_ind); @@ -417,55 +486,24 @@ void FillingTransform::transform(Chunk & chunk) copyRowFromColumns(res_other_columns, old_other_columns, row_ind); } - saveLastRow(res_fill_columns, res_interpolate_columns, res_other_columns); - setResultColumns(chunk, res_fill_columns, res_interpolate_columns, res_other_columns); + saveLastRow(result_columns); + size_t num_output_rows = result_columns[0]->size(); + chunk.setColumns(std::move(result_columns), num_output_rows); } -void FillingTransform::setResultColumns(Chunk & chunk, MutableColumns & fill_columns, MutableColumns & interpolate_columns, MutableColumns & other_columns) const -{ - MutableColumns result_columns(fill_columns.size() + interpolate_columns.size() + other_columns.size()); - /// fill_columns always non-empty. 
- size_t num_rows = fill_columns[0]->size(); - - for (size_t i = 0, size = fill_columns.size(); i < size; ++i) - result_columns[fill_column_positions[i]] = std::move(fill_columns[i]); - for (size_t i = 0, size = interpolate_columns.size(); i < size; ++i) - result_columns[interpolate_column_positions[i]] = std::move(interpolate_columns[i]); - for (size_t i = 0, size = other_columns.size(); i < size; ++i) - result_columns[other_column_positions[i]] = std::move(other_columns[i]); - - chunk.setColumns(std::move(result_columns), num_rows); -} - -void FillingTransform::saveLastRow(const MutableColumns & fill_columns, const MutableColumns & interpolate_columns, const MutableColumns & other_columns) +void FillingTransform::saveLastRow(const MutableColumns & result_columns) { last_row.clear(); - last_row.resize(fill_columns.size() + interpolate_columns.size() + other_columns.size()); - size_t num_rows = fill_columns[0]->size(); + const size_t num_rows = result_columns[0]->size(); if (num_rows == 0) return; - for (size_t i = 0, size = fill_columns.size(); i < size; ++i) + for (const auto & result_column : result_columns) { - auto column = fill_columns[i]->cloneEmpty(); - column->insertFrom(*fill_columns[i], num_rows - 1); - last_row[fill_column_positions[i]] = std::move(column); - } - - for (size_t i = 0, size = interpolate_columns.size(); i < size; ++i) - { - auto column = interpolate_columns[i]->cloneEmpty(); - column->insertFrom(*interpolate_columns[i], num_rows - 1); - last_row[interpolate_column_positions[i]] = std::move(column); - } - - for (size_t i = 0, size = other_columns.size(); i < size; ++i) - { - auto column = other_columns[i]->cloneEmpty(); - column->insertFrom(*other_columns[i], num_rows - 1); - last_row[other_column_positions[i]] = std::move(column); + auto column = result_column->cloneEmpty(); + column->insertFrom(*result_column, num_rows - 1); + last_row.push_back(std::move(column)); } } - } diff --git a/src/Processors/Transforms/FillingTransform.h b/src/Processors/Transforms/FillingTransform.h index 9304e561ad3..5331254b08c 100644 --- a/src/Processors/Transforms/FillingTransform.h +++ b/src/Processors/Transforms/FillingTransform.h @@ -28,8 +28,19 @@ protected: void transform(Chunk & Chunk) override; private: - void setResultColumns(Chunk & chunk, MutableColumns & fill_columns, MutableColumns & interpolate_columns, MutableColumns & other_columns) const; - void saveLastRow(const MutableColumns & fill_columns, const MutableColumns & interpolate_columns, const MutableColumns & other_columns); + void saveLastRow(const MutableColumns & result_columns); + void interpolate(const MutableColumns& result_columns, Block & interpolate_block); + + using MutableColumnRawPtrs = std::vector; + void initColumns( + const Columns & input_columns, + Columns & input_fill_columns, + Columns & input_interpolate_columns, + Columns & input_other_columns, + MutableColumns & output_columns, + MutableColumnRawPtrs & output_fill_columns, + MutableColumnRawPtrs & output_interpolate_columns, + MutableColumnRawPtrs & output_other_columns); const SortDescription sort_description; /// Contains only columns with WITH FILL. 
const InterpolateDescriptionPtr interpolate_description; /// Contains INTERPOLATE columns diff --git a/src/Processors/Transforms/buildPushingToViewsChain.cpp b/src/Processors/Transforms/buildPushingToViewsChain.cpp index c27e73804ad..5ab1e811efb 100644 --- a/src/Processors/Transforms/buildPushingToViewsChain.cpp +++ b/src/Processors/Transforms/buildPushingToViewsChain.cpp @@ -286,12 +286,7 @@ Chain buildPushingToViewsChain( std::unique_ptr view_thread_status_ptr = std::make_unique(); /// Copy of a ThreadStatus should be internal. view_thread_status_ptr->setInternalThread(); - /// view_thread_status_ptr will be moved later (on and on), so need to capture raw pointer. - view_thread_status_ptr->deleter = [thread_status = view_thread_status_ptr.get(), running_group] - { - thread_status->detachQuery(); - }; - view_thread_status_ptr->attachQuery(running_group); + view_thread_status_ptr->attachToGroup(running_group); auto * view_thread_status = view_thread_status_ptr.get(); views_data->thread_status_holder->thread_statuses.push_front(std::move(view_thread_status_ptr)); diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index f6797864b73..e3a69958213 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -21,6 +21,7 @@ #include #include #include +#include namespace ProfileEvents @@ -602,6 +603,9 @@ void RemoteQueryExecutor::sendExternalTables() for (const auto & table : external_tables) { StoragePtr cur = table.second; + /// Send only temporary tables with StorageMemory + if (!std::dynamic_pointer_cast(cur)) + continue; auto data = std::make_unique(); data->table_name = table.first; diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index cc050730853..5da2a9dd169 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -50,6 +50,11 @@ #include #include +#if USE_SSL +# include +# include +#endif + #include "Core/Protocol.h" #include "Storages/MergeTree/RequestResponse.h" #include "TCPHandler.h" @@ -501,6 +506,7 @@ void TCPHandler::runImpl() /// (i.e. 
deallocations from the Aggregator with two-level aggregation) state.reset(); query_scope.reset(); + last_sent_snapshots.clear(); thread_trace_context.reset(); } catch (const Exception & e) @@ -1225,6 +1231,22 @@ void TCPHandler::receiveHello() session = makeSession(); auto & client_info = session->getClientInfo(); + +#if USE_SSL + /// Authentication with SSL user certificate + if (dynamic_cast(socket().impl())) + { + Poco::Net::SecureStreamSocket secure_socket(socket()); + if (secure_socket.havePeerCertificate()) + { + session->authenticate( + SSLCertificateCredentials{user, secure_socket.peerCertificate().commonName()}, + getClientAddress(client_info)); + return; + } + } +#endif + session->authenticate(user, password, getClientAddress(client_info)); } diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index d388a403031..19eab0b5837 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -293,12 +293,12 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); OpenTelemetry::SpanHolder span(__PRETTY_FUNCTION__); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); setThreadName("DistrOutStrProc"); ++job.blocks_started; diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index e017c9681e8..ed1b83048e1 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -218,9 +218,10 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() zero_copy_lock = storage.tryCreateZeroCopyExclusiveLock(entry.new_part_name, disk); - if (!zero_copy_lock) + if (!zero_copy_lock || !zero_copy_lock->isLocked()) { LOG_DEBUG(log, "Merge of part {} started by some other replica, will wait it and fetch merged part", entry.new_part_name); + storage.watchZeroCopyLock(entry.new_part_name, disk); /// Don't check for missing part -- it's missing because other replica still not /// finished merge. 
return PrepareResult{ diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp index d0469c35cef..84fa9ec2c8e 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp @@ -59,6 +59,7 @@ void MergeTreeBackgroundExecutor::increaseThreadsAndMaxTasksCount(size_t for (size_t number = threads_count; number < new_threads_count; ++number) pool.scheduleOrThrowOnError([this] { threadFunction(); }); + max_tasks_metric.changeTo(2 * new_max_tasks_count); // pending + active max_tasks_count.store(new_max_tasks_count, std::memory_order_relaxed); threads_count = new_threads_count; } diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h index 9305f36feb5..5c47d20865b 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -247,11 +248,13 @@ public: String name_, size_t threads_count_, size_t max_tasks_count_, - CurrentMetrics::Metric metric_) + CurrentMetrics::Metric metric_, + CurrentMetrics::Metric max_tasks_metric_) : name(name_) , threads_count(threads_count_) , max_tasks_count(max_tasks_count_) , metric(metric_) + , max_tasks_metric(max_tasks_metric_, 2 * max_tasks_count) // active + pending { if (max_tasks_count == 0) throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Task count for MergeTreeBackgroundExecutor must not be zero"); @@ -272,9 +275,10 @@ public: size_t threads_count_, size_t max_tasks_count_, CurrentMetrics::Metric metric_, + CurrentMetrics::Metric max_tasks_metric_, std::string_view policy) requires requires(Queue queue) { queue.updatePolicy(policy); } // Because we use explicit template instantiation - : MergeTreeBackgroundExecutor(name_, threads_count_, max_tasks_count_, metric_) + : MergeTreeBackgroundExecutor(name_, threads_count_, max_tasks_count_, metric_, max_tasks_metric_) { pending.updatePolicy(policy); } @@ -311,6 +315,7 @@ private: size_t threads_count TSA_GUARDED_BY(mutex) = 0; std::atomic max_tasks_count = 0; CurrentMetrics::Metric metric; + CurrentMetrics::Increment max_tasks_metric; void routine(TaskRuntimeDataPtr item); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ee5cf0ea450..a0d870fec7c 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1395,10 +1395,10 @@ std::vector MergeTreeData::loadDataPartsFromDisk( { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); while (true) { @@ -2314,10 +2314,10 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); asMutableDeletingPart(part)->remove(); if (part_names_succeed) @@ -2375,10 +2375,10 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + 
CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); LOG_TRACE(log, "Removing {} parts in blocks range {}", batch.size(), range.getPartNameForLogs()); @@ -7484,7 +7484,7 @@ MovePartsOutcome MergeTreeData::movePartsToSpace(const DataPartsVector & parts, if (moving_tagger->parts_to_move.empty()) return MovePartsOutcome::NothingToMove; - return moveParts(moving_tagger); + return moveParts(moving_tagger, true); } MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::selectPartsForMove() @@ -7539,7 +7539,7 @@ MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::checkPartsForMove(co return std::make_shared(std::move(parts_to_move), *this); } -MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger) +MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, bool wait_for_move_if_zero_copy) { LOG_INFO(log, "Got {} parts to move.", moving_tagger->parts_to_move.size()); @@ -7588,21 +7588,41 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & auto disk = moving_part.reserved_space->getDisk(); if (supportsReplication() && disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication) { - /// If we acquired lock than let's try to move. After one - /// replica will actually move the part from disk to some - /// zero-copy storage other replicas will just fetch - /// metainformation. - if (auto lock = tryCreateZeroCopyExclusiveLock(moving_part.part->name, disk); lock) + /// This loop is not endless, if shutdown called/connection failed/replica became readonly + /// we will return true from waitZeroCopyLock and createZeroCopyLock will return nullopt. + while (true) { - cloned_part = parts_mover.clonePart(moving_part); - parts_mover.swapClonedPart(cloned_part); - } - else - { - /// Move will be retried but with backoff. - LOG_DEBUG(log, "Move of part {} postponed, because zero copy mode enabled and someone other moving this part right now", moving_part.part->name); - result = MovePartsOutcome::MoveWasPostponedBecauseOfZeroCopy; - continue; + /// If we acquired lock than let's try to move. After one + /// replica will actually move the part from disk to some + /// zero-copy storage other replicas will just fetch + /// metainformation. + if (auto lock = tryCreateZeroCopyExclusiveLock(moving_part.part->name, disk); lock) + { + if (lock->isLocked()) + { + cloned_part = parts_mover.clonePart(moving_part); + parts_mover.swapClonedPart(cloned_part); + break; + } + else if (wait_for_move_if_zero_copy) + { + LOG_DEBUG(log, "Other replica is working on move of {}, will wait until lock disappear", moving_part.part->name); + /// Wait and checks not only for timeout but also for shutdown and so on. + while (!waitZeroCopyLockToDisappear(*lock, 3000)) + { + LOG_DEBUG(log, "Waiting until some replica will move {} and zero copy lock disappear", moving_part.part->name); + } + } + else + break; + } + else + { + /// Move will be retried but with backoff. 
+ LOG_DEBUG(log, "Move of part {} postponed, because zero copy mode enabled and someone other moving this part right now", moving_part.part->name); + result = MovePartsOutcome::MoveWasPostponedBecauseOfZeroCopy; + break; + } } } else /// Ordinary move as it should be diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 4a1aafe20b6..bc5e5bc2d91 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -1456,7 +1456,7 @@ private: using CurrentlyMovingPartsTaggerPtr = std::shared_ptr; /// Move selected parts to corresponding disks - MovePartsOutcome moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger); + MovePartsOutcome moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, bool wait_for_move_if_zero_copy=false); /// Select parts for move and disks for them. Used in background moving processes. CurrentlyMovingPartsTaggerPtr selectPartsForMove(); @@ -1511,6 +1511,7 @@ private: /// Create zero-copy exclusive lock for part and disk. Useful for coordination of /// distributed operations which can lead to data duplication. Implemented only in ReplicatedMergeTree. virtual std::optional tryCreateZeroCopyExclusiveLock(const String &, const DiskPtr &) { return std::nullopt; } + virtual bool waitZeroCopyLockToDisappear(const ZeroCopyLock &, size_t) { return false; } /// Remove parts from disk calling part->remove(). Can do it in parallel in case of big set of parts and enabled settings. /// If we fail to remove some part and throw_on_error equal to `true` will throw an exception on the first failed part. diff --git a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp index 7a0b1d03e79..78f68ea72fe 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp @@ -46,6 +46,10 @@ void MergeTreeDataPartChecksum::checkEqual(const MergeTreeDataPartChecksum & rhs void MergeTreeDataPartChecksum::checkSize(const IDataPartStorage & storage, const String & name) const { + /// Skip inverted index files, these have a default MergeTreeDataPartChecksum with file_size == 0 + if (name.ends_with(".gin_dict") || name.ends_with(".gin_post") || name.ends_with(".gin_seg") || name.ends_with(".gin_sid")) + return; + if (!storage.exists(name)) throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "{} doesn't exist", fs::path(storage.getRelativePath()) / name); diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 07da66e4378..936b9561725 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1119,10 +1119,10 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd { SCOPE_EXIT_SAFE( if (thread_group) - CurrentThread::detachQueryIfNotDetached(); + CurrentThread::detachFromGroupIfNotDetached(); ); if (thread_group) - CurrentThread::attachToIfDetached(thread_group); + CurrentThread::attachToGroupIfDetached(thread_group); process_part(part_index); }); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index e3b87d48ce6..5cba3e34169 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -18,9 +18,7 @@ #if defined(__aarch64__) && defined(__ARM_NEON) # include -# ifdef HAS_RESERVED_IDENTIFIER -# pragma clang diagnostic 
ignored "-Wreserved-identifier" -# endif +# pragma clang diagnostic ignored "-Wreserved-identifier" #endif namespace DB diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 4428f6c2bce..a72a947ad56 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -127,9 +127,11 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() zero_copy_lock = storage.tryCreateZeroCopyExclusiveLock(entry.new_part_name, disk); - if (!zero_copy_lock) + if (!zero_copy_lock || !zero_copy_lock->isLocked()) { + storage.watchZeroCopyLock(entry.new_part_name, disk); LOG_DEBUG(log, "Mutation of part {} started by some other replica, will wait it and mutated merged part", entry.new_part_name); + return PrepareResult{ .prepared_successfully = false, .need_to_check_missing_part_in_fetch = false, diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.h b/src/Storages/MergeTree/MutateFromLogEntryTask.h index 23c9428faa9..c823df3b999 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.h +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.h @@ -30,7 +30,9 @@ public: UInt64 getPriority() override { return priority; } private: + ReplicatedMergeMutateTaskBase::PrepareResult prepare() override; + bool finalize(ReplicatedMergeMutateTaskBase::PartLogWriter write_part_log) override; bool executeInnerTask() override diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 9e6090c947b..c0aac96dd31 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1370,6 +1370,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( { constexpr auto fmt_string = "Not executing merge/mutation for the part {}, waiting for {} to execute it and will fetch after."; out_postpone_reason = fmt::format(fmt_string, entry.new_part_name, replica_to_execute_merge); + LOG_TEST(log, fmt_string, entry.new_part_name, replica_to_execute_merge); return false; } } diff --git a/src/Storages/MergeTree/ZeroCopyLock.h b/src/Storages/MergeTree/ZeroCopyLock.h index 4e73b27804c..4400ea55b8f 100644 --- a/src/Storages/MergeTree/ZeroCopyLock.h +++ b/src/Storages/MergeTree/ZeroCopyLock.h @@ -14,6 +14,7 @@ struct ZeroCopyLock { ZeroCopyLock(const zkutil::ZooKeeperPtr & zookeeper, const std::string & lock_path, const std::string & lock_message); + bool isLocked() const { return lock->isLocked(); } /// Actual lock std::unique_ptr lock; }; diff --git a/src/Storages/MergeTree/tests/gtest_executor.cpp b/src/Storages/MergeTree/tests/gtest_executor.cpp index e45887da7ef..3a4f147b456 100644 --- a/src/Storages/MergeTree/tests/gtest_executor.cpp +++ b/src/Storages/MergeTree/tests/gtest_executor.cpp @@ -15,6 +15,7 @@ using namespace DB; namespace CurrentMetrics { extern const Metric BackgroundMergesAndMutationsPoolTask; + extern const Metric BackgroundMergesAndMutationsPoolSize; } std::random_device device; @@ -102,7 +103,8 @@ TEST(Executor, Simple) "GTest", 1, // threads 100, // max_tasks - CurrentMetrics::BackgroundMergesAndMutationsPoolTask + CurrentMetrics::BackgroundMergesAndMutationsPoolTask, + CurrentMetrics::BackgroundMergesAndMutationsPoolSize ); String schedule; // mutex is not required because we have a single worker @@ -144,7 +146,8 @@ TEST(Executor, RemoveTasks) "GTest", tasks_kinds, tasks_kinds * batch, - CurrentMetrics::BackgroundMergesAndMutationsPoolTask + 
CurrentMetrics::BackgroundMergesAndMutationsPoolTask, + CurrentMetrics::BackgroundMergesAndMutationsPoolSize ); for (size_t i = 0; i < batch; ++i) @@ -184,7 +187,8 @@ TEST(Executor, RemoveTasksStress) "GTest", tasks_kinds, tasks_kinds * batch * (schedulers_count + removers_count), - CurrentMetrics::BackgroundMergesAndMutationsPoolTask + CurrentMetrics::BackgroundMergesAndMutationsPoolTask, + CurrentMetrics::BackgroundMergesAndMutationsPoolSize ); std::barrier barrier(schedulers_count + removers_count); @@ -234,7 +238,8 @@ TEST(Executor, UpdatePolicy) "GTest", 1, // threads 100, // max_tasks - CurrentMetrics::BackgroundMergesAndMutationsPoolTask + CurrentMetrics::BackgroundMergesAndMutationsPoolTask, + CurrentMetrics::BackgroundMergesAndMutationsPoolSize ); String schedule; // mutex is not required because we have a single worker diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index fe4a144deaa..5d0b04e8cb6 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -8555,7 +8555,6 @@ String StorageReplicatedMergeTree::getSharedDataReplica( return best_replica; } - Strings StorageReplicatedMergeTree::getZeroCopyPartPath( const MergeTreeSettings & settings, const std::string & disk_type, const String & table_uuid, const String & part_name, const String & zookeeper_path_old) @@ -8575,18 +8574,65 @@ Strings StorageReplicatedMergeTree::getZeroCopyPartPath( return res; } -bool StorageReplicatedMergeTree::checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk, String & lock_replica) +void StorageReplicatedMergeTree::watchZeroCopyLock(const String & part_name, const DiskPtr & disk) { auto path = getZeroCopyPartPath(part_name, disk); if (path) { - /// FIXME + auto zookeeper = getZooKeeper(); auto lock_path = fs::path(*path) / "part_exclusive_lock"; - if (getZooKeeper()->tryGet(lock_path, lock_replica)) + LOG_TEST(log, "Adding zero-copy lock on {}", lock_path); + /// Looks ugly, but we cannot touch any storage fields inside Watch callback + /// because it could lead to use-after-free (storage dropped and watch triggered) + std::shared_ptr> flag = std::make_shared>(true); + std::string replica; + bool exists = zookeeper->tryGetWatch(lock_path, replica, nullptr, [flag] (const Coordination::WatchResponse &) { - return true; + *flag = false; + }); + + if (exists) + { + std::lock_guard lock(existing_zero_copy_locks_mutex); + existing_zero_copy_locks[lock_path] = ZeroCopyLockDescription{replica, flag}; } } +} + +bool StorageReplicatedMergeTree::checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk, String & lock_replica) +{ + auto path = getZeroCopyPartPath(part_name, disk); + + std::lock_guard lock(existing_zero_copy_locks_mutex); + /// Cleanup abandoned locks during each check. The set of locks is small and this is quite fast loop. + /// Also it's hard to properly remove locks because we can execute replication queue + /// in arbitrary order and some parts can be replaced by covering parts without merges. 
+ for (auto it = existing_zero_copy_locks.begin(); it != existing_zero_copy_locks.end();) + { + if (*it->second.exists) + ++it; + else + { + LOG_TEST(log, "Removing zero-copy lock on {}", it->first); + it = existing_zero_copy_locks.erase(it); + } + } + + if (path) + { + auto lock_path = fs::path(*path) / "part_exclusive_lock"; + if (auto it = existing_zero_copy_locks.find(lock_path); it != existing_zero_copy_locks.end()) + { + lock_replica = it->second.replica; + if (*it->second.exists) + { + LOG_TEST(log, "Zero-copy lock on path {} exists", it->first); + return true; + } + } + + LOG_TEST(log, "Zero-copy lock on path {} doesn't exist", lock_path); + } return false; } @@ -8599,11 +8645,37 @@ std::optional StorageReplicatedMergeTree::getZeroCopyPartPath(const Stri return getZeroCopyPartPath(*getSettings(), toString(disk->getDataSourceDescription().type), getTableSharedID(), part_name, zookeeper_path)[0]; } +bool StorageReplicatedMergeTree::waitZeroCopyLockToDisappear(const ZeroCopyLock & lock, size_t milliseconds_to_wait) +{ + if (lock.isLocked()) + return true; + + if (partial_shutdown_called.load(std::memory_order_relaxed)) + return true; + + auto lock_path = lock.lock->getLockPath(); + zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper(); + if (!zookeeper) + return true; + + Stopwatch time_waiting; + const auto & stop_waiting = [&]() + { + bool timeout_exceeded = milliseconds_to_wait < time_waiting.elapsedMilliseconds(); + return partial_shutdown_called.load(std::memory_order_relaxed) || is_readonly.load(std::memory_order_relaxed) || timeout_exceeded; + }; + + return zookeeper->waitForDisappear(lock_path, stop_waiting); +} + std::optional StorageReplicatedMergeTree::tryCreateZeroCopyExclusiveLock(const String & part_name, const DiskPtr & disk) { if (!disk || !disk->supportZeroCopyReplication()) return std::nullopt; + if (partial_shutdown_called.load(std::memory_order_relaxed) || is_readonly.load(std::memory_order_relaxed)) + return std::nullopt; + zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper(); if (!zookeeper) return std::nullopt; @@ -8616,10 +8688,8 @@ std::optional StorageReplicatedMergeTree::tryCreateZeroCopyExclusi /// Create actual lock ZeroCopyLock lock(zookeeper, zc_zookeeper_path, replica_name); - if (lock.lock->tryLock()) - return lock; - else - return std::nullopt; + lock.lock->tryLock(); + return lock; } String StorageReplicatedMergeTree::findReplicaHavingPart( diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 46c78e9064a..70f5ca0caac 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -482,6 +482,16 @@ private: std::mutex last_broken_disks_mutex; std::set last_broken_disks; + std::mutex existing_zero_copy_locks_mutex; + + struct ZeroCopyLockDescription + { + std::string replica; + std::shared_ptr> exists; + }; + + std::unordered_map existing_zero_copy_locks; + static std::optional distributedWriteFromClusterStorage(const std::shared_ptr & src_storage_cluster, const ASTInsertQuery & query, ContextPtr context); template @@ -862,13 +872,19 @@ private: void createTableSharedID() const; bool checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk, String & lock_replica); + void watchZeroCopyLock(const String & part_name, const DiskPtr & disk); std::optional getZeroCopyPartPath(const String & part_name, const DiskPtr & disk); /// Create ephemeral lock in zookeeper for part and disk which support zero copy replication. 
- /// If somebody already holding the lock -- return std::nullopt. + /// If no connection to zookeeper, shutdown, readonly -- return std::nullopt. + /// If somebody already holding the lock -- return unlocked ZeroCopyLock object (not std::nullopt). std::optional tryCreateZeroCopyExclusiveLock(const String & part_name, const DiskPtr & disk) override; + /// Wait for ephemral lock to disappear. Return true if table shutdown/readonly/timeout exceeded, etc. + /// Or if node actually disappeared. + bool waitZeroCopyLockToDisappear(const ZeroCopyLock & lock, size_t milliseconds_to_wait) override; + void startupImpl(bool from_attach_thread); }; diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index ed290c38c1f..df9705b9c9a 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -1266,7 +1266,8 @@ void StorageS3::updateConfiguration(ContextPtr ctx, StorageS3::Configuration & u upd.auth_settings.server_side_encryption_customer_key_base64, std::move(headers), upd.auth_settings.use_environment_credentials.value_or(ctx->getConfigRef().getBool("s3.use_environment_credentials", false)), - upd.auth_settings.use_insecure_imds_request.value_or(ctx->getConfigRef().getBool("s3.use_insecure_imds_request", false))); + upd.auth_settings.use_insecure_imds_request.value_or(ctx->getConfigRef().getBool("s3.use_insecure_imds_request", false)), + upd.auth_settings.expiration_window_seconds.value_or(ctx->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS))); } void StorageS3::processNamedCollectionResult(StorageS3::Configuration & configuration, const NamedCollection & collection) diff --git a/src/Storages/System/StorageSystemMarkedDroppedTables.cpp b/src/Storages/System/StorageSystemMarkedDroppedTables.cpp new file mode 100644 index 00000000000..fcdd6e1edcf --- /dev/null +++ b/src/Storages/System/StorageSystemMarkedDroppedTables.cpp @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "base/types.h" + + +namespace DB +{ + +NamesAndTypesList StorageSystemMarkedDroppedTables::getNamesAndTypes() +{ + NamesAndTypesList names_and_types{ + {"index", std::make_shared()}, + {"database", std::make_shared()}, + {"table", std::make_shared()}, + {"uuid", std::make_shared()}, + {"engine", std::make_shared()}, + {"metadata_dropped_path", std::make_shared()}, + {"table_dropped_time", std::make_shared()}, + }; + return names_and_types; +} + + +void StorageSystemMarkedDroppedTables::fillData(MutableColumns & res_columns, ContextPtr, const SelectQueryInfo &) const +{ + auto tables_mark_dropped = DatabaseCatalog::instance().getTablesMarkedDropped(); + + size_t index = 0; + + auto & column_index = assert_cast(*res_columns[index++]); + auto & column_database = assert_cast(*res_columns[index++]); + auto & column_table = assert_cast(*res_columns[index++]); + auto & column_uuid = assert_cast(*res_columns[index++]).getData(); + auto & column_engine = assert_cast(*res_columns[index++]); + auto & column_metadata_dropped_path = assert_cast(*res_columns[index++]); + auto & column_table_dropped_time = assert_cast(*res_columns[index++]); + + auto add_row = [&](UInt32 idx, const DatabaseCatalog::TableMarkedAsDropped & table_mark_dropped) + { + column_index.insertValue(idx); + column_database.insertData(table_mark_dropped.table_id.getDatabaseName().data(), table_mark_dropped.table_id.getDatabaseName().size()); + column_table.insertData(table_mark_dropped.table_id.getTableName().data(), 
table_mark_dropped.table_id.getTableName().size()); + column_uuid.push_back(table_mark_dropped.table_id.uuid.toUnderType()); + if (table_mark_dropped.table) + column_engine.insertData(table_mark_dropped.table->getName().data(), table_mark_dropped.table->getName().size()); + else + column_engine.insertData({}, 0); + column_metadata_dropped_path.insertData(table_mark_dropped.metadata_path.data(), table_mark_dropped.metadata_path.size()); + column_table_dropped_time.insertValue(static_cast(table_mark_dropped.drop_time)); + }; + + UInt32 idx = 0; + for (const auto & table_mark_dropped : tables_mark_dropped) + add_row(idx++, table_mark_dropped); +} + +} diff --git a/src/Storages/System/StorageSystemMarkedDroppedTables.h b/src/Storages/System/StorageSystemMarkedDroppedTables.h new file mode 100644 index 00000000000..ea2a864311c --- /dev/null +++ b/src/Storages/System/StorageSystemMarkedDroppedTables.h @@ -0,0 +1,20 @@ +#pragma once + +#include + + +namespace DB +{ + +class StorageSystemMarkedDroppedTables final : public IStorageSystemOneBlock +{ +public: + std::string getName() const override { return "SystemMarkedDroppedTables"; } + static NamesAndTypesList getNamesAndTypes(); + +protected: + using IStorageSystemOneBlock::IStorageSystemOneBlock; + void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const override; +}; + +} diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index da3d6b98dc5..26411bf3bcb 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -90,7 +90,7 @@ namespace const ucontext_t signal_context = *reinterpret_cast(context); stack_trace = StackTrace(signal_context); - std::string_view query_id = CurrentThread::getQueryId(); + auto query_id = CurrentThread::getQueryId(); query_id_size = std::min(query_id.size(), max_query_id_size); if (!query_id.empty()) memcpy(query_id_data, query_id.data(), query_id_size); diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 61329ab834b..fd1cf2f1623 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -79,6 +79,7 @@ #include #include #include +#include #ifdef OS_LINUX #include @@ -140,6 +141,7 @@ void attachSystemTablesLocal(ContextPtr context, IDatabase & system_database) attach(context, system_database, "time_zones"); attach(context, system_database, "backups"); attach(context, system_database, "schema_inference_cache"); + attach(context, system_database, "marked_dropped_tables"); #ifdef OS_LINUX attach(context, system_database, "stack_trace"); #endif diff --git a/tests/integration/test_keeper_mntr_data_size/test.py b/tests/integration/test_keeper_mntr_data_size/test.py index 8789ca0354c..ad6e5f02af7 100644 --- a/tests/integration/test_keeper_mntr_data_size/test.py +++ b/tests/integration/test_keeper_mntr_data_size/test.py @@ -5,8 +5,6 @@ from helpers.cluster import ClickHouseCluster import helpers.keeper_utils as keeper_utils import random import string -import os -import time from kazoo.client import KazooClient, KazooState @@ -17,7 +15,6 @@ node = cluster.add_instance( "node", main_configs=["configs/enable_keeper.xml"], stay_alive=True, - with_zookeeper=True, ) @@ -60,10 +57,14 @@ def test_mntr_data_size_after_restart(started_cluster): "/test_mntr_data_size/node" + str(i), random_string(123).encode() ) - def get_line_with_size(): + node_zk.stop() + node_zk.close() + node_zk 
= None + + def get_line_from_mntr(key): return next( filter( - lambda line: "zk_approximate_data_size" in line, + lambda line: key in line, keeper_utils.send_4lw_cmd(started_cluster, node, "mntr").split( "\n" ), @@ -71,19 +72,21 @@ def test_mntr_data_size_after_restart(started_cluster): None, ) - line_size_before = get_line_with_size() + line_size_before = get_line_from_mntr("zk_approximate_data_size") + node_count_before = get_line_from_mntr("zk_znode_count") + assert get_line_from_mntr("zk_ephemerals_count") == "zk_ephemerals_count\t0" assert line_size_before != None - node_zk.stop() - node_zk.close() - node_zk = None - restart_clickhouse() - assert get_line_with_size() == line_size_before + def assert_mntr_stats(): + assert get_line_from_mntr("zk_ephemerals_count") == "zk_ephemerals_count\t0" + assert get_line_from_mntr("zk_znode_count") == node_count_before + assert get_line_from_mntr("zk_approximate_data_size") == line_size_before + assert_mntr_stats() keeper_utils.send_4lw_cmd(started_cluster, node, "rclc") - assert get_line_with_size() == line_size_before + assert_mntr_stats() finally: try: if node_zk is not None: diff --git a/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml b/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml index 163449872be..ed3b2b595db 100644 --- a/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml +++ b/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml @@ -9,7 +9,7 @@ You have to configure certificate to enable this interface. See the openSSL section below. --> - + 9440 diff --git a/tests/integration/test_ssl_cert_authentication/test.py b/tests/integration/test_ssl_cert_authentication/test.py index 0246b835fd5..7c62ca0d8b6 100644 --- a/tests/integration/test_ssl_cert_authentication/test.py +++ b/tests/integration/test_ssl_cert_authentication/test.py @@ -1,9 +1,12 @@ import pytest +from helpers.client import Client from helpers.cluster import ClickHouseCluster from helpers.ssl_context import WrapSSLContextWithSNI import urllib.request, urllib.parse import ssl import os.path +from os import remove + # The test cluster is configured with certificate for that host name, see 'server-ext.cnf'. # The client have to verify server certificate against that name. 
Client uses SNI @@ -66,6 +69,54 @@ def execute_query_https( return response.decode("utf-8") +config = """ + + + none + + {certificateFile} + {privateKeyFile} + {caConfig} + + + AcceptCertificateHandler + + + +""" + + +def execute_query_native(node, query, user, cert_name): + + config_path = f"{SCRIPT_DIR}/configs/client.xml" + + formatted = config.format( + certificateFile=f"{SCRIPT_DIR}/certs/{cert_name}-cert.pem", + privateKeyFile=f"{SCRIPT_DIR}/certs/{cert_name}-key.pem", + caConfig=f"{SCRIPT_DIR}/certs/ca-cert.pem", + ) + + file = open(config_path, "w") + file.write(formatted) + file.close() + + client = Client( + node.ip_address, + 9440, + command=cluster.client_bin_path, + secure=True, + config=config_path, + ) + + try: + result = client.query(query, user=user) + remove(config_path) + return result + except: + remove(config_path) + raise + + def test_https(): assert ( execute_query_https("SELECT currentUser()", user="john", cert_name="client1") @@ -81,6 +132,27 @@ def test_https(): ) +def test_native(): + assert ( + execute_query_native( + instance, "SELECT currentUser()", user="john", cert_name="client1" + ) + == "john\n" + ) + assert ( + execute_query_native( + instance, "SELECT currentUser()", user="lucy", cert_name="client2" + ) + == "lucy\n" + ) + assert ( + execute_query_native( + instance, "SELECT currentUser()", user="lucy", cert_name="client3" + ) + == "lucy\n" + ) + + def test_https_wrong_cert(): # Wrong certificate: different user's certificate with pytest.raises(Exception) as err: @@ -107,6 +179,23 @@ def test_https_wrong_cert(): ) +def test_native_wrong_cert(): + # Wrong certificate: different user's certificate + with pytest.raises(Exception) as err: + execute_query_native( + instance, "SELECT currentUser()", user="john", cert_name="client2" + ) + assert "AUTHENTICATION_FAILED" in str(err.value) + + # Wrong certificate: self-signed certificate. + # In this case clickhouse-client itself will throw an error + with pytest.raises(Exception) as err: + execute_query_native( + instance, "SELECT currentUser()", user="john", cert_name="wrong" + ) + assert "UNKNOWN_CA" in str(err.value) + + def test_https_non_ssl_auth(): # Users with non-SSL authentication are allowed, in this case we can skip sending a client certificate at all (because "verificationMode" is set to "relaxed"). # assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False) == "peter\n" diff --git a/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml b/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml index 257ae0a355c..b3ce0735a3c 100644 --- a/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml +++ b/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml @@ -1,4 +1,8 @@ + + test + + @@ -21,6 +25,13 @@ + + +
+ s3 +
+ + diff --git a/tests/integration/test_zero_copy_fetch/test.py b/tests/integration/test_zero_copy_fetch/test.py index f13eac5e9d1..b71752528d3 100644 --- a/tests/integration/test_zero_copy_fetch/test.py +++ b/tests/integration/test_zero_copy_fetch/test.py @@ -5,6 +5,7 @@ import random import string import time +from multiprocessing.dummy import Pool import pytest from helpers.cluster import ClickHouseCluster @@ -102,3 +103,133 @@ SETTINGS index_granularity = 8192, storage_policy = 's3'""" assert part_to_disk["20230102_0_0_0"] == "s3" assert part_to_disk["20230109_0_0_0"] == "s3" assert part_to_disk["20230116_0_0_0"] == "default" + + +def test_concurrent_move_to_s3(started_cluster): + node1 = cluster.instances["node1"] + node2 = cluster.instances["node2"] + + node1.query( + """ +CREATE TABLE test_concurrent_move (EventDate Date, CounterID UInt32) +ENGINE = ReplicatedMergeTree('/clickhouse-tables/test_concurrent_move', 'r1') +PARTITION BY CounterID +ORDER BY (CounterID, EventDate) +SETTINGS index_granularity = 8192, storage_policy = 's3'""" + ) + + node2.query( + """ +CREATE TABLE test_concurrent_move (EventDate Date, CounterID UInt32) +ENGINE = ReplicatedMergeTree('/clickhouse-tables/test_concurrent_move', 'r2') +PARTITION BY CounterID +ORDER BY (CounterID, EventDate) +SETTINGS index_granularity = 8192, storage_policy = 's3'""" + ) + partitions = range(10) + + for i in partitions: + node1.query( + f"INSERT INTO test_concurrent_move SELECT toDate('2023-01-01') + toIntervalDay(number), {i} from system.numbers limit 20" + ) + node1.query( + f"INSERT INTO test_concurrent_move SELECT toDate('2023-01-01') + toIntervalDay(number) + rand(), {i} from system.numbers limit 20" + ) + node1.query( + f"INSERT INTO test_concurrent_move SELECT toDate('2023-01-01') + toIntervalDay(number) + rand(), {i} from system.numbers limit 20" + ) + node1.query( + f"INSERT INTO test_concurrent_move SELECT toDate('2023-01-01') + toIntervalDay(number) + rand(), {i} from system.numbers limit 20" + ) + + node2.query("SYSTEM SYNC REPLICA test_concurrent_move") + + # check that we can move parts concurrently without exceptions + p = Pool(3) + for i in partitions: + + def move_partition_to_s3(node): + node.query( + f"ALTER TABLE test_concurrent_move MOVE PARTITION '{i}' TO DISK 's3'" + ) + + j1 = p.apply_async(move_partition_to_s3, (node1,)) + j2 = p.apply_async(move_partition_to_s3, (node2,)) + j1.get() + j2.get() + + def get_part_to_disk(query_result): + part_to_disk = {} + for row in query_result.strip().split("\n"): + disk, part = row.split("\t") + part_to_disk[part] = disk + return part_to_disk + + part_to_disk = get_part_to_disk( + node1.query( + "SELECT disk_name, name FROM system.parts where table = 'test_concurrent_move' and active" + ) + ) + + assert all([value == "s3" for value in part_to_disk.values()]) + + part_to_disk = get_part_to_disk( + node2.query( + "SELECT disk_name, name FROM system.parts where table = 'test_concurrent_move' and active" + ) + ) + assert all([value == "s3" for value in part_to_disk.values()]) + + +def test_zero_copy_mutation(started_cluster): + node1 = cluster.instances["node1"] + node2 = cluster.instances["node2"] + + node1.query( + """ +CREATE TABLE test_zero_copy_mutation (EventDate Date, CounterID UInt32) +ENGINE = ReplicatedMergeTree('/clickhouse-tables/test_zero_copy_mutation', 'r1') +ORDER BY (CounterID, EventDate) +SETTINGS index_granularity = 8192, storage_policy = 's3_only'""" + ) + + node2.query( + """ +CREATE TABLE test_zero_copy_mutation (EventDate Date, CounterID UInt32) 
+ENGINE = ReplicatedMergeTree('/clickhouse-tables/test_zero_copy_mutation', 'r2') +ORDER BY (CounterID, EventDate) +SETTINGS index_granularity = 8192, storage_policy = 's3_only'""" + ) + + node1.query( + "INSERT INTO test_zero_copy_mutation SELECT toDate('2023-01-01') + toIntervalDay(number) + rand(), number * number from system.numbers limit 10" + ) + + node2.query("SYSTEM STOP REPLICATION QUEUES test_zero_copy_mutation") + p = Pool(3) + + def run_long_mutation(node): + node1.query( + "ALTER TABLE test_zero_copy_mutation DELETE WHERE sleepEachRow(1) == 1" + ) + + job = p.apply_async(run_long_mutation, (node1,)) + + for i in range(30): + count = node1.query( + "SELECT count() FROM system.replication_queue WHERE type = 'MUTATE_PART'" + ).strip() + if int(count) > 0: + break + else: + time.sleep(0.1) + + node2.query("SYSTEM START REPLICATION QUEUES test_zero_copy_mutation") + + node2.query("SYSTEM SYNC REPLICA test_zero_copy_mutation") + + job.get() + + assert node2.contains_in_log("all_0_0_0_1/part_exclusive_lock exists") + assert node2.contains_in_log("Removing zero-copy lock on") + assert node2.contains_in_log("all_0_0_0_1/part_exclusive_lock doesn't exist") diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments5.sh b/tests/queries/0_stateless/00534_functions_bad_arguments5.sh index 7b180870443..a8b0ce77677 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments5.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments5.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-tsan, no-debug +# Tags: no-tsan, no-debug, no-fasttest # Tag no-tsan: Too long for TSan # shellcheck disable=SC2016 diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments7.sh b/tests/queries/0_stateless/00534_functions_bad_arguments7.sh index 8358d2b80d4..383e5a1b434 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments7.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments7.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-tsan, no-debug +# Tags: no-tsan, no-debug, no-fasttest # Tag no-tsan: Too long for TSan # shellcheck disable=SC2016 diff --git a/tests/queries/0_stateless/00719_parallel_ddl_table.sh b/tests/queries/0_stateless/00719_parallel_ddl_table.sh index 2a542ea21f6..fdc994aec33 100755 --- a/tests/queries/0_stateless/00719_parallel_ddl_table.sh +++ b/tests/queries/0_stateless/00719_parallel_ddl_table.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/00746_sql_fuzzy.sh b/tests/queries/0_stateless/00746_sql_fuzzy.sh index b534b1820ba..c0741beea12 100755 --- a/tests/queries/0_stateless/00746_sql_fuzzy.sh +++ b/tests/queries/0_stateless/00746_sql_fuzzy.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh b/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh index 04b1f8b65ce..02ea1fa699c 100755 --- a/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh +++ b/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel +# Tags: no-parallel, no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py 
b/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py index d38e7738deb..ff15f14cbc3 100755 --- a/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py +++ b/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Tags: no-parallel +# Tags: no-parallel, no-fasttest import os import sys diff --git a/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.sh b/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.sh index 2e3b7dd9785..3deb16fa439 100755 --- a/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.sh +++ b/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-random-settings, no-parallel +# Tags: no-random-settings, no-parallel, no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01195_formats_diagnostic_info.sh b/tests/queries/0_stateless/01195_formats_diagnostic_info.sh index e75780a4520..b146d65fc58 100755 --- a/tests/queries/0_stateless/01195_formats_diagnostic_info.sh +++ b/tests/queries/0_stateless/01195_formats_diagnostic_info.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest # shellcheck disable=SC2206 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/01246_buffer_flush.sql b/tests/queries/0_stateless/01246_buffer_flush.sql index 47891a7f00e..ac507d94b69 100644 --- a/tests/queries/0_stateless/01246_buffer_flush.sql +++ b/tests/queries/0_stateless/01246_buffer_flush.sql @@ -1,3 +1,5 @@ +-- Tags: no-fasttest + drop table if exists data_01256; drop table if exists buffer_01256; diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index c061eb95a65..abebc35d072 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -50,7 +50,8 @@ CREATE DATABASE [] DATABASE CREATE CREATE TABLE [] TABLE CREATE CREATE VIEW [] VIEW CREATE CREATE DICTIONARY [] DICTIONARY CREATE -CREATE TEMPORARY TABLE [] GLOBAL CREATE +CREATE TEMPORARY TABLE [] GLOBAL CREATE ARBITRARY TEMPORARY TABLE +CREATE ARBITRARY TEMPORARY TABLE [] GLOBAL CREATE CREATE FUNCTION [] GLOBAL CREATE CREATE NAMED COLLECTION [] GLOBAL CREATE CREATE [] \N ALL diff --git a/tests/queries/0_stateless/01293_optimize_final_force.sh b/tests/queries/0_stateless/01293_optimize_final_force.sh index 60d45f87385..994d5952dbc 100755 --- a/tests/queries/0_stateless/01293_optimize_final_force.sh +++ b/tests/queries/0_stateless/01293_optimize_final_force.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01380_coded_delta_exception_code.sql b/tests/queries/0_stateless/01380_coded_delta_exception_code.sql index 587fac958cd..f4b88a93904 100644 --- a/tests/queries/0_stateless/01380_coded_delta_exception_code.sql +++ b/tests/queries/0_stateless/01380_coded_delta_exception_code.sql @@ -2,5 +2,5 @@ CREATE TABLE delta_codec_synthetic (`id` Decimal(38, 10) CODEC(Delta, ZSTD(22))) CREATE TABLE delta_codec_synthetic (`id` Decimal(38, 10) CODEC(DoubleDelta, ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 36 } CREATE TABLE delta_codec_synthetic (`id` Decimal(38, 10) CODEC(Gorilla, ZSTD(22))) ENGINE = MergeTree() ORDER BY 
tuple(); -- { serverError 36 } -CREATE TABLE delta_codec_synthetic (`id` UInt64 CODEC(DoubleDelta(3), ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 36 } -CREATE TABLE delta_codec_synthetic (`id` UInt64 CODEC(Gorilla('hello, world'), ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 36 } +CREATE TABLE delta_codec_synthetic (`id` UInt64 CODEC(DoubleDelta(3), ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } +CREATE TABLE delta_codec_synthetic (`id` UInt64 CODEC(Gorilla('hello, world'), ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } diff --git a/tests/queries/0_stateless/01395_limit_more_cases.sh b/tests/queries/0_stateless/01395_limit_more_cases.sh index 32c854e53fb..177147d2142 100755 --- a/tests/queries/0_stateless/01395_limit_more_cases.sh +++ b/tests/queries/0_stateless/01395_limit_more_cases.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01647_clickhouse_local_hung.sh b/tests/queries/0_stateless/01647_clickhouse_local_hung.sh index 04f32055ab6..4789db18b2e 100755 --- a/tests/queries/0_stateless/01647_clickhouse_local_hung.sh +++ b/tests/queries/0_stateless/01647_clickhouse_local_hung.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest set -e diff --git a/tests/queries/0_stateless/01656_join_defaul_enum.sql b/tests/queries/0_stateless/01656_join_defaul_enum.sql index 8a0fc089b16..878936da3b5 100644 --- a/tests/queries/0_stateless/01656_join_defaul_enum.sql +++ b/tests/queries/0_stateless/01656_join_defaul_enum.sql @@ -1,18 +1,9 @@ -DROP DATABASE IF EXISTS test_01656; - -CREATE DATABASE test_01656; -USE test_01656; - -DROP TABLE IF EXISTS table_key; -DROP TABLE IF EXISTS table_with_enum; - -CREATE TABLE table_key (keycol UInt16) ENGINE = MergeTree() ORDER BY (keycol) PARTITION BY tuple(); +CREATE TABLE table_key (keycol UInt16) ENGINE = MergeTree() ORDER BY (keycol) PARTITION BY tuple() +as SELECT * FROM VALUES ( (1), (2), (3) ); CREATE TABLE table_with_enum (keycol UInt16, enum_col Enum8('First' = 1,'Second' = 2)) - ENGINE = MergeTree() ORDER BY (keycol) PARTITION BY tuple(); - -INSERT INTO table_key VALUES (1), (2), (3); -INSERT INTO table_with_enum VALUES (2, 'Second'), (4, 'Second'); + ENGINE = MergeTree() ORDER BY (keycol) PARTITION BY tuple() +as SELECT * FROM VALUES ( (2, 'Second'), (4, 'Second') ); SET join_algorithm = 'hash'; @@ -34,7 +25,6 @@ SELECT keycol, enum_col FROM table_with_enum LEFT JOIN table_key USING (keycol) SELECT keycol, enum_col FROM table_with_enum RIGHT JOIN table_key USING (keycol) ORDER BY keycol; SELECT keycol, enum_col FROM table_with_enum FULL JOIN table_key USING (keycol) ORDER BY keycol; -DROP TABLE IF EXISTS table_key; -DROP TABLE IF EXISTS table_with_enum; +DROP TABLE table_key; +DROP TABLE table_with_enum; -DROP DATABASE IF EXISTS test_01656; diff --git a/tests/queries/0_stateless/02015_async_inserts_4.sh b/tests/queries/0_stateless/02015_async_inserts_4.sh index 65598923b96..28f0e250630 100755 --- a/tests/queries/0_stateless/02015_async_inserts_4.sh +++ b/tests/queries/0_stateless/02015_async_inserts_4.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02015_async_inserts_7.sh b/tests/queries/0_stateless/02015_async_inserts_7.sh index 
c8cbbc48a29..29f908cdc90 100755 --- a/tests/queries/0_stateless/02015_async_inserts_7.sh +++ b/tests/queries/0_stateless/02015_async_inserts_7.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index f77076bcd5c..c13de3faec3 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -289,7 +289,7 @@ CREATE TABLE system.grants ( `user_name` Nullable(String), `role_name` Nullable(String), - `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'SHOW NAMED COLLECTIONS SECRETS' = 92, 'ACCESS MANAGEMENT' = 93, 'SYSTEM SHUTDOWN' = 94, 'SYSTEM DROP DNS CACHE' = 95, 'SYSTEM DROP MARK CACHE' = 96, 'SYSTEM DROP UNCOMPRESSED CACHE' = 97, 'SYSTEM DROP MMAP CACHE' = 98, 'SYSTEM DROP QUERY CACHE' = 99, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 100, 'SYSTEM DROP FILESYSTEM CACHE' = 101, 'SYSTEM DROP SCHEMA CACHE' = 102, 'SYSTEM DROP S3 CLIENT CACHE' = 103, 'SYSTEM DROP CACHE' = 104, 'SYSTEM RELOAD CONFIG' = 105, 'SYSTEM RELOAD USERS' = 106, 'SYSTEM RELOAD SYMBOLS' = 107, 'SYSTEM RELOAD DICTIONARY' = 108, 'SYSTEM RELOAD MODEL' = 
109, 'SYSTEM RELOAD FUNCTION' = 110, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 111, 'SYSTEM RELOAD' = 112, 'SYSTEM RESTART DISK' = 113, 'SYSTEM MERGES' = 114, 'SYSTEM TTL MERGES' = 115, 'SYSTEM FETCHES' = 116, 'SYSTEM MOVES' = 117, 'SYSTEM DISTRIBUTED SENDS' = 118, 'SYSTEM REPLICATED SENDS' = 119, 'SYSTEM SENDS' = 120, 'SYSTEM REPLICATION QUEUES' = 121, 'SYSTEM DROP REPLICA' = 122, 'SYSTEM SYNC REPLICA' = 123, 'SYSTEM RESTART REPLICA' = 124, 'SYSTEM RESTORE REPLICA' = 125, 'SYSTEM WAIT LOADING PARTS' = 126, 'SYSTEM SYNC DATABASE REPLICA' = 127, 'SYSTEM SYNC TRANSACTION LOG' = 128, 'SYSTEM SYNC FILE CACHE' = 129, 'SYSTEM FLUSH DISTRIBUTED' = 130, 'SYSTEM FLUSH LOGS' = 131, 'SYSTEM FLUSH' = 132, 'SYSTEM THREAD FUZZER' = 133, 'SYSTEM UNFREEZE' = 134, 'SYSTEM' = 135, 'dictGet' = 136, 'addressToLine' = 137, 'addressToLineWithInlines' = 138, 'addressToSymbol' = 139, 'demangle' = 140, 'INTROSPECTION' = 141, 'FILE' = 142, 'URL' = 143, 'REMOTE' = 144, 'MONGO' = 145, 'MEILISEARCH' = 146, 'MYSQL' = 147, 'POSTGRES' = 148, 'SQLITE' = 149, 'ODBC' = 150, 'JDBC' = 151, 'HDFS' = 152, 'S3' = 153, 'HIVE' = 154, 'SOURCES' = 155, 'CLUSTER' = 156, 'ALL' = 157, 'NONE' = 158), + `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'TRUNCATE' = 64, 'OPTIMIZE' = 65, 'BACKUP' = 66, 'KILL QUERY' = 67, 'KILL TRANSACTION' = 68, 'MOVE PARTITION BETWEEN SHARDS' = 69, 'CREATE USER' = 70, 'ALTER USER' = 71, 'DROP USER' = 72, 'CREATE ROLE' = 73, 'ALTER ROLE' = 74, 'DROP ROLE' = 75, 'ROLE ADMIN' = 76, 'CREATE ROW POLICY' = 77, 'ALTER ROW POLICY' = 78, 'DROP ROW POLICY' = 79, 'CREATE QUOTA' = 80, 'ALTER QUOTA' = 81, 'DROP QUOTA' = 82, 'CREATE SETTINGS PROFILE' = 83, 'ALTER SETTINGS PROFILE' = 84, 'DROP SETTINGS PROFILE' = 85, 'SHOW USERS' = 86, 'SHOW ROLES' = 87, 'SHOW ROW POLICIES' = 88, 'SHOW QUOTAS' = 89, 'SHOW SETTINGS PROFILES' = 90, 'SHOW ACCESS' = 91, 'SHOW NAMED COLLECTIONS' = 92, 'SHOW NAMED COLLECTIONS SECRETS' = 93, 'ACCESS MANAGEMENT' = 94, 'SYSTEM SHUTDOWN' = 95, 'SYSTEM DROP DNS 
CACHE' = 96, 'SYSTEM DROP MARK CACHE' = 97, 'SYSTEM DROP UNCOMPRESSED CACHE' = 98, 'SYSTEM DROP MMAP CACHE' = 99, 'SYSTEM DROP QUERY CACHE' = 100, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 101, 'SYSTEM DROP FILESYSTEM CACHE' = 102, 'SYSTEM DROP SCHEMA CACHE' = 103, 'SYSTEM DROP S3 CLIENT CACHE' = 104, 'SYSTEM DROP CACHE' = 105, 'SYSTEM RELOAD CONFIG' = 106, 'SYSTEM RELOAD USERS' = 107, 'SYSTEM RELOAD SYMBOLS' = 108, 'SYSTEM RELOAD DICTIONARY' = 109, 'SYSTEM RELOAD MODEL' = 110, 'SYSTEM RELOAD FUNCTION' = 111, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 112, 'SYSTEM RELOAD' = 113, 'SYSTEM RESTART DISK' = 114, 'SYSTEM MERGES' = 115, 'SYSTEM TTL MERGES' = 116, 'SYSTEM FETCHES' = 117, 'SYSTEM MOVES' = 118, 'SYSTEM DISTRIBUTED SENDS' = 119, 'SYSTEM REPLICATED SENDS' = 120, 'SYSTEM SENDS' = 121, 'SYSTEM REPLICATION QUEUES' = 122, 'SYSTEM DROP REPLICA' = 123, 'SYSTEM SYNC REPLICA' = 124, 'SYSTEM RESTART REPLICA' = 125, 'SYSTEM RESTORE REPLICA' = 126, 'SYSTEM WAIT LOADING PARTS' = 127, 'SYSTEM SYNC DATABASE REPLICA' = 128, 'SYSTEM SYNC TRANSACTION LOG' = 129, 'SYSTEM SYNC FILE CACHE' = 130, 'SYSTEM FLUSH DISTRIBUTED' = 131, 'SYSTEM FLUSH LOGS' = 132, 'SYSTEM FLUSH' = 133, 'SYSTEM THREAD FUZZER' = 134, 'SYSTEM UNFREEZE' = 135, 'SYSTEM' = 136, 'dictGet' = 137, 'addressToLine' = 138, 'addressToLineWithInlines' = 139, 'addressToSymbol' = 140, 'demangle' = 141, 'INTROSPECTION' = 142, 'FILE' = 143, 'URL' = 144, 'REMOTE' = 145, 'MONGO' = 146, 'MEILISEARCH' = 147, 'MYSQL' = 148, 'POSTGRES' = 149, 'SQLITE' = 150, 'ODBC' = 151, 'JDBC' = 152, 'HDFS' = 153, 'S3' = 154, 'HIVE' = 155, 'SOURCES' = 156, 'CLUSTER' = 157, 'ALL' = 158, 'NONE' = 159), `database` Nullable(String), `table` Nullable(String), `column` Nullable(String), @@ -570,10 +570,10 @@ ENGINE = SystemPartsColumns COMMENT 'SYSTEM TABLE is built on the fly.' 
CREATE TABLE system.privileges ( - `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'SHOW NAMED COLLECTIONS SECRETS' = 92, 'ACCESS MANAGEMENT' = 93, 'SYSTEM SHUTDOWN' = 94, 'SYSTEM DROP DNS CACHE' = 95, 'SYSTEM DROP MARK CACHE' = 96, 'SYSTEM DROP UNCOMPRESSED CACHE' = 97, 'SYSTEM DROP MMAP CACHE' = 98, 'SYSTEM DROP QUERY CACHE' = 99, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 100, 'SYSTEM DROP FILESYSTEM CACHE' = 101, 'SYSTEM DROP SCHEMA CACHE' = 102, 'SYSTEM DROP S3 CLIENT CACHE' = 103, 'SYSTEM DROP CACHE' = 104, 'SYSTEM RELOAD CONFIG' = 105, 'SYSTEM RELOAD USERS' = 106, 'SYSTEM RELOAD SYMBOLS' = 107, 'SYSTEM RELOAD DICTIONARY' = 108, 'SYSTEM RELOAD MODEL' = 109, 'SYSTEM RELOAD FUNCTION' = 110, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 111, 'SYSTEM RELOAD' = 112, 'SYSTEM RESTART DISK' = 113, 'SYSTEM MERGES' = 114, 'SYSTEM TTL MERGES' = 115, 'SYSTEM FETCHES' = 116, 'SYSTEM MOVES' = 117, 'SYSTEM DISTRIBUTED SENDS' = 118, 'SYSTEM REPLICATED SENDS' = 119, 'SYSTEM SENDS' = 120, 'SYSTEM REPLICATION QUEUES' = 121, 'SYSTEM DROP REPLICA' = 122, 'SYSTEM SYNC REPLICA' = 123, 'SYSTEM RESTART REPLICA' = 124, 'SYSTEM RESTORE REPLICA' = 125, 'SYSTEM WAIT LOADING PARTS' = 126, 'SYSTEM SYNC DATABASE REPLICA' = 127, 'SYSTEM SYNC TRANSACTION LOG' = 128, 'SYSTEM SYNC FILE CACHE' = 129, 'SYSTEM FLUSH DISTRIBUTED' = 130, 'SYSTEM FLUSH LOGS' = 131, 'SYSTEM FLUSH' = 132, 
'SYSTEM THREAD FUZZER' = 133, 'SYSTEM UNFREEZE' = 134, 'SYSTEM' = 135, 'dictGet' = 136, 'addressToLine' = 137, 'addressToLineWithInlines' = 138, 'addressToSymbol' = 139, 'demangle' = 140, 'INTROSPECTION' = 141, 'FILE' = 142, 'URL' = 143, 'REMOTE' = 144, 'MONGO' = 145, 'MEILISEARCH' = 146, 'MYSQL' = 147, 'POSTGRES' = 148, 'SQLITE' = 149, 'ODBC' = 150, 'JDBC' = 151, 'HDFS' = 152, 'S3' = 153, 'HIVE' = 154, 'SOURCES' = 155, 'CLUSTER' = 156, 'ALL' = 157, 'NONE' = 158), + `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'TRUNCATE' = 64, 'OPTIMIZE' = 65, 'BACKUP' = 66, 'KILL QUERY' = 67, 'KILL TRANSACTION' = 68, 'MOVE PARTITION BETWEEN SHARDS' = 69, 'CREATE USER' = 70, 'ALTER USER' = 71, 'DROP USER' = 72, 'CREATE ROLE' = 73, 'ALTER ROLE' = 74, 'DROP ROLE' = 75, 'ROLE ADMIN' = 76, 'CREATE ROW POLICY' = 77, 'ALTER ROW POLICY' = 78, 'DROP ROW POLICY' = 79, 'CREATE QUOTA' = 80, 'ALTER QUOTA' = 81, 'DROP QUOTA' = 82, 'CREATE SETTINGS PROFILE' = 83, 'ALTER SETTINGS PROFILE' = 84, 'DROP SETTINGS PROFILE' = 85, 'SHOW USERS' = 86, 'SHOW ROLES' = 87, 'SHOW ROW POLICIES' = 88, 'SHOW QUOTAS' = 89, 'SHOW SETTINGS PROFILES' = 90, 'SHOW ACCESS' = 91, 'SHOW NAMED COLLECTIONS' = 92, 'SHOW NAMED COLLECTIONS SECRETS' = 93, 'ACCESS MANAGEMENT' = 94, 'SYSTEM SHUTDOWN' = 95, 'SYSTEM DROP DNS CACHE' = 96, 'SYSTEM DROP MARK CACHE' = 97, 'SYSTEM DROP UNCOMPRESSED CACHE' = 98, 'SYSTEM DROP MMAP CACHE' = 99, 'SYSTEM DROP QUERY CACHE' = 100, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 101, 'SYSTEM DROP FILESYSTEM CACHE' = 102, 'SYSTEM DROP SCHEMA CACHE' = 103, 'SYSTEM DROP S3 CLIENT CACHE' = 104, 'SYSTEM DROP CACHE' = 105, 'SYSTEM RELOAD CONFIG' = 106, 'SYSTEM RELOAD USERS' = 107, 'SYSTEM RELOAD SYMBOLS' = 108, 'SYSTEM RELOAD DICTIONARY' = 109, 'SYSTEM RELOAD MODEL' = 110, 'SYSTEM RELOAD FUNCTION' = 111, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 112, 'SYSTEM RELOAD' = 113, 'SYSTEM RESTART DISK' = 114, 'SYSTEM MERGES' = 115, 'SYSTEM TTL MERGES' = 116, 'SYSTEM FETCHES' = 117, 'SYSTEM MOVES' = 118, 
'SYSTEM DISTRIBUTED SENDS' = 119, 'SYSTEM REPLICATED SENDS' = 120, 'SYSTEM SENDS' = 121, 'SYSTEM REPLICATION QUEUES' = 122, 'SYSTEM DROP REPLICA' = 123, 'SYSTEM SYNC REPLICA' = 124, 'SYSTEM RESTART REPLICA' = 125, 'SYSTEM RESTORE REPLICA' = 126, 'SYSTEM WAIT LOADING PARTS' = 127, 'SYSTEM SYNC DATABASE REPLICA' = 128, 'SYSTEM SYNC TRANSACTION LOG' = 129, 'SYSTEM SYNC FILE CACHE' = 130, 'SYSTEM FLUSH DISTRIBUTED' = 131, 'SYSTEM FLUSH LOGS' = 132, 'SYSTEM FLUSH' = 133, 'SYSTEM THREAD FUZZER' = 134, 'SYSTEM UNFREEZE' = 135, 'SYSTEM' = 136, 'dictGet' = 137, 'addressToLine' = 138, 'addressToLineWithInlines' = 139, 'addressToSymbol' = 140, 'demangle' = 141, 'INTROSPECTION' = 142, 'FILE' = 143, 'URL' = 144, 'REMOTE' = 145, 'MONGO' = 146, 'MEILISEARCH' = 147, 'MYSQL' = 148, 'POSTGRES' = 149, 'SQLITE' = 150, 'ODBC' = 151, 'JDBC' = 152, 'HDFS' = 153, 'S3' = 154, 'HIVE' = 155, 'SOURCES' = 156, 'CLUSTER' = 157, 'ALL' = 158, 'NONE' = 159), `aliases` Array(String), `level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5)), - `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'SHOW NAMED COLLECTIONS SECRETS' = 92, 'ACCESS MANAGEMENT' = 93, 'SYSTEM SHUTDOWN' = 94, 'SYSTEM DROP DNS CACHE' = 95, 'SYSTEM DROP MARK CACHE' = 96, 'SYSTEM DROP UNCOMPRESSED CACHE' = 97, 'SYSTEM DROP MMAP CACHE' = 98, 'SYSTEM DROP 
QUERY CACHE' = 99, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 100, 'SYSTEM DROP FILESYSTEM CACHE' = 101, 'SYSTEM DROP SCHEMA CACHE' = 102, 'SYSTEM DROP S3 CLIENT CACHE' = 103, 'SYSTEM DROP CACHE' = 104, 'SYSTEM RELOAD CONFIG' = 105, 'SYSTEM RELOAD USERS' = 106, 'SYSTEM RELOAD SYMBOLS' = 107, 'SYSTEM RELOAD DICTIONARY' = 108, 'SYSTEM RELOAD MODEL' = 109, 'SYSTEM RELOAD FUNCTION' = 110, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 111, 'SYSTEM RELOAD' = 112, 'SYSTEM RESTART DISK' = 113, 'SYSTEM MERGES' = 114, 'SYSTEM TTL MERGES' = 115, 'SYSTEM FETCHES' = 116, 'SYSTEM MOVES' = 117, 'SYSTEM DISTRIBUTED SENDS' = 118, 'SYSTEM REPLICATED SENDS' = 119, 'SYSTEM SENDS' = 120, 'SYSTEM REPLICATION QUEUES' = 121, 'SYSTEM DROP REPLICA' = 122, 'SYSTEM SYNC REPLICA' = 123, 'SYSTEM RESTART REPLICA' = 124, 'SYSTEM RESTORE REPLICA' = 125, 'SYSTEM WAIT LOADING PARTS' = 126, 'SYSTEM SYNC DATABASE REPLICA' = 127, 'SYSTEM SYNC TRANSACTION LOG' = 128, 'SYSTEM SYNC FILE CACHE' = 129, 'SYSTEM FLUSH DISTRIBUTED' = 130, 'SYSTEM FLUSH LOGS' = 131, 'SYSTEM FLUSH' = 132, 'SYSTEM THREAD FUZZER' = 133, 'SYSTEM UNFREEZE' = 134, 'SYSTEM' = 135, 'dictGet' = 136, 'addressToLine' = 137, 'addressToLineWithInlines' = 138, 'addressToSymbol' = 139, 'demangle' = 140, 'INTROSPECTION' = 141, 'FILE' = 142, 'URL' = 143, 'REMOTE' = 144, 'MONGO' = 145, 'MEILISEARCH' = 146, 'MYSQL' = 147, 'POSTGRES' = 148, 'SQLITE' = 149, 'ODBC' = 150, 'JDBC' = 151, 'HDFS' = 152, 'S3' = 153, 'HIVE' = 154, 'SOURCES' = 155, 'CLUSTER' = 156, 'ALL' = 157, 'NONE' = 158)) + `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'TRUNCATE' = 64, 'OPTIMIZE' = 65, 'BACKUP' = 66, 'KILL QUERY' = 67, 'KILL TRANSACTION' = 68, 'MOVE PARTITION BETWEEN SHARDS' = 69, 'CREATE USER' = 70, 'ALTER USER' = 71, 'DROP USER' = 72, 'CREATE ROLE' = 73, 'ALTER ROLE' = 74, 'DROP ROLE' = 75, 'ROLE ADMIN' = 76, 'CREATE ROW POLICY' = 77, 'ALTER ROW POLICY' = 78, 'DROP ROW POLICY' = 79, 'CREATE QUOTA' = 80, 'ALTER QUOTA' = 81, 'DROP QUOTA' = 82, 'CREATE 
SETTINGS PROFILE' = 83, 'ALTER SETTINGS PROFILE' = 84, 'DROP SETTINGS PROFILE' = 85, 'SHOW USERS' = 86, 'SHOW ROLES' = 87, 'SHOW ROW POLICIES' = 88, 'SHOW QUOTAS' = 89, 'SHOW SETTINGS PROFILES' = 90, 'SHOW ACCESS' = 91, 'SHOW NAMED COLLECTIONS' = 92, 'SHOW NAMED COLLECTIONS SECRETS' = 93, 'ACCESS MANAGEMENT' = 94, 'SYSTEM SHUTDOWN' = 95, 'SYSTEM DROP DNS CACHE' = 96, 'SYSTEM DROP MARK CACHE' = 97, 'SYSTEM DROP UNCOMPRESSED CACHE' = 98, 'SYSTEM DROP MMAP CACHE' = 99, 'SYSTEM DROP QUERY CACHE' = 100, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 101, 'SYSTEM DROP FILESYSTEM CACHE' = 102, 'SYSTEM DROP SCHEMA CACHE' = 103, 'SYSTEM DROP S3 CLIENT CACHE' = 104, 'SYSTEM DROP CACHE' = 105, 'SYSTEM RELOAD CONFIG' = 106, 'SYSTEM RELOAD USERS' = 107, 'SYSTEM RELOAD SYMBOLS' = 108, 'SYSTEM RELOAD DICTIONARY' = 109, 'SYSTEM RELOAD MODEL' = 110, 'SYSTEM RELOAD FUNCTION' = 111, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 112, 'SYSTEM RELOAD' = 113, 'SYSTEM RESTART DISK' = 114, 'SYSTEM MERGES' = 115, 'SYSTEM TTL MERGES' = 116, 'SYSTEM FETCHES' = 117, 'SYSTEM MOVES' = 118, 'SYSTEM DISTRIBUTED SENDS' = 119, 'SYSTEM REPLICATED SENDS' = 120, 'SYSTEM SENDS' = 121, 'SYSTEM REPLICATION QUEUES' = 122, 'SYSTEM DROP REPLICA' = 123, 'SYSTEM SYNC REPLICA' = 124, 'SYSTEM RESTART REPLICA' = 125, 'SYSTEM RESTORE REPLICA' = 126, 'SYSTEM WAIT LOADING PARTS' = 127, 'SYSTEM SYNC DATABASE REPLICA' = 128, 'SYSTEM SYNC TRANSACTION LOG' = 129, 'SYSTEM SYNC FILE CACHE' = 130, 'SYSTEM FLUSH DISTRIBUTED' = 131, 'SYSTEM FLUSH LOGS' = 132, 'SYSTEM FLUSH' = 133, 'SYSTEM THREAD FUZZER' = 134, 'SYSTEM UNFREEZE' = 135, 'SYSTEM' = 136, 'dictGet' = 137, 'addressToLine' = 138, 'addressToLineWithInlines' = 139, 'addressToSymbol' = 140, 'demangle' = 141, 'INTROSPECTION' = 142, 'FILE' = 143, 'URL' = 144, 'REMOTE' = 145, 'MONGO' = 146, 'MEILISEARCH' = 147, 'MYSQL' = 148, 'POSTGRES' = 149, 'SQLITE' = 150, 'ODBC' = 151, 'JDBC' = 152, 'HDFS' = 153, 'S3' = 154, 'HIVE' = 155, 'SOURCES' = 156, 'CLUSTER' = 157, 'ALL' = 158, 'NONE' = 159)) ) ENGINE = SystemPrivileges COMMENT 'SYSTEM TABLE is built on the fly.' 
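For illustration (a sketch, not part of the patch): the enum changes above split a separate CREATE ARBITRARY TEMPORARY TABLE privilege out of CREATE TEMPORARY TABLE, and the 02561_temporary_table_grants test added further below exercises it. A minimal sketch of the intended behaviour against a locally running server; the user name, password and table names are placeholders invented for this example:

clickhouse-client --query "CREATE USER demo_user IDENTIFIED WITH plaintext_password BY 'secret'"
clickhouse-client --query "GRANT CREATE TEMPORARY TABLE ON *.* TO demo_user"

# The pre-existing grant is still enough for a plain temporary table ...
clickhouse-client --user demo_user --password secret --query "CREATE TEMPORARY TABLE t_plain (n UInt64)"

# ... but asking for an explicit storage engine is expected to fail with
# "Not enough privileges ... CREATE ARBITRARY TEMPORARY TABLE"
clickhouse-client --user demo_user --password secret --query "CREATE TEMPORARY TABLE t_mt (n UInt64) ENGINE = MergeTree ORDER BY n"

# After granting the new privilege the same statement should succeed
clickhouse-client --query "GRANT CREATE ARBITRARY TEMPORARY TABLE ON *.* TO demo_user"
clickhouse-client --user demo_user --password secret --query "CREATE TEMPORARY TABLE t_mt (n UInt64) ENGINE = MergeTree ORDER BY n"

clickhouse-client --query "DROP USER demo_user"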
diff --git a/tests/queries/0_stateless/02184_default_table_engine.sql b/tests/queries/0_stateless/02184_default_table_engine.sql index 4b5ad6c008c..109875d53a5 100644 --- a/tests/queries/0_stateless/02184_default_table_engine.sql +++ b/tests/queries/0_stateless/02184_default_table_engine.sql @@ -82,7 +82,7 @@ SET default_table_engine = 'Log'; CREATE TEMPORARY TABLE tmp (n int); SHOW CREATE TEMPORARY TABLE tmp; CREATE TEMPORARY TABLE tmp1 (n int) ENGINE=Memory; -CREATE TEMPORARY TABLE tmp2 (n int) ENGINE=Log; -- {serverError 80} +CREATE TEMPORARY TABLE tmp2 (n int) ENGINE=Log; CREATE TEMPORARY TABLE tmp2 (n int) ORDER BY n; -- {serverError 80} CREATE TEMPORARY TABLE tmp2 (n int, PRIMARY KEY (n)); -- {serverError 80} diff --git a/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh b/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh index 171dcc52c9c..e5d00bc1a1c 100755 --- a/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh +++ b/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct.reference b/tests/queries/0_stateless/02500_remove_redundant_distinct.reference index 32ddab4886c..2e049dbc936 100644 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct.reference +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct.reference @@ -464,3 +464,16 @@ Expression ((Projection + (Before ORDER BY + (Projection + Before ORDER BY)))) 1 0 +-- DISTINCT COUNT() with GROUP BY => do _not_ remove DISTINCT +-- query +select distinct count() from numbers(10) group by number +-- explain +Expression (Projection) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + Aggregating + Expression (Before GROUP BY) + ReadFromStorage (SystemNumbers) +-- execute +1 diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh index 879cc776fe1..d550b057853 100755 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh @@ -256,3 +256,7 @@ FROM GROUP BY a WITH TOTALS )" run_query "$query" + +echo "-- DISTINCT COUNT() with GROUP BY => do _not_ remove DISTINCT" +query="select distinct count() from numbers(10) group by number" +run_query "$query" diff --git a/tests/queries/0_stateless/02525_different_engines_in_temporary_tables.reference b/tests/queries/0_stateless/02525_different_engines_in_temporary_tables.reference new file mode 100644 index 00000000000..3d1916b29f6 --- /dev/null +++ b/tests/queries/0_stateless/02525_different_engines_in_temporary_tables.reference @@ -0,0 +1,14 @@ +1 a +2 b +3 c +0 +0 +1 a +2 b +3 c +1 a +2 b +3 c +1 a +2 b +3 c diff --git a/tests/queries/0_stateless/02525_different_engines_in_temporary_tables.sql b/tests/queries/0_stateless/02525_different_engines_in_temporary_tables.sql new file mode 100644 index 00000000000..7ebc05dfece --- /dev/null +++ b/tests/queries/0_stateless/02525_different_engines_in_temporary_tables.sql @@ -0,0 +1,66 @@ +DROP TEMPORARY TABLE IF EXISTS table_merge_tree_02525; +CREATE TEMPORARY TABLE table_merge_tree_02525 +( + id UInt64, + info String +) +ENGINE = MergeTree +ORDER BY id +PRIMARY KEY id; +INSERT INTO table_merge_tree_02525 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM 
table_merge_tree_02525; +-- Check that temporary table with MergeTree is not sent to remote servers +-- The query with remote() should not fail +SELECT dummy FROM remote('127.0.0.{1,2}', system, one); +DROP TEMPORARY TABLE table_merge_tree_02525; + +DROP TEMPORARY TABLE IF EXISTS table_log_02525; +CREATE TEMPORARY TABLE table_log_02525 +( + id UInt64, + info String +) +ENGINE = Log; +INSERT INTO table_log_02525 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM table_log_02525; +DROP TEMPORARY TABLE table_log_02525; + +DROP TEMPORARY TABLE IF EXISTS table_stripe_log_02525; +CREATE TEMPORARY TABLE table_stripe_log_02525 +( + id UInt64, + info String +) +ENGINE = StripeLog; +INSERT INTO table_stripe_log_02525 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM table_stripe_log_02525; +DROP TEMPORARY TABLE table_stripe_log_02525; + +DROP TEMPORARY TABLE IF EXISTS table_tiny_log_02525; +CREATE TEMPORARY TABLE table_tiny_log_02525 +( + id UInt64, + info String +) +ENGINE = TinyLog; +INSERT INTO table_tiny_log_02525 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM table_tiny_log_02525; +DROP TEMPORARY TABLE table_tiny_log_02525; + +DROP TEMPORARY TABLE IF EXISTS table_replicated_merge_tree_02525; +CREATE TEMPORARY TABLE table_replicated_merge_tree_02525 +( + id UInt64, + info String +) +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02525/table_replicated_merge_tree_02525', 'r1') +ORDER BY id +PRIMARY KEY id; -- { serverError INCORRECT_QUERY } + +DROP TEMPORARY TABLE IF EXISTS table_keeper_map_02525; +CREATE TEMPORARY TABLE table_keeper_map_02525 +( + key String, + value UInt32 +) Engine=KeeperMap('/' || currentDatabase() || '/test02525') +PRIMARY KEY(key); -- { serverError INCORRECT_QUERY } diff --git a/tests/queries/0_stateless/02561_temporary_table_grants.reference b/tests/queries/0_stateless/02561_temporary_table_grants.reference new file mode 100644 index 00000000000..b462a5a7baa --- /dev/null +++ b/tests/queries/0_stateless/02561_temporary_table_grants.reference @@ -0,0 +1,4 @@ +OK +OK +OK +OK diff --git a/tests/queries/0_stateless/02561_temporary_table_grants.sh b/tests/queries/0_stateless/02561_temporary_table_grants.sh new file mode 100755 index 00000000000..6e0c96786e8 --- /dev/null +++ b/tests/queries/0_stateless/02561_temporary_table_grants.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +set -e + +user=user_$CLICKHOUSE_TEST_UNIQUE_NAME +$CLICKHOUSE_CLIENT --query "DROP USER IF EXISTS $user" +$CLICKHOUSE_CLIENT --query "CREATE USER $user IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'hello'" + +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_memory_02561(name String)" 2>&1 | grep -F "Not enough privileges. To execute this query it's necessary to have grant CREATE TEMPORARY TABLE" > /dev/null && echo "OK" + +$CLICKHOUSE_CLIENT --query "GRANT CREATE TEMPORARY TABLE ON *.* TO $user" +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_memory_02561(name String)" + +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_merge_tree_02561(name String) ENGINE = MergeTree() ORDER BY name" 2>&1 | grep -F "Not enough privileges. 
To execute this query it's necessary to have grant CREATE ARBITRARY TEMPORARY TABLE" > /dev/null && echo "OK" + +$CLICKHOUSE_CLIENT --query "GRANT CREATE ARBITRARY TEMPORARY TABLE ON *.* TO $user" + +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_merge_tree_02561(name String) ENGINE = MergeTree() ORDER BY name" + +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_file_02561(name String) ENGINE = File(TabSeparated)" 2>&1 | grep -F "Not enough privileges. To execute this query it's necessary to have grant FILE" > /dev/null && echo "OK" + +$CLICKHOUSE_CLIENT --query "GRANT FILE ON *.* TO $user" + +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_file_02561(name String) ENGINE = File(TabSeparated)" + +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_url_02561(name String) ENGINE = URL('http://127.0.0.1:8123?query=select+12', 'RawBLOB')" 2>&1 | grep -F "Not enough privileges. To execute this query it's necessary to have grant URL" > /dev/null && echo "OK" + +$CLICKHOUSE_CLIENT --query "GRANT URL ON *.* TO $user" + +$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_url_02561(name String) ENGINE = URL('http://127.0.0.1:8123?query=select+12', 'RawBLOB')" + +$CLICKHOUSE_CLIENT --query "DROP USER $user" diff --git a/tests/queries/0_stateless/02561_temporary_table_sessions.reference b/tests/queries/0_stateless/02561_temporary_table_sessions.reference new file mode 100644 index 00000000000..b3890873523 --- /dev/null +++ b/tests/queries/0_stateless/02561_temporary_table_sessions.reference @@ -0,0 +1,7 @@ +OK +1 d +2 e +3 f +1 a +2 b +3 c diff --git a/tests/queries/0_stateless/02561_temporary_table_sessions.sh b/tests/queries/0_stateless/02561_temporary_table_sessions.sh new file mode 100755 index 00000000000..a810a48cdf3 --- /dev/null +++ b/tests/queries/0_stateless/02561_temporary_table_sessions.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +SESSION_ID_A="$RANDOM$RANDOM$RANDOM" +SESSION_ID_B="$RANDOM$RANDOM$RANDOM" + +# Create temporary table and insert in SESSION_ID_A +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_A}" -d 'CREATE TEMPORARY TABLE table_merge_tree_02561 (id UInt64, info String) ENGINE = MergeTree ORDER BY id' +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_A}" -d "INSERT INTO table_merge_tree_02561 VALUES (1, 'a'), (2, 'b'), (3, 'c')" + +# Select from SESSION_ID_B +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_B}" -d "SELECT * FROM table_merge_tree_02561" | tr -d '\n' | grep -F 'UNKNOWN_TABLE' > /dev/null && echo "OK" + +# Create temporary table, insert and select in SESSION_ID_B +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_B}" -d 'CREATE TEMPORARY TABLE table_merge_tree_02561 (id UInt64, info String) ENGINE = MergeTree ORDER BY id' +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_B}" -d "INSERT INTO table_merge_tree_02561 VALUES (1, 'd'), (2, 'e'), (3, 'f')" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_B}" -d "SELECT * FROM table_merge_tree_02561" + +# Select from SESSION_ID_A +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_A}" -d "SELECT * FROM table_merge_tree_02561" + +# Drop tables in both sessions +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_A}" -d "DROP TEMPORARY TABLE table_merge_tree_02561" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID_B}" -d "DROP TEMPORARY TABLE table_merge_tree_02561" diff --git a/tests/queries/0_stateless/02584_compressor_codecs.reference b/tests/queries/0_stateless/02584_compressor_codecs.reference new file mode 100644 index 00000000000..bb0850568bb --- /dev/null +++ b/tests/queries/0_stateless/02584_compressor_codecs.reference @@ -0,0 +1,9 @@ +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02584_compressor_codecs.sh b/tests/queries/0_stateless/02584_compressor_codecs.sh new file mode 100755 index 00000000000..fad6847b792 --- /dev/null +++ b/tests/queries/0_stateless/02584_compressor_codecs.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +echo "Hello, World!" 
> 02584_test_data + +$CLICKHOUSE_COMPRESSOR --codec 'Delta' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' +$CLICKHOUSE_COMPRESSOR --codec 'Delta(5)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'Delta([1,2])' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'Delta(4)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out'; + +$CLICKHOUSE_COMPRESSOR --codec 'DoubleDelta' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' +$CLICKHOUSE_COMPRESSOR --codec 'DoubleDelta(5)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'DoubleDelta([1,2])' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'DoubleDelta(4)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out'; + +$CLICKHOUSE_COMPRESSOR --codec 'Gorilla' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' +$CLICKHOUSE_COMPRESSOR --codec 'Gorilla(5)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'Gorilla([1,2])' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'Gorilla(4)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out'; + +$CLICKHOUSE_COMPRESSOR --codec 'FPC' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out'; +$CLICKHOUSE_COMPRESSOR --codec 'FPC(5)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out'; +$CLICKHOUSE_COMPRESSOR --codec 'FPC(5, 1)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'FPC([1,2,3])' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "ILLEGAL_CODEC_PARAMETER"; +$CLICKHOUSE_COMPRESSOR --codec 'FPC(5, 4)' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out'; + + +$CLICKHOUSE_COMPRESSOR --codec 'T64' --codec 'LZ4' --input '02584_test_data' --output '02584_test_out' 2>&1 | grep -c "CANNOT_COMPRESS"; + +rm 02584_test_data 02584_test_out + diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference new file mode 100644 index 00000000000..2d41f5dae89 --- /dev/null +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference @@ -0,0 +1,23 @@ +INSERT TO S3 + [ 0 ] S3CompleteMultipartUpload: 1 + [ 0 ] S3CreateMultipartUpload: 1 + [ 0 ] S3HeadObject: 1 + [ 0 ] S3ReadRequestsCount: 1 + [ 0 ] S3UploadPart: 1 + [ 0 ] S3WriteRequestsCount: 3 +CHECK WITH query_log +QueryFinish S3CreateMultipartUpload 1 S3UploadPart 1 S3CompleteMultipartUpload 1 S3PutObject 0 +CREATE +INSERT + [ 0 ] FileOpen: 7 +READ +INSERT and READ INSERT + [ 0 ] FileOpen: 7 + [ 0 ] FileOpen: 7 +DROP +CHECK with query_log +QueryFinish INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; FileOpen 7 +QueryFinish SELECT \'1\', min(t) FROM times; FileOpen 0 +QueryFinish INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; FileOpen 7 +QueryFinish SELECT \'2\', min(t) FROM times; 
FileOpen 0 +QueryFinish INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; FileOpen 7 diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh new file mode 100755 index 00000000000..4713e4cbe8b --- /dev/null +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +# Tag no-fasttest: needs s3 + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +echo "INSERT TO S3" +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1; +" 2>&1 | grep -o -e '\ \[\ .*\ \]\ S3.*:\ .*\ ' | grep -v 'Microseconds' | sort + +echo "CHECK WITH query_log" +$CLICKHOUSE_CLIENT -nq " +SYSTEM FLUSH LOGS; +SELECT type, + 'S3CreateMultipartUpload', ProfileEvents['S3CreateMultipartUpload'], + 'S3UploadPart', ProfileEvents['S3UploadPart'], + 'S3CompleteMultipartUpload', ProfileEvents['S3CompleteMultipartUpload'], + 'S3PutObject', ProfileEvents['S3PutObject'] +FROM system.query_log +WHERE query LIKE '%profile_events.csv%' +AND type = 'QueryFinish' +AND current_database = currentDatabase() +ORDER BY query_start_time DESC; +" + +echo "CREATE" +$CLICKHOUSE_CLIENT -nq " +DROP TABLE IF EXISTS times; +CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t + SETTINGS + storage_policy='default', + min_rows_for_compact_part = 0, + min_bytes_for_compact_part = 0, + min_rows_for_wide_part = 1000000, + min_bytes_for_wide_part = 1000000, + in_memory_parts_enable_wal = 0, + ratio_of_defaults_for_sparse_serialization=1.0; +" + +echo "INSERT" +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; +" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ ' + +echo "READ" +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +SELECT '1', min(t) FROM times; +" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ ' + +echo "INSERT and READ INSERT" +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; +SELECT '2', min(t) FROM times; +INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; +" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ ' + +echo "DROP" +$CLICKHOUSE_CLIENT -nq " +DROP TABLE times; +" + +echo "CHECK with query_log" +$CLICKHOUSE_CLIENT -nq " +SYSTEM FLUSH LOGS; +SELECT type, + query, + 'FileOpen', ProfileEvents['FileOpen'] +FROM system.query_log +WHERE current_database = currentDatabase() +AND ( query LIKE '%SELECT % FROM times%' OR query LIKE '%INSERT INTO times%' ) +AND type = 'QueryFinish' +ORDER BY query_start_time_microseconds ASC, query DESC; +" + diff --git a/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.reference b/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.reference new file mode 100644 index 00000000000..2f3b59e5530 --- /dev/null +++ b/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.reference @@ -0,0 +1,26 @@ +Expression ((Projection + Before ORDER 
BY)) +Header: id UInt64 + value String +Actions: INPUT :: 0 -> id UInt64 : 0 + INPUT :: 1 -> value String : 1 +Positions: 0 1 + ReadFromMergeTree (default.test_table) + Header: id UInt64 + value String + ReadType: Default + Parts: 0 + Granules: 0 + Prewhere info + Need filter: 1 + Prewhere filter + Prewhere filter column: equals(id, 5) (removed) + Actions: INPUT : 0 -> id UInt64 : 0 + COLUMN Const(UInt8) -> 5 UInt8 : 1 + FUNCTION equals(id : 0, 5 :: 1) -> equals(id, 5) UInt8 : 2 + Positions: 2 0 + Row level filter + Row level filter column: greaterOrEquals(id, 5) + Actions: INPUT : 0 -> id UInt64 : 0 + COLUMN Const(UInt8) -> 5 UInt8 : 1 + FUNCTION greaterOrEquals(id : 0, 5 :: 1) -> greaterOrEquals(id, 5) UInt8 : 2 + Positions: 2 0 diff --git a/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.sql b/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.sql new file mode 100644 index 00000000000..8099ccc0b0d --- /dev/null +++ b/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value'); + +DROP ROW POLICY IF EXISTS test_row_policy ON test_table; +CREATE ROW POLICY test_row_policy ON test_table USING id >= 5 TO ALL; + +EXPLAIN header = 1, actions = 1 SELECT id, value FROM test_table PREWHERE id = 5; + +DROP ROW POLICY test_row_policy ON test_table; +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/25341_inverted_idx_checksums.reference b/tests/queries/0_stateless/25341_inverted_idx_checksums.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/25341_inverted_idx_checksums.sql b/tests/queries/0_stateless/25341_inverted_idx_checksums.sql new file mode 100644 index 00000000000..92ffa7a6196 --- /dev/null +++ b/tests/queries/0_stateless/25341_inverted_idx_checksums.sql @@ -0,0 +1,16 @@ +SET allow_experimental_inverted_index = 1; + +CREATE TABLE t +( + `key` UInt64, + `str` String, + INDEX inv_idx str TYPE inverted(0) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO t VALUES (1, 'Hello World'); + +ALTER TABLE t DETACH PART 'all_1_1_0'; + +ALTER TABLE t ATTACH PART 'all_1_1_0'; \ No newline at end of file diff --git a/tests/queries/0_stateless/25400_marked_dropped_tables.reference b/tests/queries/0_stateless/25400_marked_dropped_tables.reference new file mode 100644 index 00000000000..6fc5caff0cb --- /dev/null +++ b/tests/queries/0_stateless/25400_marked_dropped_tables.reference @@ -0,0 +1,8 @@ +25400_marked_dropped_tables MergeTree +index UInt32 +database String +table String +uuid UUID +engine String +metadata_dropped_path String +table_dropped_time DateTime diff --git a/tests/queries/0_stateless/25400_marked_dropped_tables.sql b/tests/queries/0_stateless/25400_marked_dropped_tables.sql new file mode 100644 index 00000000000..101642fa779 --- /dev/null +++ b/tests/queries/0_stateless/25400_marked_dropped_tables.sql @@ -0,0 +1,11 @@ +-- Tags: no-ordinary-database + +SET database_atomic_wait_for_drop_and_detach_synchronously = 0; +DROP TABLE IF EXISTS 25400_marked_dropped_tables; + +CREATE TABLE 25400_marked_dropped_tables (id Int32) Engine=MergeTree() ORDER BY id; +DROP TABLE 25400_marked_dropped_tables; + +SELECT table, engine FROM system.marked_dropped_tables WHERE database = currentDatabase() LIMIT 1; +DESCRIBE TABLE system.marked_dropped_tables; + diff --git 
a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index f1bba4dc2fc..753d8167a86 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1,4 +1,4 @@ -personal_ws-1.1 en 543 +personal_ws-1.1 en 543 AArch ACLs AMQP @@ -22,6 +22,7 @@ CSVWithNamesAndTypes CamelCase CapnProto CentOS +ClickableSquare ClickHouse ClickHouse's CodeBlock @@ -53,6 +54,7 @@ HDDs Heredoc Homebrew Homebrew's +HorizontalDivide Hostname IPv IntN @@ -87,6 +89,7 @@ LOCALTIME LOCALTIMESTAMP LibFuzzer LineAsString +LinksDeployment LowCardinality MEMTABLE MSan @@ -169,6 +172,7 @@ Testflows Tgz Toolset Tradeoff +TwoColumnList UBSan UInt UIntN @@ -178,6 +182,8 @@ Updatable Util Valgrind Vectorized +VideoContainer +ViewAllLink VirtualBox Werror WithNamesAndTypes diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 53165d14f96..f9b7a9bbbb7 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -392,3 +392,6 @@ find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | # Check for existence of __init__.py files for i in "${ROOT_PATH}"/tests/integration/test_*; do FILE="${i}/__init__.py"; [ ! -f "${FILE}" ] && echo "${FILE} should exist for every integration test"; done + +# A small typo can lead to debug code in release builds, see https://github.com/ClickHouse/ClickHouse/pull/47647 +find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -F '#ifdef NDEBUG' | xargs -I@FILE awk '/#ifdef NDEBUG/ { inside = 1; dirty = 1 } /#endif/ { if (inside && dirty) { print "File @FILE has suspicious #ifdef NDEBUG, possibly confused with #ifndef NDEBUG" }; inside = 0 } /#else/ { dirty = 0 }' @FILE diff --git a/utils/memcpy-bench/memcpy-bench.cpp b/utils/memcpy-bench/memcpy-bench.cpp index 8b75164eb60..bb571200d07 100644 --- a/utils/memcpy-bench/memcpy-bench.cpp +++ b/utils/memcpy-bench/memcpy-bench.cpp @@ -1,6 +1,4 @@ -#ifdef HAS_RESERVED_IDENTIFIER #pragma clang diagnostic ignored "-Wreserved-identifier" -#endif #include #include
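For illustration (a sketch, not part of the patch): the check-style rule added above flags an #ifdef NDEBUG block that is closed without an #else, because such a block is compiled precisely in release builds (where NDEBUG is defined) and the author almost certainly meant #ifndef NDEBUG, i.e. debug-only code would otherwise ship in release builds. The file name and function below are invented for the demo; the awk program is essentially the one added to check-style, with awk's FILENAME in place of the xargs placeholder:

cat > suspicious_ndebug_example.cpp <<'EOF'
#ifdef NDEBUG
    runExpensiveDebugChecks();   // intended to be debug-only
#endif
EOF

awk '/#ifdef NDEBUG/ { inside = 1; dirty = 1 }
     /#endif/ { if (inside && dirty) { print "File " FILENAME " has suspicious #ifdef NDEBUG, possibly confused with #ifndef NDEBUG" }; inside = 0 }
     /#else/  { dirty = 0 }' suspicious_ndebug_example.cpp

rm suspicious_ndebug_example.cpp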