diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8a5a6293f7c..de517b1b589 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -127,12 +127,13 @@ if (USE_STATIC_LIBRARIES)
     list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
 endif ()
 
-# Implies ${WITH_COVERAGE}
 option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF)
 
 if (ENABLE_FUZZING)
+    # Also set WITH_COVERAGE=1 if you want a better fuzzing process.
+    # By default it is disabled, because fuzzers are built in CI together with ClickHouse itself,
+    # and we don't want to enable coverage for that build.
     message (STATUS "Fuzzing instrumentation enabled")
-    set (WITH_COVERAGE ON)
     set (FUZZER "libfuzzer")
 endif()
 
diff --git a/README.md b/README.md
index 178547ea523..496a6357f44 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,3 @@ ClickHouse® is an open-source column-oriented database management system that a
 * [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
 * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
 * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
-
-## Upcoming Events
-* [SF Bay Area ClickHouse August Community Meetup (online)](https://www.meetup.com/San-Francisco-Bay-Area-ClickHouse-Meetup/events/279109379/) on 25 August 2021.
diff --git a/base/glibc-compatibility/musl/getauxval.c b/base/glibc-compatibility/musl/getauxval.c
index a429273fa1a..dad7aa938d7 100644
--- a/base/glibc-compatibility/musl/getauxval.c
+++ b/base/glibc-compatibility/musl/getauxval.c
@@ -1,4 +1,5 @@
 #include <sys/auxv.h>
+#include "atomic.h"
 #include <unistd.h> // __environ
 #include <errno.h>
 
@@ -17,18 +18,7 @@ static size_t __find_auxv(unsigned long type)
     return (size_t) -1;
 }
 
-__attribute__((constructor)) static void __auxv_init()
-{
-    size_t i;
-    for (i = 0; __environ[i]; i++);
-    __auxv = (unsigned long *) (__environ + i + 1);
-
-    size_t secure_idx = __find_auxv(AT_SECURE);
-    if (secure_idx != ((size_t) -1))
-        __auxv_secure = __auxv[secure_idx];
-}
-
-unsigned long getauxval(unsigned long type)
+unsigned long __getauxval(unsigned long type)
 {
     if (type == AT_SECURE)
         return __auxv_secure;
 
@@ -43,3 +33,38 @@ unsigned long getauxval(unsigned long type)
     errno = ENOENT;
     return 0;
 }
+
+static void * volatile getauxval_func;
+
+static unsigned long __auxv_init(unsigned long type)
+{
+    if (!__environ)
+    {
+        // __environ is not initialized yet, so we can't initialize __auxv right now.
+        // That normally occurs only when getauxval() is called from some sanitizer's internal code.
+        errno = ENOENT;
+        return 0;
+    }
+
+    // Initialize __auxv and __auxv_secure.
+    size_t i;
+    for (i = 0; __environ[i]; i++);
+    __auxv = (unsigned long *) (__environ + i + 1);
+
+    size_t secure_idx = __find_auxv(AT_SECURE);
+    if (secure_idx != ((size_t) -1))
+        __auxv_secure = __auxv[secure_idx];
+
+    // Now that we've initialized __auxv, subsequent getauxval() calls will only call __getauxval().
+    a_cas_p(&getauxval_func, (void *)__auxv_init, (void *)__getauxval);
+
+    return __getauxval(type);
+}
+
+// The first time getauxval() is called, it will call __auxv_init().
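+// After that first call, __auxv_init() atomically swaps this pointer to __getauxval()
+// via the a_cas_p() above, so every subsequent call dispatches straight to the fast path.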
+static void * volatile getauxval_func = (void *)__auxv_init;
+
+unsigned long getauxval(unsigned long type)
+{
+    return ((unsigned long (*)(unsigned long))getauxval_func)(type);
+}
diff --git a/contrib/libpqxx-cmake/CMakeLists.txt b/contrib/libpqxx-cmake/CMakeLists.txt
index ae35538ccf4..65fa94cb3fd 100644
--- a/contrib/libpqxx-cmake/CMakeLists.txt
+++ b/contrib/libpqxx-cmake/CMakeLists.txt
@@ -22,6 +22,7 @@ set (SRCS
     "${LIBRARY_DIR}/src/transaction.cxx"
     "${LIBRARY_DIR}/src/transaction_base.cxx"
     "${LIBRARY_DIR}/src/row.cxx"
+    "${LIBRARY_DIR}/src/params.cxx"
     "${LIBRARY_DIR}/src/util.cxx"
     "${LIBRARY_DIR}/src/version.cxx"
 )
@@ -31,6 +32,7 @@ set (SRCS
 # conflicts with all includes of <array>.
 set (HDRS
     "${LIBRARY_DIR}/include/pqxx/array.hxx"
+    "${LIBRARY_DIR}/include/pqxx/params.hxx"
     "${LIBRARY_DIR}/include/pqxx/binarystring.hxx"
     "${LIBRARY_DIR}/include/pqxx/composite.hxx"
     "${LIBRARY_DIR}/include/pqxx/connection.hxx"
@@ -75,4 +77,3 @@ set(CM_CONFIG_PQ "${LIBRARY_DIR}/include/pqxx/config-internal-libpq.h")
 configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_INT}" @ONLY)
 configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PUB}" @ONLY)
 configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PQ}" @ONLY)
-
diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh
index d6614bbb9e2..b9900e34bf1 100755
--- a/docker/packager/binary/build.sh
+++ b/docker/packager/binary/build.sh
@@ -83,6 +83,16 @@ then
     mv "$COMBINED_OUTPUT.tgz" /output
 fi
 
+# Also build fuzzers if any sanitizer is specified.
+if [ -n "$SANITIZER" ]
+then
+    # Currently we are in the build/build_docker directory.
+    ../docker/packager/other/fuzzer.sh
+fi
+
+ccache --show-config ||:
+ccache --show-stats ||:
+
 if [ "${CCACHE_DEBUG:-}" == "1" ]
 then
     find . -name '*.ccache-*' -print0 \
@@ -95,4 +105,3 @@ then
     # files in place, and will fail because this directory is not writable.
     tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE"
 fi
-
diff --git a/docker/packager/deb/build.sh b/docker/packager/deb/build.sh
index 4e14574b738..46f6404363d 100755
--- a/docker/packager/deb/build.sh
+++ b/docker/packager/deb/build.sh
@@ -23,12 +23,24 @@ then
     echo "Place $BINARY_OUTPUT to output"
     mkdir /output/binary ||: # if exists
     mv /build/obj-*/programs/clickhouse* /output/binary
+
     if [ "$BINARY_OUTPUT" = "tests" ]
     then
         mv /build/obj-*/src/unit_tests_dbms /output/binary
     fi
 fi
 
+# Also build fuzzers if any sanitizer is specified.
+if [ -n "$SANITIZER" ]
+then
+    # The script assumes that we are in the build directory.
+    mkdir -p build/build_docker
+    cd build/build_docker
+    # Launch the build script.
+    ../docker/packager/other/fuzzer.sh
+    cd
+fi
+
 ccache --show-config ||:
 ccache --show-stats ||:
diff --git a/docker/packager/other/fuzzer.sh b/docker/packager/other/fuzzer.sh
new file mode 100755
index 00000000000..1a8b80c3f77
--- /dev/null
+++ b/docker/packager/other/fuzzer.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# This script is responsible for building all fuzzers and copying them to the output
+# directory as an archive.
+# The script assumes that we are in the build directory.
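+# It expects the compiler in $CC/$CXX and the sanitizer name in $SANITIZER, plus the
+# optional $NINJA_FLAGS and $CMAKE_FLAGS variables, as provided by the packager environment.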
+
+set -x -e
+
+printenv
+
+# Delete the previous cache, because we add new flags: -DENABLE_FUZZING=1 and -DFUZZER=libfuzzer.
+rm -f CMakeCache.txt
+read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
+# Hopefully, most of the files will already be in the cache, so we only need to link the new executables.
+cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_C_COMPILER="$CC" -DCMAKE_CXX_COMPILER="$CXX" -DENABLE_CLICKHOUSE_ODBC_BRIDGE=OFF \
+    -DENABLE_LIBRARIES=0 -DENABLE_SSL=1 -DUSE_INTERNAL_SSL_LIBRARY=1 -DUSE_UNWIND=ON -DENABLE_EMBEDDED_COMPILER=0 \
+    -DENABLE_EXAMPLES=0 -DENABLE_UTILS=0 -DENABLE_THINLTO=0 "-DSANITIZE=$SANITIZER" \
+    -DENABLE_FUZZING=1 -DFUZZER='libfuzzer' -DENABLE_TCMALLOC=0 -DENABLE_JEMALLOC=0 \
+    -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..
+
+FUZZER_TARGETS=$(find ../src -name '*_fuzzer.cpp' -execdir basename {} .cpp ';' | tr '\n' ' ')
+
+mkdir -p /output/fuzzers
+for FUZZER_TARGET in $FUZZER_TARGETS
+do
+    # shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
+    ninja $NINJA_FLAGS $FUZZER_TARGET
+    # Find this binary in the build directory and strip it.
+    FUZZER_PATH=$(find ./src -name "$FUZZER_TARGET")
+    strip --strip-unneeded "$FUZZER_PATH"
+    mv "$FUZZER_PATH" /output/fuzzers
+done
+
+tar -zcvf /output/fuzzers.tar.gz /output/fuzzers
+rm -rf /output/fuzzers
diff --git a/docker/packager/packager b/docker/packager/packager
index 95b7fcd8568..673878bce43 100755
--- a/docker/packager/packager
+++ b/docker/packager/packager
@@ -105,6 +105,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
     if image_type == "deb" or image_type == "unbundled":
         result.append("DEB_CC={}".format(cc))
         result.append("DEB_CXX={}".format(cxx))
+        # For building fuzzers
+        result.append("CC={}".format(cc))
+        result.append("CXX={}".format(cxx))
     elif image_type == "binary":
         result.append("CC={}".format(cc))
         result.append("CXX={}".format(cxx))
diff --git a/docker/test/fuzzer/Dockerfile b/docker/test/fuzzer/Dockerfile
index 18684145636..9a96ac1dfa7 100644
--- a/docker/test/fuzzer/Dockerfile
+++ b/docker/test/fuzzer/Dockerfile
@@ -16,6 +16,8 @@ RUN apt-get update \
             p7zip-full \
             parallel \
             psmisc \
+            python3 \
+            python3-pip \
             rsync \
             tree \
             tzdata \
@@ -25,6 +27,8 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
+RUN pip3 install Jinja2
+
 COPY * /
 
 SHELL ["/bin/bash", "-c"]
diff --git a/docker/test/fuzzer/generate-test-j2.py b/docker/test/fuzzer/generate-test-j2.py
new file mode 100755
index 00000000000..bcc1bf6bc84
--- /dev/null
+++ b/docker/test/fuzzer/generate-test-j2.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+from argparse import ArgumentParser
+import os
+import jinja2
+
+
+def removesuffix(text, suffix):
+    """
+    Backport of str.removesuffix(), added in Python 3.9
+    https://www.python.org/dev/peps/pep-0616/
+    """
+    if suffix and text.endswith(suffix):
+        return text[:-len(suffix)]
+    else:
+        return text[:]
+
+
+def render_test_template(j2env, suite_dir, test_name):
+    """
+    Render the template for a test and its reference file if needed.
+    """
+
+    test_base_name = removesuffix(test_name, ".sql.j2")
+
+    reference_file_name = test_base_name + ".reference.j2"
+    reference_file_path = os.path.join(suite_dir, reference_file_name)
+    if os.path.isfile(reference_file_path):
+        tpl = j2env.get_template(reference_file_name)
+        tpl.stream().dump(os.path.join(suite_dir, test_base_name) + ".gen.reference")
+
+    if test_name.endswith(".sql.j2"):
+        tpl = j2env.get_template(test_name)
+        generated_test_name = test_base_name + ".gen.sql"
+        tpl.stream().dump(os.path.join(suite_dir, generated_test_name))
+        return generated_test_name
+
+    return test_name
+
+
+def main(args):
+    suite_dir = args.path
+
+    print(f"Scanning {suite_dir} directory...")
+
+    j2env = jinja2.Environment(
+        loader=jinja2.FileSystemLoader(suite_dir),
+        keep_trailing_newline=True,
+    )
+
+    test_names = os.listdir(suite_dir)
+    for test_name in test_names:
+        if not test_name.endswith(".sql.j2"):
+            continue
+        new_name = render_test_template(j2env, suite_dir, test_name)
+        print(f"File {new_name} generated")
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser(description="Jinja2 test generator")
+    parser.add_argument("-p", "--path", help="Path to test dir", required=True)
+    main(parser.parse_args())
diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh
index 44183a50ae5..9a389edc5b2 100755
--- a/docker/test/fuzzer/run-fuzzer.sh
+++ b/docker/test/fuzzer/run-fuzzer.sh
@@ -71,12 +71,12 @@ function watchdog
     kill -9 -- $fuzzer_pid ||:
 }
 
-function filter_exists
+function filter_exists_and_template
 {
     local path
     for path in "$@"; do
         if [ -e "$path" ]; then
-            echo "$path"
+            echo "$path" | sed 's/\.sql\.j2$/.gen.sql/'
         else
             echo "'$path' does not exists" >&2
         fi
    done
 }
 
@@ -85,11 +85,13 @@
 function fuzz
 {
+    /generate-test-j2.py --path ch/tests/queries/0_stateless
+
     # Obtain the list of newly added tests. They will be fuzzed in more extreme way than other tests.
     # Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment.
-    NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\)$!ch/\1!p' ci-changed-files.txt | sort -R)"
+    NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\(\.j2\)\?\)$!ch/\1!p' ci-changed-files.txt | sort -R)"
     # ci-changed-files.txt contains also files that has been deleted/renamed, filter them out.
-    NEW_TESTS="$(filter_exists $NEW_TESTS)"
+    NEW_TESTS="$(filter_exists_and_template $NEW_TESTS)"
     if [[ -n "$NEW_TESTS" ]]
     then
         NEW_TESTS_OPT="${NEW_TESTS_OPT:---interleave-queries-file ${NEW_TESTS}}"
diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md
index 4c763153a36..53ab3f5088c 100644
--- a/docs/en/engines/table-engines/integrations/postgresql.md
+++ b/docs/en/engines/table-engines/integrations/postgresql.md
@@ -34,6 +34,7 @@ The table structure can differ from the original PostgreSQL table structure:
 - `user` — PostgreSQL user.
 - `password` — User password.
 - `schema` — Non-default table schema. Optional.
+- `on conflict ...` — example: `ON CONFLICT DO NOTHING`. Optional. Note: adding this option will make insertion less efficient.
 
 ## Implementation Details {#implementation-details}
 
diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md
index 94ef48d8d72..ce41b288f0a 100644
--- a/docs/en/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md
@@ -390,20 +390,27 @@ Functions with a constant argument that is less than ngram size can’t be used
 - `s != 1`
 - `NOT startsWith(s, 'test')`
 
-### Projections {#projections}
-Projections are like materialized views but defined in part-level. It provides consistency guarantees along with automatic usage in queries.
+## Projections {#projections}
+Projections are like [materialized views](../../../sql-reference/statements/create/view.md#materialized) but defined at the part level. They provide consistency guarantees along with automatic usage in queries.
 
-#### Query {#projection-query}
-A projection query is what defines a projection. It has the following grammar:
+Projections are an experimental feature. To enable them you must set the [allow_experimental_projection_optimization](../../../operations/settings/settings.md#allow-experimental-projection-optimization) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
 
-`SELECT [GROUP BY] [ORDER BY]`
+Projections are not supported in `SELECT` statements with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier.
 
-It implicitly selects data from the parent table.
+### Projection Query {#projection-query}
+A projection query is what defines a projection. It implicitly selects data from the parent table.
+**Syntax**
 
-#### Storage {#projection-storage}
-Projections are stored inside the part directory. It's similar to an index but contains a subdirectory that stores an anonymous MergeTree table's part. The table is induced by the definition query of the projection. If there is a GROUP BY clause, the underlying storage engine becomes AggregatedMergeTree, and all aggregate functions are converted to AggregateFunction. If there is an ORDER BY clause, the MergeTree table will use it as its primary key expression. During the merge process, the projection part will be merged via its storage's merge routine. The checksum of the parent table's part will combine the projection's part. Other maintenance jobs are similar to skip indices.
+```sql
+SELECT [GROUP BY] [ORDER BY]
+```
 
-#### Query Analysis {#projection-query-analysis}
+Projections can be modified or dropped with the [ALTER](../../../sql-reference/statements/alter/projection.md) statement.
+
+### Projection Storage {#projection-storage}
+Projections are stored inside the part directory. It's similar to an index but contains a subdirectory that stores an anonymous `MergeTree` table's part. The table is induced by the definition query of the projection. If there is a `GROUP BY` clause, the underlying storage engine becomes [AggregatingMergeTree](aggregatingmergetree.md), and all aggregate functions are converted to `AggregateFunction`. If there is an `ORDER BY` clause, the `MergeTree` table uses it as its primary key expression. During the merge process the projection part is merged via its storage's merge routine. The checksum of the parent table's part is combined with the projection's part. Other maintenance jobs are similar to skip indices.
+
+### Query Analysis {#projection-query-analysis}
 1. Check if the projection can be used to answer the given query, that is, it generates the same answer as querying the base table.
 2. Select the best feasible match, which contains the least granules to read.
 3. The query pipeline which uses projections will be different from the one that uses the original parts. If the projection is absent in some parts, we can add the pipeline to "project" it on the fly.
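To make the documented flow concrete, here is a minimal, hypothetical sketch; the table, column, and projection names are invented for illustration, and the syntax follows the grammar and settings referenced in the page above:

```sql
-- Hypothetical table with a pre-aggregating projection.
CREATE TABLE page_visits
(
    user_id UInt64,
    visit_date Date,
    duration UInt32,
    PROJECTION total_duration_by_user
    (
        SELECT user_id, sum(duration)
        GROUP BY user_id
    )
)
ENGINE = MergeTree
ORDER BY visit_date;

-- Projections are experimental, so the setting described above must be enabled first.
SET allow_experimental_projection_optimization = 1;

-- This aggregation matches the projection definition, so it can be answered
-- from the projection's pre-aggregated parts instead of the full table.
SELECT user_id, sum(duration) FROM page_visits GROUP BY user_id;
```

Because the projection query contains a `GROUP BY`, its parts are stored with `AggregatingMergeTree` semantics, as the storage section above explains.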
diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md
index 8457ea41857..70b7d59b037 100644
--- a/docs/en/interfaces/cli.md
+++ b/docs/en/interfaces/cli.md
@@ -141,7 +141,7 @@ Since version 20.5, `clickhouse-client` has automatic syntax highlighting (alway
 Example of a config file:
 
-``` xml
+```xml
 <config>
     <user>username</user>
     <password>password</password>
 </config>
 ```
 
-[Original article](https://clickhouse.tech/docs/en/interfaces/cli/)
+### Query ID Format {#query-id-format}
+
+In interactive mode `clickhouse-client` shows the query ID for every query. By default, the ID is formatted like this:
+
+```text
+Query id: 927f137d-00f1-4175-8914-0dd066365e96
+```
+
+A custom format may be specified in a configuration file inside a `query_id_formats` tag. The `{query_id}` placeholder in the format string is replaced with the ID of the query. Several format strings are allowed inside the tag.
+This feature can be used to generate URLs that facilitate profiling of queries.
+
+**Example**
+
+```xml
+<config>
+    <query_id_formats>
+        <speedscope>http://speedscope-host/#profileURL=qp%3Fid%3D{query_id}</speedscope>
+    </query_id_formats>
+</config>
+```
+
+If the configuration above is applied, the ID of a query is shown in the following format:
+
+``` text
+speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d
+```
+
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index a0a8a220ad6..a1c7d1aab32 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -3435,3 +3435,25 @@ Possible values:
 - 1 — The table is automatically updated in the background, when schema changes are detected.
 
 Default value: `0`.
+
+## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
+
+Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) optimization when processing `SELECT` queries.
+
+Possible values:
+
+- 0 — Projection optimization disabled.
+- 1 — Projection optimization enabled.
+
+Default value: `0`.
+
+## force_optimize_projection {#force-optimize-projection}
+
+Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see the [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
+
+Possible values:
+
+- 0 — Projection optimization is not obligatory.
+- 1 — Projection optimization is obligatory.
+
+Default value: `0`.
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index 577fdd668a2..cf77444b17f 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -2236,3 +2236,74 @@ defaultRoles()
 
 Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
 
+## queryID {#query-id}
+
+Returns the ID of the current query. Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `query_id`.
+
+In contrast to the [initialQueryID](#initial-query-id) function, `queryID` can return different results on different shards (see the example).
+
+**Syntax**
+
+``` sql
+queryID()
+```
+
+**Returned value**
+
+- The ID of the current query.
+
+Type: [String](../../sql-reference/data-types/string.md)
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE tmp (str String) ENGINE = Log;
+INSERT INTO tmp (*) VALUES ('a');
+SELECT count(DISTINCT t) FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID());
+```
+
+Result:
+
+``` text
+┌─count()─┐
+│       3 │
+└─────────┘
+```
+
+## initialQueryID {#initial-query-id}
+
+Returns the ID of the initial (parent) query. Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `initial_query_id`.
+
+In contrast to the [queryID](#query-id) function, `initialQueryID` returns the same results on different shards (see the example).
+
+**Syntax**
+
+``` sql
+initialQueryID()
+```
+
+**Returned value**
+
+- The ID of the initial (parent) query.
+
+Type: [String](../../sql-reference/data-types/string.md)
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE tmp (str String) ENGINE = Log;
+INSERT INTO tmp (*) VALUES ('a');
+SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID());
+```
+
+Result:
+
+``` text
+┌─count()─┐
+│       1 │
+└─────────┘
+```
diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md
index 07a13fc23c4..429241ebf13 100644
--- a/docs/en/sql-reference/statements/alter/projection.md
+++ b/docs/en/sql-reference/statements/alter/projection.md
@@ -5,7 +5,7 @@ toc_title: PROJECTION
 
 # Manipulating Projections {#manipulations-with-projections}
 
-The following operations are available:
+The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:
 
 - `ALTER TABLE [db].name ADD PROJECTION name AS SELECT [GROUP BY] [ORDER BY]` - Adds projection description to tables metadata.
 
@@ -15,7 +15,7 @@ The following operations are available:
 
 - `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description.
 
-The commands ADD, DROP and CLEAR are lightweight in a sense that they only change metadata or remove files.
+The commands `ADD`, `DROP` and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
 
 Also, they are replicated, syncing projections metadata via ZooKeeper.
 
diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md
index c20981b6bbf..d09ff24efcd 100644
--- a/docs/en/sql-reference/statements/create/table.md
+++ b/docs/en/sql-reference/statements/create/table.md
@@ -254,6 +254,7 @@ CREATE TABLE codec_example
 ENGINE = MergeTree()
 ```
 
+
 ## Temporary Tables {#temporary-tables}
 
 ClickHouse supports temporary tables which have the following characteristics:
diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
index 7e517be2d66..e8152441101 100644
--- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
@@ -377,23 +377,33 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
 - `s != 1`
 - `NOT startsWith(s, 'test')`
 
-### Проекции {#projections}
-Проекции похожи на материализованные представления, но определяются на уровне партов. Это обеспечивает гарантии согласованности наряду с автоматическим использованием в запросах.
+## Проекции {#projections} +Проекции похожи на [материализованные представления](../../../sql-reference/statements/create/view.md#materialized), но определяются на уровне кусков данных. Это обеспечивает гарантии согласованности данных наряду с автоматическим использованием в запросах. -#### Запрос {#projection-query} -Запрос проекции — это то, что определяет проекцию. Он имеет следующую грамматику: +Проекции — это экспериментальная возможность. Чтобы включить поддержку проекций, установите настройку [allow_experimental_projection_optimization](../../../operations/settings/settings.md#allow-experimental-projection-optimization) в значение `1`. См. также настройку [force_optimize_projection ](../../../operations/settings/settings.md#force-optimize-projection). -`SELECT [GROUP BY] [ORDER BY]` +Проекции не поддерживаются для запросов `SELECT` с модификатором [FINAL](../../../sql-reference/statements/select/from.md#select-from-final). -Он неявно выбирает данные из родительской таблицы. +### Запрос проекции {#projection-query} +Запрос проекции — это то, что определяет проекцию. Такой запрос неявно выбирает данные из родительской таблицы. +**Синтаксис** -#### Хранение {#projection-storage} -Проекции хранятся в каталоге парта. Это похоже на хранение индексов, но используется подкаталог, в котором хранится анонимный парт таблицы MergeTree. Таблица создается запросом определения проекции. Если есть конструкция GROUP BY, то базовый механизм хранения становится AggregatedMergeTree, а все агрегатные функции преобразуются в AggregateFunction. Если есть конструкция ORDER BY, таблица MergeTree будет использовать его в качестве выражения первичного ключа. Во время процесса слияния парт проекции будет слит с помощью процедуры слияния ее хранилища. Контрольная сумма парта родительской таблицы будет включать парт проекции. Другие процедуры аналогичны индексам пропуска данных. +```sql +SELECT [GROUP BY] [ORDER BY] +``` -#### Анализ запросов {#projection-query-analysis} -1. Проверить, можно ли использовать проекцию в данном запросе, то есть, что с ней выходит тот же результат, что и с запросом к базовой таблице. -2. Выбрать наиболее подходящее совпадение, содержащее наименьшее количество гранул для чтения. -3. План запроса, который использует проекции, будет отличаться от того, который использует исходные парты. При отсутствии проекции в некоторых партах можно расширить план, чтобы «проецировать» на лету. +Проекции можно изменить или удалить с помощью запроса [ALTER](../../../sql-reference/statements/alter/projection.md). + +### Хранение проекции {#projection-storage} +Проекции хранятся в каталоге куска данных. Это похоже на хранение индексов, но используется подкаталог, в котором хранится анонимный кусок таблицы `MergeTree`. Таблица создается запросом определения проекции. +Если присутствует секция `GROUP BY`, то используется движок [AggregatingMergeTree](aggregatingmergetree.md), а все агрегатные функции преобразуются в `AggregateFunction`. +Если присутствует секция `ORDER BY`, таблица `MergeTree` использует ее в качестве выражения для первичного ключа. +Во время процесса слияния кусок данных проекции объединяется с помощью процедуры слияния хранилища. Контрольная сумма куска данных родительской таблицы включает кусок данных проекции. Другие процедуры аналогичны индексам пропуска данных. + +### Анализ запросов {#projection-query-analysis} +1. Проверьте, можно ли использовать проекцию в данном запросе, то есть, что с ней получается тот же результат, что и с запросом к базовой таблице. +2. 
Выберите наиболее подходящее совпадение, содержащее наименьшее количество гранул для чтения. +3. План запроса, который использует проекции, отличается от того, который использует исходные куски данных. Если в некоторых кусках проекции отсутствуют, можно расширить план, чтобы «проецировать» на лету. ## Конкурентный доступ к данным {#concurrent-data-access} diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 277b73a6d36..bbb66b70371 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -26,7 +26,7 @@ Connected to ClickHouse server version 20.13.1 revision 54442. Клиент может быть использован в интерактивном и не интерактивном (batch) режиме. Чтобы использовать batch режим, укажите параметр query, или отправьте данные в stdin (проверяется, что stdin - не терминал), или и то, и другое. -Аналогично HTTP интерфейсу, при использовании одновременно параметра query и отправке данных в stdin, запрос составляется из конкатенации параметра query, перевода строки, и данных в stdin. Это удобно для больших INSERT запросов. +Аналогично HTTP интерфейсу, при использовании одновременно параметра query и отправке данных в stdin, запрос составляется из конкатенации параметра query, перевода строки и данных в stdin. Это удобно для больших `INSERT` запросов. Примеры использования клиента для вставки данных: @@ -41,17 +41,17 @@ _EOF $ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; ``` -В batch режиме в качестве формата данных по умолчанию используется формат TabSeparated. Формат может быть указан в секции FORMAT запроса. +В batch режиме в качестве формата данных по умолчанию используется формат `TabSeparated`. Формат может быть указан в запросе в секции `FORMAT`. -По умолчанию, в batch режиме вы можете выполнить только один запрос. Чтобы выполнить несколько запросов из «скрипта», используйте параметр –multiquery. Это работает для всех запросов кроме INSERT. Результаты запросов выводятся подряд без дополнительных разделителей. -Также, при необходимости выполнить много запросов, вы можете запускать clickhouse-client на каждый запрос. Заметим, что запуск программы clickhouse-client может занимать десятки миллисекунд. +По умолчанию в batch режиме вы можете выполнить только один запрос. Чтобы выполнить несколько запросов из «скрипта», используйте параметр `–-multiquery`. Это работает для всех запросов кроме `INSERT`. Результаты запросов выводятся подряд без дополнительных разделителей. +Если нужно выполнить много запросов, вы можете запускать clickhouse-client отдельно на каждый запрос. Заметим, что запуск программы clickhouse-client может занимать десятки миллисекунд. -В интерактивном режиме, вы получите командную строку, в которую можно вводить запросы. +В интерактивном режиме вы получаете командную строку, в которую можно вводить запросы. Если не указано multiline (по умолчанию): -Чтобы выполнить запрос, нажмите Enter. Точка с запятой на конце запроса не обязательна. Чтобы ввести запрос, состоящий из нескольких строк, перед переводом строки, введите символ обратного слеша: `\` - тогда после нажатия Enter, вам предложат ввести следующую строку запроса. +Чтобы выполнить запрос, нажмите Enter. Точка с запятой на конце запроса необязательна. Чтобы ввести запрос, состоящий из нескольких строк, в конце строки поставьте символ обратного слеша `\`, тогда после нажатия Enter вы сможете ввести следующую строку запроса. 
-Если указано multiline (многострочный режим): +Если указан параметр `--multiline` (многострочный режим): Чтобы выполнить запрос, завершите его точкой с запятой и нажмите Enter. Если в конце введённой строки не было точки с запятой, то вам предложат ввести следующую строчку запроса. Исполняется только один запрос, поэтому всё, что введено после точки с запятой, игнорируется. @@ -61,20 +61,20 @@ $ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FOR Командная строка сделана на основе readline (и history) (или libedit, или без какой-либо библиотеки, в зависимости от сборки) - то есть, в ней работают привычные сочетания клавиш, а также присутствует история. История пишется в `~/.clickhouse-client-history`. -По умолчанию, в качестве формата, используется формат PrettyCompact (красивые таблички). Вы можете изменить формат с помощью секции FORMAT запроса, или с помощью указания `\G` на конце запроса, с помощью аргумента командной строки `--format` или `--vertical`, или с помощью конфигурационного файла клиента. +По умолчанию используется формат вывода `PrettyCompact` (он поддерживает красивый вывод таблиц). Вы можете изменить формат вывода результатов запроса следующими способами: с помощью секции `FORMAT` в запросе, указав символ `\G` в конце запроса, используя аргументы командной строки `--format` или `--vertical` или с помощью конфигурационного файла клиента. -Чтобы выйти из клиента, нажмите Ctrl+D, или наберите вместо запроса одно из: «exit», «quit», «logout», «учше», «йгше», «дщпщге», «exit;», «quit;», «logout;», «учшеж», «йгшеж», «дщпщгеж», «q», «й», «q», «Q», «:q», «й», «Й», «Жй» +Чтобы выйти из клиента, нажмите Ctrl+D или наберите вместо запроса одно из: «exit», «quit», «logout», «учше», «йгше», «дщпщге», «exit;», «quit;», «logout;», «учшеж», «йгшеж», «дщпщгеж», «q», «й», «q», «Q», «:q», «й», «Й», «Жй». -При выполнении запроса, клиент показывает: +При выполнении запроса клиент показывает: -1. Прогресс выполнение запроса, который обновляется не чаще, чем 10 раз в секунду (по умолчанию). При быстрых запросах, прогресс может не успеть отобразиться. +1. Прогресс выполнение запроса, который обновляется не чаще, чем 10 раз в секунду (по умолчанию). При быстрых запросах прогресс может не успеть отобразиться. 2. Отформатированный запрос после его парсинга - для отладки. 3. Результат в заданном формате. 4. Количество строк результата, прошедшее время, а также среднюю скорость выполнения запроса. -Вы можете прервать длинный запрос, нажав Ctrl+C. При этом вам всё равно придётся чуть-чуть подождать, пока сервер остановит запрос. На некоторых стадиях выполнения, запрос невозможно прервать. Если вы не дождётесь и нажмёте Ctrl+C второй раз, то клиент будет завершён. +Вы можете прервать длинный запрос, нажав Ctrl+C. При этом вам всё равно придётся чуть-чуть подождать, пока сервер остановит запрос. На некоторых стадиях выполнения запрос невозможно прервать. Если вы не дождётесь и нажмёте Ctrl+C второй раз, то клиент будет завершён. -Клиент командной строки позволяет передать внешние данные (внешние временные таблицы) для использования запроса. Подробнее смотрите раздел «Внешние данные для обработки запроса» +Клиент командной строки позволяет передать внешние данные (внешние временные таблицы) для выполнения запроса. Подробнее смотрите раздел «Внешние данные для обработки запроса». 
### Запросы с параметрами {#cli-queries-with-parameters} @@ -84,7 +84,7 @@ $ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FOR clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}" ``` -#### Cинтаксис запроса {#cli-queries-with-parameters-syntax} +#### Синтаксис запроса {#cli-queries-with-parameters-syntax} Отформатируйте запрос обычным способом. Представьте значения, которые вы хотите передать из параметров приложения в запрос в следующем формате: @@ -155,3 +155,29 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe ``` +### Формат ID запроса {#query-id-format} + +В интерактивном режиме `clickhouse-client` показывает ID для каждого запроса. По умолчанию ID выводится в таком виде: + +```sql +Query id: 927f137d-00f1-4175-8914-0dd066365e96 +``` + +Произвольный формат ID можно задать в конфигурационном файле внутри тега `query_id_formats`. ID подставляется вместо `{query_id}` в строке формата. В теге может быть перечислено несколько строк формата. +Эта возможность может быть полезна для генерации URL, с помощью которых выполняется профилирование запросов. + +**Пример** + +```xml + + + http://speedscope-host/#profileURL=qp%3Fid%3D{query_id} + + +``` + +Если применить приведённую выше конфигурацию, то ID запроса будет выводиться в следующем виде: + +``` text +speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d +``` diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 31cd3a15c86..aac9c30658c 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -3252,3 +3252,25 @@ SETTINGS index_granularity = 8192 │ - 1 — таблица обновляется автоматически в фоновом режиме при обнаружении изменений схемы. Значение по умолчанию: `0`. + +## allow_experimental_projection_optimization {#allow-experimental-projection-optimization} + +Включает или отключает поддержку [проекций](../../engines/table-engines/mergetree-family/mergetree.md#projections) при обработке запросов `SELECT`. + +Возможные значения: + +- 0 — Проекции не поддерживаются. +- 1 — Проекции поддерживаются. + +Значение по умолчанию: `0`. + +## force_optimize_projection {#force-optimize-projection} + +Включает или отключает обязательное использование [проекций](../../engines/table-engines/mergetree-family/mergetree.md#projections) в запросах `SELECT`, если поддержка проекций включена (см. настройку [allow_experimental_projection_optimization](#allow-experimental-projection-optimization)). + +Возможные значения: + +- 0 — Проекции используются опционально. +- 1 — Проекции обязательно используются. + +Значение по умолчанию: `0`. \ No newline at end of file diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index c023786b788..5aae0eee9f9 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -2185,3 +2185,75 @@ defaultRoles() - Список ролей по умолчанию. Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). + +## queryID {#query-id} + +Возвращает идентификатор текущего запроса. Другие параметры запроса могут быть извлечены из системной таблицы [system.query_log](../../operations/system-tables/query_log.md) через `query_id`. 
+ +В отличие от [initialQueryID](#initial-query-id), функция `queryID` может возвращать различные значения для разных шардов (см. пример). + +**Синтаксис** + +``` sql +queryID() +``` + +**Возвращаемое значение** + +- Идентификатор текущего запроса. + +Тип: [String](../../sql-reference/data-types/string.md) + +**Пример** + +Запрос: + +``` sql +CREATE TABLE tmp (str String) ENGINE = Log; +INSERT INTO tmp (*) VALUES ('a'); +SELECT count(DISTINCT t) FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); +``` + +Результат: + +``` text +┌─count()─┐ +│ 3 │ +└─────────┘ +``` + +## initialQueryID {#initial-query-id} + +Возвращает идентификатор родительского запроса. Другие параметры запроса могут быть извлечены из системной таблицы [system.query_log](../../operations/system-tables/query_log.md) через `initial_query_id`. + +В отличие от [queryID](#query-id), функция `initialQueryID` возвращает одинаковые значения для разных шардов (см. пример). + +**Синтаксис** + +``` sql +initialQueryID() +``` + +**Возвращаемое значение** + +- Идентификатор родительского запроса. + +Тип: [String](../../sql-reference/data-types/string.md) + +**Пример** + +Запрос: + +``` sql +CREATE TABLE tmp (str String) ENGINE = Log; +INSERT INTO tmp (*) VALUES ('a'); +SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); +``` + +Результат: + +``` text +┌─count()─┐ +│ 1 │ +└─────────┘ +``` diff --git a/docs/ru/sql-reference/statements/alter/projection.md b/docs/ru/sql-reference/statements/alter/projection.md index db116963aa6..4b0d7f7865b 100644 --- a/docs/ru/sql-reference/statements/alter/projection.md +++ b/docs/ru/sql-reference/statements/alter/projection.md @@ -5,7 +5,7 @@ toc_title: PROJECTION # Манипуляции с проекциями {#manipulations-with-projections} -Доступны следующие операции: +Доступны следующие операции с [проекциями](../../../engines/table-engines/mergetree-family/mergetree.md#projections): - `ALTER TABLE [db].name ADD PROJECTION name AS SELECT [GROUP BY] [ORDER BY]` — добавляет описание проекции в метаданные. @@ -15,7 +15,7 @@ toc_title: PROJECTION - `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` — удаляет файлы проекции с диска без удаления описания. -Комманды ADD, DROP и CLEAR — легковесны, поскольку они только меняют метаданные или удаляют файлы. +Команды `ADD`, `DROP` и `CLEAR` — легковесны, поскольку они только меняют метаданные или удаляют файлы. Также команды реплицируются, синхронизируя описания проекций в метаданных с помощью ZooKeeper. 
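As a compact, hypothetical illustration of the `ALTER ... PROJECTION` commands listed in the English and Russian pages above (the table and projection names are invented, and `ADD` follows the grammar quoted in those pages):

```sql
-- Add a projection description to the table's metadata; existing parts are not rewritten.
ALTER TABLE page_visits ADD PROJECTION daily_visits AS SELECT visit_date, count() GROUP BY visit_date;

-- Delete the projection files of one partition from disk, keeping the description.
ALTER TABLE page_visits CLEAR PROJECTION daily_visits IN PARTITION 202108;

-- Remove the projection description entirely.
ALTER TABLE page_visits DROP PROJECTION daily_visits;
```

On replicated tables these commands are themselves replicated, syncing the projection metadata via ZooKeeper, as both pages note.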
diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index ca7fd98b77e..65e245750b3 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -62,7 +62,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp
index 4d01a523853..759feffb90e 100644
--- a/programs/keeper/Keeper.cpp
+++ b/programs/keeper/Keeper.cpp
@@ -300,9 +300,9 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
     if (config().has("keeper_server.storage_path"))
         path = config().getString("keeper_server.storage_path");
     else if (config().has("keeper_server.log_storage_path"))
-        path = config().getString("keeper_server.log_storage_path");
+        path = std::filesystem::path(config().getString("keeper_server.log_storage_path")).parent_path();
     else if (config().has("keeper_server.snapshot_storage_path"))
-        path = config().getString("keeper_server.snapshot_storage_path");
+        path = std::filesystem::path(config().getString("keeper_server.snapshot_storage_path")).parent_path();
     else
         path = std::filesystem::path{KEEPER_DEFAULT_PATH};
 
@@ -359,7 +359,7 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
     auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
 
     /// Initialize test keeper RAFT. Do nothing if no nu_keeper_server in config.
-    global_context->initializeKeeperStorageDispatcher();
+    global_context->initializeKeeperDispatcher();
     for (const auto & listen_host : listen_hosts)
     {
         /// TCP Keeper
@@ -428,7 +428,7 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
     else
         LOG_INFO(log, "Closed connections to Keeper.");
 
-    global_context->shutdownKeeperStorageDispatcher();
+    global_context->shutdownKeeperDispatcher();
 
     /// Wait server pool to avoid use-after-free of destroyed context in the handlers
     server_pool.joinAll();
diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp
index b4b514d1473..8a4387c2389 100644
--- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp
+++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp
@@ -5,40 +5,16 @@
 #include
 #include
 #include
-#include
-#include
-#include
 #include "getIdentifierQuote.h"
 #include
 #include
 #include
+#include
 
 
 namespace DB
 {
 
-namespace
-{
-    using ValueType = ExternalResultDescription::ValueType;
-
-    std::string getInsertQuery(const std::string & db_name, const std::string & table_name, const ColumnsWithTypeAndName & columns, IdentifierQuotingStyle quoting)
-    {
-        ASTInsertQuery query;
-        query.table_id.database_name = db_name;
-        query.table_id.table_name = table_name;
-        query.columns = std::make_shared<ASTExpressionList>(',');
-        query.children.push_back(query.columns);
-        for (const auto & column : columns)
-            query.columns->children.emplace_back(std::make_shared<ASTIdentifier>(column.name));
-
-        WriteBufferFromOwnString buf;
-        IAST::FormatSettings settings(buf, true);
-        settings.always_quote_identifiers = true;
-        settings.identifier_quoting_style = quoting;
-        query.IAST::format(settings);
-        return buf.str();
-    }
-}
 
 ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::ConnectionHolderPtr connection_holder_,
     const std::string & remote_database_name_,
diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.h b/programs/odbc-bridge/ODBCBlockOutputStream.h
index 1b42119e490..16a1602d3cd 100644
--- a/programs/odbc-bridge/ODBCBlockOutputStream.h
+++ b/programs/odbc-bridge/ODBCBlockOutputStream.h
@@ -13,6 +13,7 @@ namespace DB
 
 class ODBCBlockOutputStream : public IBlockOutputStream
 {
+using ValueType = ExternalResultDescription::ValueType;
 
 public:
     ODBCBlockOutputStream(
diff --git 
a/programs/server/Server.cpp b/programs/server/Server.cpp index 8102675d102..b7ef8cbec9c 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -996,7 +996,7 @@ if (ThreadFuzzer::instance().isEffective()) { #if USE_NURAFT /// Initialize test keeper RAFT. Do nothing if no nu_keeper_server in config. - global_context->initializeKeeperStorageDispatcher(); + global_context->initializeKeeperDispatcher(); for (const auto & listen_host : listen_hosts) { /// TCP Keeper @@ -1079,7 +1079,7 @@ if (ThreadFuzzer::instance().isEffective()) else LOG_INFO(log, "Closed connections to servers for tables."); - global_context->shutdownKeeperStorageDispatcher(); + global_context->shutdownKeeperDispatcher(); } /// Wait server pool to avoid use-after-free of destroyed context in the handlers diff --git a/release b/release index de549595d43..6e6970d7b00 100755 --- a/release +++ b/release @@ -60,9 +60,6 @@ then elif [[ "$SANITIZER" == "thread" ]]; then VERSION_POSTFIX+="+tsan" elif [[ "$SANITIZER" == "memory" ]]; then VERSION_POSTFIX+="+msan" elif [[ "$SANITIZER" == "undefined" ]]; then VERSION_POSTFIX+="+ubsan" - elif [[ "$SANITIZER" == "libfuzzer" ]]; then - VERSION_POSTFIX+="+libfuzzer" - MALLOC_OPTS="-DENABLE_TCMALLOC=0 -DENABLE_JEMALLOC=0" else echo "Unknown value of SANITIZER variable: $SANITIZER" exit 3 diff --git a/src/Common/TimerDescriptor.cpp b/src/Common/TimerDescriptor.cpp index 793f7ed1352..086d462eeb2 100644 --- a/src/Common/TimerDescriptor.cpp +++ b/src/Common/TimerDescriptor.cpp @@ -74,17 +74,24 @@ void TimerDescriptor::drain() const } } -void TimerDescriptor::setRelative(Poco::Timespan timespan) const +void TimerDescriptor::setRelative(uint64_t usec) const { + static constexpr uint32_t TIMER_PRECISION = 1e6; + itimerspec spec; spec.it_interval.tv_nsec = 0; spec.it_interval.tv_sec = 0; - spec.it_value.tv_sec = timespan.totalSeconds(); - spec.it_value.tv_nsec = timespan.useconds() * 1000; + spec.it_value.tv_sec = usec / TIMER_PRECISION; + spec.it_value.tv_nsec = (usec % TIMER_PRECISION) * 1'000; if (-1 == timerfd_settime(timer_fd, 0 /*relative timer */, &spec, nullptr)) throwFromErrno("Cannot set time for timer_fd", ErrorCodes::CANNOT_SET_TIMER_PERIOD); } +void TimerDescriptor::setRelative(Poco::Timespan timespan) const +{ + setRelative(timespan.totalMicroseconds()); +} + } #endif diff --git a/src/Common/TimerDescriptor.h b/src/Common/TimerDescriptor.h index 30a610f37f0..8ca69344b53 100644 --- a/src/Common/TimerDescriptor.h +++ b/src/Common/TimerDescriptor.h @@ -24,6 +24,7 @@ public: void reset() const; void drain() const; + void setRelative(uint64_t usec) const; void setRelative(Poco::Timespan timespan) const; }; diff --git a/src/Compression/CompressedReadBufferBase.cpp b/src/Compression/CompressedReadBufferBase.cpp index 749f174677c..006b3fab2d8 100644 --- a/src/Compression/CompressedReadBufferBase.cpp +++ b/src/Compression/CompressedReadBufferBase.cpp @@ -253,4 +253,3 @@ CompressedReadBufferBase::~CompressedReadBufferBase() = default; /// Proper d } - diff --git a/src/Compression/CompressionCodecDelta.cpp b/src/Compression/CompressionCodecDelta.cpp index e281609ff43..e8c5b4f878d 100644 --- a/src/Compression/CompressionCodecDelta.cpp +++ b/src/Compression/CompressionCodecDelta.cpp @@ -82,8 +82,10 @@ void compressDataForType(const char * source, UInt32 source_size, char * dest) } template -void decompressDataForType(const char * source, UInt32 source_size, char * dest) +void decompressDataForType(const char * source, UInt32 source_size, char * dest, UInt32 output_size) { + 
const char * output_end = dest + output_size;
+
     if (source_size % sizeof(T) != 0)
         throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot delta decompress, data size {} is not aligned to {}", source_size, sizeof(T));
 
@@ -92,6 +94,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
     while (source < source_end)
     {
         accumulator += unalignedLoad<T>(source);
+        if (dest + sizeof(accumulator) > output_end)
+            throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
         unalignedStore<T>(dest, accumulator);
 
         source += sizeof(T);
@@ -137,6 +141,7 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_
         throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
 
     UInt8 bytes_to_skip = uncompressed_size % bytes_size;
+    UInt32 output_size = uncompressed_size - bytes_to_skip;
 
     if (UInt32(2 + bytes_to_skip) > source_size)
         throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
@@ -146,16 +151,16 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_
     switch (bytes_size)
     {
     case 1:
-        decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
         break;
     case 2:
-        decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
        break;
     case 4:
-        decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
        break;
     case 8:
-        decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
        break;
     }
 }
@@ -209,4 +214,10 @@ void registerCodecDelta(CompressionCodecFactory & factory)
         return std::make_shared<CompressionCodecDelta>(delta_bytes_size);
     });
 }
+
+CompressionCodecPtr getCompressionCodecDelta(UInt8 delta_bytes_size)
+{
+    return std::make_shared<CompressionCodecDelta>(delta_bytes_size);
+}
+
 }
diff --git a/src/Compression/CompressionCodecDoubleDelta.cpp b/src/Compression/CompressionCodecDoubleDelta.cpp
index c416582eb6b..3f96cfa8ff8 100644
--- a/src/Compression/CompressionCodecDoubleDelta.cpp
+++ b/src/Compression/CompressionCodecDoubleDelta.cpp
@@ -353,12 +353,13 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
 }
 
 template <typename ValueType>
-void decompressDataForType(const char * source, UInt32 source_size, char * dest)
+void decompressDataForType(const char * source, UInt32 source_size, char * dest, UInt32 output_size)
 {
     static_assert(is_unsigned_v<ValueType>, "ValueType must be unsigned.");
     using UnsignedDeltaType = ValueType;
 
     const char * source_end = source + source_size;
+    const char * output_end = dest + output_size;
 
     if (source + sizeof(UInt32) > source_end)
         return;
@@ -374,6 +375,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
         return;
 
     prev_value = unalignedLoad<ValueType>(source);
+    if (dest + sizeof(prev_value) > output_end)
+        throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
     unalignedStore<ValueType>(dest, prev_value);
 
     source += sizeof(prev_value);
@@ -385,6 +388,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
 
     prev_delta = unalignedLoad<UnsignedDeltaType>(source);
     prev_value = prev_value + static_cast<ValueType>(prev_delta);
+    if (dest + sizeof(prev_value) > output_end)
+        throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
     unalignedStore<ValueType>(dest, prev_value);
 
     source += sizeof(prev_delta);
@@ -416,6 +421,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
         const UnsignedDeltaType delta = double_delta + prev_delta;
         const ValueType curr_value = prev_value + delta;
+        if (dest + sizeof(curr_value) > output_end)
+            throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
         unalignedStore<ValueType>(dest, curr_value);
         dest += sizeof(curr_value);
 
@@ -507,6 +514,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
         throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
 
     UInt8 bytes_to_skip = uncompressed_size % bytes_size;
+    UInt32 output_size = uncompressed_size - bytes_to_skip;
 
     if (UInt32(2 + bytes_to_skip) > source_size)
         throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
@@ -516,16 +524,16 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
     switch (bytes_size)
     {
     case 1:
-        decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
        break;
     case 2:
-        decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
        break;
     case 4:
-        decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
        break;
     case 8:
-        decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
+        decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
        break;
     }
 }
@@ -543,4 +551,10 @@ void registerCodecDoubleDelta(CompressionCodecFactory & factory)
         return std::make_shared<CompressionCodecDoubleDelta>(data_bytes_size);
     });
 }
+
+CompressionCodecPtr getCompressionCodecDoubleDelta(UInt8 data_bytes_size)
+{
+    return std::make_shared<CompressionCodecDoubleDelta>(data_bytes_size);
+}
+
 }
diff --git a/src/Compression/CompressionCodecEncrypted.h b/src/Compression/CompressionCodecEncrypted.h
index bacd58bcd2f..cef9b2e6072 100644
--- a/src/Compression/CompressionCodecEncrypted.h
+++ b/src/Compression/CompressionCodecEncrypted.h
@@ -51,7 +51,7 @@ namespace DB
          */
         static void setMasterKey(const std::string_view & master_key);
 
-        CompressionCodecEncrypted(const std::string_view & cipher);
+        explicit CompressionCodecEncrypted(const std::string_view & cipher);
 
         uint8_t getMethodByte() const override;
         void updateHash(SipHash & hash) const override;
@@ -88,7 +88,7 @@
          */
         struct KeyHolder : private boost::noncopyable
         {
-            KeyHolder(const std::string_view & master_key);
+            explicit KeyHolder(const std::string_view & master_key);
             ~KeyHolder();
 
             std::string keygen_key;
@@ -99,6 +99,11 @@
 
         static inline std::optional<KeyHolder> keys;
     };
+
+    inline CompressionCodecPtr getCompressionCodecEncrypted(const std::string_view & master_key)
+    {
+        return std::make_shared<CompressionCodecEncrypted>(master_key);
+    }
 }
 
 #endif /* USE_SSL && USE_INTERNAL_SSL_LIBRARY */
diff --git a/src/Compression/CompressionCodecLZ4.cpp b/src/Compression/CompressionCodecLZ4.cpp
index 
396f6fad2c3..12f138dc95a 100644 --- a/src/Compression/CompressionCodecLZ4.cpp +++ b/src/Compression/CompressionCodecLZ4.cpp @@ -147,4 +147,10 @@ CompressionCodecLZ4HC::CompressionCodecLZ4HC(int level_) setCodecDescription("LZ4HC", {std::make_shared(static_cast(level))}); } + +CompressionCodecPtr getCompressionCodecLZ4(int level) +{ + return std::make_shared(level); +} + } diff --git a/src/Compression/CompressionCodecMultiple.h b/src/Compression/CompressionCodecMultiple.h index 1eb61842048..1d63fe1da55 100644 --- a/src/Compression/CompressionCodecMultiple.h +++ b/src/Compression/CompressionCodecMultiple.h @@ -9,7 +9,7 @@ class CompressionCodecMultiple final : public ICompressionCodec { public: CompressionCodecMultiple() = default; /// Need for CompressionFactory to register codec by method byte. - CompressionCodecMultiple(Codecs codecs_); + explicit CompressionCodecMultiple(Codecs codecs_); uint8_t getMethodByte() const override; diff --git a/src/Compression/CompressionCodecZSTD.cpp b/src/Compression/CompressionCodecZSTD.cpp index a950f1fee74..def2188d90a 100644 --- a/src/Compression/CompressionCodecZSTD.cpp +++ b/src/Compression/CompressionCodecZSTD.cpp @@ -156,4 +156,9 @@ void registerCodecZSTD(CompressionCodecFactory & factory) }); } +CompressionCodecPtr getCompressionCodecZSTD(int level) +{ + return std::make_shared(level); +} + } diff --git a/src/Compression/ICompressionCodec.h b/src/Compression/ICompressionCodec.h index c49c16d8bad..3f603087f2b 100644 --- a/src/Compression/ICompressionCodec.h +++ b/src/Compression/ICompressionCodec.h @@ -18,6 +18,8 @@ using Codecs = std::vector; class IDataType; +extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size); + /** * Represents interface for compression codecs like LZ4, ZSTD, etc. */ @@ -84,6 +86,8 @@ public: virtual bool isNone() const { return false; } protected: + /// This is used for fuzz testing + friend int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size); /// Return size of compressed data without header virtual UInt32 getMaxCompressedDataSize(UInt32 uncompressed_size) const { return uncompressed_size; } diff --git a/src/Compression/LZ4_decompress_faster.cpp b/src/Compression/LZ4_decompress_faster.cpp index 28a285f00f4..1aeae6b1b9a 100644 --- a/src/Compression/LZ4_decompress_faster.cpp +++ b/src/Compression/LZ4_decompress_faster.cpp @@ -450,7 +450,11 @@ bool NO_INLINE decompressImpl( const unsigned token = *ip++; length = token >> 4; if (length == 0x0F) + { + if (unlikely(ip + 1 >= input_end)) + return false; continue_read_length(); + } /// Copy literals. @@ -470,6 +474,20 @@ bool NO_INLINE decompressImpl( if (unlikely(copy_end > output_end)) return false; + // Due to implementation specifics the copy length is always a multiple of copy_amount + size_t real_length = 0; + + static_assert(copy_amount == 8 || copy_amount == 16 || copy_amount == 32); + if constexpr (copy_amount == 8) + real_length = (((length >> 3) + 1) * 8); + else if constexpr (copy_amount == 16) + real_length = (((length >> 4) + 1) * 16); + else if constexpr (copy_amount == 32) + real_length = (((length >> 5) + 1) * 32); + + if (unlikely(ip + real_length >= input_end + ADDITIONAL_BYTES_AT_END_OF_BUFFER)) + return false; + wildCopy(op, ip, copy_end); /// Here we can write up to copy_amount - 1 bytes after buffer. 
if (copy_end == output_end) @@ -494,7 +512,11 @@ bool NO_INLINE decompressImpl( length = token & 0x0F; if (length == 0x0F) + { + if (unlikely(ip + 1 >= input_end)) + return false; continue_read_length(); + } length += 4; /// Copy match within block, that produce overlapping pattern. Match may replicate itself. diff --git a/src/Compression/fuzzers/CMakeLists.txt b/src/Compression/fuzzers/CMakeLists.txt index 74bf2d2649b..189aea66a92 100644 --- a/src/Compression/fuzzers/CMakeLists.txt +++ b/src/Compression/fuzzers/CMakeLists.txt @@ -1,2 +1,20 @@ +# Our code has strong cohesion and target associated with `Compression` also depends on `DataTypes`. +# But we can exclude some files which have dependencies in case of +# fuzzer related build (we are interested in fuzzing only particular part of our code). +# So, some symbols will be declared, but not defined. Unfortunately, this trick doesn't work with UBSan. +# If you want really small size of the resulted binary, just link with fuzz_compression and clickhouse_common_io + add_executable (compressed_buffer_fuzzer compressed_buffer_fuzzer.cpp) -target_link_libraries (compressed_buffer_fuzzer PRIVATE fuzz_compression clickhouse_common_io ${LIB_FUZZING_ENGINE}) +target_link_libraries (compressed_buffer_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE}) + +add_executable (lz4_decompress_fuzzer lz4_decompress_fuzzer.cpp) +target_link_libraries (lz4_decompress_fuzzer PUBLIC dbms lz4 ${LIB_FUZZING_ENGINE}) + +add_executable (delta_decompress_fuzzer delta_decompress_fuzzer.cpp) +target_link_libraries (delta_decompress_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE}) + +add_executable (double_delta_decompress_fuzzer double_delta_decompress_fuzzer.cpp) +target_link_libraries (double_delta_decompress_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE}) + +add_executable (encrypted_decompress_fuzzer encrypted_decompress_fuzzer.cpp) +target_link_libraries (encrypted_decompress_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE}) diff --git a/src/Compression/fuzzers/compressed_buffer_fuzzer.cpp b/src/Compression/fuzzers/compressed_buffer_fuzzer.cpp index a87046eff5c..1f669696fb9 100644 --- a/src/Compression/fuzzers/compressed_buffer_fuzzer.cpp +++ b/src/Compression/fuzzers/compressed_buffer_fuzzer.cpp @@ -17,6 +17,5 @@ try } catch (...) { - std::cerr << DB::getCurrentExceptionMessage(true) << std::endl; return 1; } diff --git a/src/Compression/fuzzers/delta_decompress_fuzzer.cpp b/src/Compression/fuzzers/delta_decompress_fuzzer.cpp new file mode 100644 index 00000000000..b039777da15 --- /dev/null +++ b/src/Compression/fuzzers/delta_decompress_fuzzer.cpp @@ -0,0 +1,44 @@ +#include +#include + +#include +#include + +namespace DB +{ + CompressionCodecPtr getCompressionCodecDelta(UInt8 delta_bytes_size); +} + +struct AuxiliaryRandomData +{ + UInt8 delta_size_bytes; + size_t decompressed_size; +}; + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) +try +{ + if (size < sizeof(AuxiliaryRandomData)) + return 0; + + const auto * p = reinterpret_cast(data); + auto codec = DB::getCompressionCodecDelta(p->delta_size_bytes); + + size_t output_buffer_size = p->decompressed_size % 65536; + size -= sizeof(AuxiliaryRandomData); + data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t); + + // std::string input = std::string(reinterpret_cast(data), size); + // fmt::print(stderr, "Using input {} of size {}, output size is {}. 
\n", input, size, output_buffer_size); + + DB::Memory<> memory; + memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer()); + + codec->doDecompressData(reinterpret_cast(data), size, memory.data(), output_buffer_size); + + return 0; +} +catch (...) +{ + return 1; +} diff --git a/src/Compression/fuzzers/double_delta_decompress_fuzzer.cpp b/src/Compression/fuzzers/double_delta_decompress_fuzzer.cpp new file mode 100644 index 00000000000..f9822daa3bd --- /dev/null +++ b/src/Compression/fuzzers/double_delta_decompress_fuzzer.cpp @@ -0,0 +1,44 @@ +#include +#include + +#include +#include + +namespace DB +{ + CompressionCodecPtr getCompressionCodecDoubleDelta(UInt8 data_bytes_size); +} + +struct AuxiliaryRandomData +{ + UInt8 data_bytes_size; + size_t decompressed_size; +}; + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) +try +{ + if (size < sizeof(AuxiliaryRandomData)) + return 0; + + const auto * p = reinterpret_cast(data); + auto codec = DB::getCompressionCodecDoubleDelta(p->data_bytes_size); + + size_t output_buffer_size = p->decompressed_size % 65536; + size -= sizeof(AuxiliaryRandomData); + data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t); + + // std::string input = std::string(reinterpret_cast(data), size); + // fmt::print(stderr, "Using input {} of size {}, output size is {}. \n", input, size, output_buffer_size); + + DB::Memory<> memory; + memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer()); + + codec->doDecompressData(reinterpret_cast(data), size, memory.data(), output_buffer_size); + + return 0; +} +catch (...) +{ + return 1; +} diff --git a/src/Compression/fuzzers/encrypted_decompress_fuzzer.cpp b/src/Compression/fuzzers/encrypted_decompress_fuzzer.cpp new file mode 100644 index 00000000000..6211fd4b9cc --- /dev/null +++ b/src/Compression/fuzzers/encrypted_decompress_fuzzer.cpp @@ -0,0 +1,52 @@ +#include +#include + +#include +#include +#include + +namespace DB +{ + CompressionCodecPtr getCompressionCodecEncrypted(const std::string_view & master_key); +} + +constexpr size_t key_size = 20; + +struct AuxiliaryRandomData +{ + char key[key_size]; + size_t decompressed_size; +}; + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) +try +{ + if (size < sizeof(AuxiliaryRandomData)) + return 0; + + const auto * p = reinterpret_cast(data); + + std::string key = std::string(p->key, key_size); + auto codec = DB::getCompressionCodecEncrypted(key); + + size_t output_buffer_size = p->decompressed_size % 65536; + size -= sizeof(AuxiliaryRandomData); + data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t); + + std::string input = std::string(reinterpret_cast(data), size); + fmt::print(stderr, "Using input {} of size {}, output size is {}. \n", input, size, output_buffer_size); + + if (output_buffer_size < size) + return 0; + + DB::Memory<> memory; + memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer()); + + codec->doDecompressData(reinterpret_cast(data), size, memory.data(), output_buffer_size); + + return 0; +} +catch (...) 
+{ + return 1; +} diff --git a/src/Compression/fuzzers/lz4_decompress_fuzzer.cpp b/src/Compression/fuzzers/lz4_decompress_fuzzer.cpp new file mode 100644 index 00000000000..85c4c9bd329 --- /dev/null +++ b/src/Compression/fuzzers/lz4_decompress_fuzzer.cpp @@ -0,0 +1,47 @@ +#include +#include + +#include +#include +#include + +namespace DB +{ + CompressionCodecPtr getCompressionCodecLZ4(int level); +} + +struct AuxiliaryRandomData +{ + size_t level; + size_t decompressed_size; +}; + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) +try +{ + + if (size < sizeof(AuxiliaryRandomData) + LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER) + return 0; + + const auto * p = reinterpret_cast(data); + auto codec = DB::getCompressionCodecLZ4(p->level); + + size_t output_buffer_size = p->decompressed_size % 65536; + size -= sizeof(AuxiliaryRandomData); + size -= LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER; + data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t); + + // std::string input = std::string(reinterpret_cast(data), size); + // fmt::print(stderr, "Using input {} of size {}, output size is {}. \n", input, size, output_buffer_size); + + DB::Memory<> memory; + memory.resize(output_buffer_size + LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER); + + codec->doDecompressData(reinterpret_cast(data), size, memory.data(), output_buffer_size); + + return 0; +} +catch (...) +{ + return 1; +} diff --git a/src/Coordination/Changelog.cpp b/src/Coordination/Changelog.cpp index 6ec9b17d0a7..836df92ac77 100644 --- a/src/Coordination/Changelog.cpp +++ b/src/Coordination/Changelog.cpp @@ -165,10 +165,11 @@ public: while (!read_buf.eof()) { result.last_position = read_buf.count(); + /// Read checksum Checksum record_checksum; readIntBinary(record_checksum, read_buf); - /// Initialization is required, otherwise checksums may fail + /// Read header ChangelogRecord record; readIntBinary(record.header.version, read_buf); readIntBinary(record.header.index, read_buf); @@ -179,6 +180,7 @@ public: if (record.header.version > CURRENT_CHANGELOG_VERSION) throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unsupported changelog version {} on path {}", record.header.version, filepath); + /// Read data if (record.header.blob_size != 0) { auto buffer = nuraft::buffer::alloc(record.header.blob_size); @@ -189,11 +191,13 @@ public: else record.blob = nullptr; + /// Check changelog integrity if (previous_index != 0 && previous_index + 1 != record.header.index) throw Exception(ErrorCodes::CORRUPTED_DATA, "Previous log entry {}, next log entry {}, seems like some entries skipped", previous_index, record.header.index); previous_index = record.header.index; + /// Compare checksums Checksum checksum = computeRecordChecksum(record); if (checksum != record_checksum) { @@ -202,22 +206,25 @@ public: filepath, record.header.version, record.header.index, record.header.blob_size); } + /// Check for duplicated changelog ids if (logs.count(record.header.index) != 0) throw Exception(ErrorCodes::CORRUPTED_DATA, "Duplicated index id {} in log {}", record.header.index, filepath); result.entries_read += 1; + /// Read but skip this entry because our state is already more fresh if (record.header.index < start_log_index) - { continue; - } + /// Create log entry for read data auto log_entry = nuraft::cs_new(record.header.term, record.blob, record.header.value_type); if (result.first_read_index == 0) result.first_read_index = record.header.index; + /// Put it into in memory structure logs.emplace(record.header.index, log_entry); 
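/// Remember the file offset at which this record starts; the write path
/// (writeAt) presumably uses these offsets to truncate and overwrite entries.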
index_to_offset[record.header.index] = result.last_position; + if (result.entries_read % 50000 == 0) LOG_TRACE(log, "Reading changelog from path {}, entries {}", filepath, result.entries_read); } @@ -235,6 +242,7 @@ public: result.error = true; tryLogCurrentException(log); } + LOG_TRACE(log, "Totally read from changelog {} {} entries", filepath, result.entries_read); return result; @@ -255,6 +263,7 @@ Changelog::Changelog( , force_sync(force_sync_) , log(log_) { + /// Load all files in changelog directory namespace fs = std::filesystem; if (!fs::exists(changelogs_dir)) fs::create_directories(changelogs_dir); @@ -264,23 +273,35 @@ Changelog::Changelog( auto file_description = getChangelogFileDescription(p.path()); existing_changelogs[file_description.from_log_index] = file_description; } + + if (existing_changelogs.empty()) + LOG_WARNING(log, "No logs exists in {}. It's Ok if it's the first run of clickhouse-keeper.", changelogs_dir); } void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uint64_t logs_to_keep) { uint64_t total_read = 0; + + /// Amount of entries in last log index uint64_t entries_in_last = 0; - uint64_t incomplete_log_index = 0; + + /// Log idx of the first incomplete log (key in existing_changelogs) + uint64_t first_incomplete_log_start_index = 0; + ChangelogReadResult result{}; + /// First log index which was read from all changelogs uint64_t first_read_index = 0; uint64_t start_to_read_from = last_commited_log_index; + if (start_to_read_from > logs_to_keep) start_to_read_from -= logs_to_keep; else start_to_read_from = 1; + /// At least we read something bool started = false; + for (const auto & [changelog_start_index, changelog_description] : existing_changelogs) { entries_in_last = changelog_description.to_log_index - changelog_description.from_log_index + 1; @@ -292,7 +313,7 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin if (changelog_description.from_log_index > last_commited_log_index && (changelog_description.from_log_index - last_commited_log_index) > 1) { LOG_ERROR(log, "Some records was lost, last committed log index {}, smallest available log index on disk {}. Hopefully will receive missing records from leader.", last_commited_log_index, changelog_description.from_log_index); - incomplete_log_index = changelog_start_index; + first_incomplete_log_start_index = changelog_start_index; break; } else if (changelog_description.from_log_index > start_to_read_from) @@ -311,7 +332,7 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin /// May happen after truncate, crash or simply unfinished log if (result.entries_read < entries_in_last) { - incomplete_log_index = changelog_start_index; + first_incomplete_log_start_index = changelog_start_index; break; } } @@ -322,11 +343,13 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin else start_index = last_commited_log_index; - if (incomplete_log_index != 0) + /// Found some broken or non finished logs + /// We have to remove broken data and continue to write into incomplete log. + if (first_incomplete_log_start_index != 0) { auto start_remove_from = existing_changelogs.begin(); if (started) - start_remove_from = existing_changelogs.upper_bound(incomplete_log_index); + start_remove_from = existing_changelogs.upper_bound(first_incomplete_log_start_index); /// All subsequent logs shouldn't exist. But they may exist if we crashed after writeAt started. Remove them. 
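/// upper_bound keeps the incomplete log itself (we will continue writing into it)
/// and selects only the logs strictly after it for removal.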
for (auto itr = start_remove_from; itr != existing_changelogs.end();) @@ -363,6 +386,7 @@ void Changelog::rotate(uint64_t new_start_log_index) /// Flush previous log flush(); + /// Start new one ChangelogFileDescription new_description; new_description.prefix = DEFAULT_PREFIX; new_description.from_log_index = new_start_log_index; @@ -378,7 +402,7 @@ void Changelog::rotate(uint64_t new_start_log_index) ChangelogRecord Changelog::buildRecord(uint64_t index, const LogEntryPtr & log_entry) { ChangelogRecord record; - record.header.version = ChangelogVersion::V0; + record.header.version = ChangelogVersion::V1; record.header.index = index; record.header.term = log_entry->get_term(); record.header.value_type = log_entry->get_val_type(); @@ -416,7 +440,9 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry) if (index_to_start_pos.count(index) == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write at index {} because changelog doesn't contain it", index); + /// Complex case when we need to override data from already rotated log bool go_to_previous_file = index < current_writer->getStartIndex(); + if (go_to_previous_file) { auto index_changelog = existing_changelogs.lower_bound(index); @@ -450,6 +476,7 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry) auto log_itr = logs.find(i); if (log_itr == logs.end()) break; + logs.erase(log_itr); index_to_start_pos.erase(i); entries_written--; @@ -467,7 +494,6 @@ void Changelog::compact(uint64_t up_to_log_index) /// Remove all completely outdated changelog files if (itr->second.to_log_index <= up_to_log_index) { - LOG_INFO(log, "Removing changelog {} because of compaction", itr->second.path); std::erase_if(index_to_start_pos, [right_index = itr->second.to_log_index] (const auto & item) { return item.first <= right_index; }); std::filesystem::remove(itr->second.path); @@ -482,6 +508,7 @@ void Changelog::compact(uint64_t up_to_log_index) LogEntryPtr Changelog::getLastEntry() const { + /// This entry treaded in special way by NuRaft static LogEntryPtr fake_entry = nuraft::cs_new(0, nuraft::buffer::alloc(sizeof(uint64_t))); uint64_t next_index = getNextEntryIndex() - 1; diff --git a/src/Coordination/Changelog.h b/src/Coordination/Changelog.h index 893fe16abdf..88198ec398c 100644 --- a/src/Coordination/Changelog.h +++ b/src/Coordination/Changelog.h @@ -58,8 +58,8 @@ struct ChangelogFileDescription class ChangelogWriter; /// Simplest changelog with files rotation. -/// No compression, no metadata, just entries with headers one by one -/// Able to read broken files/entries and discard them. +/// No compression, no metadata, just entries with headers one by one. +/// Able to read broken files/entries and discard them. Not thread safe. 
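+/// The on-disk record layout implied by the reader above (a sketch; exact field widths are not shown in this diff):
+///   checksum   -- verified against computeRecordChecksum(record)
+///   version    -- must not exceed CURRENT_CHANGELOG_VERSION
+///   index      -- must be exactly previous index + 1 and unique within the file
+///   term, value_type
+///   blob_size  -- 0 means no payload
+///   blob       -- blob_size bytes of the serialized log entry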
class Changelog { diff --git a/src/Coordination/KeeperStorageDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp similarity index 86% rename from src/Coordination/KeeperStorageDispatcher.cpp rename to src/Coordination/KeeperDispatcher.cpp index 7c416b38d8b..26db925b4c5 100644 --- a/src/Coordination/KeeperStorageDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include @@ -9,19 +9,18 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int TIMEOUT_EXCEEDED; } -KeeperStorageDispatcher::KeeperStorageDispatcher() +KeeperDispatcher::KeeperDispatcher() : coordination_settings(std::make_shared()) , log(&Poco::Logger::get("KeeperDispatcher")) { } -void KeeperStorageDispatcher::requestThread() +void KeeperDispatcher::requestThread() { setThreadName("KeeperReqT"); @@ -133,7 +132,7 @@ void KeeperStorageDispatcher::requestThread() } } -void KeeperStorageDispatcher::responseThread() +void KeeperDispatcher::responseThread() { setThreadName("KeeperRspT"); while (!shutdown_called) @@ -159,7 +158,7 @@ void KeeperStorageDispatcher::responseThread() } } -void KeeperStorageDispatcher::snapshotThread() +void KeeperDispatcher::snapshotThread() { setThreadName("KeeperSnpT"); while (!shutdown_called) @@ -181,9 +180,11 @@ void KeeperStorageDispatcher::snapshotThread() } } -void KeeperStorageDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response) +void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response) { std::lock_guard lock(session_to_response_callback_mutex); + + /// Special new session response. if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::SessionID) { const Coordination::ZooKeeperSessionIDResponse & session_id_resp = dynamic_cast(*response); @@ -196,25 +197,28 @@ void KeeperStorageDispatcher::setResponse(int64_t session_id, const Coordination callback(response); new_session_id_response_callback.erase(session_id_resp.internal_id); } - else + else /// Normal response, just write to client { - auto session_writer = session_to_response_callback.find(session_id); - if (session_writer == session_to_response_callback.end()) + auto session_response_callback = session_to_response_callback.find(session_id); + + /// Session was disconnected, just skip this response + if (session_response_callback == session_to_response_callback.end()) return; - session_writer->second(response); + session_response_callback->second(response); /// Session closed, no more writes if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::Close) { - session_to_response_callback.erase(session_writer); + session_to_response_callback.erase(session_response_callback); } } } -bool KeeperStorageDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id) +bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id) { { + /// If session was already disconnected than we will ignore requests std::lock_guard lock(session_to_response_callback_mutex); if (session_to_response_callback.count(session_id) == 0) return false; @@ -237,7 +241,7 @@ bool KeeperStorageDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr return true; } -void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper) +void KeeperDispatcher::initialize(const 
Poco::Util::AbstractConfiguration & config, bool standalone_keeper) { LOG_DEBUG(log, "Initializing storage dispatcher"); int myid = config.getInt("keeper_server.server_id"); @@ -251,6 +255,7 @@ void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration server = std::make_unique( myid, coordination_settings, config, responses_queue, snapshots_queue, standalone_keeper); + try { LOG_DEBUG(log, "Waiting server to initialize"); @@ -266,13 +271,13 @@ void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration throw; } - + /// Start it after keeper server start session_cleaner_thread = ThreadFromGlobalPool([this] { sessionCleanerTask(); }); LOG_DEBUG(log, "Dispatcher initialized"); } -void KeeperStorageDispatcher::shutdown() +void KeeperDispatcher::shutdown() { try { @@ -306,6 +311,8 @@ void KeeperStorageDispatcher::shutdown() server->shutdown(); KeeperStorage::RequestForSession request_for_session; + + /// Set session expired for all pending requests while (requests_queue->tryPop(request_for_session)) { if (request_for_session.request) @@ -320,6 +327,7 @@ void KeeperStorageDispatcher::shutdown() } } + /// Clear all registered sessions std::lock_guard lock(session_to_response_callback_mutex); session_to_response_callback.clear(); } @@ -331,19 +339,19 @@ void KeeperStorageDispatcher::shutdown() LOG_DEBUG(log, "Dispatcher shut down"); } -KeeperStorageDispatcher::~KeeperStorageDispatcher() +KeeperDispatcher::~KeeperDispatcher() { shutdown(); } -void KeeperStorageDispatcher::registerSession(int64_t session_id, ZooKeeperResponseCallback callback) +void KeeperDispatcher::registerSession(int64_t session_id, ZooKeeperResponseCallback callback) { std::lock_guard lock(session_to_response_callback_mutex); if (!session_to_response_callback.try_emplace(session_id, callback).second) throw Exception(DB::ErrorCodes::LOGICAL_ERROR, "Session with id {} already registered in dispatcher", session_id); } -void KeeperStorageDispatcher::sessionCleanerTask() +void KeeperDispatcher::sessionCleanerTask() { while (true) { @@ -352,12 +360,16 @@ void KeeperStorageDispatcher::sessionCleanerTask() try { + /// Only leader node must check dead sessions if (isLeader()) { auto dead_sessions = server->getDeadSessions(); + for (int64_t dead_session : dead_sessions) { LOG_INFO(log, "Found dead session {}, will try to close it", dead_session); + + /// Close session == send close request to raft server Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(Coordination::OpNum::Close); request->xid = Coordination::CLOSE_XID; KeeperStorage::RequestForSession request_info; @@ -367,6 +379,8 @@ void KeeperStorageDispatcher::sessionCleanerTask() std::lock_guard lock(push_request_mutex); requests_queue->push(std::move(request_info)); } + + /// Remove session from registered sessions finishSession(dead_session); LOG_INFO(log, "Dead session close request pushed"); } @@ -381,7 +395,7 @@ void KeeperStorageDispatcher::sessionCleanerTask() } } -void KeeperStorageDispatcher::finishSession(int64_t session_id) +void KeeperDispatcher::finishSession(int64_t session_id) { std::lock_guard lock(session_to_response_callback_mutex); auto session_it = session_to_response_callback.find(session_id); @@ -389,7 +403,7 @@ void KeeperStorageDispatcher::finishSession(int64_t session_id) session_to_response_callback.erase(session_it); } -void KeeperStorageDispatcher::addErrorResponses(const KeeperStorage::RequestsForSessions & requests_for_sessions, Coordination::Error error) +void 
KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSessions & requests_for_sessions, Coordination::Error error) { for (const auto & [session_id, request] : requests_for_sessions) { @@ -402,7 +416,7 @@ } } -void KeeperStorageDispatcher::forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions) +void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions) { if (!result->has_result()) result->get(); @@ -417,10 +431,14 @@ requests_for_sessions.clear(); } -int64_t KeeperStorageDispatcher::getSessionID(int64_t session_timeout_ms) +int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms) { + /// New session id allocation is a special request, because we cannot process it in the normal + /// way: get request -> put to raft -> set response for registered callback. KeeperStorage::RequestForSession request_info; std::shared_ptr request = std::make_shared(); + /// Internal session id. It's a temporary number which is unique for each client on this server + /// but can be the same on different servers. request->internal_id = internal_session_id_counter.fetch_add(1); request->session_timeout_ms = session_timeout_ms; request->server_id = server->getServerID(); @@ -430,6 +448,7 @@ auto promise = std::make_shared>(); auto future = promise->get_future(); + { std::lock_guard lock(session_to_response_callback_mutex); new_session_id_response_callback[request->internal_id] = [promise, internal_id = request->internal_id] (const Coordination::ZooKeeperResponsePtr & response) @@ -452,6 +471,7 @@ }; } + /// Push new session request to queue { std::lock_guard lock(push_request_mutex); if (!requests_queue->tryPush(std::move(request_info), session_timeout_ms)) @@ -461,6 +481,8 @@ if (future.wait_for(std::chrono::milliseconds(session_timeout_ms)) != std::future_status::ready) throw Exception("Cannot receive session id within session timeout", ErrorCodes::TIMEOUT_EXCEEDED); + /// Forcefully wait for request execution because we cannot process any other + /// requests for this client until it gets a new session id. return future.get(); } diff --git a/src/Coordination/KeeperStorageDispatcher.h b/src/Coordination/KeeperDispatcher.h similarity index 80% rename from src/Coordination/KeeperStorageDispatcher.h rename to src/Coordination/KeeperDispatcher.h index cc95de04ce9..a20603f12c5 100644 --- a/src/Coordination/KeeperStorageDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -22,7 +22,9 @@ namespace DB using ZooKeeperResponseCallback = std::function; -class KeeperStorageDispatcher +/// High-level wrapper for ClickHouse Keeper. +/// Processes user requests via consensus and returns responses. +class KeeperDispatcher { private: @@ -45,6 +47,7 @@ private: /// (get, set, list, etc.). Dispatcher determines callback for each response /// using session id from this map. SessionToResponseCallback session_to_response_callback; + /// But when a client connects to the server for the first time it doesn't /// have a session_id. It requests it from the server.
We give a temporary /// internal id for such requests just to match the client with its response. @@ -60,7 +63,7 @@ private: /// Dumping new snapshots to disk ThreadFromGlobalPool snapshot_thread; - /// RAFT wrapper. Most important class. + /// RAFT wrapper. std::unique_ptr server; Poco::Logger * log; @@ -69,10 +72,15 @@ private: std::atomic internal_session_id_counter{0}; private: + /// Thread that puts requests to raft void requestThread(); + /// Thread that puts responses for subscribed sessions void responseThread(); + /// Thread that cleans disconnected sessions from memory void sessionCleanerTask(); + /// Thread that creates snapshots in the background void snapshotThread(); + void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response); /// Add error responses for requests to responses queue. @@ -84,16 +92,23 @@ private: void forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions); public: - KeeperStorageDispatcher(); + /// Just allocate some objects, real initialization is done by the `initialize` method + KeeperDispatcher(); + /// Call shutdown + ~KeeperDispatcher(); + + /// Initialization from config. + /// standalone_keeper -- we are a standalone keeper application (not inside clickhouse server) void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper); + /// Shutdown internal keeper parts (server, state machine, log storage, etc) void shutdown(); - ~KeeperStorageDispatcher(); - + /// Put request to ClickHouse Keeper bool putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id); + /// Are we the leader? bool isLeader() const { return server->isLeader(); @@ -104,9 +119,12 @@ return server->isLeaderAlive(); } + /// Get new session ID int64_t getSessionID(int64_t session_timeout_ms); + /// Register session and subscribe for responses with callback void registerSession(int64_t session_id, ZooKeeperResponseCallback callback); + /// Call when we no longer need any responses for this session (session has expired) void finishSession(int64_t session_id); }; diff --git a/src/Coordination/KeeperLogStore.h b/src/Coordination/KeeperLogStore.h index 01315e6e879..d8ac8330c05 100644 --- a/src/Coordination/KeeperLogStore.h +++ b/src/Coordination/KeeperLogStore.h @@ -9,39 +9,53 @@ namespace DB { +/// Wrapper around Changelog class. Implements RAFT log storage.
class KeeperLogStore : public nuraft::log_store { public: KeeperLogStore(const std::string & changelogs_path, uint64_t rotate_interval_, bool force_sync_); + /// Read log storage from filesystem starting from last_commited_log_index void init(uint64_t last_commited_log_index, uint64_t logs_to_keep); uint64_t start_index() const override; uint64_t next_slot() const override; + /// Return last entry from log nuraft::ptr last_entry() const override; + /// Append new entry to log uint64_t append(nuraft::ptr & entry) override; + /// Remove all entries starting from index and write entry into index position void write_at(uint64_t index, nuraft::ptr & entry) override; + /// Return entries between [start, end) nuraft::ptr>> log_entries(uint64_t start, uint64_t end) override; + /// Return entry at index nuraft::ptr entry_at(uint64_t index) override; + /// Term of the entry at index uint64_t term_at(uint64_t index) override; + /// Serialize entries in interval [index, index + cnt) nuraft::ptr pack(uint64_t index, int32_t cnt) override; + /// Apply serialized entries starting from index void apply_pack(uint64_t index, nuraft::buffer & pack) override; + /// Entries up to last_log_index can be removed from memory and from disk bool compact(uint64_t last_log_index) override; + /// Call fsync on the stored data bool flush() override; + /// Current log storage size uint64_t size() const; + /// Flush batch of appended entries void end_of_append_batch(uint64_t start_index, uint64_t count) override; private: diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 282a7b48dfb..d1138ccef1a 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -38,6 +38,8 @@ private: Poco::Logger * log; + /// Callback func which is called by NuRaft on all internal events. + /// Used to determine the moment when raft is ready to serve new requests nuraft::cb_func::ReturnCode callbackFunc(nuraft::cb_func::Type type, nuraft::cb_func::Param * param); /// Almost copy-paste from nuraft::launcher, but with separated server init and start @@ -57,18 +59,25 @@ public: SnapshotsQueue & snapshots_queue_, bool standalone_keeper); + /// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings. void startup(); + /// Put local read request, execute it in the state machine directly and put the response into the + /// responses queue void putLocalReadRequest(const KeeperStorage::RequestForSession & request); + /// Put batch of requests into Raft and get result of put. Responses will be set separately into + /// responses_queue.
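+ /// (The returned RaftAppendResult is only a handle for the append operation itself;
+ /// the actual ZooKeeper responses arrive asynchronously through responses_queue.)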
RaftAppendResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests); + /// Return set of the non-active sessions std::unordered_set getDeadSessions(); bool isLeader() const; bool isLeaderAlive() const; + /// Wait server initialization (see callbackFunc) void waitInit(); void shutdown(); diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index a76b86a8171..2e5e7214e3e 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -14,29 +14,32 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -KeeperStorage::RequestForSession parseRequest(nuraft::buffer & data) +namespace { - ReadBufferFromNuraftBuffer buffer(data); - KeeperStorage::RequestForSession request_for_session; - readIntBinary(request_for_session.session_id, buffer); + KeeperStorage::RequestForSession parseRequest(nuraft::buffer & data) + { + ReadBufferFromNuraftBuffer buffer(data); + KeeperStorage::RequestForSession request_for_session; + readIntBinary(request_for_session.session_id, buffer); - int32_t length; - Coordination::read(length, buffer); + int32_t length; + Coordination::read(length, buffer); - int32_t xid; - Coordination::read(xid, buffer); + int32_t xid; + Coordination::read(xid, buffer); - Coordination::OpNum opnum; + Coordination::OpNum opnum; - Coordination::read(opnum, buffer); + Coordination::read(opnum, buffer); - request_for_session.request = Coordination::ZooKeeperRequestFactory::instance().get(opnum); - request_for_session.request->xid = xid; - request_for_session.request->readImpl(buffer); - return request_for_session; + request_for_session.request = Coordination::ZooKeeperRequestFactory::instance().get(opnum); + request_for_session.request->xid = xid; + request_for_session.request->readImpl(buffer); + return request_for_session; + } } - KeeperStateMachine::KeeperStateMachine( +KeeperStateMachine::KeeperStateMachine( ResponsesQueue & responses_queue_, SnapshotsQueue & snapshots_queue_, const std::string & snapshots_path_, @@ -58,6 +61,7 @@ void KeeperStateMachine::init() LOG_DEBUG(log, "Totally have {} snapshots", snapshot_manager.totalSnapshots()); bool loaded = false; bool has_snapshots = snapshot_manager.totalSnapshots() != 0; + /// Deserialize latest snapshot from disk while (snapshot_manager.totalSnapshots() != 0) { uint64_t latest_log_index = snapshot_manager.getLatestSnapshotIndex(); @@ -97,6 +101,7 @@ void KeeperStateMachine::init() nuraft::ptr KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data) { auto request_for_session = parseRequest(data); + /// Special processing of session_id request if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID) { const Coordination::ZooKeeperSessionIDRequest & session_id_request = dynamic_cast(*request_for_session.request); @@ -136,7 +141,7 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s) { LOG_DEBUG(log, "Applying snapshot {}", s.get_last_log_idx()); nuraft::ptr latest_snapshot_ptr; - { + { /// save snapshot into memory std::lock_guard lock(snapshots_lock); if (s.get_last_log_idx() != latest_snapshot_meta->get_last_log_idx()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Required to apply snapshot with last log index {}, but our last log index is {}", @@ -144,10 +149,11 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s) latest_snapshot_ptr = latest_snapshot_buf; } - { + { /// deserialize and apply snapshot to storage std::lock_guard lock(storage_lock); std::tie(latest_snapshot_meta, 
storage) = snapshot_manager.deserializeSnapshotFromBuffer(latest_snapshot_ptr); } + last_committed_idx = s.get_last_log_idx(); return true; } @@ -168,18 +174,19 @@ void KeeperStateMachine::create_snapshot( nuraft::ptr snp_buf = s.serialize(); auto snapshot_meta_copy = nuraft::snapshot::deserialize(*snp_buf); CreateSnapshotTask snapshot_task; - { + { /// lock storage for a short period of time to turn on "snapshot mode". After that we can read consistent storage state without locking. std::lock_guard lock(storage_lock); snapshot_task.snapshot = std::make_shared(storage.get(), snapshot_meta_copy); } + /// create snapshot task for background execution (in snapshot thread) snapshot_task.create_snapshot = [this, when_done] (KeeperStorageSnapshotPtr && snapshot) { nuraft::ptr exception(nullptr); bool ret = true; try { - { + { /// Read storage data without locks and create snapshot std::lock_guard lock(snapshots_lock); auto snapshot_buf = snapshot_manager.serializeSnapshotToBuffer(*snapshot); auto result_path = snapshot_manager.serializeSnapshotBufferToDisk(*snapshot_buf, snapshot->snapshot_meta->get_last_log_idx()); @@ -192,6 +199,7 @@ { /// Must do it with lock (clearing elements from list) std::lock_guard lock(storage_lock); + /// Turn off "snapshot mode" and clear outdated part of storage state storage->clearGarbageAfterSnapshot(); /// Destroy snapshot with lock snapshot.reset(); @@ -209,7 +217,9 @@ when_done(ret, exception); }; + LOG_DEBUG(log, "In memory snapshot {} created, queueing task to flush to disk", s.get_last_log_idx()); + /// Flush snapshot to disk in a separate thread. snapshots_queue.push(std::move(snapshot_task)); } @@ -224,7 +234,7 @@ void KeeperStateMachine::save_logical_snp_obj( nuraft::ptr cloned_buffer; nuraft::ptr cloned_meta; - if (obj_id == 0) + if (obj_id == 0) /// Fake snapshot required by NuRaft at startup { std::lock_guard lock(storage_lock); KeeperStorageSnapshot snapshot(storage.get(), s.get_last_log_idx()); @@ -232,15 +242,18 @@ } else { + /// copy snapshot into memory cloned_buffer = nuraft::buffer::clone(data); } + /// copy snapshot meta into memory nuraft::ptr snp_buf = s.serialize(); cloned_meta = nuraft::snapshot::deserialize(*snp_buf); try { std::lock_guard lock(snapshots_lock); + /// Serialize snapshot to disk and switch in-memory pointers. auto result_path = snapshot_manager.serializeSnapshotBufferToDisk(*cloned_buffer, s.get_last_log_idx()); latest_snapshot_buf = cloned_buffer; latest_snapshot_meta = cloned_meta; @@ -262,7 +275,7 @@ int KeeperStateMachine::read_logical_snp_obj( { LOG_DEBUG(log, "Reading snapshot {} obj_id {}", s.get_last_log_idx(), obj_id); - if (obj_id == 0) + if (obj_id == 0) /// Fake snapshot required by NuRaft at startup { data_out = nuraft::buffer::alloc(sizeof(int32_t)); nuraft::buffer_serializer bs(data_out); @@ -272,6 +285,8 @@ else { std::lock_guard lock(snapshots_lock); + /// Our snapshot is not equal to the required one. Maybe we are still creating it in the background. + /// Let's wait and NuRaft will retry this call. if (s.get_last_log_idx() != latest_snapshot_meta->get_last_log_idx()) { LOG_WARNING(log, "Required to apply snapshot with last log index {}, but our last log index is {}.
Will ignore this one and retry", @@ -281,11 +296,13 @@ int KeeperStateMachine::read_logical_snp_obj( data_out = nuraft::buffer::clone(*latest_snapshot_buf); is_last_obj = true; } + return 1; } void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSession & request_for_session) { + /// Pure local request, just process it with storage KeeperStorage::ResponsesForSessions responses; { std::lock_guard lock(storage_lock); diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h index fb46f507baf..06be270b66e 100644 --- a/src/Coordination/KeeperStateMachine.h +++ b/src/Coordination/KeeperStateMachine.h @@ -13,6 +13,8 @@ namespace DB using ResponsesQueue = ThreadSafeQueue; using SnapshotsQueue = ConcurrentBoundedQueue; +/// ClickHouse Keeper state machine. Wrapper for KeeperStorage. +/// Responsible for entries commit, snapshots creation and so on. class KeeperStateMachine : public nuraft::state_machine { public: @@ -21,24 +23,30 @@ public: const std::string & snapshots_path_, const CoordinationSettingsPtr & coordination_settings_, const std::string & superdigest_ = ""); + /// Read state from the latest snapshot void init(); + /// Currently not supported nuraft::ptr pre_commit(const uint64_t /*log_idx*/, nuraft::buffer & /*data*/) override { return nullptr; } nuraft::ptr commit(const uint64_t log_idx, nuraft::buffer & data) override; + /// Currently not supported void rollback(const uint64_t /*log_idx*/, nuraft::buffer & /*data*/) override {} uint64_t last_commit_index() override { return last_committed_idx; } + /// Apply preliminarily saved (save_logical_snp_obj) snapshot to our state. bool apply_snapshot(nuraft::snapshot & s) override; nuraft::ptr last_snapshot() override; + /// Create new snapshot from current state. void create_snapshot( nuraft::snapshot & s, nuraft::async_result::handler_type & when_done) override; + /// Save snapshot which was send by leader to us. After that we will apply it in apply_snapshot. void save_logical_snp_obj( nuraft::snapshot & s, uint64_t & obj_id, @@ -46,6 +54,8 @@ public: bool is_first_obj, bool is_last_obj) override; + /// Better name is `serialize snapshot` -- save existing snapshot (created by create_snapshot) into + /// in-memory buffer data_out. int read_logical_snp_obj( nuraft::snapshot & s, void* & user_snp_ctx, @@ -58,6 +68,7 @@ public: return *storage; } + /// Process local read request void processReadRequest(const KeeperStorage::RequestForSession & request_for_session); std::unordered_set getDeadSessions(); @@ -66,18 +77,25 @@ public: private: + /// In our state machine we always have a single snapshot which is stored + /// in memory in compressed (serialized) format. SnapshotMetadataPtr latest_snapshot_meta = nullptr; nuraft::ptr latest_snapshot_buf = nullptr; CoordinationSettingsPtr coordination_settings; + /// Main state machine logic KeeperStoragePtr storage; + /// Save/Load and Serialize/Deserialize logic for snapshots. KeeperSnapshotManager snapshot_manager; + /// Put processed responses into this queue ResponsesQueue & responses_queue; + /// Snapshots to create by snapshot thread SnapshotsQueue & snapshots_queue; + /// Mutex for snapshots std::mutex snapshots_lock; @@ -88,6 +106,7 @@ private: std::atomic last_committed_idx; Poco::Logger * log; + /// Special part of ACL system -- superdigest specified in server config. 
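+ /// (Presumably analogous to ZooKeeper's superDigest: requests authenticated with it
+ /// pass ACL checks as a super user; this is an assumption, not shown in this diff.)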
const std::string superdigest; }; diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 320754c7d31..8bffdbe0222 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -177,32 +177,32 @@ KeeperStorage::KeeperStorage(int64_t tick_time_ms, const String & superdigest_) using Undo = std::function; -struct KeeperStorageRequest +struct KeeperStorageRequestProcessor { Coordination::ZooKeeperRequestPtr zk_request; - explicit KeeperStorageRequest(const Coordination::ZooKeeperRequestPtr & zk_request_) + explicit KeeperStorageRequestProcessor(const Coordination::ZooKeeperRequestPtr & zk_request_) : zk_request(zk_request_) {} virtual std::pair process(KeeperStorage & storage, int64_t zxid, int64_t session_id) const = 0; virtual KeeperStorage::ResponsesForSessions processWatches(KeeperStorage::Watches & /*watches*/, KeeperStorage::Watches & /*list_watches*/) const { return {}; } virtual bool checkAuth(KeeperStorage & /*storage*/, int64_t /*session_id*/) const { return true; } - virtual ~KeeperStorageRequest() = default; + virtual ~KeeperStorageRequestProcessor() = default; }; -struct KeeperStorageHeartbeatRequest final : public KeeperStorageRequest +struct KeeperStorageHeartbeatRequestProcessor final : public KeeperStorageRequestProcessor { - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & /* storage */, int64_t /* zxid */, int64_t /* session_id */) const override { return {zk_request->makeResponse(), {}}; } }; -struct KeeperStorageSyncRequest final : public KeeperStorageRequest +struct KeeperStorageSyncRequestProcessor final : public KeeperStorageRequestProcessor { - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & /* storage */, int64_t /* zxid */, int64_t /* session_id */) const override { auto response = zk_request->makeResponse(); @@ -212,9 +212,9 @@ struct KeeperStorageSyncRequest final : public KeeperStorageRequest } }; -struct KeeperStorageCreateRequest final : public KeeperStorageRequest +struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestProcessor { - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; KeeperStorage::ResponsesForSessions processWatches(KeeperStorage::Watches & watches, KeeperStorage::Watches & list_watches) const override { @@ -363,7 +363,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest } }; -struct KeeperStorageGetRequest final : public KeeperStorageRequest +struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override @@ -381,7 +381,7 @@ struct KeeperStorageGetRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Read, node_acls, session_auths); } - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t /* zxid */, int64_t /* session_id */) const override { auto & container = storage.container; @@ -423,7 +423,7 @@ namespace } } -struct KeeperStorageRemoveRequest final : public KeeperStorageRequest +struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t 
session_id) const override { @@ -440,7 +440,7 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Delete, node_acls, session_auths); } - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/) const override { auto & container = storage.container; @@ -520,9 +520,9 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest } }; -struct KeeperStorageExistsRequest final : public KeeperStorageRequest +struct KeeperStorageExistsRequestProcessor final : public KeeperStorageRequestProcessor { - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /* session_id */) const override { auto & container = storage.container; @@ -546,7 +546,7 @@ struct KeeperStorageExistsRequest final : public KeeperStorageRequest } }; -struct KeeperStorageSetRequest final : public KeeperStorageRequest +struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { @@ -563,7 +563,7 @@ struct KeeperStorageSetRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Write, node_acls, session_auths); } - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t zxid, int64_t /* session_id */) const override { auto & container = storage.container; @@ -624,7 +624,7 @@ struct KeeperStorageSetRequest final : public KeeperStorageRequest } }; -struct KeeperStorageListRequest final : public KeeperStorageRequest +struct KeeperStorageListRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { @@ -641,7 +641,7 @@ struct KeeperStorageListRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Read, node_acls, session_auths); } - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override { auto & container = storage.container; @@ -669,7 +669,7 @@ struct KeeperStorageListRequest final : public KeeperStorageRequest } }; -struct KeeperStorageCheckRequest final : public KeeperStorageRequest +struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { @@ -686,7 +686,7 @@ struct KeeperStorageCheckRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Read, node_acls, session_auths); } - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override { auto & container = storage.container; @@ -713,7 +713,7 @@ struct KeeperStorageCheckRequest final : public KeeperStorageRequest }; -struct KeeperStorageSetACLRequest final : public KeeperStorageRequest +struct KeeperStorageSetACLRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { @@ -730,7 
+730,7 @@ struct KeeperStorageSetACLRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Admin, node_acls, session_auths); } - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t session_id) const override { @@ -777,7 +777,7 @@ struct KeeperStorageSetACLRequest final : public KeeperStorageRequest } }; -struct KeeperStorageGetACLRequest final : public KeeperStorageRequest +struct KeeperStorageGetACLRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { @@ -794,7 +794,7 @@ struct KeeperStorageGetACLRequest final : public KeeperStorageRequest /// LOL, GetACL require more permissions, then SetACL... return checkACL(Coordination::ACL::Admin | Coordination::ACL::Read, node_acls, session_auths); } - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override { @@ -817,7 +817,7 @@ struct KeeperStorageGetACLRequest final : public KeeperStorageRequest } }; -struct KeeperStorageMultiRequest final : public KeeperStorageRequest +struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestProcessor { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { @@ -827,9 +827,9 @@ struct KeeperStorageMultiRequest final : public KeeperStorageRequest return true; } - std::vector concrete_requests; - explicit KeeperStorageMultiRequest(const Coordination::ZooKeeperRequestPtr & zk_request_) - : KeeperStorageRequest(zk_request_) + std::vector concrete_requests; + explicit KeeperStorageMultiRequestProcessor(const Coordination::ZooKeeperRequestPtr & zk_request_) + : KeeperStorageRequestProcessor(zk_request_) { Coordination::ZooKeeperMultiRequest & request = dynamic_cast(*zk_request); concrete_requests.reserve(request.requests.size()); @@ -839,19 +839,19 @@ struct KeeperStorageMultiRequest final : public KeeperStorageRequest auto sub_zk_request = std::dynamic_pointer_cast(sub_request); if (sub_zk_request->getOpNum() == Coordination::OpNum::Create) { - concrete_requests.push_back(std::make_shared(sub_zk_request)); + concrete_requests.push_back(std::make_shared(sub_zk_request)); } else if (sub_zk_request->getOpNum() == Coordination::OpNum::Remove) { - concrete_requests.push_back(std::make_shared(sub_zk_request)); + concrete_requests.push_back(std::make_shared(sub_zk_request)); } else if (sub_zk_request->getOpNum() == Coordination::OpNum::Set) { - concrete_requests.push_back(std::make_shared(sub_zk_request)); + concrete_requests.push_back(std::make_shared(sub_zk_request)); } else if (sub_zk_request->getOpNum() == Coordination::OpNum::Check) { - concrete_requests.push_back(std::make_shared(sub_zk_request)); + concrete_requests.push_back(std::make_shared(sub_zk_request)); } else throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", sub_zk_request->getOpNum()); @@ -923,18 +923,18 @@ struct KeeperStorageMultiRequest final : public KeeperStorageRequest } }; -struct KeeperStorageCloseRequest final : public KeeperStorageRequest +struct KeeperStorageCloseRequestProcessor final : public KeeperStorageRequestProcessor { - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; 
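+ /// Close requests are handled specially by KeeperStorage::processRequest itself,
+ /// so reaching process() here indicates a logical error.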
std::pair process(KeeperStorage &, int64_t, int64_t) const override { throw DB::Exception("Called process on close request", ErrorCodes::LOGICAL_ERROR); } }; -struct KeeperStorageAuthRequest final : public KeeperStorageRequest +struct KeeperStorageAuthRequestProcessor final : public KeeperStorageRequestProcessor { - using KeeperStorageRequest::KeeperStorageRequest; + using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor; std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t session_id) const override { Coordination::ZooKeeperAuthRequest & auth_request = dynamic_cast(*zk_request); @@ -988,20 +988,20 @@ void KeeperStorage::finalize() } -class KeeperWrapperFactory final : private boost::noncopyable +class KeeperStorageRequestProcessorsFactory final : private boost::noncopyable { public: - using Creator = std::function; + using Creator = std::function; using OpNumToRequest = std::unordered_map; - static KeeperWrapperFactory & instance() + static KeeperStorageRequestProcessorsFactory & instance() { - static KeeperWrapperFactory factory; + static KeeperStorageRequestProcessorsFactory factory; return factory; } - KeeperStorageRequestPtr get(const Coordination::ZooKeeperRequestPtr & zk_request) const + KeeperStorageRequestProcessorPtr get(const Coordination::ZooKeeperRequestPtr & zk_request) const { auto it = op_num_to_request.find(zk_request->getOpNum()); if (it == op_num_to_request.end()) @@ -1018,33 +1018,33 @@ public: private: OpNumToRequest op_num_to_request; - KeeperWrapperFactory(); + KeeperStorageRequestProcessorsFactory(); }; template -void registerKeeperRequestWrapper(KeeperWrapperFactory & factory) +void registerKeeperRequestProcessor(KeeperStorageRequestProcessorsFactory & factory) { factory.registerRequest(num, [] (const Coordination::ZooKeeperRequestPtr & zk_request) { return std::make_shared(zk_request); }); } -KeeperWrapperFactory::KeeperWrapperFactory() +KeeperStorageRequestProcessorsFactory::KeeperStorageRequestProcessorsFactory() { - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); - registerKeeperRequestWrapper(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); + registerKeeperRequestProcessor(*this); } @@ -1059,7 +1059,8 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina } session_expiry_queue.update(session_id, session_and_timeout[session_id]); - if (zk_request->getOpNum() == Coordination::OpNum::Close) + + if (zk_request->getOpNum() == Coordination::OpNum::Close) /// Close request is special { auto it = 
ephemerals.find(session_id); if (it != ephemerals.end()) @@ -1092,21 +1093,21 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina session_and_timeout.erase(session_id); results.push_back(ResponseForSession{session_id, response}); } - else if (zk_request->getOpNum() == Coordination::OpNum::Heartbeat) + else if (zk_request->getOpNum() == Coordination::OpNum::Heartbeat) /// Heartbeat request is also special { - KeeperStorageRequestPtr storage_request = KeeperWrapperFactory::instance().get(zk_request); + KeeperStorageRequestProcessorPtr storage_request = KeeperStorageRequestProcessorsFactory::instance().get(zk_request); auto [response, _] = storage_request->process(*this, zxid, session_id); response->xid = zk_request->xid; response->zxid = getZXID(); results.push_back(ResponseForSession{session_id, response}); } - else + else /// normal request processing { - KeeperStorageRequestPtr storage_request = KeeperWrapperFactory::instance().get(zk_request); + KeeperStorageRequestProcessorPtr request_processor = KeeperStorageRequestProcessorsFactory::instance().get(zk_request); Coordination::ZooKeeperResponsePtr response; - if (check_acl && !storage_request->checkAuth(*this, session_id)) + if (check_acl && !request_processor->checkAuth(*this, session_id)) { response = zk_request->makeResponse(); /// Original ZooKeeper always throws no auth, even when user provided some credentials @@ -1114,9 +1115,10 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina else { - std::tie(response, std::ignore) = storage_request->process(*this, zxid, session_id); + std::tie(response, std::ignore) = request_processor->process(*this, zxid, session_id); } + /// Watches for these requests are added to the watches lists if (zk_request->has_watch) { if (response->error == Coordination::Error::ZOK) @@ -1135,9 +1137,10 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina } } + /// If this request was processed successfully we need to check watches if (response->error == Coordination::Error::ZOK) { - auto watch_responses = storage_request->processWatches(watches, list_watches); + auto watch_responses = request_processor->processWatches(watches, list_watches); results.insert(results.end(), watch_responses.begin(), watch_responses.end()); } @@ -1153,11 +1156,13 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina void KeeperStorage::clearDeadWatches(int64_t session_id) { + /// Clear all watches for this session auto watches_it = sessions_and_watchers.find(session_id); if (watches_it != sessions_and_watchers.end()) { for (const auto & watch_path : watches_it->second) { + /// Maybe it's a normal watch auto watch = watches.find(watch_path); if (watch != watches.end()) { @@ -1173,6 +1178,7 @@ void KeeperStorage::clearDeadWatches(int64_t session_id) watches.erase(watch); } + /// Maybe it's a list watch auto list_watch = list_watches.find(watch_path); if (list_watch != list_watches.end()) { @@ -1188,6 +1194,7 @@ void KeeperStorage::clearDeadWatches(int64_t session_id) list_watches.erase(list_watch); } } + sessions_and_watchers.erase(watches_it); } } diff --git a/src/Coordination/KeeperStorage.h b/src/Coordination/KeeperStorage.h index e3cb0f59fdc..1e925a0634e 100644 --- a/src/Coordination/KeeperStorage.h +++ b/src/Coordination/KeeperStorage.h @@ -15,14 +15,17 @@ namespace DB { using namespace DB; -struct KeeperStorageRequest; -using KeeperStorageRequestPtr = std::shared_ptr; +struct KeeperStorageRequestProcessor;
+using KeeperStorageRequestProcessorPtr = std::shared_ptr<KeeperStorageRequestProcessor>; using ResponseCallback = std::function; using ChildrenSet = std::unordered_set; using SessionAndTimeout = std::unordered_map; struct KeeperStorageSnapshot; +/// Keeper state machine almost equal to ZooKeeper's state machine. +/// Implements all logic of operations, data changes, session allocation. +/// In-memory and not thread-safe. class KeeperStorage { public: @@ -77,21 +80,34 @@ public: using Watches = std::map; + /// Main hashtable with nodes. Contains all information about data. + /// All other structures except session_and_timeout can be restored from + /// container. Container container; + + /// Mapping session_id -> set of ephemeral node paths Ephemerals ephemerals; + /// Mapping session_id -> set of watched node paths SessionAndWatcher sessions_and_watchers; + /// Expiration queue for sessions; allows to get dead sessions at some point in time SessionExpiryQueue session_expiry_queue; + /// All active sessions with timeout SessionAndTimeout session_and_timeout; + + /// ACLMap for more compact ACLs storage inside nodes. ACLMap acl_map; + /// Global id of all requests applied to storage int64_t zxid{0}; bool finalized{false}; + /// Currently active watches (node_path -> subscribed sessions) Watches watches; Watches list_watches; /// Watches for 'list' request (watches on children). void clearDeadWatches(int64_t session_id); + /// Get current zxid int64_t getZXID() const { return zxid; @@ -102,6 +118,7 @@ public: public: KeeperStorage(int64_t tick_time_ms, const String & superdigest_); + /// Allocate new session id with the specified timeout int64_t getSessionID(int64_t session_timeout_ms) { auto result = session_id_counter++; @@ -110,21 +127,28 @@ public: return result; } + /// Add session id. Used when restoring KeeperStorage from snapshot. void addSessionID(int64_t session_id, int64_t session_timeout_ms) { session_and_timeout.emplace(session_id, session_timeout_ms); session_expiry_queue.update(session_id, session_timeout_ms); } + /// Process user request and return response. + /// check_acl = false only when converting data from ZooKeeper. ResponsesForSessions processRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, std::optional new_last_zxid, bool check_acl = true); void finalize(); + /// Set of methods for creating snapshots + + /// Turn on snapshot mode, so data inside Container is not deleted, but replaced with a new version. void enableSnapshotMode() { container.enableSnapshotMode(); } + /// Turn off snapshot mode. void disableSnapshotMode() { container.disableSnapshotMode(); @@ -135,16 +159,19 @@ public: return container.begin(); } + /// Clear outdated data from internal container. void clearGarbageAfterSnapshot() { container.clearOutdatedNodes(); } + /// Get all active sessions const SessionAndTimeout & getActiveSessions() const { return session_and_timeout; } + /// Get all dead sessions std::unordered_set getDeadSessions() { return session_expiry_queue.getExpiredSessions(); diff --git a/src/DataStreams/RemoteQueryExecutor.cpp b/src/DataStreams/RemoteQueryExecutor.cpp index 21e874691c1..3b207110a67 100644 --- a/src/DataStreams/RemoteQueryExecutor.cpp +++ b/src/DataStreams/RemoteQueryExecutor.cpp @@ -526,7 +526,18 @@ void RemoteQueryExecutor::tryCancel(const char * reason, std::unique_ptr<ReadContext> * read_context) if (read_context && *read_context) + { + /// The timer should be set for query cancellation to avoid the cancellation itself hanging. + /// + /// In case the remote server terminated abnormally, neither + /// FIN nor RST packet will be sent, and the initiator will not know that + /// the connection died (unless tcp_keep_alive_timeout > 0). + /// + /// Also note that it is possible to get this situation even when + /// enough data has already been read.
+ (*read_context)->setTimer(); (*read_context)->cancel(); + } connections->sendCancel(); diff --git a/src/DataStreams/RemoteQueryExecutorReadContext.cpp b/src/DataStreams/RemoteQueryExecutorReadContext.cpp index c1f415bb597..6bdf52d2831 100644 --- a/src/DataStreams/RemoteQueryExecutorReadContext.cpp +++ b/src/DataStreams/RemoteQueryExecutorReadContext.cpp @@ -100,7 +100,7 @@ void RemoteQueryExecutorReadContext::setConnectionFD(int fd, Poco::Timespan time connection_fd = fd; epoll.add(connection_fd); - receive_timeout = timeout; + receive_timeout_usec = timeout.totalMicroseconds(); connection_fd_description = fd_description; } @@ -157,8 +157,8 @@ void RemoteQueryExecutorReadContext::setTimer() const /// Did not get packet yet. Init timeout for the next async reading. timer.reset(); - if (receive_timeout.totalMicroseconds()) - timer.setRelative(receive_timeout); + if (receive_timeout_usec) + timer.setRelative(receive_timeout_usec); } bool RemoteQueryExecutorReadContext::resumeRoutine() diff --git a/src/DataStreams/RemoteQueryExecutorReadContext.h b/src/DataStreams/RemoteQueryExecutorReadContext.h index 5c56bb73dd6..91e34dbb82c 100644 --- a/src/DataStreams/RemoteQueryExecutorReadContext.h +++ b/src/DataStreams/RemoteQueryExecutorReadContext.h @@ -34,7 +34,8 @@ public: /// This mutex for fiber is needed because fiber could be destroyed in cancel method from another thread. std::mutex fiber_lock; - Poco::Timespan receive_timeout; + /// Atomic is required due to a data race between setConnectionFD() and setTimer() called from the cancellation path. + std::atomic receive_timeout_usec = 0; IConnections & connections; Poco::Net::Socket * last_used_socket = nullptr; @@ -75,6 +76,7 @@ class RemoteQueryExecutorReadContext { public: void cancel() {} + void setTimer() {} }; } diff --git a/src/DataStreams/TTLBlockInputStream.cpp b/src/DataStreams/TTLBlockInputStream.cpp index 8b31da6d2f1..05d4ba0a395 100644 --- a/src/DataStreams/TTLBlockInputStream.cpp +++ b/src/DataStreams/TTLBlockInputStream.cpp @@ -76,17 +76,17 @@ TTLBlockInputStream::TTLBlockInputStream( algorithms.emplace_back(std::make_unique( description, old_ttl_infos.columns_ttl[name], current_time_, - force_, name, default_expression, default_column_name)); + force_, name, default_expression, default_column_name, isCompactPart(data_part))); } } for (const auto & move_ttl : metadata_snapshot_->getMoveTTLs()) - algorithms.emplace_back(std::make_unique( - move_ttl, old_ttl_infos.moves_ttl[move_ttl.result_column], current_time_, force_)); + algorithms.emplace_back(std::make_unique( + move_ttl, TTLUpdateField::MOVES_TTL, move_ttl.result_column, old_ttl_infos.moves_ttl[move_ttl.result_column], current_time_, force_)); for (const auto & recompression_ttl : metadata_snapshot_->getRecompressionTTLs()) - algorithms.emplace_back(std::make_unique( - recompression_ttl, old_ttl_infos.recompression_ttl[recompression_ttl.result_column], current_time_, force_)); + algorithms.emplace_back(std::make_unique( + recompression_ttl, TTLUpdateField::RECOMPRESSION_TTL, recompression_ttl.result_column, old_ttl_infos.recompression_ttl[recompression_ttl.result_column], current_time_, force_)); } Block reorderColumns(Block block, const Block & header) diff --git a/src/DataStreams/TTLCalcInputStream.cpp b/src/DataStreams/TTLCalcInputStream.cpp new file mode 100644 index 00000000000..2353e9ec259 --- /dev/null +++ b/src/DataStreams/TTLCalcInputStream.cpp @@ -0,0 +1,77 @@ +#include +#include + +namespace DB +{ + +TTLCalcInputStream::TTLCalcInputStream( + const BlockInputStreamPtr &
input_, + const MergeTreeData & storage_, + const StorageMetadataPtr & metadata_snapshot_, + const MergeTreeData::MutableDataPartPtr & data_part_, + time_t current_time_, + bool force_) + : data_part(data_part_) + , log(&Poco::Logger::get(storage_.getLogName() + " (TTLCalcInputStream)")) +{ + children.push_back(input_); + header = children.at(0)->getHeader(); + auto old_ttl_infos = data_part->ttl_infos; + + if (metadata_snapshot_->hasRowsTTL()) + { + const auto & rows_ttl = metadata_snapshot_->getRowsTTL(); + algorithms.emplace_back(std::make_unique( + rows_ttl, TTLUpdateField::TABLE_TTL, rows_ttl.result_column, old_ttl_infos.table_ttl, current_time_, force_)); + } + + for (const auto & where_ttl : metadata_snapshot_->getRowsWhereTTLs()) + algorithms.emplace_back(std::make_unique( + where_ttl, TTLUpdateField::ROWS_WHERE_TTL, where_ttl.result_column, old_ttl_infos.rows_where_ttl[where_ttl.result_column], current_time_, force_)); + + for (const auto & group_by_ttl : metadata_snapshot_->getGroupByTTLs()) + algorithms.emplace_back(std::make_unique( + group_by_ttl, TTLUpdateField::GROUP_BY_TTL, group_by_ttl.result_column, old_ttl_infos.group_by_ttl[group_by_ttl.result_column], current_time_, force_)); + + if (metadata_snapshot_->hasAnyColumnTTL()) + { + for (const auto & [name, description] : metadata_snapshot_->getColumnTTLs()) + { + algorithms.emplace_back(std::make_unique( + description, TTLUpdateField::COLUMNS_TTL, name, old_ttl_infos.columns_ttl[name], current_time_, force_)); + } + } + + for (const auto & move_ttl : metadata_snapshot_->getMoveTTLs()) + algorithms.emplace_back(std::make_unique( + move_ttl, TTLUpdateField::MOVES_TTL, move_ttl.result_column, old_ttl_infos.moves_ttl[move_ttl.result_column], current_time_, force_)); + + for (const auto & recompression_ttl : metadata_snapshot_->getRecompressionTTLs()) + algorithms.emplace_back(std::make_unique( + recompression_ttl, TTLUpdateField::RECOMPRESSION_TTL, recompression_ttl.result_column, old_ttl_infos.recompression_ttl[recompression_ttl.result_column], current_time_, force_)); +} + +Block TTLCalcInputStream::readImpl() +{ + auto block = children.at(0)->read(); + for (const auto & algorithm : algorithms) + algorithm->execute(block); + + if (!block) + return block; + + Block res; + for (const auto & col : header) + res.insert(block.getByName(col.name)); + + return res; +} + +void TTLCalcInputStream::readSuffixImpl() +{ + data_part->ttl_infos = {}; + for (const auto & algorithm : algorithms) + algorithm->finalize(data_part); +} + +} diff --git a/src/DataStreams/TTLCalcInputStream.h b/src/DataStreams/TTLCalcInputStream.h new file mode 100644 index 00000000000..d1b629c2ad5 --- /dev/null +++ b/src/DataStreams/TTLCalcInputStream.h @@ -0,0 +1,44 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +class TTLCalcInputStream : public IBlockInputStream +{ +public: + TTLCalcInputStream( + const BlockInputStreamPtr & input_, + const MergeTreeData & storage_, + const StorageMetadataPtr & metadata_snapshot_, + const MergeTreeData::MutableDataPartPtr & data_part_, + time_t current_time_, + bool force_ + ); + + String getName() const override { return "TTL_CALC"; } + Block getHeader() const override { return header; } + +protected: + Block readImpl() override; + + /// Finalizes ttl infos and updates data part + void readSuffixImpl() override; + +private: + std::vector algorithms; + + /// ttl_infos of data_part are updated while reading + const MergeTreeData::MutableDataPartPtr & data_part;
+ Poco::Logger * log; + Block header; +}; + +} diff --git a/src/DataStreams/TTLColumnAlgorithm.cpp b/src/DataStreams/TTLColumnAlgorithm.cpp index 1318ea382db..71ad2a4e38f 100644 --- a/src/DataStreams/TTLColumnAlgorithm.cpp +++ b/src/DataStreams/TTLColumnAlgorithm.cpp @@ -10,11 +10,13 @@ TTLColumnAlgorithm::TTLColumnAlgorithm( bool force_, const String & column_name_, const ExpressionActionsPtr & default_expression_, - const String & default_column_name_) + const String & default_column_name_, + bool is_compact_part_) : ITTLAlgorithm(description_, old_ttl_info_, current_time_, force_) , column_name(column_name_) , default_expression(default_expression_) , default_column_name(default_column_name_) + , is_compact_part(is_compact_part_) { if (!isMinTTLExpired()) { @@ -40,7 +42,7 @@ void TTLColumnAlgorithm::execute(Block & block) return; /// Later drop full column - if (isMaxTTLExpired()) + if (isMaxTTLExpired() && !is_compact_part) return; auto default_column = executeExpressionAndGetColumn(default_expression, block, default_column_name); diff --git a/src/DataStreams/TTLColumnAlgorithm.h b/src/DataStreams/TTLColumnAlgorithm.h index e09dd663af0..ddf963eaee2 100644 --- a/src/DataStreams/TTLColumnAlgorithm.h +++ b/src/DataStreams/TTLColumnAlgorithm.h @@ -17,7 +17,9 @@ public: bool force_, const String & column_name_, const ExpressionActionsPtr & default_expression_, - const String & default_column_name_); + const String & default_column_name_, + bool is_compact_part_ + ); void execute(Block & block) override; void finalize(const MutableDataPartPtr & data_part) const override; @@ -28,6 +30,7 @@ private: const String default_column_name; bool is_fully_empty = true; + bool is_compact_part; }; } diff --git a/src/DataStreams/TTLUpdateInfoAlgorithm.cpp b/src/DataStreams/TTLUpdateInfoAlgorithm.cpp index d5feb14658b..6a983d052c1 100644 --- a/src/DataStreams/TTLUpdateInfoAlgorithm.cpp +++ b/src/DataStreams/TTLUpdateInfoAlgorithm.cpp @@ -4,8 +4,15 @@ namespace DB { TTLUpdateInfoAlgorithm::TTLUpdateInfoAlgorithm( - const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_) + const TTLDescription & description_, + const TTLUpdateField ttl_update_field_, + const String ttl_update_key_, + const TTLInfo & old_ttl_info_, + time_t current_time_, + bool force_) : ITTLAlgorithm(description_, old_ttl_info_, current_time_, force_) + , ttl_update_field(ttl_update_field_) + , ttl_update_key(ttl_update_key_) { } @@ -22,26 +29,37 @@ void TTLUpdateInfoAlgorithm::execute(Block & block) } } -TTLMoveAlgorithm::TTLMoveAlgorithm( - const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_) - : TTLUpdateInfoAlgorithm(description_, old_ttl_info_, current_time_, force_) +void TTLUpdateInfoAlgorithm::finalize(const MutableDataPartPtr & data_part) const { -} + if (ttl_update_field == TTLUpdateField::RECOMPRESSION_TTL) + { + data_part->ttl_infos.recompression_ttl[ttl_update_key] = new_ttl_info; + } + else if (ttl_update_field == TTLUpdateField::MOVES_TTL) + { + data_part->ttl_infos.moves_ttl[ttl_update_key] = new_ttl_info; + } + else if (ttl_update_field == TTLUpdateField::GROUP_BY_TTL) + { + data_part->ttl_infos.group_by_ttl[ttl_update_key] = new_ttl_info; + data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, new_ttl_info.max); + } + else if (ttl_update_field == TTLUpdateField::ROWS_WHERE_TTL) + { + data_part->ttl_infos.rows_where_ttl[ttl_update_key] = new_ttl_info; + data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, 
new_ttl_info.max); + } + else if (ttl_update_field == TTLUpdateField::TABLE_TTL) + { + data_part->ttl_infos.table_ttl = new_ttl_info; + data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, new_ttl_info.max); + } + else if (ttl_update_field == TTLUpdateField::COLUMNS_TTL) + { + data_part->ttl_infos.columns_ttl[ttl_update_key] = new_ttl_info; + data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, new_ttl_info.max); + } -void TTLMoveAlgorithm::finalize(const MutableDataPartPtr & data_part) const -{ - data_part->ttl_infos.moves_ttl[description.result_column] = new_ttl_info; -} - -TTLRecompressionAlgorithm::TTLRecompressionAlgorithm( - const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_) - : TTLUpdateInfoAlgorithm(description_, old_ttl_info_, current_time_, force_) -{ -} - -void TTLRecompressionAlgorithm::finalize(const MutableDataPartPtr & data_part) const -{ - data_part->ttl_infos.recompression_ttl[description.result_column] = new_ttl_info; } } diff --git a/src/DataStreams/TTLUpdateInfoAlgorithm.h b/src/DataStreams/TTLUpdateInfoAlgorithm.h index c1ef0e1c90d..551211fc47f 100644 --- a/src/DataStreams/TTLUpdateInfoAlgorithm.h +++ b/src/DataStreams/TTLUpdateInfoAlgorithm.h @@ -5,28 +5,35 @@ namespace DB { +enum class TTLUpdateField +{ + COLUMNS_TTL, + TABLE_TTL, + ROWS_WHERE_TTL, + MOVES_TTL, + RECOMPRESSION_TTL, + GROUP_BY_TTL, +}; + /// Calculates new ttl_info and does nothing with data. class TTLUpdateInfoAlgorithm : public ITTLAlgorithm { public: - TTLUpdateInfoAlgorithm(const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_); + TTLUpdateInfoAlgorithm( + const TTLDescription & description_, + const TTLUpdateField ttl_update_field_, + const String ttl_update_key_, + const TTLInfo & old_ttl_info_, + time_t current_time_, bool force_ + ); void execute(Block & block) override; - void finalize(const MutableDataPartPtr & data_part) const override = 0; + void finalize(const MutableDataPartPtr & data_part) const override; + +private: + const TTLUpdateField ttl_update_field; + const String ttl_update_key; }; -class TTLMoveAlgorithm final : public TTLUpdateInfoAlgorithm -{ -public: - TTLMoveAlgorithm(const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_); - void finalize(const MutableDataPartPtr & data_part) const override; -}; - -class TTLRecompressionAlgorithm final : public TTLUpdateInfoAlgorithm -{ -public: - TTLRecompressionAlgorithm(const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_); - void finalize(const MutableDataPartPtr & data_part) const override; -}; } diff --git a/src/DataTypes/DataTypeEnum.h b/src/DataTypes/DataTypeEnum.h index 57657d1d110..92c72b87afa 100644 --- a/src/DataTypes/DataTypeEnum.h +++ b/src/DataTypes/DataTypeEnum.h @@ -27,6 +27,8 @@ public: bool isCategorial() const override { return true; } bool canBeInsideNullable() const override { return true; } bool isComparable() const override { return true; } + + virtual bool contains(const IDataType & rhs) const = 0; }; @@ -76,7 +78,7 @@ public: /// Example: /// Enum('a' = 1, 'b' = 2) -> Enum('c' = 1, 'b' = 2, 'd' = 3) OK /// Enum('a' = 1, 'b' = 2) -> Enum('a' = 2, 'b' = 1) NOT OK - bool contains(const IDataType & rhs) const; + bool contains(const IDataType & rhs) const override; SerializationPtr doGetDefaultSerialization() const override; }; diff --git a/src/DataTypes/Serializations/ISerialization.cpp 
b/src/DataTypes/Serializations/ISerialization.cpp index ab2e8e1958b..7077c5bfa14 100644 --- a/src/DataTypes/Serializations/ISerialization.cpp +++ b/src/DataTypes/Serializations/ISerialization.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/src/Functions/GatherUtils/Sources.h b/src/Functions/GatherUtils/Sources.h index 79f627fb64c..86b590646dc 100644 --- a/src/Functions/GatherUtils/Sources.h +++ b/src/Functions/GatherUtils/Sources.h @@ -325,7 +325,7 @@ struct StringSource }; -/// Differs to StringSource by having 'offest' and 'length' in code points instead of bytes in getSlice* methods. +/// Differs from StringSource by having 'offset' and 'length' in code points instead of bytes in getSlice* methods. /** NOTE: The behaviour of substring and substringUTF8 is inconsistent when negative offset is greater than string size: * substring: * hello diff --git a/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp index 59594a78401..a4cdc601d84 100644 --- a/src/Functions/array/arrayElement.cpp +++ b/src/Functions/array/arrayElement.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -95,32 +96,30 @@ private: using Offsets = ColumnArray::Offsets; - static bool matchKeyToIndex(const IColumn & data, const Offsets & offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs); + static bool matchKeyToIndexNumber( + const IColumn & data, const Offsets & offsets, bool is_key_const, + const IColumn & index, PaddedPODArray & matched_idxs); - static bool matchKeyToIndexConst(const IColumn & data, const Offsets & offsets, + static bool matchKeyToIndexNumberConst( + const IColumn & data, const Offsets & offsets, const Field & index, PaddedPODArray & matched_idxs); - template - static bool matchKeyToIndexNumber(const IColumn & data, const Offsets & offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs); + static bool matchKeyToIndexString( + const IColumn & data, const Offsets & offsets, bool is_key_const, + const IColumn & index, PaddedPODArray & matched_idxs); - template - static bool matchKeyToIndexNumberConst(const IColumn & data, const Offsets & offsets, - const Field & index, PaddedPODArray & matched_idxs); - - static bool matchKeyToIndexString(const IColumn & data, const Offsets & offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs); - - static bool matchKeyToIndexFixedString(const IColumn & data, const Offsets & offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs); - - static bool matchKeyToIndexStringConst(const IColumn & data, const Offsets & offsets, + static bool matchKeyToIndexStringConst( + const IColumn & data, const Offsets & offsets, const Field & index, PaddedPODArray & matched_idxs); template static void executeMatchKeyToIndex(const Offsets & offsets, PaddedPODArray & matched_idxs, const Matcher & matcher); + + template + static void executeMatchConstKeyToIndex( + size_t num_rows, size_t num_values, + PaddedPODArray & matched_idxs, const Matcher & matcher); }; @@ -759,23 +758,11 @@ ColumnPtr FunctionArrayElement::executeTuple(const ColumnsWithTypeAndName & argu namespace { +template struct MatcherString { - const ColumnString & data; - const ColumnString & index; - - bool match(size_t row_data, size_t row_index) const - { - auto data_ref = data.getDataAt(row_data); - auto index_ref = index.getDataAt(row_index); - return memequalSmallAllowOverflow15(index_ref.data, index_ref.size, data_ref.data,
data_ref.size); - } -}; - -struct MatcherFixedString -{ - const ColumnFixedString & data; - const ColumnFixedString & index; + const DataColumn & data; + const IndexColumn & index; bool match(size_t row_data, size_t row_index) const { @@ -785,9 +772,10 @@ struct MatcherFixedString } }; +template struct MatcherStringConst { - const ColumnString & data; + const DataColumn & data; const String & index; bool match(size_t row_data, size_t /* row_index */) const @@ -797,23 +785,23 @@ struct MatcherStringConst } }; -template +template struct MatcherNumber { - const PaddedPODArray & data; - const PaddedPODArray & index; + const PaddedPODArray & data; + const PaddedPODArray & index; bool match(size_t row_data, size_t row_index) const { - return data[row_data] == index[row_index]; + return data[row_data] == static_cast(index[row_index]); } }; -template +template struct MatcherNumberConst { - const PaddedPODArray & data; - T index; + const PaddedPODArray & data; + DataType index; bool match(size_t row_data, size_t /* row_index */) const { @@ -848,147 +836,158 @@ void FunctionArrayElement::executeMatchKeyToIndex( } } +template +void FunctionArrayElement::executeMatchConstKeyToIndex( + size_t num_rows, size_t num_values, + PaddedPODArray & matched_idxs, const Matcher & matcher) +{ + for (size_t i = 0; i < num_rows; ++i) + { + bool matched = false; + for (size_t j = 0; j < num_values; ++j) + { + if (matcher.match(j, i)) + { + matched_idxs.push_back(j + 1); + matched = true; + break; + } + } + + if (!matched) + matched_idxs.push_back(0); + } +} + +template +static bool castColumnString(const IColumn * column, F && f) +{ + return castTypeToEither(column, std::forward(f)); +} + bool FunctionArrayElement::matchKeyToIndexStringConst( const IColumn & data, const Offsets & offsets, const Field & index, PaddedPODArray & matched_idxs) { - const auto * data_string = checkAndGetColumn(&data); - if (!data_string) - return false; + return castColumnString(&data, [&](const auto & data_column) + { + using DataColumn = std::decay_t; - if (index.getType() != Field::Types::String) - return false; - - MatcherStringConst matcher{*data_string, get(index)}; - executeMatchKeyToIndex(offsets, matched_idxs, matcher); - return true; + MatcherStringConst matcher{data_column, get(index)}; + executeMatchKeyToIndex(offsets, matched_idxs, matcher); + return true; + }); } bool FunctionArrayElement::matchKeyToIndexString( - const IColumn & data, const Offsets & offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs) + const IColumn & data, const Offsets & offsets, bool is_key_const, + const IColumn & index, PaddedPODArray & matched_idxs) { - const auto * index_string = checkAndGetColumn(arguments[1].column.get()); - if (!index_string) - return false; + return castColumnString(&data, [&](const auto & data_column) + { + return castColumnString(&index, [&](const auto & index_column) + { + using DataColumn = std::decay_t; + using IndexColumn = std::decay_t; - const auto * data_string = checkAndGetColumn(&data); - if (!data_string) - return false; + MatcherString matcher{data_column, index_column}; + if (is_key_const) + executeMatchConstKeyToIndex(index.size(), data.size(), matched_idxs, matcher); + else + executeMatchKeyToIndex(offsets, matched_idxs, matcher); - MatcherString matcher{*data_string, *index_string}; - executeMatchKeyToIndex(offsets, matched_idxs, matcher); - return true; + return true; + }); + }); } -bool FunctionArrayElement::matchKeyToIndexFixedString( - const IColumn & data, const Offsets & 
offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs) +template +static constexpr bool areConvertibleTypes = + std::is_same_v + || (is_integer_v && is_integer_v + && std::is_convertible_v); + +template +static bool castColumnNumeric(const IColumn * column, F && f) { - const auto * index_string = checkAndGetColumn(arguments[1].column.get()); - if (!index_string) - return false; - - const auto * data_string = checkAndGetColumn(&data); - if (!data_string) - return false; - - MatcherFixedString matcher{*data_string, *index_string}; - executeMatchKeyToIndex(offsets, matched_idxs, matcher); - return true; + return castTypeToEither< + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector, + ColumnVector + >(column, std::forward(f)); } -template bool FunctionArrayElement::matchKeyToIndexNumberConst( const IColumn & data, const Offsets & offsets, const Field & index, PaddedPODArray & matched_idxs) { - const auto * data_numeric = checkAndGetColumn>(&data); - if (!data_numeric) - return false; - - std::optional index_as_integer; - Field::dispatch([&](const auto & value) + return castColumnNumeric(&data, [&](const auto & data_column) { - using FieldType = std::decay_t; - if constexpr (std::is_same_v || (is_integer_v && std::is_convertible_v)) - index_as_integer = static_cast(value); - }, index); + using DataType = typename std::decay_t::ValueType; + std::optional index_as_integer; - if (!index_as_integer) - return false; + Field::dispatch([&](const auto & value) + { + using FieldType = std::decay_t; + if constexpr (areConvertibleTypes) + index_as_integer = static_cast(value); + }, index); - MatcherNumberConst matcher{data_numeric->getData(), *index_as_integer}; - executeMatchKeyToIndex(offsets, matched_idxs, matcher); - return true; + if (!index_as_integer) + return false; + + MatcherNumberConst matcher{data_column.getData(), *index_as_integer}; + executeMatchKeyToIndex(offsets, matched_idxs, matcher); + return true; + }); } -template bool FunctionArrayElement::matchKeyToIndexNumber( - const IColumn & data, const Offsets & offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs) + const IColumn & data, const Offsets & offsets, bool is_key_const, + const IColumn & index, PaddedPODArray & matched_idxs) { - const auto * index_numeric = checkAndGetColumn>(arguments[1].column.get()); - if (!index_numeric) - return false; + return castColumnNumeric(&data, [&](const auto & data_column) + { + return castColumnNumeric(&index, [&](const auto & index_column) + { + using DataType = typename std::decay_t::ValueType; + using IndexType = typename std::decay_t::ValueType; - const auto * data_numeric = checkAndGetColumn>(&data); - if (!data_numeric) - return false; + if constexpr (areConvertibleTypes) + { + MatcherNumber matcher{data_column.getData(), index_column.getData()}; + if (is_key_const) + executeMatchConstKeyToIndex(index_column.size(), data_column.size(), matched_idxs, matcher); + else + executeMatchKeyToIndex(offsets, matched_idxs, matcher); - MatcherNumber matcher{data_numeric->getData(), index_numeric->getData()}; - executeMatchKeyToIndex(offsets, matched_idxs, matcher); - return true; -} + return true; + } -bool FunctionArrayElement::matchKeyToIndex( - const IColumn & data, const Offsets & offsets, - const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs) -{ - return matchKeyToIndexNumber(data, 
offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexString(data, offsets, arguments, matched_idxs) - || matchKeyToIndexFixedString(data, offsets, arguments, matched_idxs); -} - -bool FunctionArrayElement::matchKeyToIndexConst( - const IColumn & data, const Offsets & offsets, - const Field & index, PaddedPODArray & matched_idxs) -{ - return matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexNumberConst(data, offsets, index, matched_idxs) - || matchKeyToIndexStringConst(data, offsets, index, matched_idxs); + return false; + }); + }); } ColumnPtr FunctionArrayElement::executeMap( const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const { - const ColumnMap * col_map = typeid_cast(arguments[0].column.get()); - if (!col_map) - return nullptr; + const auto * col_map = checkAndGetColumn(arguments[0].column.get()); + const auto * col_const_map = checkAndGetColumnConst(arguments[0].column.get()); + assert(col_map || col_const_map); + + if (col_const_map) + col_map = typeid_cast(&col_const_map->getDataColumn()); const auto & nested_column = col_map->getNestedColumn(); const auto & keys_data = col_map->getNestedData().getColumn(0); @@ -1000,29 +999,33 @@ ColumnPtr FunctionArrayElement::executeMap( indices_column->reserve(input_rows_count); auto & indices_data = assert_cast &>(*indices_column).getData(); + bool executed = false; if (!isColumnConst(*arguments[1].column)) { - if (input_rows_count > 0 && !matchKeyToIndex(keys_data, offsets, arguments, indices_data)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal types of arguments: {}, {} for function {}", - arguments[0].type->getName(), arguments[1].type->getName(), getName()); + executed = matchKeyToIndexNumber(keys_data, offsets, !!col_const_map, *arguments[1].column, indices_data) + || matchKeyToIndexString(keys_data, offsets, !!col_const_map, 
*arguments[1].column, indices_data); } else { Field index = (*arguments[1].column)[0]; - - // Get Matched key's value - if (input_rows_count > 0 && !matchKeyToIndexConst(keys_data, offsets, index, indices_data)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal types of arguments: {}, {} for function {}", - arguments[0].type->getName(), arguments[1].type->getName(), getName()); + executed = matchKeyToIndexNumberConst(keys_data, offsets, index, indices_data) + || matchKeyToIndexStringConst(keys_data, offsets, index, indices_data); } + if (!executed) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal types of arguments: {}, {} for function {}", + arguments[0].type->getName(), arguments[1].type->getName(), getName()); + + ColumnPtr values_array = ColumnArray::create(values_data.getPtr(), nested_column.getOffsetsPtr()); + if (col_const_map) + values_array = ColumnConst::create(values_array, input_rows_count); + /// Prepare arguments to call arrayElement for array with values and calculated indices at previous step. ColumnsWithTypeAndName new_arguments = { { - ColumnArray::create(values_data.getPtr(), nested_column.getOffsetsPtr()), + values_array, std::make_shared(result_type), "" }, @@ -1066,13 +1069,14 @@ DataTypePtr FunctionArrayElement::getReturnTypeImpl(const DataTypes & arguments) ColumnPtr FunctionArrayElement::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const { - /// Check nullability. - bool is_array_of_nullable = false; + const auto * col_map = checkAndGetColumn(arguments[0].column.get()); + const auto * col_const_map = checkAndGetColumnConst(arguments[0].column.get()); - const ColumnMap * col_map = checkAndGetColumn(arguments[0].column.get()); - if (col_map) + if (col_map || col_const_map) return executeMap(arguments, result_type, input_rows_count); + /// Check nullability. + bool is_array_of_nullable = false; const ColumnArray * col_array = nullptr; const ColumnArray * col_const_array = nullptr; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index f59d50dbdeb..d984a350c80 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -146,7 +146,7 @@ struct ContextSharedPart #if USE_NURAFT mutable std::mutex keeper_storage_dispatcher_mutex; - mutable std::shared_ptr keeper_storage_dispatcher; + mutable std::shared_ptr keeper_storage_dispatcher; #endif mutable std::mutex auxiliary_zookeepers_mutex; mutable std::map auxiliary_zookeepers; /// Map for auxiliary ZooKeeper clients. 
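The dispatcher rename below keeps Context's locking discipline intact: a single mutex guards both the one-time creation and every later lookup of the shared dispatcher. A compact sketch of that guarded lazy-initialization pattern, under simplified stand-in names (the real methods are initializeKeeperDispatcher()/getKeeperDispatcher(), throw DB::Exception, and also consult the keeper_server config section, which is omitted here):

#include <memory>
#include <mutex>
#include <stdexcept>

struct KeeperDispatcher
{
    void initialize() { /* read config, start request/response threads, ... */ }
};

class Context
{
public:
    /// Called once at server startup; creating the dispatcher twice is a logic error.
    void initializeDispatcher() const
    {
        std::lock_guard<std::mutex> lock(dispatcher_mutex);
        if (dispatcher)
            throw std::logic_error("Trying to initialize Keeper dispatcher twice");
        dispatcher = std::make_shared<KeeperDispatcher>();
        dispatcher->initialize();
    }

    /// Handed out by reference; throws if initialization never happened
    /// (e.g. the server was started without a keeper_server section).
    std::shared_ptr<KeeperDispatcher> & getDispatcher() const
    {
        std::lock_guard<std::mutex> lock(dispatcher_mutex);
        if (!dispatcher)
            throw std::logic_error("Keeper dispatcher is not initialized");
        return dispatcher;
    }

private:
    mutable std::mutex dispatcher_mutex;
    mutable std::shared_ptr<KeeperDispatcher> dispatcher;
};

Taking the same mutex on every access keeps the accessor simple; since callers such as KeeperTCPHandler fetch the dispatcher once at construction, the lock is not on any hot path.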
@@ -1649,7 +1649,7 @@ void Context::setSystemZooKeeperLogAfterInitializationIfNeeded() zk.second->setZooKeeperLog(shared->system_logs->zookeeper_log); } -void Context::initializeKeeperStorageDispatcher() const +void Context::initializeKeeperDispatcher() const { #if USE_NURAFT std::lock_guard lock(shared->keeper_storage_dispatcher_mutex); @@ -1660,14 +1660,14 @@ void Context::initializeKeeperStorageDispatcher() const const auto & config = getConfigRef(); if (config.has("keeper_server")) { - shared->keeper_storage_dispatcher = std::make_shared(); + shared->keeper_storage_dispatcher = std::make_shared(); shared->keeper_storage_dispatcher->initialize(config, getApplicationType() == ApplicationType::KEEPER); } #endif } #if USE_NURAFT -std::shared_ptr & Context::getKeeperStorageDispatcher() const +std::shared_ptr & Context::getKeeperDispatcher() const { std::lock_guard lock(shared->keeper_storage_dispatcher_mutex); if (!shared->keeper_storage_dispatcher) @@ -1677,7 +1677,7 @@ std::shared_ptr & Context::getKeeperStorageDispatcher() } #endif -void Context::shutdownKeeperStorageDispatcher() const +void Context::shutdownKeeperDispatcher() const { #if USE_NURAFT std::lock_guard lock(shared->keeper_storage_dispatcher_mutex); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 9527b87ed39..6af2c3c4d62 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -102,7 +102,7 @@ class StoragePolicySelector; using StoragePolicySelectorPtr = std::shared_ptr; struct PartUUIDs; using PartUUIDsPtr = std::shared_ptr; -class KeeperStorageDispatcher; +class KeeperDispatcher; class Session; class IOutputFormat; @@ -647,10 +647,10 @@ public: std::shared_ptr getAuxiliaryZooKeeper(const String & name) const; #if USE_NURAFT - std::shared_ptr & getKeeperStorageDispatcher() const; + std::shared_ptr & getKeeperDispatcher() const; #endif - void initializeKeeperStorageDispatcher() const; - void shutdownKeeperStorageDispatcher() const; + void initializeKeeperDispatcher() const; + void shutdownKeeperDispatcher() const; /// Set auxiliary zookeepers configuration at server starting or configuration reloading. 
void reloadAuxiliaryZooKeepersConfigIfChanged(const ConfigurationPtr & config); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 4e5e3b4e86b..83af913c7ab 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -156,7 +156,7 @@ ColumnDependencies getAllColumnDependencies(const StorageMetadataPtr & metadata_ ColumnDependencies dependencies; while (!new_updated_columns.empty()) { - auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns); + auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true); new_updated_columns.clear(); for (const auto & dependency : new_dependencies) { @@ -303,6 +303,15 @@ static NameSet getKeyColumns(const StoragePtr & storage, const StorageMetadataPt return key_columns; } +static bool materializeTTLRecalculateOnly(const StoragePtr & storage) +{ + auto storage_from_merge_tree_data_part = std::dynamic_pointer_cast(storage); + if (!storage_from_merge_tree_data_part) + return false; + + return storage_from_merge_tree_data_part->materializeTTLRecalculateOnly(); +} + static void validateUpdateColumns( const StoragePtr & storage, const StorageMetadataPtr & metadata_snapshot, const NameSet & updated_columns, @@ -394,8 +403,13 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) NamesAndTypesList all_columns = columns_desc.getAllPhysical(); NameSet updated_columns; + bool materialize_ttl_recalculate_only = materializeTTLRecalculateOnly(storage); for (const MutationCommand & command : commands) { + if (command.type == MutationCommand::Type::UPDATE + || command.type == MutationCommand::Type::DELETE) + materialize_ttl_recalculate_only = false; + for (const auto & kv : command.column_to_update_expression) { updated_columns.insert(kv.first); @@ -569,7 +583,18 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) else if (command.type == MutationCommand::MATERIALIZE_TTL) { mutation_kind.set(MutationKind::MUTATE_OTHER); - if (metadata_snapshot->hasRowsTTL()) + if (materialize_ttl_recalculate_only) + { + // Just recalculate ttl_infos without removing expired data + auto all_columns_vec = all_columns.getNames(); + auto new_dependencies = metadata_snapshot->getColumnDependencies(NameSet(all_columns_vec.begin(), all_columns_vec.end()), false); + for (const auto & dependency : new_dependencies) + { + if (dependency.kind == ColumnDependency::TTL_EXPRESSION) + dependencies.insert(dependency); + } + } + else if (metadata_snapshot->hasRowsTTL()) { for (const auto & column : all_columns) dependencies.emplace(column.name, ColumnDependency::TTL_TARGET); @@ -594,19 +619,19 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) } /// Recalc only skip indices and projections of columns which could be updated by TTL. - auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns); + auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true); for (const auto & dependency : new_dependencies) { if (dependency.kind == ColumnDependency::SKIP_INDEX || dependency.kind == ColumnDependency::PROJECTION) dependencies.insert(dependency); } + } - if (dependencies.empty()) - { - /// Very rare case. It can happen if we have only one MOVE TTL with constant expression. - /// But we still have to read at least one column. - dependencies.emplace(all_columns.front().name, ColumnDependency::TTL_EXPRESSION); - } + if (dependencies.empty()) + { + /// Very rare case.
It can happen if we have only one MOVE TTL with constant expression. + /// But we still have to read at least one column. + dependencies.emplace(all_columns.front().name, ColumnDependency::TTL_EXPRESSION); } } else if (command.type == MutationCommand::READ_COLUMN) diff --git a/src/Parsers/ParserInsertQuery.cpp b/src/Parsers/ParserInsertQuery.cpp index 8f577288e65..19457f027bf 100644 --- a/src/Parsers/ParserInsertQuery.cpp +++ b/src/Parsers/ParserInsertQuery.cpp @@ -111,11 +111,6 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { data = pos->begin; } - else if (s_from_infile.ignore(pos, expected)) - { - if (!infile_name_p.parse(pos, infile, expected)) - return false; - } else if (s_format.ignore(pos, expected)) { if (!name_p.parse(pos, format, expected)) diff --git a/src/Parsers/getInsertQuery.cpp b/src/Parsers/getInsertQuery.cpp new file mode 100644 index 00000000000..6f52056dfe2 --- /dev/null +++ b/src/Parsers/getInsertQuery.cpp @@ -0,0 +1,28 @@ +#include + +#include +#include +#include +#include + + +namespace DB +{ +std::string getInsertQuery(const std::string & db_name, const std::string & table_name, const ColumnsWithTypeAndName & columns, IdentifierQuotingStyle quoting) +{ + ASTInsertQuery query; + query.table_id.database_name = db_name; + query.table_id.table_name = table_name; + query.columns = std::make_shared(','); + query.children.push_back(query.columns); + for (const auto & column : columns) + query.columns->children.emplace_back(std::make_shared(column.name)); + + WriteBufferFromOwnString buf; + IAST::FormatSettings settings(buf, true); + settings.always_quote_identifiers = true; + settings.identifier_quoting_style = quoting; + query.IAST::format(settings); + return buf.str(); +} +} diff --git a/src/Parsers/getInsertQuery.h b/src/Parsers/getInsertQuery.h new file mode 100644 index 00000000000..0bcb5e3660b --- /dev/null +++ b/src/Parsers/getInsertQuery.h @@ -0,0 +1,8 @@ +#pragma once +#include +#include + +namespace DB +{ +std::string getInsertQuery(const std::string & db_name, const std::string & table_name, const ColumnsWithTypeAndName & columns, IdentifierQuotingStyle quoting); +} diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp index df40a78749b..7ead4d0d419 100644 --- a/src/Server/KeeperTCPHandler.cpp +++ b/src/Server/KeeperTCPHandler.cpp @@ -194,7 +194,7 @@ KeeperTCPHandler::KeeperTCPHandler(IServer & server_, const Poco::Net::StreamSoc , server(server_) , log(&Poco::Logger::get("NuKeeperTCPHandler")) , global_context(Context::createCopy(server.context())) - , keeper_dispatcher(global_context->getKeeperStorageDispatcher()) + , keeper_dispatcher(global_context->getKeeperDispatcher()) , operation_timeout(0, global_context->getConfigRef().getUInt("keeper_server.operation_timeout_ms", Coordination::DEFAULT_OPERATION_TIMEOUT_MS) * 1000) , session_timeout(0, global_context->getConfigRef().getUInt("keeper_server.session_timeout_ms", Coordination::DEFAULT_SESSION_TIMEOUT_MS) * 1000) , poll_wrapper(std::make_unique(socket_)) diff --git a/src/Server/KeeperTCPHandler.h b/src/Server/KeeperTCPHandler.h index 76371ed1a0c..7abfb72c846 100644 --- a/src/Server/KeeperTCPHandler.h +++ b/src/Server/KeeperTCPHandler.h @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -38,7 +38,7 @@ private: IServer & server; Poco::Logger * log; ContextPtr global_context; - std::shared_ptr keeper_dispatcher; + std::shared_ptr keeper_dispatcher; Poco::Timespan operation_timeout; Poco::Timespan 
session_timeout; int64_t session_id{-1}; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 84be8012509..c2a0e5f0650 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -493,7 +494,6 @@ static void extractMergingAndGatheringColumns( const NamesAndTypesList & storage_columns, const ExpressionActionsPtr & sorting_key_expr, const IndicesDescription & indexes, - const ProjectionsDescription & projections, const MergeTreeData::MergingParams & merging_params, NamesAndTypesList & gathering_columns, Names & gathering_column_names, NamesAndTypesList & merging_columns, Names & merging_column_names) @@ -507,13 +507,6 @@ static void extractMergingAndGatheringColumns( std::inserter(key_columns, key_columns.end())); } - for (const auto & projection : projections) - { - Names projection_columns_vec = projection.required_columns; - std::copy(projection_columns_vec.cbegin(), projection_columns_vec.cend(), - std::inserter(key_columns, key_columns.end())); - } - /// Force sign column for Collapsing mode if (merging_params.mode == MergeTreeData::MergingParams::Collapsing) key_columns.emplace(merging_params.sign_column); @@ -727,7 +720,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor storage_columns, metadata_snapshot->getSortingKey().expression, metadata_snapshot->getSecondaryIndices(), - metadata_snapshot->getProjections(), merging_params, gathering_columns, gathering_column_names, @@ -1288,10 +1280,10 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor auto mrk_extension = source_part->index_granularity_info.is_adaptive ? 
getAdaptiveMrkExtension(new_data_part->getType()) : getNonAdaptiveMrkExtension(); bool need_sync = needSyncPart(source_part->rows_count, source_part->getBytesOnDisk(), *data_settings); - bool need_remove_expired_values = false; + auto execute_ttl_type = ExecuteTTLType::NONE; - if (in && shouldExecuteTTL(metadata_snapshot, interpreter->getColumnDependencies(), commands_for_part)) - need_remove_expired_values = true; + if (in) + execute_ttl_type = shouldExecuteTTL(metadata_snapshot, interpreter->getColumnDependencies()); /// All columns from part are changed and may be some more that were missing before in part /// TODO We can materialize compact part without copying data @@ -1319,7 +1311,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor time_of_mutation, compression_codec, merge_entry, - need_remove_expired_values, + execute_ttl_type, need_sync, space_reservation, holder, @@ -1356,7 +1348,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor return data.cloneAndLoadDataPartOnSameDisk(source_part, "tmp_clone_", future_part.part_info, metadata_snapshot); } - if (need_remove_expired_values) + if (execute_ttl_type != ExecuteTTLType::NONE) files_to_skip.insert("ttl.txt"); disk->createDirectories(new_part_tmp_path); @@ -1416,7 +1408,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor time_of_mutation, compression_codec, merge_entry, - need_remove_expired_values, + execute_ttl_type, need_sync, space_reservation, holder, @@ -1437,7 +1429,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor } } - finalizeMutatedPart(source_part, new_data_part, need_remove_expired_values, compression_codec); + finalizeMutatedPart(source_part, new_data_part, execute_ttl_type, compression_codec); } return new_data_part; @@ -1984,21 +1976,22 @@ std::set MergeTreeDataMergerMutator::getProjectionsToRec return projections_to_recalc; } -bool MergeTreeDataMergerMutator::shouldExecuteTTL( - const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies, const MutationCommands & commands) +ExecuteTTLType MergeTreeDataMergerMutator::shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies) { if (!metadata_snapshot->hasAnyTTL()) - return false; + return ExecuteTTLType::NONE; - for (const auto & command : commands) - if (command.type == MutationCommand::MATERIALIZE_TTL) - return true; + bool has_ttl_expression = false; for (const auto & dependency : dependencies) - if (dependency.kind == ColumnDependency::TTL_EXPRESSION || dependency.kind == ColumnDependency::TTL_TARGET) - return true; + { + if (dependency.kind == ColumnDependency::TTL_EXPRESSION) + has_ttl_expression = true; - return false; + if (dependency.kind == ColumnDependency::TTL_TARGET) + return ExecuteTTLType::NORMAL; + } + return has_ttl_expression ? ExecuteTTLType::RECALCULATE : ExecuteTTLType::NONE; } // 1. 
get projection pipeline and a sink to write parts @@ -2172,7 +2165,7 @@ void MergeTreeDataMergerMutator::mutateAllPartColumns( time_t time_of_mutation, const CompressionCodecPtr & compression_codec, MergeListEntry & merge_entry, - bool need_remove_expired_values, + ExecuteTTLType execute_ttl_type, bool need_sync, const ReservationPtr & space_reservation, TableLockHolder & holder, @@ -2185,9 +2178,12 @@ void MergeTreeDataMergerMutator::mutateAllPartColumns( mutating_stream = std::make_shared( std::make_shared(mutating_stream, data.getPrimaryKeyAndSkipIndicesExpression(metadata_snapshot))); - if (need_remove_expired_values) + if (execute_ttl_type == ExecuteTTLType::NORMAL) mutating_stream = std::make_shared(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true); + if (execute_ttl_type == ExecuteTTLType::RECALCULATE) + mutating_stream = std::make_shared(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true); + IMergeTreeDataPart::MinMaxIndex minmax_idx; MergedBlockOutputStream out{ @@ -2229,7 +2225,7 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns( time_t time_of_mutation, const CompressionCodecPtr & compression_codec, MergeListEntry & merge_entry, - bool need_remove_expired_values, + ExecuteTTLType execute_ttl_type, bool need_sync, const ReservationPtr & space_reservation, TableLockHolder & holder, @@ -2238,9 +2234,12 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns( if (mutating_stream == nullptr) throw Exception("Cannot mutate part columns with uninitialized mutations stream. It's a bug", ErrorCodes::LOGICAL_ERROR); - if (need_remove_expired_values) + if (execute_ttl_type == ExecuteTTLType::NORMAL) mutating_stream = std::make_shared(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true); + if (execute_ttl_type == ExecuteTTLType::RECALCULATE) + mutating_stream = std::make_shared(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true); + IMergedBlockOutputStream::WrittenOffsetColumns unused_written_offsets; MergedColumnOnlyOutputStream out( new_data_part, @@ -2279,7 +2278,7 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns( void MergeTreeDataMergerMutator::finalizeMutatedPart( const MergeTreeDataPartPtr & source_part, MergeTreeData::MutableDataPartPtr new_data_part, - bool need_remove_expired_values, + ExecuteTTLType execute_ttl_type, const CompressionCodecPtr & codec) { auto disk = new_data_part->volume->getDisk(); @@ -2293,7 +2292,7 @@ void MergeTreeDataMergerMutator::finalizeMutatedPart( new_data_part->checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_hash = out_hashing.getHash(); } - if (need_remove_expired_values) + if (execute_ttl_type != ExecuteTTLType::NONE) { /// Write a file with ttl infos in json format. auto out_ttl = disk->writeFile(fs::path(new_data_part->getFullRelativePath()) / "ttl.txt", 4096); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index ca7376d8f3e..3a0041e4a37 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -23,6 +23,13 @@ enum class SelectPartsDecision NOTHING_TO_MERGE = 2, }; +enum class ExecuteTTLType +{ + NONE = 0, + NORMAL = 1, + RECALCULATE = 2, +}; + /// Auxiliary struct holding metainformation for the future merged or mutated part.
struct FutureMergedMutatedPart { @@ -200,8 +207,7 @@ private: const ProjectionsDescription & all_projections, const MutationCommands & commands_for_removes); - static bool shouldExecuteTTL( - const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies, const MutationCommands & commands); + static ExecuteTTLType shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies); /// Return set of indices which should be recalculated during mutation also /// wraps input stream into additional expression stream @@ -242,7 +248,7 @@ private: time_t time_of_mutation, const CompressionCodecPtr & compression_codec, MergeListEntry & merge_entry, - bool need_remove_expired_values, + ExecuteTTLType execute_ttl_type, bool need_sync, const ReservationPtr & space_reservation, TableLockHolder & holder, @@ -260,7 +266,7 @@ private: time_t time_of_mutation, const CompressionCodecPtr & compression_codec, MergeListEntry & merge_entry, - bool need_remove_expired_values, + ExecuteTTLType execute_ttl_type, bool need_sync, const ReservationPtr & space_reservation, TableLockHolder & holder, @@ -271,7 +277,7 @@ private: static void finalizeMutatedPart( const MergeTreeDataPartPtr & source_part, MergeTreeData::MutableDataPartPtr new_data_part, - bool need_remove_expired_values, + ExecuteTTLType execute_ttl_type, const CompressionCodecPtr & codec); public : diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 9a198500447..890cfca8d71 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -117,6 +117,7 @@ struct Settings; M(Int64, merge_with_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with delete TTL can be repeated.", 0) \ M(Int64, merge_with_recompression_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with recompression TTL can be repeated.", 0) \ M(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \ + M(Bool, materialize_ttl_recalculate_only, false, "Only recalculate ttl info when MATERIALIZE TTL", 0) \ M(Bool, write_final_mark, true, "Write final mark after end of column (0 - disabled, do nothing if index_granularity_bytes=0)", 0) \ M(Bool, enable_mixed_granularity_parts, true, "Enable parts with adaptive and non adaptive granularity", 0) \ M(MaxThreads, max_part_loading_threads, 0, "The number of threads to load data parts at startup.", 0) \ diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 2da20073427..806c861cf00 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -47,8 +47,12 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer( if (!primary_key.column_names.empty()) first_primary_key_column = primary_key.column_names[0]; - for (const auto & [_, size] : column_sizes) - total_size_of_queried_columns += size; + for (const auto & name : queried_columns) + { + auto it = column_sizes.find(name); + if (it != column_sizes.end()) + total_size_of_queried_columns += it->second; + } determineArrayJoinedNames(query_info.query->as()); optimize(query_info.query->as()); diff --git a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h index bcce2d990ca..997e6e8bb74 100644 --- a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h +++ 
b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -73,6 +73,11 @@ public: return storage.getPartitionIDFromQuery(ast, context); } + bool materializeTTLRecalculateOnly() const + { + return parts.front()->storage.getSettings()->materialize_ttl_recalculate_only; + } + protected: /// Used in part mutation. StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_) diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 5183b925141..cbd27afe106 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -214,7 +215,7 @@ bool StorageInMemoryMetadata::hasAnyGroupByTTL() const return !table_ttl.group_by_ttl.empty(); } -ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet & updated_columns) const +ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet & updated_columns, bool include_ttl_target) const { if (updated_columns.empty()) return {}; @@ -250,7 +251,7 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet if (hasRowsTTL()) { auto rows_expression = getRowsTTL().expression; - if (add_dependent_columns(rows_expression, required_ttl_columns)) + if (add_dependent_columns(rows_expression, required_ttl_columns) && include_ttl_target) { /// Filter all columns, if rows TTL expression have to be recalculated. for (const auto & column : getColumns().getAllPhysical()) @@ -263,13 +264,15 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet for (const auto & [name, entry] : getColumnTTLs()) { - if (add_dependent_columns(entry.expression, required_ttl_columns)) + if (add_dependent_columns(entry.expression, required_ttl_columns) && include_ttl_target) updated_ttl_columns.insert(name); } for (const auto & entry : getMoveTTLs()) add_dependent_columns(entry.expression, required_ttl_columns); + //TODO what about rows_where_ttl and group_by_ttl ?? + for (const auto & column : indices_columns) res.emplace(column, ColumnDependency::SKIP_INDEX); for (const auto & column : projections_columns) @@ -493,6 +496,23 @@ namespace return res; } + + /* + * This function checks compatibility of enums. It returns true if: + * 1. Both types are enums. + * 2. The first type can represent all possible values of the second one. + * 3. Both types require the same amount of memory. + */ + bool isCompatibleEnumTypes(const IDataType * lhs, const IDataType * rhs) + { + if (IDataTypeEnum const * enum_type = dynamic_cast(lhs)) + { + if (!enum_type->contains(*rhs)) + return false; + return enum_type->getMaximumSizeOfValueInMemory() == rhs->getMaximumSizeOfValueInMemory(); + } + return false; + } } void StorageInMemoryMetadata::check(const Names & column_names, const NamesAndTypesList & virtuals, const StorageID & storage_id) const @@ -544,12 +564,13 @@ void StorageInMemoryMetadata::check(const NamesAndTypesList & provided_columns) column.name, listOfColumns(available_columns)); - if (!column.type->equals(*it->getMapped())) + const auto * available_type = it->getMapped(); + if (!column.type->equals(*available_type) && !isCompatibleEnumTypes(available_type, column.type.get())) throw Exception( ErrorCodes::TYPE_MISMATCH, "Type mismatch for column {}. 
@@ -544,12 +564,13 @@ void StorageInMemoryMetadata::check(const NamesAndTypesList & provided_columns)
             column.name,
             listOfColumns(available_columns));

-        if (!column.type->equals(*it->getMapped()))
+        const auto * available_type = it->getMapped();
+        if (!column.type->equals(*available_type) && !isCompatibleEnumTypes(available_type, column.type.get()))
             throw Exception(
                 ErrorCodes::TYPE_MISMATCH,
                 "Type mismatch for column {}. Column has type {}, got type {}",
                 column.name,
-                it->getMapped()->getName(),
+                available_type->getName(),
                 column.type->getName());

         if (unique_names.end() != unique_names.find(column.name))
@@ -588,16 +609,16 @@
             name,
             listOfColumns(available_columns));

-        const auto & provided_column_type = *it->getMapped();
-        const auto & available_column_type = *jt->getMapped();
+        const auto * provided_column_type = it->getMapped();
+        const auto * available_column_type = jt->getMapped();

-        if (!provided_column_type.equals(available_column_type))
+        if (!provided_column_type->equals(*available_column_type) && !isCompatibleEnumTypes(available_column_type, provided_column_type))
             throw Exception(
                 ErrorCodes::TYPE_MISMATCH,
                 "Type mismatch for column {}. Column has type {}, got type {}",
                 name,
-                provided_column_type.getName(),
-                available_column_type.getName());
+                available_column_type->getName(),
+                provided_column_type->getName());

         if (unique_names.end() != unique_names.find(name))
             throw Exception(ErrorCodes::COLUMN_QUERIED_MORE_THAN_ONCE,
@@ -632,12 +653,13 @@ void StorageInMemoryMetadata::check(const Block & block, bool need_all) const
             column.name,
             listOfColumns(available_columns));

-        if (!column.type->equals(*it->getMapped()))
+        const auto * available_type = it->getMapped();
+        if (!column.type->equals(*available_type) && !isCompatibleEnumTypes(available_type, column.type.get()))
             throw Exception(
                 ErrorCodes::TYPE_MISMATCH,
                 "Type mismatch for column {}. Column has type {}, got type {}",
                 column.name,
-                it->getMapped()->getName(),
+                available_type->getName(),
                 column.type->getName());
     }
diff --git a/src/Storages/StorageInMemoryMetadata.h b/src/Storages/StorageInMemoryMetadata.h
index d0d60f608d7..9accdb9b3b6 100644
--- a/src/Storages/StorageInMemoryMetadata.h
+++ b/src/Storages/StorageInMemoryMetadata.h
@@ -143,7 +143,7 @@ struct StorageInMemoryMetadata

     /// Returns columns, which will be needed to calculate dependencies (skip
     /// indices, TTL expressions) if we update @updated_columns set of columns.
-    ColumnDependencies getColumnDependencies(const NameSet & updated_columns) const;
+    ColumnDependencies getColumnDependencies(const NameSet & updated_columns, bool include_ttl_target) const;

     /// Block with ordinary + materialized columns.
     Block getSampleBlock() const;
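isCompatibleEnumTypes() above is what lets parts written with an older Enum definition pass the metadata check, for example when re-attaching a detached partition after the column gained a new value; the 02012 tests later in this patch cover both directions. A sketch of the two cases (hypothetical table name):

create table enum_compat (a Enum8('one' = 1, 'two' = 2)) engine = MergeTree order by a;
insert into enum_compat values ('one'), ('two');
alter table enum_compat detach partition id 'all';
alter table enum_compat modify column a Enum8('one' = 1, 'two' = 2, 'three' = 3);
alter table enum_compat attach partition id 'all';  -- OK: same size, old values still representable

alter table enum_compat detach partition id 'all';
alter table enum_compat modify column a Enum16('one' = 1, 'two' = 2, 'three' = 3);
alter table enum_compat attach partition id 'all';  -- fails with TYPE_MISMATCH: the value size changed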
diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp
index 603a52b2801..3617e964734 100644
--- a/src/Storages/StoragePostgreSQL.cpp
+++ b/src/Storages/StoragePostgreSQL.cpp
@@ -29,6 +29,8 @@
 #include
 #include
 #include
+#include
+#include

 namespace DB
@@ -47,10 +49,12 @@ StoragePostgreSQL::StoragePostgreSQL(
     const ColumnsDescription & columns_,
     const ConstraintsDescription & constraints_,
     const String & comment,
-    const String & remote_table_schema_)
+    const String & remote_table_schema_,
+    const String & on_conflict_)
     : IStorage(table_id_)
     , remote_table_name(remote_table_name_)
     , remote_table_schema(remote_table_schema_)
+    , on_conflict(on_conflict_)
     , pool(std::move(pool_))
 {
     StorageInMemoryMetadata storage_metadata;
@@ -94,17 +98,22 @@ Pipe StoragePostgreSQL::read(

 class PostgreSQLSink : public SinkToStorage
 {
+
+using Row = std::vector<std::optional<std::string>>;
+
 public:
     explicit PostgreSQLSink(
         const StorageMetadataPtr & metadata_snapshot_,
         postgres::ConnectionHolderPtr connection_holder_,
         const String & remote_table_name_,
-        const String & remote_table_schema_)
+        const String & remote_table_schema_,
+        const String & on_conflict_)
         : SinkToStorage(metadata_snapshot_->getSampleBlock())
         , metadata_snapshot(metadata_snapshot_)
         , connection_holder(std::move(connection_holder_))
         , remote_table_name(remote_table_name_)
         , remote_table_schema(remote_table_schema_)
+        , on_conflict(on_conflict_)
     {
     }

@@ -113,11 +122,21 @@ public:
     void consume(Chunk chunk) override
     {
         auto block = getPort().getHeader().cloneWithColumns(chunk.detachColumns());
+
         if (!inserter)
-            inserter = std::make_unique<StreamTo>(connection_holder->get(),
-                remote_table_schema.empty() ? pqxx::table_path({remote_table_name})
-                                            : pqxx::table_path({remote_table_schema, remote_table_name}),
-                block.getNames());
+        {
+            if (on_conflict.empty())
+            {
+                inserter = std::make_unique<StreamTo>(connection_holder->get(),
+                    remote_table_schema.empty() ? pqxx::table_path({remote_table_name})
+                                                : pqxx::table_path({remote_table_schema, remote_table_name}), block.getNames());
+            }
+            else
+            {
+                inserter = std::make_unique<PreparedInsert>(connection_holder->get(), remote_table_name,
+                    remote_table_schema, block.getColumnsWithTypeAndName(), on_conflict);
+            }
+        }
         const auto columns = block.getColumns();
         const size_t num_rows = block.rows(), num_cols = block.columns();
@@ -151,7 +170,7 @@ public:
                 }
             }

-            inserter->stream.write_values(row);
+            inserter->insert(row);
         }
     }

@@ -268,37 +287,92 @@ public:
     }

 private:
-    struct StreamTo
+    struct Inserter
     {
+        pqxx::connection & connection;
         pqxx::work tx;
+
+        explicit Inserter(pqxx::connection & connection_)
+            : connection(connection_)
+            , tx(connection) {}
+
+        virtual ~Inserter() = default;
+
+        virtual void insert(const Row & row) = 0;
+        virtual void complete() = 0;
+    };
+
+    struct StreamTo : Inserter
+    {
         Names columns;
         pqxx::stream_to stream;

-        StreamTo(pqxx::connection & connection, pqxx::table_path table_, Names columns_)
-            : tx(connection)
+        StreamTo(pqxx::connection & connection_, pqxx::table_path table_, Names columns_)
+            : Inserter(connection_)
             , columns(std::move(columns_))
             , stream(pqxx::stream_to::raw_table(tx, connection.quote_table(table_), connection.quote_columns(columns)))
         {
         }

-        void complete()
+        void complete() override
         {
             stream.complete();
             tx.commit();
         }
+
+        void insert(const Row & row) override
+        {
+            stream.write_values(row);
+        }
     };
+
+    struct PreparedInsert : Inserter
+    {
+        PreparedInsert(pqxx::connection & connection_, const String & table, const String & schema,
+                       const ColumnsWithTypeAndName & columns, const String & on_conflict_)
+            : Inserter(connection_)
+        {
+            WriteBufferFromOwnString buf;
+            buf << getInsertQuery(schema, table, columns, IdentifierQuotingStyle::DoubleQuotes);
+            buf << " (";
+            for (size_t i = 1; i <= columns.size(); ++i)
+            {
+                if (i > 1)
+                    buf << ", ";
+                buf << "$" << i;
+            }
+            buf << ") ";
+            buf << on_conflict_;
+            connection.prepare("insert", buf.str());
+        }
+
+        void complete() override
+        {
+            connection.unprepare("insert");
+            tx.commit();
+        }
+
+        void insert(const Row & row) override
+        {
+            pqxx::params params;
+            params.reserve(row.size());
+            params.append_multi(row);
+            tx.exec_prepared("insert", params);
+        }
+    };

     StorageMetadataPtr metadata_snapshot;
     postgres::ConnectionHolderPtr connection_holder;
-    const String remote_table_name, remote_table_schema;
-    std::unique_ptr<StreamTo> inserter;
+    const String remote_db_name, remote_table_name, remote_table_schema, on_conflict;
+
+    std::unique_ptr<Inserter> inserter;
 };

 SinkToStoragePtr StoragePostgreSQL::write(
     const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr /* context */)
 {
-    return std::make_shared<PostgreSQLSink>(metadata_snapshot, pool->get(), remote_table_name, remote_table_schema);
+    return std::make_shared<PostgreSQLSink>(metadata_snapshot, pool->get(), remote_table_name, remote_table_schema, on_conflict);
 }
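For reference, the prepared statement that PreparedInsert registers on the PostgreSQL side should come out roughly like the following (hypothetical table and column names; the exact leading INSERT part depends on getInsertQuery and the DoubleQuotes quoting style):

INSERT INTO "public"."test_table" ("a", "b", "c") VALUES ($1, $2, $3) ON CONFLICT DO NOTHING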
@@ -308,9 +382,9 @@ void registerStoragePostgreSQL(StorageFactory & factory)
     {
         ASTs & engine_args = args.engine_args;

-        if (engine_args.size() < 5 || engine_args.size() > 6)
-            throw Exception("Storage PostgreSQL requires from 5 to 6 parameters: "
-                            "PostgreSQL('host:port', 'database', 'table', 'username', 'password' [, 'schema']",
+        if (engine_args.size() < 5 || engine_args.size() > 7)
+            throw Exception("Storage PostgreSQL requires from 5 to 7 parameters: "
+                            "PostgreSQL('host:port', 'database', 'table', 'username', 'password' [, 'schema', 'ON CONFLICT ...'])",
                             ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

         for (auto & engine_arg : engine_args)
@@ -326,9 +400,11 @@ void registerStoragePostgreSQL(StorageFactory & factory)
         const String & username = engine_args[3]->as<ASTLiteral &>().value.safeGet<String>();
         const String & password = engine_args[4]->as<ASTLiteral &>().value.safeGet<String>();

-        String remote_table_schema;
-        if (engine_args.size() == 6)
+        String remote_table_schema, on_conflict;
+        if (engine_args.size() >= 6)
             remote_table_schema = engine_args[5]->as<ASTLiteral &>().value.safeGet<String>();
+        if (engine_args.size() >= 7)
+            on_conflict = engine_args[6]->as<ASTLiteral &>().value.safeGet<String>();

         auto pool = std::make_shared<postgres::PoolWithFailover>(
             remote_database,
@@ -345,7 +421,8 @@ void registerStoragePostgreSQL(StorageFactory & factory)
             args.columns,
             args.constraints,
             args.comment,
-            remote_table_schema);
+            remote_table_schema,
+            on_conflict);
     },
     {
         .source_access_type = AccessType::POSTGRES,
diff --git a/src/Storages/StoragePostgreSQL.h b/src/Storages/StoragePostgreSQL.h
index bd5cd317c3d..a12b52e6e48 100644
--- a/src/Storages/StoragePostgreSQL.h
+++ b/src/Storages/StoragePostgreSQL.h
@@ -27,7 +27,8 @@ public:
         const ColumnsDescription & columns_,
         const ConstraintsDescription & constraints_,
         const String & comment,
-        const std::string & remote_table_schema_ = "");
+        const String & remote_table_schema_ = "",
+        const String & on_conflict = "");

     String getName() const override { return "PostgreSQL"; }

@@ -47,6 +48,7 @@ private:
     String remote_table_name;
     String remote_table_schema;
+    String on_conflict;

     postgres::PoolWithFailoverPtr pool;
 };
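With the extra engine argument in place, an upsert clause can be passed verbatim to PostgreSQL. A sketch with hypothetical connection parameters (the integration test further below uses 'ON CONFLICT DO NOTHING'):

CREATE TABLE pg_upsert (a UInt32, b String)
ENGINE = PostgreSQL('postgres1:5432', 'postgres', 'test_table', 'user', 'password', 'schema',
                    'ON CONFLICT (a) DO UPDATE SET b = EXCLUDED.b');

INSERT INTO pg_upsert VALUES (1, 'x');  -- conflicting keys now upsert instead of failing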
diff --git a/src/Storages/examples/CMakeLists.txt b/src/Storages/examples/CMakeLists.txt
index cb03ae751e3..103972a106f 100644
--- a/src/Storages/examples/CMakeLists.txt
+++ b/src/Storages/examples/CMakeLists.txt
@@ -22,4 +22,3 @@ target_link_libraries (transform_part_zk_nodes
     dbms
     string_utils
 )
-
diff --git a/src/Storages/fuzzers/CMakeLists.txt b/src/Storages/fuzzers/CMakeLists.txt
index 93d3d2926bd..d41e96868ad 100644
--- a/src/Storages/fuzzers/CMakeLists.txt
+++ b/src/Storages/fuzzers/CMakeLists.txt
@@ -1,11 +1,7 @@
-add_executable (mergetree_checksum_fuzzer
-    mergetree_checksum_fuzzer.cpp
-    "${ClickHouse_SOURCE_DIR}/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp"
-    "${ClickHouse_SOURCE_DIR}/src/Compression/CompressedReadBuffer.cpp"
-    "${ClickHouse_SOURCE_DIR}/src/Compression/CompressedWriteBuffer.cpp"
-)
-target_link_libraries (mergetree_checksum_fuzzer PRIVATE clickhouse_common_io fuzz_compression ${LIB_FUZZING_ENGINE})
+add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.cpp)
+# Look at comment around fuzz_compression target declaration
+target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})

 add_executable (columns_description_fuzzer columns_description_fuzzer.cpp)
 target_link_libraries (columns_description_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp
index d701728479b..568cc6171fd 100644
--- a/src/TableFunctions/TableFunctionPostgreSQL.cpp
+++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp
@@ -37,7 +37,8 @@ StoragePtr TableFunctionPostgreSQL::executeImpl(const ASTPtr & /*ast_function*/,
         columns,
         ConstraintsDescription{},
         String{},
-        remote_table_schema);
+        remote_table_schema,
+        on_conflict);

     result->startup();
     return result;
@@ -67,9 +68,9 @@ void TableFunctionPostgreSQL::parseArguments(const ASTPtr & ast_function, Contex

     ASTs & args = func_args.arguments->children;

-    if (args.size() < 5 || args.size() > 6)
-        throw Exception("Table function 'PostgreSQL' requires from 5 to 6 parameters: "
-                        "PostgreSQL('host:port', 'database', 'table', 'user', 'password', [, 'schema']).",
+    if (args.size() < 5 || args.size() > 7)
+        throw Exception("Table function 'PostgreSQL' requires from 5 to 7 parameters: "
+                        "PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, 'schema', 'ON CONFLICT ...']).",
                         ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

     for (auto & arg : args)
@@ -82,8 +83,10 @@ void TableFunctionPostgreSQL::parseArguments(const ASTPtr & ast_function, Contex

     remote_table_name = args[2]->as<ASTLiteral &>().value.safeGet<String>();

-    if (args.size() == 6)
+    if (args.size() >= 6)
         remote_table_schema = args[5]->as<ASTLiteral &>().value.safeGet<String>();
+    if (args.size() >= 7)
+        on_conflict = args[6]->as<ASTLiteral &>().value.safeGet<String>();

     connection_pool = std::make_shared<postgres::PoolWithFailover>(
         args[1]->as<ASTLiteral &>().value.safeGet<String>(),
diff --git a/src/TableFunctions/TableFunctionPostgreSQL.h b/src/TableFunctions/TableFunctionPostgreSQL.h
index c31d02fa955..e3810a0e391 100644
--- a/src/TableFunctions/TableFunctionPostgreSQL.h
+++ b/src/TableFunctions/TableFunctionPostgreSQL.h
@@ -28,7 +28,7 @@ private:
     void parseArguments(const ASTPtr & ast_function, ContextPtr context) override;

     String connection_str;
-    String remote_table_name, remote_table_schema;
+    String remote_table_name, remote_table_schema, on_conflict;

     postgres::PoolWithFailoverPtr connection_pool;
 };
diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 0d833e5fbe6..f3a41ba6a25 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -634,6 +634,7 @@ def run_tests_array(all_tests_with_params):
                             open(stdout_file).read().split('\n')[:100])
                         status += '\n'

+                    status += "\nstdout:\n{}\n".format(stdout)
                     status += 'Database: ' + testcase_args.testcase_database

                 elif stderr:
@@ -643,6 +644,7 @@ def run_tests_array(all_tests_with_params):
                     status += print_test_time(total_time)
                     status += " - having stderror:\n{}\n".format(
                         '\n'.join(stderr.split('\n')[:100]))
+                    status += "\nstdout:\n{}\n".format(stdout)
                     status += 'Database: ' + testcase_args.testcase_database
                 elif 'Exception' in stdout:
                     failures += 1
diff --git a/tests/config/config.d/merge_tree.xml b/tests/config/config.d/merge_tree.xml
new file mode 100644
index 00000000000..35af1fa65eb
--- /dev/null
+++ b/tests/config/config.d/merge_tree.xml
@@ -0,0 +1,5 @@
+
+
+        8
+
+
diff --git a/tests/config/install.sh b/tests/config/install.sh
index 571dff34018..e46ac62606b 100755
--- a/tests/config/install.sh
+++ b/tests/config/install.sh
@@ -31,6 +31,7 @@ ln -sf $SRC_PATH/config.d/max_concurrent_queries.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/test_cluster_with_incorrect_pw.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/keeper_port.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/logging_no_rotate.xml $DEST_SERVER_PATH/config.d/
+ln -sf $SRC_PATH/config.d/merge_tree.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/tcp_with_proxy.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/top_level_domains_lists.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/top_level_domains_path.xml $DEST_SERVER_PATH/config.d/
diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py
index 28a76631c0f..bb0e284eac9 100644
--- a/tests/integration/test_storage_postgresql/test.py
+++ b/tests/integration/test_storage_postgresql/test.py
@@ -291,7 +291,7 @@ def test_postgres_distributed(started_cluster):
     node2.query('DROP TABLE test_shards')
     node2.query('DROP TABLE test_replicas')

-
+
 def test_datetime_with_timezone(started_cluster):
     cursor = started_cluster.postgres_conn.cursor()
     cursor.execute("DROP
TABLE IF EXISTS test_timezone") @@ -328,6 +328,32 @@ def test_postgres_ndim(started_cluster): cursor.execute("DROP TABLE arr1, arr2") +def test_postgres_on_conflict(started_cluster): + cursor = started_cluster.postgres_conn.cursor() + table = 'test_conflict' + cursor.execute(f'DROP TABLE IF EXISTS {table}') + cursor.execute(f'CREATE TABLE {table} (a integer PRIMARY KEY, b text, c integer)') + + node1.query(''' + CREATE TABLE test_conflict (a UInt32, b String, c Int32) + ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_conflict', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING'); + ''') + node1.query(f''' INSERT INTO {table} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''') + node1.query(f''' INSERT INTO {table} SELECT number, concat('name_', toString(number)), 4 from numbers(100)''') + + check1 = f"SELECT count() FROM {table}" + assert (node1.query(check1)).rstrip() == '100' + + table_func = f'''postgresql('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres', '{table}', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING')''' + node1.query(f'''INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''') + node1.query(f'''INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''') + + check1 = f"SELECT count() FROM {table}" + assert (node1.query(check1)).rstrip() == '100' + + cursor.execute(f'DROP TABLE {table} ') + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj index 30c2c0eaf4f..745d88e97f7 100644 --- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj +++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj @@ -68,6 +68,7 @@ (do (c/exec :mkdir :-p common-prefix) (c/exec :mkdir :-p data-dir) + (c/exec :mkdir :-p coordination-data-dir) (c/exec :mkdir :-p logs-dir) (c/exec :mkdir :-p configs-dir) (c/exec :mkdir :-p sub-configs-dir) diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.reference b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.reference index e69de29bb2d..c3165c3d6ef 100644 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.reference +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.reference @@ -0,0 +1,2 @@ +Replication did not hang: synced all replicas of alter_table +Consistency: 1 diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 793fc8e9575..19f72120912 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -3,15 +3,17 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./replication.lib +. 
"$CURDIR"/replication.lib set -e $CLICKHOUSE_CLIENT -n -q " - DROP TABLE IF EXISTS alter_table; - DROP TABLE IF EXISTS alter_table2; + DROP TABLE IF EXISTS alter_table0; + DROP TABLE IF EXISTS alter_table1; - CREATE TABLE alter_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0; - CREATE TABLE alter_table2 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0 + CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0; + CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0 " function thread1() @@ -22,22 +24,22 @@ function thread1() function thread2() { - while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done + while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done } function thread3() { - while true; do $CLICKHOUSE_CLIENT -q "INSERT INTO alter_table SELECT rand(1), rand(2), 1 / rand(3), toString(rand(4)), [rand(5), rand(6)], rand(7) % 2 ? NULL : generateUUIDv4(), (rand(8), rand(9)) FROM numbers(100000)"; done + while true; do $CLICKHOUSE_CLIENT -q "INSERT INTO alter_table0 SELECT rand(1), rand(2), 1 / rand(3), toString(rand(4)), [rand(5), rand(6)], rand(7) % 2 ? 
NULL : generateUUIDv4(), (rand(8), rand(9)) FROM numbers(100000)"; done } function thread4() { - while true; do $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE alter_table FINAL"; done + while true; do $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE alter_table0 FINAL"; done } function thread5() { - while true; do $CLICKHOUSE_CLIENT -q "ALTER TABLE alter_table DELETE WHERE cityHash64(a,b,c,d,e,g) % 1048576 < 524288"; done + while true; do $CLICKHOUSE_CLIENT -q "ALTER TABLE alter_table0 DELETE WHERE cityHash64(a,b,c,d,e,g) % 1048576 < 524288"; done } # https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout @@ -74,8 +76,9 @@ timeout $TIMEOUT bash -c thread4 2> /dev/null & timeout $TIMEOUT bash -c thread5 2> /dev/null & wait +check_replication_consistency "alter_table" "count(), sum(a), sum(b), round(sum(c))" -$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table;" & -$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table2;" & +$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table0;" & +$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table1;" & wait diff --git a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference index e69de29bb2d..6e705f05f04 100644 --- a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference @@ -0,0 +1,2 @@ +Replication did not hang: synced all replicas of alter_table_ +Consistency: 1 diff --git a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh index 32fe31f68c6..bdad08fb0e1 100755 --- a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./replication.lib +. 
"$CURDIR"/replication.lib set -e @@ -99,6 +101,8 @@ timeout $TIMEOUT bash -c thread6 2>&1 | grep "was not completely removed from Zo wait +check_replication_consistency "alter_table_" "count(), sum(a), sum(b), round(sum(c))" + for i in {0..9}; do $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS alter_table_$i" 2>&1 | grep "was not completely removed from ZooKeeper" & done diff --git a/tests/queries/0_stateless/01070_modify_ttl_recalc_only.reference b/tests/queries/0_stateless/01070_modify_ttl_recalc_only.reference new file mode 100644 index 00000000000..fe9cba71c4c --- /dev/null +++ b/tests/queries/0_stateless/01070_modify_ttl_recalc_only.reference @@ -0,0 +1,68 @@ +2000-10-10 1 +2000-10-10 2 +2100-10-10 3 +2100-10-10 4 +2000-10-11 00:00:00 2000-10-11 00:00:00 +2000-10-11 00:00:00 2000-10-11 00:00:00 +2100-10-11 00:00:00 2100-10-11 00:00:00 +2100-10-11 00:00:00 2100-10-11 00:00:00 +2100-10-10 3 +2100-10-10 4 +============= +1 a +2 b +3 c +4 d +2000-01-01 00:00:00 2100-01-01 00:00:00 +1 a +3 c +============= +1 a +3 c +2000-01-01 00:00:00 2000-01-01 00:00:00 +============= +1 a +2 b +3 c +4 d +1 a +2 +3 c +4 +============= +1 a +2 +3 c +4 +1 +2 +3 +4 +============= +1 a +2 b +3 c +4 d +2000-01-01 00:00:00 2100-01-01 00:00:00 +1 a +2 b +4 d +============= +1 a +2 b +4 d +1 +2 +4 d +============= +1 a aa +2 b bb +3 c cc +4 d dd +1 a +2 b bb +3 cc +4 d +1 +============= +0 diff --git a/tests/queries/0_stateless/01070_modify_ttl_recalc_only.sql b/tests/queries/0_stateless/01070_modify_ttl_recalc_only.sql new file mode 100644 index 00000000000..aafed1a7bce --- /dev/null +++ b/tests/queries/0_stateless/01070_modify_ttl_recalc_only.sql @@ -0,0 +1,107 @@ +set mutations_sync = 2; + +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 4); + + +alter table ttl modify ttl d + interval 1 day; +select * from ttl order by a; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0 order by name asc; +optimize table ttl final; +select * from ttl order by a; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify ttl i % 2 = 0 ? 
toDate('2000-01-01') : toDate('2100-01-01'); +select * from ttl order by i; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +alter table ttl modify ttl toDate('2000-01-01'); +select * from ttl order by i; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify column s String ttl i % 2 = 0 ? today() - 10 : toDate('2100-01-01'); +select * from ttl order by i; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +alter table ttl modify column s String ttl toDate('2000-01-01'); +select * from ttl order by i; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (d Date, i Int, s String) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (toDate('2000-01-02'), 1, 'a') (toDate('2000-01-03'), 2, 'b') (toDate('2080-01-01'), 3, 'c') (toDate('2080-01-03'), 4, 'd'); + +alter table ttl modify ttl i % 3 = 0 ? toDate('2000-01-01') : toDate('2100-01-01'); +select i, s from ttl order by i; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0; +optimize table ttl final; +select i, s from ttl order by i; +select '============='; + +alter table ttl modify column s String ttl d + interval 1 month; +select i, s from ttl order by i; +optimize table ttl final; +select i, s from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String, t String) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (1, 'a', 'aa') (2, 'b', 'bb') (3, 'c', 'cc') (4, 'd', 'dd'); + +alter table ttl modify column s String ttl i % 3 = 0 ? today() - 10 : toDate('2100-01-01'), + modify column t String ttl i % 3 = 1 ? 
today() - 10 : toDate('2100-01-01'); + +select i, s, t from ttl order by i; +optimize table ttl final; +select i, s, t from ttl order by i; +-- MATERIALIZE TTL ran only once +select count() from system.mutations where database = currentDatabase() and table = 'ttl' and is_done; +select '============='; + +drop table if exists ttl; + +-- Nothing changed, don't run mutation +create table ttl (i Int, s String ttl toDate('2000-01-02')) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +alter table ttl modify column s String ttl toDate('2000-01-02'); +select count() from system.mutations where database = currentDatabase() and table = 'ttl' and is_done; + +drop table if exists ttl; diff --git a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference index af33a5bfc3f..4b640354c1b 100644 --- a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference +++ b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference @@ -1,6 +1,8 @@ Starting alters Finishing alters Equal number of columns +Replication did not hang: synced all replicas of concurrent_alter_add_drop_ +Consistency: 1 0 0 0 diff --git a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh index fd0b53cf122..4b67a03760b 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./replication.lib +. "$CURDIR"/replication.lib REPLICAS=3 @@ -101,6 +103,8 @@ while [[ $(timeout 120 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE concurrent_alte sleep 1 done +check_replication_consistency "concurrent_alter_add_drop_" "count(), sum(key), sum(cityHash64(value0))" + for i in $(seq $REPLICAS); do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA concurrent_alter_add_drop_$i" $CLICKHOUSE_CLIENT --query "SELECT COUNT() FROM system.mutations WHERE is_done = 0 and table = 'concurrent_alter_add_drop_$i'" diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.reference b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.reference index ff9c6824f00..435b1b1f1ae 100644 --- a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.reference +++ b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.reference @@ -5,6 +5,8 @@ 1725 Starting alters Finishing alters +Replication did not hang: synced all replicas of concurrent_alter_mt_ +Consistency: 1 1 0 1 diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh index 37d880bdce7..acbb01a1c68 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./replication.lib +. 
"$CURDIR"/replication.lib REPLICAS=5 @@ -112,6 +114,8 @@ while [[ $(timeout 120 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE concurrent_alte sleep 1 done +check_replication_consistency "concurrent_alter_mt_" "count(), sum(key), sum(cityHash64(value1)), sum(cityHash64(value2))" + for i in $(seq $REPLICAS); do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA concurrent_alter_mt_$i" $CLICKHOUSE_CLIENT --query "SELECT SUM(toUInt64(value1)) > $INITIAL_SUM FROM concurrent_alter_mt_$i" diff --git a/tests/queries/0_stateless/01154_move_partition_long.reference b/tests/queries/0_stateless/01154_move_partition_long.reference index c6d9204ed02..37f0181524e 100644 --- a/tests/queries/0_stateless/01154_move_partition_long.reference +++ b/tests/queries/0_stateless/01154_move_partition_long.reference @@ -1 +1,3 @@ -Replication did not hang +Replication did not hang: synced all replicas of dst_ +Consistency: 1 +Replication did not hang: synced all replicas of src_ diff --git a/tests/queries/0_stateless/01154_move_partition_long.sh b/tests/queries/0_stateless/01154_move_partition_long.sh index 1b5985b9942..541550160f2 100755 --- a/tests/queries/0_stateless/01154_move_partition_long.sh +++ b/tests/queries/0_stateless/01154_move_partition_long.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./replication.lib +. "$CURDIR"/replication.lib declare -A engines engines[0]="MergeTree" @@ -116,13 +118,8 @@ timeout $TIMEOUT bash -c optimize_thread & timeout $TIMEOUT bash -c drop_part_thread & wait -for ((i=0; i<16; i++)) do - # The size of log is big, so increase timeout. - $CLICKHOUSE_CLIENT --receive_timeout 600 -q "SYSTEM SYNC REPLICA dst_$i" & - $CLICKHOUSE_CLIENT --receive_timeout 600 -q "SYSTEM SYNC REPLICA src_$i" 2>/dev/null & -done -wait -echo "Replication did not hang" +check_replication_consistency "dst_" "count(), sum(p), sum(k), sum(v)" +try_sync_replicas "src_" for ((i=0; i<16; i++)) do $CLICKHOUSE_CLIENT -q "DROP TABLE dst_$i" 2>&1| grep -Fv "is already started to be removing" & diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.reference b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.reference index f7c65e36be4..c68053e8270 100644 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.reference +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.reference @@ -14,3 +14,5 @@ CREATE TABLE default.concurrent_kill_4\n(\n `key` UInt64,\n `value` Int64\ Metadata version on replica 5 equal with first replica, OK CREATE TABLE default.concurrent_kill_5\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01593_concurrent_alter_mutations_kill_many_replicas_long_default/{shard}\', \'{replica}5\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 499999500000 +Replication did not hang: synced all replicas of concurrent_kill_ +Consistency: 1 diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh index e263750c431..bb04facba15 100755 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh +++ 
b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./replication.lib +. "$CURDIR"/replication.lib REPLICAS=5 @@ -59,10 +61,6 @@ timeout $TIMEOUT bash -c kill_mutation_thread 2> /dev/null & wait -for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA concurrent_kill_$i" -done - # with timeout alter query can be not finished yet, so to execute new alter # we use retries counter=0 @@ -80,7 +78,7 @@ while true; do done -metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/r1$i/' and name = 'metadata_version'") +metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/r11/' and name = 'metadata_version'") for i in $(seq $REPLICAS); do replica_metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/r1$i/' and name = 'metadata_version'") @@ -95,6 +93,8 @@ done $CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM concurrent_kill_1" +check_replication_consistency "concurrent_kill_" "count(), sum(key), sum(cityHash64(value))" + for i in $(seq $REPLICAS); do $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS concurrent_kill_$i" done diff --git a/tests/queries/0_stateless/01720_join_implicit_cast.sql.j2 b/tests/queries/0_stateless/01720_join_implicit_cast.sql.j2 index f2b13e9824b..d1de6d06593 100644 --- a/tests/queries/0_stateless/01720_join_implicit_cast.sql.j2 +++ b/tests/queries/0_stateless/01720_join_implicit_cast.sql.j2 @@ -42,7 +42,6 @@ SELECT sum(a) + sum(t_ab2.a) - 1, sum(b) + sum(t_ab2.b) - 1 FROM t_ab1 RIGHT JOI SELECT sum(a) + sum(t_ab2.a) - 1, sum(b) + sum(t_ab2.b) - 1 FROM t_ab1 INNER JOIN t_ab2 ON (t_ab1.a == t_ab2.a AND t_ab1.b == t_ab2.b); SELECT '= types ='; - SELECT any(toTypeName(a)) == 'Int32' AND any(toTypeName(b)) == 'Nullable(Int64)' FROM t_ab1 FULL JOIN t_ab2 USING (a, b); SELECT any(toTypeName(a)) == 'Int32' AND any(toTypeName(b)) == 'Nullable(Int64)' FROM t_ab1 LEFT JOIN t_ab2 USING (a, b); SELECT any(toTypeName(a)) == 'Int32' AND any(toTypeName(b)) == 'Nullable(Int64)' FROM t_ab1 RIGHT JOIN t_ab2 USING (a, b); diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference index d00491fd7e5..e5a8ecd20b4 100644 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference @@ -1 +1,3 @@ +Replication did not hang: synced all replicas of ttl_table +Consistency: 1 1 diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index 13086879e0d..80022bd472d 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh +# shellcheck source=./replication.lib +. "$CURDIR"/replication.lib NUM_REPLICAS=5 @@ -58,14 +60,16 @@ timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & wait - for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA ttl_table$i" + # disable ttl merges before checking consistency + $CLICKHOUSE_CLIENT --query "ALTER TABLE ttl_table$i MODIFY SETTING max_replicated_merges_with_ttl_in_queue=0" done +check_replication_consistency "ttl_table" "count(), sum(toUInt64(key))" $CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}' and type='MERGE_PARTS' and last_exception != '' FORMAT Vertical" $CLICKHOUSE_CLIENT --query "SELECT COUNT() > 0 FROM system.part_log where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}'" + for i in $(seq 1 $NUM_REPLICAS); do $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ttl_table$i" & done diff --git a/tests/queries/0_stateless/02009_from_infile.sh b/tests/queries/0_stateless/02009_from_infile.sh index fa3664eb0f8..578ac14f558 100755 --- a/tests/queries/0_stateless/02009_from_infile.sh +++ b/tests/queries/0_stateless/02009_from_infile.sh @@ -9,14 +9,12 @@ set -e [ -e "${CLICKHOUSE_TMP}"/test_infile.gz ] && rm "${CLICKHOUSE_TMP}"/test_infile.gz [ -e "${CLICKHOUSE_TMP}"/test_infile ] && rm "${CLICKHOUSE_TMP}"/test_infile - echo "Hello" > "${CLICKHOUSE_TMP}"/test_infile gzip "${CLICKHOUSE_TMP}"/test_infile ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_infile;" ${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_infile (word String) ENGINE=Memory();" - ${CLICKHOUSE_CLIENT} --query "INSERT INTO test_infile FROM INFILE '${CLICKHOUSE_TMP}/test_infile.gz' FORMAT CSV;" ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_infile;" @@ -28,4 +26,3 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=CREATE" -d 'TABLE test_infile_ur ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "INSERT INTO test_infile_url FROM INFILE '${CLICKHOUSE_TMP}/test_infile.gz' FORMAT CSV" 2>&1 | grep -q "UNKNOWN_TYPE_OF_QUERY" && echo "Correct URL" || echo 'Fail' ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT x FROM test_infile_url' ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=DROP+TABLE" -d 'test_infile_url' - diff --git a/tests/queries/0_stateless/02012_changed_enum_type_non_replicated.reference b/tests/queries/0_stateless/02012_changed_enum_type_non_replicated.reference new file mode 100644 index 00000000000..abb96cefb59 --- /dev/null +++ b/tests/queries/0_stateless/02012_changed_enum_type_non_replicated.reference @@ -0,0 +1,4 @@ +one +one +two +two diff --git a/tests/queries/0_stateless/02012_changed_enum_type_non_replicated.sql b/tests/queries/0_stateless/02012_changed_enum_type_non_replicated.sql new file mode 100644 index 00000000000..9e25df0f41a --- /dev/null +++ b/tests/queries/0_stateless/02012_changed_enum_type_non_replicated.sql @@ -0,0 +1,8 @@ +create table enum_alter_issue (a Enum8('one' = 1, 'two' = 2)) engine = MergeTree() ORDER BY a; +insert into enum_alter_issue values ('one'), ('two'); +alter table enum_alter_issue modify column a Enum8('one' = 1, 'two' = 2, 'three' = 3); +insert into enum_alter_issue values ('one'), ('two'); +alter table enum_alter_issue detach partition id 'all'; +alter table enum_alter_issue attach partition id 'all'; +select * from enum_alter_issue order by a; +drop table enum_alter_issue; diff --git 
a/tests/queries/0_stateless/02012_zookeeper_changed_enum_type.reference b/tests/queries/0_stateless/02012_zookeeper_changed_enum_type.reference new file mode 100644 index 00000000000..a83c714a5cf --- /dev/null +++ b/tests/queries/0_stateless/02012_zookeeper_changed_enum_type.reference @@ -0,0 +1,4 @@ +one 1 +two 2 +one 3 +two 4 diff --git a/tests/queries/0_stateless/02012_zookeeper_changed_enum_type.sql b/tests/queries/0_stateless/02012_zookeeper_changed_enum_type.sql new file mode 100644 index 00000000000..0c95c7ff403 --- /dev/null +++ b/tests/queries/0_stateless/02012_zookeeper_changed_enum_type.sql @@ -0,0 +1,13 @@ +create table enum_alter_issue (a Enum8('one' = 1, 'two' = 2), b Int) +engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02012/enum_alter_issue', 'r1') +ORDER BY a; + +insert into enum_alter_issue values ('one', 1), ('two', 2); +alter table enum_alter_issue modify column a Enum8('one' = 1, 'two' = 2, 'three' = 3); +insert into enum_alter_issue values ('one', 3), ('two', 4); + +alter table enum_alter_issue detach partition id 'all'; +alter table enum_alter_issue attach partition id 'all'; +select * from enum_alter_issue order by b; + +drop table enum_alter_issue; diff --git a/tests/queries/0_stateless/02012_zookeeper_changed_enum_type_incompatible.reference b/tests/queries/0_stateless/02012_zookeeper_changed_enum_type_incompatible.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02012_zookeeper_changed_enum_type_incompatible.sql b/tests/queries/0_stateless/02012_zookeeper_changed_enum_type_incompatible.sql new file mode 100644 index 00000000000..e86023c96fa --- /dev/null +++ b/tests/queries/0_stateless/02012_zookeeper_changed_enum_type_incompatible.sql @@ -0,0 +1,12 @@ +drop table if exists enum_alter_issue; +create table enum_alter_issue (a Enum16('one' = 1, 'two' = 2), b Int) +engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02012/enum_alter_issue', 'r2') +ORDER BY b; + +insert into enum_alter_issue values ('one', 1), ('two', 1); +alter table enum_alter_issue detach partition id 'all'; +alter table enum_alter_issue modify column a Enum8('one' = 1, 'two' = 2, 'three' = 3); +insert into enum_alter_issue values ('one', 1), ('two', 1); + +alter table enum_alter_issue attach partition id 'all'; -- {serverError TYPE_MISMATCH} +drop table enum_alter_issue; diff --git a/tests/queries/0_stateless/02014_map_different_keys.reference b/tests/queries/0_stateless/02014_map_different_keys.reference new file mode 100644 index 00000000000..8af8f57f9df --- /dev/null +++ b/tests/queries/0_stateless/02014_map_different_keys.reference @@ -0,0 +1,22 @@ +...const maps... +0 +2 +0 +4 +0 +0 +2 +0 +4 +0 +4 +4 +...int keys... +foo bar bar + foo foo +...string keys... 
+foo foo +foo foo +foo foo +bar bar +0 diff --git a/tests/queries/0_stateless/02014_map_different_keys.sql b/tests/queries/0_stateless/02014_map_different_keys.sql new file mode 100644 index 00000000000..0998a9283f7 --- /dev/null +++ b/tests/queries/0_stateless/02014_map_different_keys.sql @@ -0,0 +1,32 @@ +SELECT '...const maps...'; + +WITH map(1, 2, 3, 4) AS m SELECT m[number] FROM numbers(5); +WITH map('1', 2, '3', 4) AS m SELECT m[toString(number)] FROM numbers(5); + +WITH map(1, 2, 3, 4) AS m SELECT m[3]; +WITH map('1', 2, '3', 4) AS m SELECT m['3']; + +DROP TABLE IF EXISTS t_map_02014; + +CREATE TABLE t_map_02014(i1 UInt64, i2 Int32, m1 Map(UInt32, String), m2 Map(Int8, String), m3 Map(Int128, String)) ENGINE = Memory; +INSERT INTO t_map_02014 VALUES (1, -1, map(1, 'foo', 2, 'bar'), map(-1, 'foo', 1, 'bar'), map(-1, 'foo', 1, 'bar')); + +SELECT '...int keys...'; + +SELECT m1[i1], m2[i1], m3[i1] FROM t_map_02014; +SELECT m1[i2], m2[i2], m3[i2] FROM t_map_02014; + +DROP TABLE IF EXISTS t_map_02014; + +CREATE TABLE t_map_02014(s String, fs FixedString(3), m1 Map(String, String), m2 Map(FixedString(3), String)) ENGINE = Memory; +INSERT INTO t_map_02014 VALUES ('aaa', 'bbb', map('aaa', 'foo', 'bbb', 'bar'), map('aaa', 'foo', 'bbb', 'bar')); + +SELECT '...string keys...'; + +SELECT m1['aaa'], m2['aaa'] FROM t_map_02014; +SELECT m1['aaa'::FixedString(3)], m2['aaa'::FixedString(3)] FROM t_map_02014; +SELECT m1[s], m2[s] FROM t_map_02014; +SELECT m1[fs], m2[fs] FROM t_map_02014; +SELECT length(m2['aaa'::FixedString(4)]) FROM t_map_02014; + +DROP TABLE IF EXISTS t_map_02014; diff --git a/tests/queries/0_stateless/mergetree_mutations.lib b/tests/queries/0_stateless/mergetree_mutations.lib index d10ac883764..7d02f9f1b41 100644 --- a/tests/queries/0_stateless/mergetree_mutations.lib +++ b/tests/queries/0_stateless/mergetree_mutations.lib @@ -20,3 +20,23 @@ function wait_for_mutation() done } + +function wait_for_all_mutations() +{ + local table=$1 + local database=$2 + database=${database:="${CLICKHOUSE_DATABASE}"} + + for i in {1..200} + do + sleep 1 + if [[ $(${CLICKHOUSE_CLIENT} --query="SELECT coalesce(minOrNull(is_done), 1) FROM system.mutations WHERE database='$database' AND table like '$table'") -eq 1 ]]; then + break + fi + + if [[ $i -eq 200 ]]; then + echo "Timed out while waiting for mutation to execute!" + fi + + done +} diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib new file mode 100755 index 00000000000..8fe300b59e8 --- /dev/null +++ b/tests/queries/0_stateless/replication.lib @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# shellcheck source=./mergetree_mutations.lib +. "$CURDIR"/mergetree_mutations.lib + +function try_sync_replicas() +{ + table_name_prefix=$1 + + readarray -t empty_partitions_arr < <(${CLICKHOUSE_CLIENT} -q \ + "SELECT DISTINCT substr(new_part_name, 1, position(new_part_name, '_') - 1) AS partition_id + FROM system.replication_queue + WHERE (database = currentDatabase()) AND (table LIKE '$table_name_prefix%') AND (last_exception LIKE '%No active replica has part%') AND (partition_id NOT IN ( + SELECT partition_id + FROM system.parts + WHERE (database = currentDatabase()) AND (table LIKE '$table_name_prefix%') + ))") + readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' AND engine like '%Replicated%'") + + for t in "${tables_arr[@]}" + do + for p in "${empty_partitions_arr[@]}" + do + # Avoid "Empty part ... 
is not created instead of lost part because there are no parts in partition"
+            $CLICKHOUSE_CLIENT -q "ALTER TABLE $t DROP PARTITION ID '$p'" 2>/dev/null
+        done
+    done
+
+    for t in "${tables_arr[@]}"
+    do
+        # The size of log may be big, so increase timeout.
+        $CLICKHOUSE_CLIENT --receive_timeout 400 -q "SYSTEM SYNC REPLICA $t" || $CLICKHOUSE_CLIENT -q \
+            "select 'sync failed, queue:', * from system.replication_queue where database=currentDatabase() and table='$t' order by database, table, node_name" &
+    done
+    wait
+    echo "Replication did not hang: synced all replicas of $table_name_prefix"
+}
+
+function check_replication_consistency()
+{
+    table_name_prefix=$1
+    check_query_part=$2
+
+    # Do not check anything if all replicas are readonly,
+    # because in this case all replicas are probably lost (it may happen and it's not a bug)
+    res=$($CLICKHOUSE_CLIENT -q "SELECT count() - sum(is_readonly) FROM system.replicas WHERE database=currentDatabase() AND table LIKE '$table_name_prefix%'")
+    if [ "$res" -eq 0 ]; then
+        # Print dummy lines
+        echo "Replication did not hang: synced all replicas of $table_name_prefix"
+        echo "Consistency: 1"
+        return 0
+    fi
+
+    # Trigger pullLogsToQueue(...) and updateMutations(...) on some replica to make it pull all mutations, so it will be possible to kill them
+    some_table=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' ORDER BY rand() LIMIT 1")
+    $CLICKHOUSE_CLIENT --receive_timeout 3 -q "SYSTEM SYNC REPLICA $some_table" 1>/dev/null 2>/dev/null ||:
+    some_table=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' ORDER BY rand() LIMIT 1")
+    $CLICKHOUSE_CLIENT --receive_timeout 3 -q "SYSTEM SYNC REPLICA $some_table" 1>/dev/null 2>/dev/null ||:
+
+    # Forcefully cancel mutations to avoid waiting for them to finish
+    ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table like '$table_name_prefix%'" > /dev/null
+
+    # SYNC REPLICA is not enough if some MUTATE_PARTs are not assigned yet
+    wait_for_all_mutations "$table_name_prefix%"
+
+    try_sync_replicas "$table_name_prefix"
+
+    res=$($CLICKHOUSE_CLIENT -q \
+        "SELECT
+            if((countDistinct(data) as c) == 0, 1, c)
+        FROM
+        (
+            SELECT _table, ($check_query_part) AS data
+            FROM merge(currentDatabase(), '$table_name_prefix') GROUP BY _table
+        )")
+
+    echo "Consistency: $res"
+    if [ "$res" -ne 1 ]; then
+        echo "Replicas have diverged:"
+        $CLICKHOUSE_CLIENT -q "select 'data', _table, $check_query_part, arraySort(groupArrayDistinct(_part)) from merge(currentDatabase(), '$table_name_prefix') group by _table order by _table"
+        $CLICKHOUSE_CLIENT -q "select 'queue', * from system.replication_queue where database=currentDatabase() and table like '$table_name_prefix%' order by database, table, node_name"
+        $CLICKHOUSE_CLIENT -q "select 'mutations', * from system.mutations where database=currentDatabase() and table like '$table_name_prefix%' order by database, table, mutation_id"
+        $CLICKHOUSE_CLIENT -q "select 'parts', * from system.parts where database=currentDatabase() and table like '$table_name_prefix%' order by database, table, name"
+        echo "Good luck with debugging..."
+    fi
+}
+
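The consistency check boils down to a single query over the merge() table function: all replicas of one logical table are unioned and grouped by _table, so in-sync replicas collapse into identical aggregate rows. For example, for the alter_table0/alter_table1 pair from the 00992 test above:

SELECT _table, count(), sum(a), sum(b), round(sum(c))
FROM merge(currentDatabase(), 'alter_table')
GROUP BY _table
-- the replicas are consistent if and only if both rows carry the same aggregates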
diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index 0aa4dc59098..335ed370b9b 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -161,6 +161,9 @@
         "00980_zookeeper_merge_tree_alter_settings",
         "00980_merge_alter_settings",
         "02009_array_join_partition",
+        "02012_changed_enum_type_non_replicated",
+        "02012_zookeeper_changed_enum_type",
+        "02012_zookeeper_changed_enum_type_incompatible",
         /// Old syntax is not allowed
         "01062_alter_on_mutataion_zookeeper",
         "00925_zookeeper_empty_replicated_merge_tree_optimize_final",
@@ -322,6 +325,7 @@
         "01076_parallel_alter_replicated_zookeeper",
         "01079_parallel_alter_add_drop_column_zookeeper",
         "01079_parallel_alter_detach_table_zookeeper",
+        "01079_parallel_alter_modify_zookeeper_long",
         "01080_check_for_error_incorrect_size_of_nested_column",
         "01083_expressions_in_engine_arguments",
         "01084_regexp_empty",